#define NUM_OF_PINS 32
void qe_config_iopin(u8 port, u8 pin, int dir, int open_drain, int assign)
{
- u32 pin_2bit_mask;
- u32 pin_2bit_dir;
- u32 pin_2bit_assign;
- u32 pin_1bit_mask;
- u32 tmp_val;
- volatile immap_t *im = (volatile immap_t *)CONFIG_SYS_IMMR;
- volatile qepio83xx_t *par_io = (volatile qepio83xx_t *)&im->qepio;
+ u32 pin_2bit_mask;
+ u32 pin_2bit_dir;
+ u32 pin_2bit_assign;
+ u32 pin_1bit_mask;
+ u32 tmp_val;
+ immap_t *im;
+ qepio83xx_t *par_io;
+ int offset;
+
+ im = (immap_t *)CONFIG_SYS_IMMR;
+ par_io = (qepio83xx_t *)&im->qepio;
+ offset = (NUM_OF_PINS - (pin % (NUM_OF_PINS / 2) + 1) * 2);
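+ /*
+ * Worked example: for pin 5, offset = 32 - (5 % 16 + 1) * 2 = 20,
+ * so the pin's 2-bit field sits 20 bits above the LSB of dir1/ppar1
+ * (pin 0 occupies bits 31:30, pin 15 bits 1:0; pins 16-31 use the
+ * second register of each pair).
+ */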
/* Calculate pin location and 2bit mask and dir */
- pin_2bit_mask = (u32)(0x3 << (NUM_OF_PINS-(pin%(NUM_OF_PINS/2)+1)*2));
- pin_2bit_dir = (u32)(dir << (NUM_OF_PINS-(pin%(NUM_OF_PINS/2)+1)*2));
+ pin_2bit_mask = (u32)(0x3 << offset);
+ pin_2bit_dir = (u32)(dir << offset);
/* Setup the direction */
- tmp_val = (pin > (NUM_OF_PINS/2) - 1) ? \
+ tmp_val = (pin > (NUM_OF_PINS / 2) - 1) ?
in_be32(&par_io->ioport[port].dir2) :
in_be32(&par_io->ioport[port].dir1);
- if (pin > (NUM_OF_PINS/2) -1) {
- out_be32(&par_io->ioport[port].dir2, ~pin_2bit_mask & tmp_val);
- out_be32(&par_io->ioport[port].dir2, pin_2bit_dir | tmp_val);
+ if (pin > (NUM_OF_PINS / 2) - 1) {
+ out_be32(&par_io->ioport[port].dir2, ~pin_2bit_mask & tmp_val);
+ out_be32(&par_io->ioport[port].dir2, pin_2bit_dir | tmp_val);
} else {
- out_be32(&par_io->ioport[port].dir1, ~pin_2bit_mask & tmp_val);
- out_be32(&par_io->ioport[port].dir1, pin_2bit_dir | tmp_val);
+ out_be32(&par_io->ioport[port].dir1, ~pin_2bit_mask & tmp_val);
+ out_be32(&par_io->ioport[port].dir1, pin_2bit_dir | tmp_val);
}
/* Calculate pin location for 1bit mask */
- pin_1bit_mask = (u32)(1 << (NUM_OF_PINS - (pin+1)));
+ pin_1bit_mask = (u32)(1 << (NUM_OF_PINS - (pin + 1)));
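+ /* e.g. pin 0 maps to bit 31 (MSB) of podr, pin 31 to bit 0 */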
/* Setup the open drain */
tmp_val = in_be32(&par_io->ioport[port].podr);
- if (open_drain) {
- out_be32(&par_io->ioport[port].podr, pin_1bit_mask | tmp_val);
- } else {
- out_be32(&par_io->ioport[port].podr, ~pin_1bit_mask & tmp_val);
- }
+ if (open_drain)
+ out_be32(&par_io->ioport[port].podr, pin_1bit_mask | tmp_val);
+ else
+ out_be32(&par_io->ioport[port].podr, ~pin_1bit_mask & tmp_val);
/* Setup the assignment */
- tmp_val = (pin > (NUM_OF_PINS/2) - 1) ?
- in_be32(&par_io->ioport[port].ppar2):
+ tmp_val = (pin > (NUM_OF_PINS / 2) - 1) ?
+ in_be32(&par_io->ioport[port].ppar2) :
in_be32(&par_io->ioport[port].ppar1);
- pin_2bit_assign = (u32)(assign
- << (NUM_OF_PINS - (pin%(NUM_OF_PINS/2)+1)*2));
+ pin_2bit_assign = (u32)(assign << offset);
/* Clear and set 2 bits mask */
- if (pin > (NUM_OF_PINS/2) - 1) {
- out_be32(&par_io->ioport[port].ppar2, ~pin_2bit_mask & tmp_val);
- out_be32(&par_io->ioport[port].ppar2, pin_2bit_assign | tmp_val);
+ if (pin > (NUM_OF_PINS / 2) - 1) {
+ out_be32(&par_io->ioport[port].ppar2, ~pin_2bit_mask & tmp_val);
+ out_be32(&par_io->ioport[port].ppar2, pin_2bit_assign | tmp_val);
} else {
- out_be32(&par_io->ioport[port].ppar1, ~pin_2bit_mask & tmp_val);
- out_be32(&par_io->ioport[port].ppar1, pin_2bit_assign | tmp_val);
+ out_be32(&par_io->ioport[port].ppar1, ~pin_2bit_mask & tmp_val);
+ out_be32(&par_io->ioport[port].ppar1, pin_2bit_assign | tmp_val);
}
}
#define MPC85xx_DEVDISR_QE_DISABLE 0x1
-qe_map_t *qe_immr = NULL;
+qe_map_t *qe_immr;
#ifdef CONFIG_QE
static qe_snum_t snums[QE_NUM_OF_SNUM];
#endif
u32 cecr;
if (cmd == QE_RESET) {
- out_be32(&qe_immr->cp.cecr,(u32) (cmd | QE_CR_FLG));
+ out_be32(&qe_immr->cp.cecr, (u32)(cmd | QE_CR_FLG));
} else {
out_be32(&qe_immr->cp.cecdr, cmd_data);
out_be32(&qe_immr->cp.cecr, (sbc | QE_CR_FLG |
- ((u32) mcn<<QE_CR_PROTOCOL_SHIFT) | cmd));
+ ((u32)mcn << QE_CR_PROTOCOL_SHIFT) | cmd));
}
/* Wait for the QE_CR_FLG to clear */
do {
cecr = in_be32(&qe_immr->cp.cecr);
} while (cecr & QE_CR_FLG);
-
- return;
}
#ifdef CONFIG_QE
if (off != 0)
gd->arch.mp_alloc_base += (align - off);
- if ((off = size & align_mask) != 0)
+ off = size & align_mask;
+ if (off != 0)
size += (align - off);
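+ /*
+ * e.g. (assuming align_mask == align - 1) a size of 0x1234 with a
+ * 0x40 alignment is rounded up to 0x1240
+ */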
if ((gd->arch.mp_alloc_base + size) >= gd->arch.mp_alloc_top) {
gd->arch.mp_alloc_base = savebase;
- printf("%s: ran out of ram.\n", __FUNCTION__);
+ printf("%s: ran out of ram.\n", __func__);
}
retloc = gd->arch.mp_alloc_base;
#ifdef CONFIG_QE
static void qe_sdma_init(void)
{
- volatile sdma_t *p;
- uint sdma_buffer_base;
+ sdma_t *p;
+ uint sdma_buffer_base;
- p = (volatile sdma_t *)&qe_immr->sdma;
+ p = (sdma_t *)&qe_immr->sdma;
/* All of DMA transaction in bus 1 */
out_be32(&p->sdaqr, 0);
qe_upload_firmware((const void *)CONFIG_SYS_QE_FW_ADDR);
/* enable the microcode in IRAM */
- out_be32(&qe_immr->iram.iready,QE_IRAM_READY);
+ out_be32(&qe_immr->iram.iready, QE_IRAM_READY);
#endif
gd->arch.mp_alloc_base = QE_DATAONLY_BASE;
void *addr = (void *)CONFIG_SYS_QE_FW_ADDR;
if (src == BOOT_SOURCE_IFC_NOR)
- addr = (void *)(CONFIG_SYS_QE_FW_ADDR + CONFIG_SYS_FSL_IFC_BASE);
+ addr = (void *)(CONFIG_SYS_QE_FW_ADDR +
+ CONFIG_SYS_FSL_IFC_BASE);
if (src == BOOT_SOURCE_QSPI_NOR)
- addr = (void *)(CONFIG_SYS_QE_FW_ADDR + CONFIG_SYS_FSL_QSPI_BASE);
+ addr = (void *)(CONFIG_SYS_QE_FW_ADDR +
+ CONFIG_SYS_FSL_QSPI_BASE);
if (src == BOOT_SOURCE_SD_MMC) {
int dev = CONFIG_SYS_MMC_ENV_DEV;
void qe_reset(void)
{
qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
- (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
#ifdef CONFIG_QE
u32 cecr;
out_be32(&qe_immr->cp.cecdr, para_ram_base);
- out_be32(&qe_immr->cp.cecr, ((u32) snum<<QE_CR_ASSIGN_PAGE_SNUM_SHIFT)
+ out_be32(&qe_immr->cp.cecr, ((u32)snum << QE_CR_ASSIGN_PAGE_SNUM_SHIFT)
| QE_CR_FLG | QE_ASSIGN_PAGE);
/* Wait for the QE_CR_FLG to clear */
do {
cecr = in_be32(&qe_immr->cp.cecr);
- } while (cecr & QE_CR_FLG );
-
- return;
+ } while (cecr & QE_CR_FLG);
}
#endif
/*
* brg: 0~15 as BRG1~BRG16
- rate: baud rate
+ * rate: baud rate
* BRG input clock comes from the BRGCLK (internal clock generated from
- the QE clock, it is one-half of the QE clock), If need the clock source
- from CLKn pin, we have te change the function.
+ * the QE clock; it is one-half of the QE clock). If the clock source needs
+ * to come from the CLKn pin, we have to change this function.
*/
#define BRG_CLK (gd->arch.brg_clk)
#ifdef CONFIG_QE
int qe_set_brg(uint brg, uint rate)
{
- volatile uint *bp;
- u32 divisor;
- int div16 = 0;
+ uint *bp;
+ u32 divisor;
+ u32 val;
+ int div16 = 0;
if (brg >= QE_NUM_OF_BRGS)
return -EINVAL;
+
bp = (uint *)&qe_immr->brg.brgc1;
bp += brg;
divisor /= 16;
}
- *bp = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
- __asm__ __volatile__("sync");
+ /*
+ * Program the divisor, the enable bit and (when prescaling) the
+ * divide-by-16 flag in a single register write instead of two
+ * read-modify-write accesses separated by sync.
+ */
- if (div16) {
- *bp |= QE_BRGC_DIV16;
- __asm__ __volatile__("sync");
- }
+ val = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
+ if (div16)
+ val |= QE_BRGC_DIV16;
+
+ out_be32(bp, val);
return 0;
}
#endif
-/* Set ethernet MII clock master
-*/
+/* Set ethernet MII clock master */
int qe_set_mii_clk_src(int ucc_num)
{
u32 cmxgcr;
/* check if the UCC number is in range. */
- if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
- printf("%s: ucc num not in ranges\n", __FUNCTION__);
+ if ((ucc_num > UCC_MAX_NUM - 1) || ucc_num < 0) {
+ printf("%s: ucc num not in ranges\n", __func__);
return -EINVAL;
}
cmxgcr = in_be32(&qe_immr->qmx.cmxgcr);
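+ /*
+ * The MII_ENET_MNG field of CMXGCR selects which UCC supplies the
+ * MII management clock.
+ */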
cmxgcr &= ~QE_CMXGCR_MII_ENET_MNG_MASK;
- cmxgcr |= (ucc_num <<QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ cmxgcr |= (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
out_be32(&qe_immr->qmx.cmxgcr, cmxgcr);
return 0;
* the actual uploading of the microcode.
*/
static void qe_upload_microcode(const void *base,
- const struct qe_microcode *ucode)
+ const struct qe_microcode *ucode)
{
const u32 *code = base + be32_to_cpu(ucode->code_offset);
unsigned int i;
}
/* Validate some of the fields */
- if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ if (firmware->count < 1 || firmware->count > MAX_QE_RISC) {
printf("Invalid data\n");
return -EINVAL;
}
* function isn't available unless you turn on JFFS support.
*/
crc = be32_to_cpu(*(u32 *)((void *)firmware + calc_size));
- if (crc != (crc32(-1, (const void *) firmware, calc_size) ^ -1)) {
+ if (crc != (crc32(-1, (const void *)firmware, calc_size) ^ -1)) {
printf("Firmware CRC is invalid\n");
return -EIO;
}
*/
if (!firmware->split) {
out_be16(&qe_immr->cp.cercr,
- in_be16(&qe_immr->cp.cercr) | QE_CP_CERCR_CIR);
+ in_be16(&qe_immr->cp.cercr) | QE_CP_CERCR_CIR);
}
if (firmware->soc.model)
printf("Firmware '%s' for %u V%u.%u\n",
- firmware->id, be16_to_cpu(firmware->soc.model),
+ firmware->id, be16_to_cpu(firmware->soc.model),
firmware->soc.major, firmware->soc.minor);
else
printf("Firmware '%s'\n", firmware->id);
strncpy(qe_firmware_info.id, (char *)firmware->id, 62);
qe_firmware_info.extended_modes = firmware->extended_modes;
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
- sizeof(firmware->vtraps));
+ sizeof(firmware->vtraps));
qe_firmware_uploaded = 1;
/* Loop through each microcode. */
}
/* Validate some of the fields */
- if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ if (firmware->count < 1 || firmware->count > MAX_QE_RISC) {
printf("Invalid data\n");
return -EINVAL;
}
if (argc > 3) {
ulong length = simple_strtoul(argv[3], NULL, 16);
- struct qe_firmware *firmware = (void *) addr;
+ struct qe_firmware *firmware = (void *)addr;
if (length != be32_to_cpu(firmware->header.length)) {
printf("Length mismatch\n");
}
}
- return qe_upload_firmware((const struct qe_firmware *) addr);
+ return qe_upload_firmware((const struct qe_firmware *)addr);
}
return cmd_usage(cmdtp);
U_BOOT_CMD(
qe, 4, 0, qe_cmd,
"QUICC Engine commands",
- "fw <addr> [<length>] - Upload firmware binary at address <addr> to "
- "the QE,\n"
+ "fw <addr> [<length>] - Upload firmware binary at address <addr> to the QE,\n"
"\twith optional length <length> verification."
);
#include "uccf.h"
#include <fsl_qe.h>
-void ucc_fast_transmit_on_demand(ucc_fast_private_t *uccf)
+void ucc_fast_transmit_on_demand(struct ucc_fast_priv *uccf)
{
out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
}
u32 ucc_fast_get_qe_cr_subblock(int ucc_num)
{
switch (ucc_num) {
- case 0: return QE_CR_SUBBLOCK_UCCFAST1;
- case 1: return QE_CR_SUBBLOCK_UCCFAST2;
- case 2: return QE_CR_SUBBLOCK_UCCFAST3;
- case 3: return QE_CR_SUBBLOCK_UCCFAST4;
- case 4: return QE_CR_SUBBLOCK_UCCFAST5;
- case 5: return QE_CR_SUBBLOCK_UCCFAST6;
- case 6: return QE_CR_SUBBLOCK_UCCFAST7;
- case 7: return QE_CR_SUBBLOCK_UCCFAST8;
- default: return QE_CR_SUBBLOCK_INVALID;
+ case 0:
+ return QE_CR_SUBBLOCK_UCCFAST1;
+ case 1:
+ return QE_CR_SUBBLOCK_UCCFAST2;
+ case 2:
+ return QE_CR_SUBBLOCK_UCCFAST3;
+ case 3:
+ return QE_CR_SUBBLOCK_UCCFAST4;
+ case 4:
+ return QE_CR_SUBBLOCK_UCCFAST5;
+ case 5:
+ return QE_CR_SUBBLOCK_UCCFAST6;
+ case 6:
+ return QE_CR_SUBBLOCK_UCCFAST7;
+ case 7:
+ return QE_CR_SUBBLOCK_UCCFAST8;
+ default:
+ return QE_CR_SUBBLOCK_INVALID;
}
}
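+ /*
+ * Each CMXUCR register routes clocks for a pair of UCCs: cmxucr1
+ * serves UCC1/UCC3, cmxucr2 UCC5/UCC7, cmxucr3 UCC2/UCC4 and
+ * cmxucr4 UCC6/UCC8; the first UCC of each pair uses the upper
+ * 16 bits (shift 16), the second the lower 16 bits.
+ */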
-static void ucc_get_cmxucr_reg(int ucc_num, volatile u32 **p_cmxucr,
- u8 *reg_num, u8 *shift)
+static void ucc_get_cmxucr_reg(int ucc_num, u32 **p_cmxucr,
+ u8 *reg_num, u8 *shift)
{
switch (ucc_num) {
- case 0: /* UCC1 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr1);
- *reg_num = 1;
- *shift = 16;
- break;
- case 2: /* UCC3 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr1);
- *reg_num = 1;
- *shift = 0;
- break;
- case 4: /* UCC5 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr2);
- *reg_num = 2;
- *shift = 16;
- break;
- case 6: /* UCC7 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr2);
- *reg_num = 2;
- *shift = 0;
- break;
- case 1: /* UCC2 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr3);
- *reg_num = 3;
- *shift = 16;
- break;
- case 3: /* UCC4 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr3);
- *reg_num = 3;
- *shift = 0;
- break;
- case 5: /* UCC6 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr4);
- *reg_num = 4;
- *shift = 16;
- break;
- case 7: /* UCC8 */
- *p_cmxucr = &(qe_immr->qmx.cmxucr4);
- *reg_num = 4;
- *shift = 0;
- break;
- default:
- break;
+ case 0: /* UCC1 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr1;
+ *reg_num = 1;
+ *shift = 16;
+ break;
+ case 2: /* UCC3 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr1;
+ *reg_num = 1;
+ *shift = 0;
+ break;
+ case 4: /* UCC5 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr2;
+ *reg_num = 2;
+ *shift = 16;
+ break;
+ case 6: /* UCC7 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr2;
+ *reg_num = 2;
+ *shift = 0;
+ break;
+ case 1: /* UCC2 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr3;
+ *reg_num = 3;
+ *shift = 16;
+ break;
+ case 3: /* UCC4 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr3;
+ *reg_num = 3;
+ *shift = 0;
+ break;
+ case 5: /* UCC6 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr4;
+ *reg_num = 4;
+ *shift = 16;
+ break;
+ case 7: /* UCC8 */
+ *p_cmxucr = &qe_immr->qmx.cmxucr4;
+ *reg_num = 4;
+ *shift = 0;
+ break;
+ default:
+ break;
}
}
static int ucc_set_clk_src(int ucc_num, qe_clock_e clock, comm_dir_e mode)
{
- volatile u32 *p_cmxucr = NULL;
- u8 reg_num = 0;
- u8 shift = 0;
- u32 clockBits;
- u32 clockMask;
- int source = -1;
+ u32 *p_cmxucr = NULL;
+ u8 reg_num = 0;
+ u8 shift = 0;
+ u32 clk_bits;
+ u32 clk_mask;
+ int source = -1;
/* check if the UCC number is in range. */
- if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+ if ((ucc_num > UCC_MAX_NUM - 1) || ucc_num < 0)
return -EINVAL;
- if (! ((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
- printf("%s: bad comm mode type passed\n", __FUNCTION__);
+ if (!(mode == COMM_DIR_RX || mode == COMM_DIR_TX)) {
+ printf("%s: bad comm mode type passed\n", __func__);
return -EINVAL;
}
ucc_get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
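+ /*
+ * Translate the requested clock into the 1-based source index for
+ * this CMXUCR field; -1 means the clock cannot be routed to this UCC.
+ */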
switch (reg_num) {
- case 1:
- switch (clock) {
- case QE_BRG1: source = 1; break;
- case QE_BRG2: source = 2; break;
- case QE_BRG7: source = 3; break;
- case QE_BRG8: source = 4; break;
- case QE_CLK9: source = 5; break;
- case QE_CLK10: source = 6; break;
- case QE_CLK11: source = 7; break;
- case QE_CLK12: source = 8; break;
- case QE_CLK15: source = 9; break;
- case QE_CLK16: source = 10; break;
- default: source = -1; break;
- }
- break;
- case 2:
- switch (clock) {
- case QE_BRG5: source = 1; break;
- case QE_BRG6: source = 2; break;
- case QE_BRG7: source = 3; break;
- case QE_BRG8: source = 4; break;
- case QE_CLK13: source = 5; break;
- case QE_CLK14: source = 6; break;
- case QE_CLK19: source = 7; break;
- case QE_CLK20: source = 8; break;
- case QE_CLK15: source = 9; break;
- case QE_CLK16: source = 10; break;
- default: source = -1; break;
- }
- break;
- case 3:
- switch (clock) {
- case QE_BRG9: source = 1; break;
- case QE_BRG10: source = 2; break;
- case QE_BRG15: source = 3; break;
- case QE_BRG16: source = 4; break;
- case QE_CLK3: source = 5; break;
- case QE_CLK4: source = 6; break;
- case QE_CLK17: source = 7; break;
- case QE_CLK18: source = 8; break;
- case QE_CLK7: source = 9; break;
- case QE_CLK8: source = 10; break;
- case QE_CLK16: source = 11; break;
- default: source = -1; break;
- }
- break;
- case 4:
- switch (clock) {
- case QE_BRG13: source = 1; break;
- case QE_BRG14: source = 2; break;
- case QE_BRG15: source = 3; break;
- case QE_BRG16: source = 4; break;
- case QE_CLK5: source = 5; break;
- case QE_CLK6: source = 6; break;
- case QE_CLK21: source = 7; break;
- case QE_CLK22: source = 8; break;
- case QE_CLK7: source = 9; break;
- case QE_CLK8: source = 10; break;
- case QE_CLK16: source = 11; break;
- default: source = -1; break;
- }
+ case 1:
+ switch (clock) {
+ case QE_BRG1:
+ source = 1;
+ break;
+ case QE_BRG2:
+ source = 2;
+ break;
+ case QE_BRG7:
+ source = 3;
+ break;
+ case QE_BRG8:
+ source = 4;
+ break;
+ case QE_CLK9:
+ source = 5;
+ break;
+ case QE_CLK10:
+ source = 6;
+ break;
+ case QE_CLK11:
+ source = 7;
+ break;
+ case QE_CLK12:
+ source = 8;
+ break;
+ case QE_CLK15:
+ source = 9;
+ break;
+ case QE_CLK16:
+ source = 10;
+ break;
+ default:
+ source = -1;
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5:
+ source = 1;
+ break;
+ case QE_BRG6:
+ source = 2;
+ break;
+ case QE_BRG7:
+ source = 3;
+ break;
+ case QE_BRG8:
+ source = 4;
+ break;
+ case QE_CLK13:
+ source = 5;
+ break;
+ case QE_CLK14:
+ source = 6;
+ break;
+ case QE_CLK19:
+ source = 7;
+ break;
+ case QE_CLK20:
+ source = 8;
+ break;
+ case QE_CLK15:
+ source = 9;
+ break;
+ case QE_CLK16:
+ source = 10;
break;
default:
source = -1;
break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ case QE_BRG15:
+ source = 3;
+ break;
+ case QE_BRG16:
+ source = 4;
+ break;
+ case QE_CLK3:
+ source = 5;
+ break;
+ case QE_CLK4:
+ source = 6;
+ break;
+ case QE_CLK17:
+ source = 7;
+ break;
+ case QE_CLK18:
+ source = 8;
+ break;
+ case QE_CLK7:
+ source = 9;
+ break;
+ case QE_CLK8:
+ source = 10;
+ break;
+ case QE_CLK16:
+ source = 11;
+ break;
+ default:
+ source = -1;
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ case QE_BRG15:
+ source = 3;
+ break;
+ case QE_BRG16:
+ source = 4;
+ break;
+ case QE_CLK5:
+ source = 5;
+ break;
+ case QE_CLK6:
+ source = 6;
+ break;
+ case QE_CLK21:
+ source = 7;
+ break;
+ case QE_CLK22:
+ source = 8;
+ break;
+ case QE_CLK7:
+ source = 9;
+ break;
+ case QE_CLK8:
+ source = 10;
+ break;
+ case QE_CLK16:
+ source = 11;
+ break;
+ default:
+ source = -1;
+ break;
+ }
+ break;
+ default:
+ source = -1;
+ break;
}
if (source == -1) {
- printf("%s: Bad combination of clock and UCC\n", __FUNCTION__);
+ printf("%s: Bad combination of clock and UCC\n", __func__);
return -ENOENT;
}
- clockBits = (u32) source;
- clockMask = QE_CMXUCR_TX_CLK_SRC_MASK;
+ clk_bits = (u32)source;
+ clk_mask = QE_CMXUCR_TX_CLK_SRC_MASK;
if (mode == COMM_DIR_RX) {
- clockBits <<= 4; /* Rx field is 4 bits to left of Tx field */
- clockMask <<= 4; /* Rx field is 4 bits to left of Tx field */
+ clk_bits <<= 4; /* Rx field is 4 bits to left of Tx field */
+ clk_mask <<= 4; /* Rx field is 4 bits to left of Tx field */
}
- clockBits <<= shift;
- clockMask <<= shift;
+ clk_bits <<= shift;
+ clk_mask <<= shift;
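+ /*
+ * clk_bits/clk_mask now address the 4-bit Rx or Tx clock-source
+ * field of this UCC, e.g. bits 23:20 of cmxucr1 for UCC1 Rx
+ * (assuming QE_CMXUCR_TX_CLK_SRC_MASK covers the low 4 bits).
+ */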
- out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clockMask) | clockBits);
+ out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clk_mask) | clk_bits);
return 0;
}
uint base = 0;
/* check if the UCC number is in range */
- if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
- printf("%s: the UCC num not in ranges\n", __FUNCTION__);
+ if ((ucc_num > UCC_MAX_NUM - 1) || ucc_num < 0) {
+ printf("%s: the UCC num not in ranges\n", __func__);
return 0;
}
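+ /*
+ * Register block layout: even UCC indices (UCC1/3/5/7) sit at
+ * 0x2000 + 0x200 * (ucc_num / 2) inside the QE map, odd indices
+ * (UCC2/4/6/8) at 0x3000 + 0x200 * (ucc_num / 2).
+ */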
switch (ucc_num) {
- case 0: base = 0x00002000; break;
- case 1: base = 0x00003000; break;
- case 2: base = 0x00002200; break;
- case 3: base = 0x00003200; break;
- case 4: base = 0x00002400; break;
- case 5: base = 0x00003400; break;
- case 6: base = 0x00002600; break;
- case 7: base = 0x00003600; break;
- default: break;
+ case 0:
+ base = 0x00002000;
+ break;
+ case 1:
+ base = 0x00003000;
+ break;
+ case 2:
+ base = 0x00002200;
+ break;
+ case 3:
+ base = 0x00003200;
+ break;
+ case 4:
+ base = 0x00002400;
+ break;
+ case 5:
+ base = 0x00003400;
+ break;
+ case 6:
+ base = 0x00002600;
+ break;
+ case 7:
+ base = 0x00003600;
+ break;
+ default:
+ break;
}
base = (uint)qe_immr + base;
return base;
}
-void ucc_fast_enable(ucc_fast_private_t *uccf, comm_dir_e mode)
+void ucc_fast_enable(struct ucc_fast_priv *uccf, comm_dir_e mode)
{
ucc_fast_t *uf_regs;
u32 gumr;
out_be32(&uf_regs->gumr, gumr);
}
-void ucc_fast_disable(ucc_fast_private_t *uccf, comm_dir_e mode)
+void ucc_fast_disable(struct ucc_fast_priv *uccf, comm_dir_e mode)
{
ucc_fast_t *uf_regs;
u32 gumr;
out_be32(&uf_regs->gumr, gumr);
}
-int ucc_fast_init(ucc_fast_info_t *uf_info, ucc_fast_private_t **uccf_ret)
+int ucc_fast_init(struct ucc_fast_inf *uf_info,
+ struct ucc_fast_priv **uccf_ret)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_priv *uccf;
ucc_fast_t *uf_regs;
if (!uf_info)
return -EINVAL;
- if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
- printf("%s: Illagal UCC number!\n", __FUNCTION__);
+ if (uf_info->ucc_num < 0 || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printf("%s: Illagal UCC number!\n", __func__);
return -EINVAL;
}
- uccf = (ucc_fast_private_t *)malloc(sizeof(ucc_fast_private_t));
+ uccf = (struct ucc_fast_priv *)malloc(sizeof(struct ucc_fast_priv));
if (!uccf) {
printf("%s: No memory for UCC fast data structure!\n",
- __FUNCTION__);
+ __func__);
return -ENOMEM;
}
- memset(uccf, 0, sizeof(ucc_fast_private_t));
+ memset(uccf, 0, sizeof(struct ucc_fast_priv));
/* Save fast UCC structure */
uccf->uf_info = uf_info;
uccf->uf_regs = (ucc_fast_t *)ucc_get_reg_baseaddr(uf_info->ucc_num);
- if (uccf->uf_regs == NULL) {
+ if (!uccf->uf_regs) {
printf("%s: No memory map for UCC fast controller!\n",
- __FUNCTION__);
+ __func__);
return -ENOMEM;
}
uccf->enabled_rx = 0;
uf_regs = uccf->uf_regs;
- uccf->p_ucce = (u32 *) &(uf_regs->ucce);
- uccf->p_uccm = (u32 *) &(uf_regs->uccm);
+ uccf->p_ucce = (u32 *)&uf_regs->ucce;
+ uccf->p_uccm = (u32 *)&uf_regs->uccm;
/* Init GUEMR register, UCC both Rx and Tx is Fast protocol */
out_8(&uf_regs->guemr, UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_FAST_RX
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(UCC_GETH_UTFS_GIGA_INIT,
- UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
/* Allocate memory for Rx Virtual Fifo */
uccf->ucc_fast_rx_virtual_fifo_base_offset =
qe_muram_alloc(UCC_GETH_URFS_GIGA_INIT +
- UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD,
- UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
/* utfb, urfb are offsets from MURAM base */
out_be32(&uf_regs->utfb,
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(UCC_GETH_UTFS_INIT,
- UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
/* Allocate memory for Rx Virtual Fifo */
uccf->ucc_fast_rx_virtual_fifo_base_offset =
/* Rx clock routing */
if (uf_info->rx_clock != QE_CLK_NONE) {
if (ucc_set_clk_src(uf_info->ucc_num,
- uf_info->rx_clock, COMM_DIR_RX)) {
+ uf_info->rx_clock, COMM_DIR_RX)) {
printf("%s: Illegal value for parameter 'RxClock'.\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
}
/* Tx clock routing */
if (uf_info->tx_clock != QE_CLK_NONE) {
if (ucc_set_clk_src(uf_info->ucc_num,
- uf_info->tx_clock, COMM_DIR_TX)) {
+ uf_info->tx_clock, COMM_DIR_TX)) {
printf("%s: Illegal value for parameter 'TxClock'.\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
}
#include "linux/immap_qe.h"
#include <fsl_qe.h>
-/* Fast or Giga ethernet
-*/
-typedef enum enet_type {
+/* Fast or Giga ethernet */
+enum enet_type {
FAST_ETH,
GIGA_ETH,
-} enet_type_e;
+};
-/* General UCC Extended Mode Register
-*/
+/* General UCC Extended Mode Register */
#define UCC_GUEMR_MODE_MASK_RX 0x02
#define UCC_GUEMR_MODE_MASK_TX 0x01
#define UCC_GUEMR_MODE_FAST_RX 0x02
#define UCC_GUEMR_MODE_FAST_TX 0x01
#define UCC_GUEMR_MODE_SLOW_RX 0x00
#define UCC_GUEMR_MODE_SLOW_TX 0x00
-#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 must be set 1 */
+/* Bit 3 must be set 1 */
+#define UCC_GUEMR_SET_RESERVED3 0x10
-/* General UCC FAST Mode Register
-*/
+/* General UCC FAST Mode Register */
#define UCC_FAST_GUMR_TCI 0x20000000
#define UCC_FAST_GUMR_TRX 0x10000000
#define UCC_FAST_GUMR_TTX 0x08000000
#define UCC_FAST_GUMR_ENR 0x00000020
#define UCC_FAST_GUMR_ENT 0x00000010
-/* GUMR [MODE] bit maps
-*/
+/* GUMR [MODE] bit maps */
#define UCC_FAST_GUMR_HDLC 0x00000000
#define UCC_FAST_GUMR_QMC 0x00000002
#define UCC_FAST_GUMR_UART 0x00000004
#define UCC_FAST_GUMR_ATM 0x0000000a
#define UCC_FAST_GUMR_ETH 0x0000000c
-/* Transmit On Demand (UTORD)
-*/
+/* Transmit On Demand (UTORD) */
#define UCC_SLOW_TOD 0x8000
#define UCC_FAST_TOD 0x8000
-/* Fast Ethernet (10/100 Mbps)
-*/
-#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size */
-#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
-#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
-#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size */
-#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+/* Fast Ethernet (10/100 Mbps) */
+/* Rx virtual FIFO size */
+#define UCC_GETH_URFS_INIT 512
+/* 1/2 urfs */
+#define UCC_GETH_URFET_INIT 256
+/* 3/4 urfs */
+#define UCC_GETH_URFSET_INIT 384
+/* Tx virtual FIFO size */
+#define UCC_GETH_UTFS_INIT 512
+/* 1/2 utfs */
+#define UCC_GETH_UTFET_INIT 256
#define UCC_GETH_UTFTT_INIT 128
-/* Gigabit Ethernet (1000 Mbps)
-*/
-#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual FIFO size */
-#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
-#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
-#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual FIFO size */
-#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
-#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
+/* Gigabit Ethernet (1000 Mbps) */
+/* Rx virtual FIFO size */
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/
+/* 1/2 urfs */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/
+/* 3/4 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/
+/* Tx virtual FIFO size */
+#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/
+/* 1/2 utfs */
+#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/
+#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/
-/* UCC fast alignment
-*/
+/* UCC fast alignment */
#define UCC_FAST_RX_ALIGN 4
#define UCC_FAST_MRBLR_ALIGNMENT 4
#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT 8
-/* Sizes
-*/
+/* Sizes */
#define UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD 8
-/* UCC fast structure.
-*/
-typedef struct ucc_fast_info {
+/* UCC fast structure. */
+struct ucc_fast_inf {
int ucc_num;
qe_clock_e rx_clock;
qe_clock_e tx_clock;
- enet_type_e eth_type;
-} ucc_fast_info_t;
+ enum enet_type eth_type;
+};
-typedef struct ucc_fast_private {
- ucc_fast_info_t *uf_info;
+struct ucc_fast_priv {
+ struct ucc_fast_inf *uf_info;
ucc_fast_t *uf_regs; /* a pointer to memory map of UCC regs */
u32 *p_ucce; /* a pointer to the event register */
u32 *p_uccm; /* a pointer to the mask register */
int enabled_rx; /* whether UCC is enabled for Rx (ENR) */
u32 ucc_fast_tx_virtual_fifo_base_offset;
u32 ucc_fast_rx_virtual_fifo_base_offset;
-} ucc_fast_private_t;
+};
-void ucc_fast_transmit_on_demand(ucc_fast_private_t *uccf);
+void ucc_fast_transmit_on_demand(struct ucc_fast_priv *uccf);
u32 ucc_fast_get_qe_cr_subblock(int ucc_num);
-void ucc_fast_enable(ucc_fast_private_t *uccf, comm_dir_e mode);
-void ucc_fast_disable(ucc_fast_private_t *uccf, comm_dir_e mode);
-int ucc_fast_init(ucc_fast_info_t *uf_info, ucc_fast_private_t **uccf_ret);
+void ucc_fast_enable(struct ucc_fast_priv *uccf, comm_dir_e mode);
+void ucc_fast_disable(struct ucc_fast_priv *uccf, comm_dir_e mode);
+int ucc_fast_init(struct ucc_fast_inf *uf_info,
+ struct ucc_fast_priv **uccf_ret);
#endif /* __UCCF_H__ */
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif
-static uec_info_t uec_info[] = {
+static struct uec_inf uec_info[] = {
#ifdef CONFIG_UEC_ETH1
STD_UEC_INFO(1), /* UEC1 */
#endif
static struct eth_device *devlist[MAXCONTROLLERS];
-static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
+static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
uec_t *uec_regs;
u32 maccfg1;
if (!uec) {
- printf("%s: uec not initial\n", __FUNCTION__);
+ printf("%s: uec not initial\n", __func__);
return -EINVAL;
}
uec_regs = uec->uec_regs;
return 0;
}
-static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
+static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
uec_t *uec_regs;
u32 maccfg1;
if (!uec) {
- printf("%s: uec not initial\n", __FUNCTION__);
+ printf("%s: uec not initial\n", __func__);
return -EINVAL;
}
uec_regs = uec->uec_regs;
return 0;
}
-static int uec_graceful_stop_tx(uec_private_t *uec)
+static int uec_graceful_stop_tx(struct uec_priv *uec)
{
ucc_fast_t *uf_regs;
u32 cecr_subblock;
u32 ucce;
if (!uec || !uec->uccf) {
- printf("%s: No handle passed.\n", __FUNCTION__);
+ printf("%s: No handle passed.\n", __func__);
return -EINVAL;
}
cecr_subblock =
ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
/* Wait for command to complete */
do {
ucce = in_be32(&uf_regs->ucce);
- } while (! (ucce & UCCE_GRA));
+ } while (!(ucce & UCCE_GRA));
uec->grace_stopped_tx = 1;
return 0;
}
-static int uec_graceful_stop_rx(uec_private_t *uec)
+static int uec_graceful_stop_rx(struct uec_priv *uec)
{
u32 cecr_subblock;
u8 ack;
if (!uec) {
- printf("%s: No handle passed.\n", __FUNCTION__);
+ printf("%s: No handle passed.\n", __func__);
return -EINVAL;
}
if (!uec->p_rx_glbl_pram) {
- printf("%s: No init rx global parameter\n", __FUNCTION__);
+ printf("%s: No init rx global parameter\n", __func__);
return -EINVAL;
}
cecr_subblock =
ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
ack = uec->p_rx_glbl_pram->rxgstpack;
- } while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));
+ } while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));
uec->grace_stopped_rx = 1;
return 0;
}
-static int uec_restart_tx(uec_private_t *uec)
+static int uec_restart_tx(struct uec_priv *uec)
{
u32 cecr_subblock;
if (!uec || !uec->uec_info) {
- printf("%s: No handle passed.\n", __FUNCTION__);
+ printf("%s: No handle passed.\n", __func__);
return -EINVAL;
}
cecr_subblock =
ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
uec->grace_stopped_tx = 0;
return 0;
}
-static int uec_restart_rx(uec_private_t *uec)
+static int uec_restart_rx(struct uec_priv *uec)
{
u32 cecr_subblock;
if (!uec || !uec->uec_info) {
- printf("%s: No handle passed.\n", __FUNCTION__);
+ printf("%s: No handle passed.\n", __func__);
return -EINVAL;
}
cecr_subblock =
ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
uec->grace_stopped_rx = 0;
return 0;
}
-static int uec_open(uec_private_t *uec, comm_dir_e mode)
+static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_priv *uccf;
if (!uec || !uec->uccf) {
- printf("%s: No handle passed.\n", __FUNCTION__);
+ printf("%s: No handle passed.\n", __func__);
return -EINVAL;
}
uccf = uec->uccf;
/* check if the UCC number is in range. */
if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
- printf("%s: ucc_num out of range.\n", __FUNCTION__);
+ printf("%s: ucc_num out of range.\n", __func__);
return -EINVAL;
}
ucc_fast_enable(uccf, mode);
/* RISC microcode start */
- if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
+ if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
uec_restart_tx(uec);
- }
- if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
+ if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
uec_restart_rx(uec);
- }
return 0;
}
-static int uec_stop(uec_private_t *uec, comm_dir_e mode)
+static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
if (!uec || !uec->uccf) {
- printf("%s: No handle passed.\n", __FUNCTION__);
+ printf("%s: No handle passed.\n", __func__);
return -EINVAL;
}
/* check if the UCC number is in range. */
if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
- printf("%s: ucc_num out of range.\n", __FUNCTION__);
+ printf("%s: ucc_num out of range.\n", __func__);
return -EINVAL;
}
/* Stop any transmissions */
- if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
+ if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
uec_graceful_stop_tx(uec);
- }
+
/* Stop any receptions */
- if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
+ if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
uec_graceful_stop_rx(uec);
- }
/* Disable the UCC fast */
ucc_fast_disable(uec->uccf, mode);
return 0;
}
-static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
+static int uec_set_mac_duplex(struct uec_priv *uec, int duplex)
{
uec_t *uec_regs;
u32 maccfg2;
if (!uec) {
- printf("%s: uec not initial\n", __FUNCTION__);
+ printf("%s: uec not initial\n", __func__);
return -EINVAL;
}
uec_regs = uec->uec_regs;
return 0;
}
-static int uec_set_mac_if_mode(uec_private_t *uec,
- phy_interface_t if_mode, int speed)
+static int uec_set_mac_if_mode(struct uec_priv *uec,
+ phy_interface_t if_mode, int speed)
{
phy_interface_t enet_if_mode;
uec_t *uec_regs;
u32 maccfg2;
if (!uec) {
- printf("%s: uec not initial\n", __FUNCTION__);
+ printf("%s: uec not initial\n", __func__);
return -EINVAL;
}
upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);
switch (speed) {
- case SPEED_10:
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- switch (enet_if_mode) {
- case PHY_INTERFACE_MODE_MII:
- break;
- case PHY_INTERFACE_MODE_RGMII:
- upsmr |= (UPSMR_RPM | UPSMR_R10M);
- break;
- case PHY_INTERFACE_MODE_RMII:
- upsmr |= (UPSMR_R10M | UPSMR_RMM);
- break;
- default:
- return -EINVAL;
- break;
- }
+ case SPEED_10:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ switch (enet_if_mode) {
+ case PHY_INTERFACE_MODE_MII:
break;
- case SPEED_100:
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- switch (enet_if_mode) {
- case PHY_INTERFACE_MODE_MII:
- break;
- case PHY_INTERFACE_MODE_RGMII:
- upsmr |= UPSMR_RPM;
- break;
- case PHY_INTERFACE_MODE_RMII:
- upsmr |= UPSMR_RMM;
- break;
- default:
- return -EINVAL;
- break;
- }
+ case PHY_INTERFACE_MODE_RGMII:
+ upsmr |= (UPSMR_RPM | UPSMR_R10M);
break;
- case SPEED_1000:
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- switch (enet_if_mode) {
- case PHY_INTERFACE_MODE_GMII:
- break;
- case PHY_INTERFACE_MODE_TBI:
- upsmr |= UPSMR_TBIM;
- break;
- case PHY_INTERFACE_MODE_RTBI:
- upsmr |= (UPSMR_RPM | UPSMR_TBIM);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII:
- upsmr |= UPSMR_RPM;
- break;
- case PHY_INTERFACE_MODE_SGMII:
- upsmr |= UPSMR_SGMM;
- break;
- default:
- return -EINVAL;
- break;
- }
+ case PHY_INTERFACE_MODE_RMII:
+ upsmr |= (UPSMR_R10M | UPSMR_RMM);
break;
default:
return -EINVAL;
+ }
+ break;
+ case SPEED_100:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ switch (enet_if_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ upsmr |= UPSMR_RPM;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ upsmr |= UPSMR_RMM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SPEED_1000:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ switch (enet_if_mode) {
+ case PHY_INTERFACE_MODE_GMII:
+ break;
+ case PHY_INTERFACE_MODE_TBI:
+ upsmr |= UPSMR_TBIM;
+ break;
+ case PHY_INTERFACE_MODE_RTBI:
+ upsmr |= (UPSMR_RPM | UPSMR_TBIM);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ upsmr |= UPSMR_RPM;
break;
+ case PHY_INTERFACE_MODE_SGMII:
+ upsmr |= UPSMR_SGMM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
}
out_be32(&uec_regs->maccfg2, maccfg2);
out_be32(&uec_mii_regs->miimcfg, miimcfg);
/* Wait until the bus is free */
- while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--);
+ while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--)
+ ;
if (timeout <= 0) {
- printf("%s: The MII Bus is stuck!", __FUNCTION__);
+ printf("%s: The MII Bus is stuck!", __func__);
return -ETIMEDOUT;
}
static int init_phy(struct eth_device *dev)
{
- uec_private_t *uec;
+ struct uec_priv *uec;
uec_mii_t *umii_regs;
struct uec_mii_info *mii_info;
struct phy_info *curphy;
int err;
- uec = (uec_private_t *)dev->priv;
+ uec = (struct uec_priv *)dev->priv;
umii_regs = uec->uec_mii_regs;
uec->oldlink = 0;
}
memset(mii_info, 0, sizeof(*mii_info));
- if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
+ if (uec->uec_info->uf_info.eth_type == GIGA_ETH)
mii_info->speed = SPEED_1000;
- } else {
+ else
mii_info->speed = SPEED_100;
- }
mii_info->duplex = DUPLEX_FULL;
mii_info->pause = 0;
static void adjust_link(struct eth_device *dev)
{
- uec_private_t *uec = (uec_private_t *)dev->priv;
+ struct uec_priv *uec = (struct uec_priv *)dev->priv;
struct uec_mii_info *mii_info = uec->mii_info;
- extern void change_phy_interface_mode(struct eth_device *dev,
- phy_interface_t mode, int speed);
-
if (mii_info->link) {
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
+ /*
+ * Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode.
+ */
if (mii_info->duplex != uec->oldduplex) {
if (!(mii_info->duplex)) {
uec_set_mac_duplex(uec, DUPLEX_HALF);
case SPEED_1000:
break;
case SPEED_100:
- printf ("switching to rgmii 100\n");
+ printf("switching to rgmii 100\n");
mode = PHY_INTERFACE_MODE_RGMII;
break;
case SPEED_10:
- printf ("switching to rgmii 10\n");
+ printf("switching to rgmii 10\n");
mode = PHY_INTERFACE_MODE_RGMII;
break;
default:
printf("%s: Ack,Speed(%d)is illegal\n",
- dev->name, mii_info->speed);
+ dev->name, mii_info->speed);
break;
}
}
static void phy_change(struct eth_device *dev)
{
- uec_private_t *uec = (uec_private_t *)dev->priv;
+ struct uec_priv *uec = (struct uec_priv *)dev->priv;
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
- /* QE9 and QE12 need to be set for enabling QE MII managment signals */
+ /* QE9 and QE12 need to be set for enabling QE MII management signals */
setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif
int i;
for (i = 0; i < MAXCONTROLLERS; i++) {
- if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) {
+ if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0)
break;
- }
}
/* If device cannot be found, returns -1 */
if (i == MAXCONTROLLERS) {
- debug ("%s: device %s not found in devlist\n", __FUNCTION__, devname);
+ debug("%s: device %s not found in devlist\n", __func__,
+ devname);
i = -1;
}
unsigned short value = 0;
int devindex = 0;
- if (bus->name == NULL) {
- debug("%s: NULL pointer given\n", __FUNCTION__);
+ if (!bus->name) {
+ debug("%s: NULL pointer given\n", __func__);
} else {
devindex = uec_miiphy_find_dev_by_name(bus->name);
- if (devindex >= 0) {
+ if (devindex >= 0)
value = uec_read_phy_reg(devlist[devindex], addr, reg);
- }
}
return value;
}
{
int devindex = 0;
- if (bus->name == NULL) {
- debug("%s: NULL pointer given\n", __FUNCTION__);
+ if (!bus->name) {
+ debug("%s: NULL pointer given\n", __func__);
} else {
devindex = uec_miiphy_find_dev_by_name(bus->name);
- if (devindex >= 0) {
+ if (devindex >= 0)
uec_write_phy_reg(devlist[devindex], addr, reg, value);
- }
}
return 0;
}
#endif
-static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
+static int uec_set_mac_address(struct uec_priv *uec, u8 *mac_addr)
{
uec_t *uec_regs;
u32 mac_addr1;
u32 mac_addr2;
if (!uec) {
- printf("%s: uec not initial\n", __FUNCTION__);
+ printf("%s: uec not initial\n", __func__);
return -EINVAL;
}
uec_regs = uec->uec_regs;
- /* if a station address of 0x12345678ABCD, perform a write to
- MACSTNADDR1 of 0xCDAB7856,
- MACSTNADDR2 of 0x34120000 */
+ /*
+ * for a station address of 0x12345678ABCD, write
+ * 0xCDAB7856 to MACSTNADDR1 and
+ * 0x34120000 to MACSTNADDR2
+ */
- mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
+ mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) |
(mac_addr[3] << 8) | (mac_addr[2]);
out_be32(&uec_regs->macstnaddr1, mac_addr1);
return 0;
}
-static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
- int *threads_num_ret)
+static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
+ int *threads_num_ret)
{
int num_threads_numerica;
switch (threads_num) {
- case UEC_NUM_OF_THREADS_1:
- num_threads_numerica = 1;
- break;
- case UEC_NUM_OF_THREADS_2:
- num_threads_numerica = 2;
- break;
- case UEC_NUM_OF_THREADS_4:
- num_threads_numerica = 4;
- break;
- case UEC_NUM_OF_THREADS_6:
- num_threads_numerica = 6;
- break;
- case UEC_NUM_OF_THREADS_8:
- num_threads_numerica = 8;
- break;
- default:
- printf("%s: Bad number of threads value.",
- __FUNCTION__);
- return -EINVAL;
+ case UEC_NUM_OF_THREADS_1:
+ num_threads_numerica = 1;
+ break;
+ case UEC_NUM_OF_THREADS_2:
+ num_threads_numerica = 2;
+ break;
+ case UEC_NUM_OF_THREADS_4:
+ num_threads_numerica = 4;
+ break;
+ case UEC_NUM_OF_THREADS_6:
+ num_threads_numerica = 6;
+ break;
+ case UEC_NUM_OF_THREADS_8:
+ num_threads_numerica = 8;
+ break;
+ default:
+ printf("%s: Bad number of threads value.",
+ __func__);
+ return -EINVAL;
}
*threads_num_ret = num_threads_numerica;
return 0;
}
-static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
+static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
- uec_info_t *uec_info;
+ struct uec_inf *uec_info;
u32 end_bd;
u8 bmrx = 0;
int i;
uec_info = uec->uec_info;
/* Alloc global Tx parameter RAM page */
- uec->tx_glbl_pram_offset = qe_muram_alloc(
- sizeof(uec_tx_global_pram_t),
- UEC_TX_GLOBAL_PRAM_ALIGNMENT);
- uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
+ uec->tx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct uec_tx_global_pram),
+ UEC_TX_GLOBAL_PRAM_ALIGNMENT);
+ uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
qe_muram_addr(uec->tx_glbl_pram_offset);
/* Zero the global Tx prameter RAM */
- memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));
+ memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));
/* Init global Tx parameter RAM */
out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);
/* SQPTR */
- uec->send_q_mem_reg_offset = qe_muram_alloc(
- sizeof(uec_send_queue_qd_t),
- UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
- uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
+ uec->send_q_mem_reg_offset =
+ qe_muram_alloc(sizeof(struct uec_send_queue_qd),
+ UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
qe_muram_addr(uec->send_q_mem_reg_offset);
out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);
end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
* SIZEOFBD;
out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
- (u32)(uec->p_tx_bd_ring));
+ (u32)(uec->p_tx_bd_ring));
out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
- end_bd);
+ end_bd);
/* Scheduler Base Pointer, we have only one Tx queue, no need it */
out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);
out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));
/* IPH_Offset */
- for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
+ for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
- }
/* VTAG table */
- for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
+ for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
- }
/* TQPTR */
- uec->thread_dat_tx_offset = qe_muram_alloc(
- num_threads_tx * sizeof(uec_thread_data_tx_t) +
- 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);
+ uec->thread_dat_tx_offset =
+ qe_muram_alloc(num_threads_tx *
+ sizeof(struct uec_thread_data_tx) +
+ 32 * (num_threads_tx == 1),
+ UEC_THREAD_DATA_ALIGNMENT);
- uec->p_thread_data_tx = (uec_thread_data_tx_t *)
+ uec->p_thread_data_tx = (struct uec_thread_data_tx *)
qe_muram_addr(uec->thread_dat_tx_offset);
out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}
-static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
+static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
u8 bmrx = 0;
int i;
- uec_82xx_address_filtering_pram_t *p_af_pram;
+ struct uec_82xx_add_filtering_pram *p_af_pram;
/* Allocate global Rx parameter RAM page */
- uec->rx_glbl_pram_offset = qe_muram_alloc(
- sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
- uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
+ uec->rx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct uec_rx_global_pram),
+ UEC_RX_GLOBAL_PRAM_ALIGNMENT);
+ uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
qe_muram_addr(uec->rx_glbl_pram_offset);
/* Zero Global Rx parameter RAM */
- memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));
+ memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));
/* Init global Rx parameter RAM */
- /* REMODER, Extended feature mode disable, VLAN disable,
- LossLess flow control disable, Receive firmware statisic disable,
- Extended address parsing mode disable, One Rx queues,
- Dynamic maximum/minimum frame length disable, IP checksum check
- disable, IP address alignment disable
- */
+ /*
+ * REMODER, Extended feature mode disable, VLAN disable,
+ * LossLess flow control disable, Receive firmware statistic disable,
+ * Extended address parsing mode disable, one Rx queue,
+ * Dynamic maximum/minimum frame length disable, IP checksum check
+ * disable, IP address alignment disable
+ */
out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);
/* RQPTR */
- uec->thread_dat_rx_offset = qe_muram_alloc(
- num_threads_rx * sizeof(uec_thread_data_rx_t),
- UEC_THREAD_DATA_ALIGNMENT);
- uec->p_thread_data_rx = (uec_thread_data_rx_t *)
+ uec->thread_dat_rx_offset =
+ qe_muram_alloc(num_threads_rx *
+ sizeof(struct uec_thread_data_rx),
+ UEC_THREAD_DATA_ALIGNMENT);
+ uec->p_thread_data_rx = (struct uec_thread_data_rx *)
qe_muram_addr(uec->thread_dat_rx_offset);
out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);
out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);
/* RBDQPTR */
- uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
- sizeof(uec_rx_bd_queues_entry_t) + \
- sizeof(uec_rx_prefetched_bds_t),
- UEC_RX_BD_QUEUES_ALIGNMENT);
- uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
+ uec->rx_bd_qs_tbl_offset =
+ qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
+ sizeof(struct uec_rx_pref_bds),
+ UEC_RX_BD_QUEUES_ALIGNMENT);
+ uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
qe_muram_addr(uec->rx_bd_qs_tbl_offset);
/* Zero it */
- memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
- sizeof(uec_rx_prefetched_bds_t));
+ memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
+ sizeof(struct uec_rx_pref_bds));
out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
(u32)uec->p_rx_bd_ring);
/* L2QT */
out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
/* L3QT */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++)
out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
- }
/* VLAN_TYPE */
out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
out_be16(&uec->p_rx_glbl_pram->vlantci, 0);
/* Clear PQ2 style address filtering hash table */
- p_af_pram = (uec_82xx_address_filtering_pram_t *) \
+ p_af_pram = (struct uec_82xx_add_filtering_pram *)
uec->p_rx_glbl_pram->addressfiltering;
p_af_pram->iaddr_h = 0;
p_af_pram->gaddr_l = 0;
}
-static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
- int thread_tx, int thread_rx)
+static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
+ int thread_tx, int thread_rx)
{
- uec_init_cmd_pram_t *p_init_enet_param;
+ struct uec_init_cmd_pram *p_init_enet_param;
u32 init_enet_param_offset;
- uec_info_t *uec_info;
+ struct uec_inf *uec_info;
+ struct ucc_fast_inf *uf_info;
int i;
int snum;
- u32 init_enet_offset;
+ u32 off;
u32 entry_val;
u32 command;
u32 cecr_subblock;
uec_info = uec->uec_info;
+ uf_info = &uec_info->uf_info;
/* Allocate init enet command parameter */
- uec->init_enet_param_offset = qe_muram_alloc(
- sizeof(uec_init_cmd_pram_t), 4);
+ uec->init_enet_param_offset =
+ qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
init_enet_param_offset = uec->init_enet_param_offset;
- uec->p_init_enet_param = (uec_init_cmd_pram_t *)
+ uec->p_init_enet_param = (struct uec_init_cmd_pram *)
qe_muram_addr(uec->init_enet_param_offset);
/* Zero init enet command struct */
- memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));
+ memset((void *)uec->p_init_enet_param, 0,
+ sizeof(struct uec_init_cmd_pram));
/* Init the command struct */
p_init_enet_param = uec->p_init_enet_param;
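+ /*
+ * Each rxthread[]/txthread[] entry packs the thread's serial number
+ * (snum), the MURAM offset of its per-thread parameter RAM and the
+ * RISC allocation bits (risc_rx/risc_tx).
+ */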
/* Init Rx threads */
for (i = 0; i < (thread_rx + 1); i++) {
- if ((snum = qe_get_snum()) < 0) {
- printf("%s can not get snum\n", __FUNCTION__);
+ snum = qe_get_snum();
+ if (snum < 0) {
+ printf("%s can not get snum\n", __func__);
return -ENOMEM;
}
- if (i==0) {
- init_enet_offset = 0;
+ if (i == 0) {
+ off = 0;
} else {
- init_enet_offset = qe_muram_alloc(
- sizeof(uec_thread_rx_pram_t),
- UEC_THREAD_RX_PRAM_ALIGNMENT);
+ off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
+ UEC_THREAD_RX_PRAM_ALIGNMENT);
}
entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
- init_enet_offset | (u32)uec_info->risc_rx;
+ off | (u32)uec_info->risc_rx;
p_init_enet_param->rxthread[i] = entry_val;
}
/* Init Tx threads */
for (i = 0; i < thread_tx; i++) {
- if ((snum = qe_get_snum()) < 0) {
- printf("%s can not get snum\n", __FUNCTION__);
+ snum = qe_get_snum();
+ if (snum < 0) {
+ printf("%s can not get snum\n", __func__);
return -ENOMEM;
}
- init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
- UEC_THREAD_TX_PRAM_ALIGNMENT);
+ off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
+ UEC_THREAD_TX_PRAM_ALIGNMENT);
entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
- init_enet_offset | (u32)uec_info->risc_tx;
+ off | (u32)uec_info->risc_tx;
p_init_enet_param->txthread[i] = entry_val;
}
/* Issue QE command */
command = QE_INIT_TX_RX;
- cecr_subblock = ucc_fast_get_qe_cr_subblock(
- uec->uec_info->uf_info.ucc_num);
- qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
- init_enet_param_offset);
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
+ init_enet_param_offset);
return 0;
}
-static int uec_startup(uec_private_t *uec)
+static int uec_startup(struct uec_priv *uec)
{
- uec_info_t *uec_info;
- ucc_fast_info_t *uf_info;
- ucc_fast_private_t *uccf;
+ struct uec_inf *uec_info;
+ struct ucc_fast_inf *uf_info;
+ struct ucc_fast_priv *uccf;
ucc_fast_t *uf_regs;
uec_t *uec_regs;
int num_threads_tx;
u32 utbipar;
u32 length;
u32 align;
- qe_bd_t *bd;
+ struct buffer_descriptor *bd;
u8 *buf;
int i;
if (!uec || !uec->uec_info) {
- printf("%s: uec or uec_info not initial\n", __FUNCTION__);
+ printf("%s: uec or uec_info not initial\n", __func__);
return -EINVAL;
}
uec_info = uec->uec_info;
- uf_info = &(uec_info->uf_info);
+ uf_info = &uec_info->uf_info;
/* Check if Rx BD ring len is illegal */
- if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
- (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
+ if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
+ (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
/* Check if Tx BD ring len is illegal */
if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
printf("%s: Tx BD ring length must not be smaller than 2.\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
/* Check if MRBLR is illegal */
- if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
+ if (MAX_RXBUF_LEN == 0 || MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT) {
printf("%s: max rx buffer length must be mutliple of 128.\n",
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
/* Init UCC fast */
if (ucc_fast_init(uf_info, &uccf)) {
- printf("%s: failed to init ucc fast\n", __FUNCTION__);
+ printf("%s: failed to init ucc fast\n", __func__);
return -ENOMEM;
}
/* Convert the Tx threads number */
if (uec_convert_threads_num(uec_info->num_threads_tx,
- &num_threads_tx)) {
+ &num_threads_tx)) {
return -EINVAL;
}
/* Convert the Rx threads number */
if (uec_convert_threads_num(uec_info->num_threads_rx,
- &num_threads_rx)) {
+ &num_threads_rx)) {
return -EINVAL;
}
out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);
/* Setup MAC interface mode */
- uec_set_mac_if_mode(uec, uec_info->enet_interface_type, uec_info->speed);
+ uec_set_mac_if_mode(uec, uec_info->enet_interface_type,
+ uec_info->speed);
/* Setup MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
- uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
+ uec->uec_mii_regs = (uec_mii_t *)CONFIG_MIIM_ADDRESS;
#endif
/* Setup MII master clock source */
out_be32(&uec_regs->utbipar, utbipar);
/* Configure the TBI for SGMII operation */
- if ((uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII) &&
- (uec->uec_info->speed == SPEED_1000)) {
+ if (uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII &&
+ uec->uec_info->speed == SPEED_1000) {
uec_write_phy_reg(uec->dev, uec_regs->utbipar,
- ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+ ENET_TBI_MII_ANA, TBIANA_SETTINGS);
uec_write_phy_reg(uec->dev, uec_regs->utbipar,
- ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+ ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
uec_write_phy_reg(uec->dev, uec_regs->utbipar,
- ENET_TBI_MII_CR, TBICR_SETTINGS);
+ ENET_TBI_MII_CR, TBICR_SETTINGS);
}
/* Allocate Tx BDs */
memset((void *)(uec->rx_buf_offset), 0, length + align);
/* Init TxBD ring */
- bd = (qe_bd_t *)uec->p_tx_bd_ring;
- uec->txBd = bd;
+ bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
+ uec->tx_bd = bd;
for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
BD_DATA_CLEAR(bd);
BD_STATUS_SET(bd, 0);
BD_LENGTH_SET(bd, 0);
- bd ++;
+ bd++;
}
- BD_STATUS_SET((--bd), TxBD_WRAP);
+ BD_STATUS_SET((--bd), TX_BD_WRAP);
/* Init RxBD ring */
- bd = (qe_bd_t *)uec->p_rx_bd_ring;
- uec->rxBd = bd;
+ bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
+ uec->rx_bd = bd;
buf = uec->p_rx_buf;
for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
BD_DATA_SET(bd, buf);
BD_LENGTH_SET(bd, 0);
- BD_STATUS_SET(bd, RxBD_EMPTY);
+ BD_STATUS_SET(bd, RX_BD_EMPTY);
buf += MAX_RXBUF_LEN;
- bd ++;
+ bd++;
}
- BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);
+ BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);
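+ /*
+ * The last descriptor of each ring carries the WRAP bit so the
+ * controller cycles back to the ring base; Rx descriptors also start
+ * out EMPTY, i.e. owned by the controller.
+ */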
/* Init global Tx parameter RAM */
uec_init_tx_parameter(uec, num_threads_tx);
/* Init ethernet Tx and Rx parameter command */
if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
num_threads_rx)) {
- printf("%s issue init enet cmd failed\n", __FUNCTION__);
+ printf("%s issue init enet cmd failed\n", __func__);
return -ENOMEM;
}
return 0;
}
-static int uec_init(struct eth_device* dev, struct bd_info *bd)
+static int uec_init(struct eth_device *dev, struct bd_info *bd)
{
- uec_private_t *uec;
+ struct uec_priv *uec;
int err, i;
struct phy_info *curphy;
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif
- uec = (uec_private_t *)dev->priv;
+ uec = (struct uec_priv *)dev->priv;
- if (uec->the_first_run == 0) {
+ if (!uec->the_first_run) {
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
- /* QE9 and QE12 need to be set for enabling QE MII managment signals */
- setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
- setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
+ /*
+ * QE9 and QE12 need to be set for enabling QE MII
+ * management signals
+ */
+ setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
+ setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif
err = init_phy(dev);
err = curphy->read_status(uec->mii_info);
if (!(((i-- > 0) && !uec->mii_info->link) || err))
break;
- udelay(100000);
+ mdelay(100);
} while (1);
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
/* Set up the MAC address */
if (dev->enetaddr[0] & 0x01) {
printf("%s: MacAddress is multcast address\n",
- __FUNCTION__);
+ __func__);
return -1;
}
uec_set_mac_address(uec, dev->enetaddr);
-
err = uec_open(uec, COMM_DIR_RX_AND_TX);
if (err) {
printf("%s: cannot enable UEC device\n", dev->name);
phy_change(dev);
- return (uec->mii_info->link ? 0 : -1);
+ return uec->mii_info->link ? 0 : -1;
}
-static void uec_halt(struct eth_device* dev)
+static void uec_halt(struct eth_device *dev)
{
- uec_private_t *uec = (uec_private_t *)dev->priv;
+ struct uec_priv *uec = (struct uec_priv *)dev->priv;
+
uec_stop(uec, COMM_DIR_RX_AND_TX);
}
static int uec_send(struct eth_device *dev, void *buf, int len)
{
- uec_private_t *uec;
- ucc_fast_private_t *uccf;
- volatile qe_bd_t *bd;
+ struct uec_priv *uec;
+ struct ucc_fast_priv *uccf;
+ struct buffer_descriptor *bd;
u16 status;
int i;
int result = 0;
- uec = (uec_private_t *)dev->priv;
+ uec = (struct uec_priv *)dev->priv;
uccf = uec->uccf;
- bd = uec->txBd;
+ bd = uec->tx_bd;
/* Find an empty TxBD */
- for (i = 0; bd->status & TxBD_READY; i++) {
+ for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
if (i > 0x100000) {
printf("%s: tx buffer not ready\n", dev->name);
return result;
/* Init TxBD */
BD_DATA_SET(bd, buf);
BD_LENGTH_SET(bd, len);
- status = bd->status;
+ status = BD_STATUS(bd);
status &= BD_WRAP;
- status |= (TxBD_READY | TxBD_LAST);
+ status |= (TX_BD_READY | TX_BD_LAST);
BD_STATUS_SET(bd, status);
/* Tell UCC to transmit the buffer */
ucc_fast_transmit_on_demand(uccf);
/* Wait for buffer to be transmitted */
- for (i = 0; bd->status & TxBD_READY; i++) {
+ for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
if (i > 0x100000) {
printf("%s: tx error\n", dev->name);
return result;
	/* OK, the buffer has been transmitted */
BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
- uec->txBd = bd;
+ uec->tx_bd = bd;
result = 1;
return result;
}
-static int uec_recv(struct eth_device* dev)
+static int uec_recv(struct eth_device *dev)
{
- uec_private_t *uec = dev->priv;
- volatile qe_bd_t *bd;
+ struct uec_priv *uec = dev->priv;
+ struct buffer_descriptor *bd;
u16 status;
u16 len;
u8 *data;
- bd = uec->rxBd;
- status = bd->status;
+ bd = uec->rx_bd;
+ status = BD_STATUS(bd);
- while (!(status & RxBD_EMPTY)) {
- if (!(status & RxBD_ERROR)) {
+ while (!(status & RX_BD_EMPTY)) {
+ if (!(status & RX_BD_ERROR)) {
data = BD_DATA(bd);
len = BD_LENGTH(bd);
net_process_received_packet(data, len);
}
status &= BD_CLEAN;
BD_LENGTH_SET(bd, 0);
- BD_STATUS_SET(bd, status | RxBD_EMPTY);
+ BD_STATUS_SET(bd, status | RX_BD_EMPTY);
BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
- status = bd->status;
+ status = BD_STATUS(bd);
}
- uec->rxBd = bd;
+ uec->rx_bd = bd;
return 1;
}
-int uec_initialize(struct bd_info *bis, uec_info_t *uec_info)
+int uec_initialize(struct bd_info *bis, struct uec_inf *uec_info)
{
struct eth_device *dev;
int i;
- uec_private_t *uec;
+ struct uec_priv *uec;
int err;
dev = (struct eth_device *)malloc(sizeof(struct eth_device));
memset(dev, 0, sizeof(struct eth_device));
/* Allocate the UEC private struct */
- uec = (uec_private_t *)malloc(sizeof(uec_private_t));
- if (!uec) {
+ uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
+ if (!uec)
return -ENOMEM;
- }
- memset(uec, 0, sizeof(uec_private_t));
+
+ memset(uec, 0, sizeof(struct uec_priv));
/* Adjust uec_info */
#if (MAX_QE_RISC == 4)
err = uec_startup(uec);
if (err) {
- printf("%s: Cannot configure net device, aborting.",dev->name);
+ printf("%s: Cannot configure net device, aborting.", dev->name);
return err;
}
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
int retval;
struct mii_dev *mdiodev = mdio_alloc();
+
if (!mdiodev)
return -ENOMEM;
strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
return 1;
}
-int uec_eth_init(struct bd_info *bis, uec_info_t *uecs, int num)
+int uec_eth_init(struct bd_info *bis, struct uec_inf *uecs, int num)
{
int i;
#define MACCFG2_INIT_VALUE (MACCFG2_PREL | MACCFG2_RESERVED_1 | \
MACCFG2_LC | MACCFG2_PAD_CRC | MACCFG2_FDX)
-/* UEC Event Register
-*/
+/* UEC Event Register */
#define UCCE_MPD 0x80000000
#define UCCE_SCAR 0x40000000
#define UCCE_GRA 0x20000000
#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY | \
UCCE_RXC | UCCE_TXC | UCCE_TXE)
-/* UEC TEMODR Register
-*/
+/* UEC TEMODR Register */
#define TEMODER_SCHEDULER_ENABLE 0x2000
#define TEMODER_IP_CHECKSUM_GENERATE 0x0400
#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200
#define TEMODER_RMON_STATISTICS 0x0100
-#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15)
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15 - 15)
#define TEMODER_INIT_VALUE 0xc000
-/* UEC REMODR Register
-*/
+/* UEC REMODR Register */
#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000
#define REMODER_RX_EXTENDED_FEATURES 0x80000000
-#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 )
-#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10)
-#define REMODER_RX_QOS_MODE_SHIFT (31-15)
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31 - 9)
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31 - 10)
+#define REMODER_RX_QOS_MODE_SHIFT (31 - 15)
#define REMODER_RMON_STATISTICS 0x00001000
#define REMODER_RX_EXTENDED_FILTERING 0x00000800
-#define REMODER_NUM_OF_QUEUES_SHIFT (31-23)
+#define REMODER_NUM_OF_QUEUES_SHIFT (31 - 23)
#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008
#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004
#define REMODER_IP_CHECKSUM_CHECK 0x00000002
#define UESCR_SCOV_SHIFT (15 - 15)
/****** Tx data struct collection ******/
-/* Tx thread data, each Tx thread has one this struct.
-*/
-typedef struct uec_thread_data_tx {
+/* Tx thread data, each Tx thread has one of these structs. */
+struct uec_thread_data_tx {
u8 res0[136];
-} __attribute__ ((packed)) uec_thread_data_tx_t;
+} __packed;
-/* Tx thread parameter, each Tx thread has one this struct.
-*/
-typedef struct uec_thread_tx_pram {
+/* Tx thread parameter, each Tx thread has one of these structs. */
+struct uec_thread_tx_pram {
u8 res0[64];
-} __attribute__ ((packed)) uec_thread_tx_pram_t;
+} __packed;
-/* Send queue queue-descriptor, each Tx queue has one this QD
-*/
-typedef struct uec_send_queue_qd {
+/* Send queue queue-descriptor, each Tx queue has one of these QDs */
+struct uec_send_queue_qd {
u32 bd_ring_base; /* pointer to BD ring base address */
u8 res0[0x8];
u32 last_bd_completed_address; /* last entry in BD ring */
u8 res1[0x30];
-} __attribute__ ((packed)) uec_send_queue_qd_t;
+} __packed;
/* Send queue memory region */
-typedef struct uec_send_queue_mem_region {
- uec_send_queue_qd_t sqqd[MAX_TX_QUEUES];
-} __attribute__ ((packed)) uec_send_queue_mem_region_t;
+struct uec_send_queue_mem_region {
+ struct uec_send_queue_qd sqqd[MAX_TX_QUEUES];
+} __packed;
-/* Scheduler struct
-*/
-typedef struct uec_scheduler {
+/* Scheduler struct */
+struct uec_scheduler {
u16 cpucount0; /* CPU packet counter */
u16 cpucount1; /* CPU packet counter */
u16 cecount0; /* QE packet counter */
u8 oldwfqmask; /* temporary variable handled by QE */
u8 weightfactor[MAX_TX_QUEUES]; /**< weight factor for queues */
u32 minw; /* temporary variable handled by QE */
- u8 res1[0x70-0x64];
-} __attribute__ ((packed)) uec_scheduler_t;
+ u8 res1[0x70 - 0x64];
+} __packed;
-/* Tx firmware counters
-*/
-typedef struct uec_tx_firmware_statistics_pram {
+/* Tx firmware counters */
+struct uec_tx_firmware_statistics_pram {
u32 sicoltx; /* single collision */
u32 mulcoltx; /* multiple collision */
u32 latecoltxfr; /* late collision */
u32 txpkts512; /* total packets(including bad) 512~1023B */
u32 txpkts1024; /* total packets(including bad) 1024~1518B */
u32 txpktsjumbo; /* total packets(including bad) >1024 */
-} __attribute__ ((packed)) uec_tx_firmware_statistics_pram_t;
+} __packed;
-/* Tx global parameter table
-*/
-typedef struct uec_tx_global_pram {
+/* Tx global parameter table */
+struct uec_tx_global_pram {
u16 temoder;
- u8 res0[0x38-0x02];
+ u8 res0[0x38 - 0x02];
u32 sqptr;
u32 schedulerbasepointer;
u32 txrmonbaseptr;
u8 iphoffset[MAX_IPH_OFFSET_ENTRY];
u32 vtagtable[0x8];
u32 tqptr;
- u8 res2[0x80-0x74];
-} __attribute__ ((packed)) uec_tx_global_pram_t;
-
+ u8 res2[0x80 - 0x74];
+} __packed;
/****** Rx data struct collection ******/
-/* Rx thread data, each Rx thread has one this struct.
-*/
-typedef struct uec_thread_data_rx {
+/* Rx thread data, each Rx thread has one of these structs. */
+struct uec_thread_data_rx {
u8 res0[40];
-} __attribute__ ((packed)) uec_thread_data_rx_t;
+} __packed;
-/* Rx thread parameter, each Rx thread has one this struct.
-*/
-typedef struct uec_thread_rx_pram {
+/* Rx thread parameter, each Rx thread has one of these structs. */
+struct uec_thread_rx_pram {
u8 res0[128];
-} __attribute__ ((packed)) uec_thread_rx_pram_t;
+} __packed;
-/* Rx firmware counters
-*/
-typedef struct uec_rx_firmware_statistics_pram {
+/* Rx firmware counters */
+struct uec_rx_firmware_statistics_pram {
u32 frrxfcser; /* frames with crc error */
u32 fraligner; /* frames with alignment error */
u32 inrangelenrxer; /* in range length error */
u32 removevlan;
u32 replacevlan;
u32 insertvlan;
-} __attribute__ ((packed)) uec_rx_firmware_statistics_pram_t;
+} __packed;
-/* Rx interrupt coalescing entry, each Rx queue has one this entry.
-*/
-typedef struct uec_rx_interrupt_coalescing_entry {
+/* Rx interrupt coalescing entry, each Rx queue has one of these entries. */
+struct uec_rx_interrupt_coalescing_entry {
u32 maxvalue;
u32 counter;
-} __attribute__ ((packed)) uec_rx_interrupt_coalescing_entry_t;
+} __packed;
-typedef struct uec_rx_interrupt_coalescing_table {
- uec_rx_interrupt_coalescing_entry_t entry[MAX_RX_QUEUES];
-} __attribute__ ((packed)) uec_rx_interrupt_coalescing_table_t;
+struct uec_rx_interrupt_coalescing_table {
+ struct uec_rx_interrupt_coalescing_entry entry[MAX_RX_QUEUES];
+} __packed;
-/* RxBD queue entry, each Rx queue has one this entry.
-*/
-typedef struct uec_rx_bd_queues_entry {
+/* RxBD queue entry, each Rx queue has one of these entries. */
+struct uec_rx_bd_queues_entry {
u32 bdbaseptr; /* BD base pointer */
u32 bdptr; /* BD pointer */
u32 externalbdbaseptr; /* external BD base pointer */
u32 externalbdptr; /* external BD pointer */
-} __attribute__ ((packed)) uec_rx_bd_queues_entry_t;
+} __packed;
-/* Rx global paramter table
-*/
-typedef struct uec_rx_global_pram {
+/* Rx global parameter table */
+struct uec_rx_global_pram {
u32 remoder; /* ethernet mode reg. */
u32 rqptr; /* base pointer to the Rx Queues */
u32 res0[0x1];
- u8 res1[0x20-0xC];
+ u8 res1[0x20 - 0xc];
u16 typeorlen;
u8 res2[0x1];
u8 rxgstpack; /* ack on GRACEFUL STOP RX command */
u32 rxrmonbaseptr; /* Rx RMON statistics base */
- u8 res3[0x30-0x28];
+ u8 res3[0x30 - 0x28];
u32 intcoalescingptr; /* Interrupt coalescing table pointer */
- u8 res4[0x36-0x34];
+ u8 res4[0x36 - 0x34];
u8 rstate;
- u8 res5[0x46-0x37];
+ u8 res5[0x46 - 0x37];
u16 mrblr; /* max receive buffer length reg. */
u32 rbdqptr; /* RxBD parameter table description */
u16 mflr; /* max frame length reg. */
u16 vlantype; /* vlan type */
u16 vlantci; /* default vlan tci */
u8 addressfiltering[64];/* address filtering data structure */
- u32 exfGlobalParam; /* extended filtering global parameters */
- u8 res6[0x100-0xC4]; /* Initialize to zero */
-} __attribute__ ((packed)) uec_rx_global_pram_t;
+ u32 exf_global_param; /* extended filtering global parameters */
+ u8 res6[0x100 - 0xc4]; /* Initialize to zero */
+} __packed;
#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
-
/****** UEC common ******/
-/* UCC statistics - hardware counters
-*/
-typedef struct uec_hardware_statistics {
+/* UCC statistics - hardware counters */
+struct uec_hardware_statistics {
u32 tx64;
u32 tx127;
u32 tx255;
u32 rbyt;
u32 rmca;
u32 rbca;
-} __attribute__ ((packed)) uec_hardware_statistics_t;
+} __packed;
-/* InitEnet command parameter
-*/
-typedef struct uec_init_cmd_pram {
+/* InitEnet command parameter */
+struct uec_init_cmd_pram {
u8 resinit0;
u8 resinit1;
u8 resinit2;
u32 txglobal; /* tx global */
u32 txthread[MAX_ENET_INIT_PARAM_ENTRIES_TX]; /* tx threads */
u8 res3[0x1];
-} __attribute__ ((packed)) uec_init_cmd_pram_t;
+} __packed;
#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0x00
#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x0400
-/* structure representing 82xx Address Filtering Enet Address in PRAM
-*/
-typedef struct uec_82xx_enet_address {
+/* structure representing 82xx Address Filtering Enet Address in PRAM */
+struct uec_82xx_enet_addr {
u8 res1[0x2];
u16 h; /* address (MSB) */
u16 m; /* address */
u16 l; /* address (LSB) */
-} __attribute__ ((packed)) uec_82xx_enet_address_t;
+} __packed;
-/* structure representing 82xx Address Filtering PRAM
-*/
-typedef struct uec_82xx_address_filtering_pram {
+/* structure representing 82xx Address Filtering PRAM */
+struct uec_82xx_add_filtering_pram {
u32 iaddr_h; /* individual address filter, high */
u32 iaddr_l; /* individual address filter, low */
u32 gaddr_h; /* group address filter, high */
u32 gaddr_l; /* group address filter, low */
- uec_82xx_enet_address_t taddr;
- uec_82xx_enet_address_t paddr[4];
- u8 res0[0x40-0x38];
-} __attribute__ ((packed)) uec_82xx_address_filtering_pram_t;
-
-/* Buffer Descriptor
-*/
-typedef struct buffer_descriptor {
+ struct uec_82xx_enet_addr taddr;
+ struct uec_82xx_enet_addr paddr[4];
+ u8 res0[0x40 - 0x38];
+} __packed;
+
+/* Buffer Descriptor */
+struct buffer_descriptor {
u16 status;
u16 len;
u32 data;
-} __attribute__ ((packed)) qe_bd_t, *p_bd_t;
+} __packed;
-#define SIZEOFBD sizeof(qe_bd_t)
+#define SIZEOFBD sizeof(struct buffer_descriptor)
-/* Common BD flags
-*/
+/* Common BD flags */
#define BD_WRAP 0x2000
#define BD_INT 0x1000
#define BD_LAST 0x0800
#define BD_CLEAN 0x3000
-/* TxBD status flags
-*/
-#define TxBD_READY 0x8000
-#define TxBD_PADCRC 0x4000
-#define TxBD_WRAP BD_WRAP
-#define TxBD_INT BD_INT
-#define TxBD_LAST BD_LAST
-#define TxBD_TXCRC 0x0400
-#define TxBD_DEF 0x0200
-#define TxBD_PP 0x0100
-#define TxBD_LC 0x0080
-#define TxBD_RL 0x0040
-#define TxBD_RC 0x003C
-#define TxBD_UNDERRUN 0x0002
-#define TxBD_TRUNC 0x0001
-
-#define TxBD_ERROR (TxBD_UNDERRUN | TxBD_TRUNC)
-
-/* RxBD status flags
-*/
-#define RxBD_EMPTY 0x8000
-#define RxBD_OWNER 0x4000
-#define RxBD_WRAP BD_WRAP
-#define RxBD_INT BD_INT
-#define RxBD_LAST BD_LAST
-#define RxBD_FIRST 0x0400
-#define RxBD_CMR 0x0200
-#define RxBD_MISS 0x0100
-#define RxBD_BCAST 0x0080
-#define RxBD_MCAST 0x0040
-#define RxBD_LG 0x0020
-#define RxBD_NO 0x0010
-#define RxBD_SHORT 0x0008
-#define RxBD_CRCERR 0x0004
-#define RxBD_OVERRUN 0x0002
-#define RxBD_IPCH 0x0001
-
-#define RxBD_ERROR (RxBD_LG | RxBD_NO | RxBD_SHORT | \
- RxBD_CRCERR | RxBD_OVERRUN)
-
-/* BD access macros
-*/
-#define BD_STATUS(_bd) (((p_bd_t)(_bd))->status)
-#define BD_STATUS_SET(_bd, _val) (((p_bd_t)(_bd))->status = _val)
-#define BD_LENGTH(_bd) (((p_bd_t)(_bd))->len)
-#define BD_LENGTH_SET(_bd, _val) (((p_bd_t)(_bd))->len = _val)
-#define BD_DATA_CLEAR(_bd) (((p_bd_t)(_bd))->data = 0)
-#define BD_IS_DATA(_bd) (((p_bd_t)(_bd))->data)
-#define BD_DATA(_bd) ((u8 *)(((p_bd_t)(_bd))->data))
-#define BD_DATA_SET(_bd, _data) (((p_bd_t)(_bd))->data = (u32)(_data))
-#define BD_ADVANCE(_bd,_status,_base) \
- (((_status) & BD_WRAP) ? (_bd) = ((p_bd_t)(_base)) : ++(_bd))
-
-/* Rx Prefetched BDs
-*/
-typedef struct uec_rx_prefetched_bds {
- qe_bd_t bd[MAX_PREFETCHED_BDS]; /* prefetched bd */
-} __attribute__ ((packed)) uec_rx_prefetched_bds_t;
-
-/* Alignments
- */
+/* TxBD status flags */
+#define TX_BD_READY 0x8000
+#define TX_BD_PADCRC 0x4000
+#define TX_BD_WRAP BD_WRAP
+#define TX_BD_INT BD_INT
+#define TX_BD_LAST BD_LAST
+#define TX_BD_TXCRC 0x0400
+#define TX_BD_DEF 0x0200
+#define TX_BD_PP 0x0100
+#define TX_BD_LC 0x0080
+#define TX_BD_RL 0x0040
+#define TX_BD_RC 0x003C
+#define TX_BD_UNDERRUN 0x0002
+#define TX_BD_TRUNC 0x0001
+
+#define TX_BD_ERROR (TX_BD_UNDERRUN | TX_BD_TRUNC)
+
+/* RxBD status flags */
+#define RX_BD_EMPTY 0x8000
+#define RX_BD_OWNER 0x4000
+#define RX_BD_WRAP BD_WRAP
+#define RX_BD_INT BD_INT
+#define RX_BD_LAST BD_LAST
+#define RX_BD_FIRST 0x0400
+#define RX_BD_CMR 0x0200
+#define RX_BD_MISS 0x0100
+#define RX_BD_BCAST 0x0080
+#define RX_BD_MCAST 0x0040
+#define RX_BD_LG 0x0020
+#define RX_BD_NO 0x0010
+#define RX_BD_SHORT 0x0008
+#define RX_BD_CRCERR 0x0004
+#define RX_BD_OVERRUN 0x0002
+#define RX_BD_IPCH 0x0001
+
+#define RX_BD_ERROR (RX_BD_LG | RX_BD_NO | RX_BD_SHORT | \
+ RX_BD_CRCERR | RX_BD_OVERRUN)
+
+/* BD access macros */
+#define BD_STATUS(_bd) (in_be16(&((_bd)->status)))
+#define BD_STATUS_SET(_bd, _v) (out_be16(&((_bd)->status), _v))
+#define BD_LENGTH(_bd) (in_be16(&((_bd)->len)))
+#define BD_LENGTH_SET(_bd, _v) (out_be16(&((_bd)->len), _v))
+#define BD_DATA_CLEAR(_bd) (out_be32(&((_bd)->data), 0))
+#define BD_DATA(_bd) ((u8 *)(((_bd)->data)))
+#define BD_DATA_SET(_bd, _data)	(out_be32(&((_bd)->data), (u32)(_data)))
+#define BD_ADVANCE(_bd, _status, _base) \
+ (((_status) & BD_WRAP) ? (_bd) = \
+ ((struct buffer_descriptor *)(_base)) : ++(_bd))
+
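/*
 * Usage sketch; uec_next_bd() is an illustrative helper, not taken from
 * the driver: the accessors above hide the big-endian 16/32-bit BD
 * fields, and BD_ADVANCE() steps through the ring, falling back to the
 * ring base once the descriptor whose status carries BD_WRAP is passed.
 */
static inline struct buffer_descriptor *
uec_next_bd(struct buffer_descriptor *bd, void *ring_base)
{
	u16 status = BD_STATUS(bd);

	BD_ADVANCE(bd, status, ring_base);	/* wrap or advance */
	return bd;
}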
+/* Rx Prefetched BDs */
+struct uec_rx_pref_bds {
+ struct buffer_descriptor bd[MAX_PREFETCHED_BDS]; /* prefetched bd */
+} __packed;
+
+/* Alignments */
#define UEC_RX_GLOBAL_PRAM_ALIGNMENT 64
#define UEC_TX_GLOBAL_PRAM_ALIGNMENT 64
#define UEC_THREAD_RX_PRAM_ALIGNMENT 128
#define UEC_RX_BD_RING_SIZE_MIN 8
#define UEC_TX_BD_RING_SIZE_MIN 2
-/* Ethernet speed
-*/
-typedef enum enet_speed {
- ENET_SPEED_10BT, /* 10 Base T */
- ENET_SPEED_100BT, /* 100 Base T */
- ENET_SPEED_1000BT /* 1000 Base T */
-} enet_speed_e;
-
-/* Ethernet Address Type.
-*/
-typedef enum enet_addr_type {
- ENET_ADDR_TYPE_INDIVIDUAL,
- ENET_ADDR_TYPE_GROUP,
- ENET_ADDR_TYPE_BROADCAST
-} enet_addr_type_e;
-
-/* TBI / MII Set Register
-*/
-typedef enum enet_tbi_mii_reg {
+/* TBI / MII Set Register */
+enum enet_tbi_mii_reg {
ENET_TBI_MII_CR = 0x00,
ENET_TBI_MII_SR = 0x01,
ENET_TBI_MII_ANA = 0x04,
ENET_TBI_MII_EXST = 0x0F,
ENET_TBI_MII_JD = 0x10,
ENET_TBI_MII_TBICON = 0x11
-} enet_tbi_mii_reg_e;
+};
/* TBI MDIO register bit fields*/
#define TBICON_CLK_SELECT 0x0020
| TBICR_SPEED1_SET \
)
-/* UEC number of threads
-*/
-typedef enum uec_num_of_threads {
+/* UEC number of threads */
+enum uec_num_of_threads {
UEC_NUM_OF_THREADS_1 = 0x1, /* 1 */
UEC_NUM_OF_THREADS_2 = 0x2, /* 2 */
UEC_NUM_OF_THREADS_4 = 0x0, /* 4 */
UEC_NUM_OF_THREADS_6 = 0x3, /* 6 */
UEC_NUM_OF_THREADS_8 = 0x4 /* 8 */
-} uec_num_of_threads_e;
+};
-/* UEC initialization info struct
-*/
+/* UEC initialization info struct */
#define STD_UEC_INFO(num) \
{ \
.uf_info = { \
.speed = CONFIG_SYS_UEC##num##_INTERFACE_SPEED, \
}
-typedef struct uec_info {
- ucc_fast_info_t uf_info;
- uec_num_of_threads_e num_threads_tx;
- uec_num_of_threads_e num_threads_rx;
+struct uec_inf {
+ struct ucc_fast_inf uf_info;
+ enum uec_num_of_threads num_threads_tx;
+ enum uec_num_of_threads num_threads_rx;
unsigned int risc_tx;
unsigned int risc_rx;
u16 rx_bd_ring_len;
u8 phy_address;
phy_interface_t enet_interface_type;
int speed;
-} uec_info_t;
+};
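/*
 * Usage sketch, with hypothetical CONFIG_UEC_ETH1/CONFIG_UEC_ETH2 board
 * options and a board_eth_init() hook: each enabled port gets one
 * struct uec_inf entry built from its CONFIG_SYS_UECn_* settings, and
 * the array is handed to uec_eth_init():
 *
 *	static struct uec_inf uec_info[] = {
 *	#ifdef CONFIG_UEC_ETH1
 *		STD_UEC_INFO(1),
 *	#endif
 *	#ifdef CONFIG_UEC_ETH2
 *		STD_UEC_INFO(2),
 *	#endif
 *	};
 *
 *	int board_eth_init(struct bd_info *bis)
 *	{
 *		return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
 *	}
 */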
-/* UEC driver initialized info
-*/
+/* UEC driver initialization info */
#define MAX_RXBUF_LEN 1536
#define MAX_FRAME_LEN 1518
#define MIN_FRAME_LEN 64
#define MAX_DMA1_LEN 1520
#define MAX_DMA2_LEN 1520
-/* UEC driver private struct
-*/
-typedef struct uec_private {
- uec_info_t *uec_info;
- ucc_fast_private_t *uccf;
+/* UEC driver private struct */
+struct uec_priv {
+ struct uec_inf *uec_info;
+ struct ucc_fast_priv *uccf;
struct eth_device *dev;
uec_t *uec_regs;
uec_mii_t *uec_mii_regs;
/* enet init command parameter */
- uec_init_cmd_pram_t *p_init_enet_param;
+ struct uec_init_cmd_pram *p_init_enet_param;
u32 init_enet_param_offset;
- /* Rx and Tx paramter */
- uec_rx_global_pram_t *p_rx_glbl_pram;
+ /* Rx and Tx parameter */
+ struct uec_rx_global_pram *p_rx_glbl_pram;
u32 rx_glbl_pram_offset;
- uec_tx_global_pram_t *p_tx_glbl_pram;
+ struct uec_tx_global_pram *p_tx_glbl_pram;
u32 tx_glbl_pram_offset;
- uec_send_queue_mem_region_t *p_send_q_mem_reg;
+ struct uec_send_queue_mem_region *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
- uec_thread_data_tx_t *p_thread_data_tx;
+ struct uec_thread_data_tx *p_thread_data_tx;
u32 thread_dat_tx_offset;
- uec_thread_data_rx_t *p_thread_data_rx;
+ struct uec_thread_data_rx *p_thread_data_rx;
u32 thread_dat_rx_offset;
- uec_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
+ struct uec_rx_bd_queues_entry *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
/* BDs specific */
u8 *p_tx_bd_ring;
u32 rx_bd_ring_offset;
u8 *p_rx_buf;
u32 rx_buf_offset;
- volatile qe_bd_t *txBd;
- volatile qe_bd_t *rxBd;
+ struct buffer_descriptor *tx_bd;
+ struct buffer_descriptor *rx_bd;
/* Status */
int mac_tx_enabled;
int mac_rx_enabled;
int oldspeed;
int oldduplex;
int oldlink;
-} uec_private_t;
+};
-int uec_initialize(struct bd_info *bis, uec_info_t *uec_info);
-int uec_eth_init(struct bd_info *bis, uec_info_t *uecs, int num);
+int uec_initialize(struct bd_info *bis, struct uec_inf *uec_info);
+int uec_eth_init(struct bd_info *bis, struct uec_inf *uecs, int num);
int uec_standard_init(struct bd_info *bis);
#endif /* __UEC_H__ */
printf(format "\n", ## arg)
#define ugphy_dbg(format, arg...) \
- ugphy_printk(format , ## arg)
+ ugphy_printk(format, ## arg)
#define ugphy_err(format, arg...) \
- ugphy_printk(format , ## arg)
+ ugphy_printk(format, ## arg)
#define ugphy_info(format, arg...) \
- ugphy_printk(format , ## arg)
+ ugphy_printk(format, ## arg)
#define ugphy_warn(format, arg...) \
- ugphy_printk(format , ## arg)
+ ugphy_printk(format, ## arg)
#ifdef UEC_VERBOSE_DEBUG
#define ugphy_vdbg ugphy_dbg
#define ugphy_vdbg(ugeth, fmt, args...) do { } while (0)
#endif /* UEC_VERBOSE_DEBUG */
-/*--------------------------------------------------------------------+
+/*
+ * --------------------------------------------------------------------
* Fixed PHY (PHY-less) support for Ethernet Ports.
*
* Copied from arch/powerpc/cpu/ppc4xx/4xx_enet.c
- *--------------------------------------------------------------------*/
-
-/*
+ *--------------------------------------------------------------------
+ *
* Some boards do not have a PHY for each ethernet port. These ports are known
* as Fixed PHY (or PHY-less) ports. For such ports, set the appropriate
* CONFIG_SYS_UECx_PHY_ADDR equal to CONFIG_FIXED_PHY_ADDR (an unused address)
CONFIG_SYS_FIXED_PHY_PORTS /* defined in board configuration file */
};
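/*
 * Board header sketch; the exact names below (UEC2, the address value,
 * the CONFIG_SYS_FIXED_PHY_PORT helper macro) are illustrative: a
 * PHY-less port points its PHY address at the unused
 * CONFIG_FIXED_PHY_ADDR and lists its name, speed and duplex in
 * CONFIG_SYS_FIXED_PHY_PORTS, which populates the fixed_phy_port[]
 * table above:
 *
 *	#define CONFIG_FIXED_PHY_ADDR		0x1E
 *	#define CONFIG_SYS_UEC2_PHY_ADDR	CONFIG_FIXED_PHY_ADDR
 *	#define CONFIG_SYS_FIXED_PHY_PORT(name, spd, dpx)	{name, spd, dpx},
 *	#define CONFIG_SYS_FIXED_PHY_PORTS \
 *		CONFIG_SYS_FIXED_PHY_PORT("UEC2", SPEED_100, DUPLEX_FULL)
 */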
-/*--------------------------------------------------------------------+
+/*
+ * -------------------------------------------------------------------
* BitBang MII support for ethernet ports
*
* Based from MPC8560ADS implementation
- *--------------------------------------------------------------------*/
-/*
+ *--------------------------------------------------------------------
+ *
* Example board header file to define bitbang ethernet ports:
*
* #define CONFIG_SYS_BITBANG_PHY_PORT(name) name,
* #define CONFIG_SYS_BITBANG_PHY_PORTS CONFIG_SYS_BITBANG_PHY_PORT("UEC0")
-*/
+ */
#ifndef CONFIG_SYS_BITBANG_PHY_PORTS
#define CONFIG_SYS_BITBANG_PHY_PORTS /* default is an empty array */
#endif
#if defined(CONFIG_BITBANGMII)
-static const char *bitbang_phy_port[] = {
+static const char * const bitbang_phy_port[] = {
CONFIG_SYS_BITBANG_PHY_PORTS /* defined in board configuration file */
};
#endif /* CONFIG_BITBANGMII */
-static void config_genmii_advert (struct uec_mii_info *mii_info);
-static void genmii_setup_forced (struct uec_mii_info *mii_info);
-static void genmii_restart_aneg (struct uec_mii_info *mii_info);
-static int gbit_config_aneg (struct uec_mii_info *mii_info);
-static int genmii_config_aneg (struct uec_mii_info *mii_info);
-static int genmii_update_link (struct uec_mii_info *mii_info);
-static int genmii_read_status (struct uec_mii_info *mii_info);
-u16 uec_phy_read(struct uec_mii_info *mii_info, u16 regnum);
-void uec_phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val);
-
-/* Write value to the PHY for this device to the register at regnum, */
-/* waiting until the write is done before it returns. All PHY */
-/* configuration has to be done through the TSEC1 MIIM regs */
-void uec_write_phy_reg (struct eth_device *dev, int mii_id, int regnum, int value)
-{
- uec_private_t *ugeth = (uec_private_t *) dev->priv;
+static void config_genmii_advert(struct uec_mii_info *mii_info);
+static void genmii_setup_forced(struct uec_mii_info *mii_info);
+static void genmii_restart_aneg(struct uec_mii_info *mii_info);
+static int gbit_config_aneg(struct uec_mii_info *mii_info);
+static int genmii_config_aneg(struct uec_mii_info *mii_info);
+static int genmii_update_link(struct uec_mii_info *mii_info);
+static int genmii_read_status(struct uec_mii_info *mii_info);
+static u16 uec_phy_read(struct uec_mii_info *mii_info, u16 regnum);
+static void uec_phy_write(struct uec_mii_info *mii_info, u16 regnum,
+ u16 val);
+
+/*
+ * Write value to the PHY for this device to the register at regnum,
+ * waiting until the write is done before it returns. All PHY
+ * configuration has to be done through the TSEC1 MIIM regs
+ */
+void uec_write_phy_reg(struct eth_device *dev, int mii_id, int regnum,
+ int value)
+{
+ struct uec_priv *ugeth = (struct uec_priv *)dev->priv;
uec_mii_t *ug_regs;
- enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ enum enet_tbi_mii_reg mii_reg = (enum enet_tbi_mii_reg)regnum;
u32 tmp_reg;
-
#if defined(CONFIG_BITBANGMII)
u32 i = 0;
for (i = 0; i < ARRAY_SIZE(bitbang_phy_port); i++) {
if (strncmp(dev->name, bitbang_phy_port[i],
- sizeof(dev->name)) == 0) {
+ sizeof(dev->name)) == 0) {
(void)bb_miiphy_write(NULL, mii_id, regnum, value);
return;
}
/* Stop the MII management read cycle */
out_be32 (&ug_regs->miimcom, 0);
- /* Setting up the MII Mangement Address Register */
- tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32)mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
out_be32 (&ug_regs->miimadd, tmp_reg);
- /* Setting up the MII Mangement Control Register with the value */
- out_be32 (&ug_regs->miimcon, (u32) value);
+ /* Setting up the MII Management Control Register with the value */
+	out_be32(&ug_regs->miimcon, (u32)value);
sync();
/* Wait till MII management write is complete */
- while ((in_be32 (&ug_regs->miimind)) & MIIMIND_BUSY);
+	while ((in_be32(&ug_regs->miimind)) & MIIMIND_BUSY)
+ ;
}
-/* Reads from register regnum in the PHY for device dev, */
-/* returning the value. Clears miimcom first. All PHY */
-/* configuration has to be done through the TSEC1 MIIM regs */
-int uec_read_phy_reg (struct eth_device *dev, int mii_id, int regnum)
+/*
+ * Reads from register regnum in the PHY for device dev,
+ * returning the value. Clears miimcom first. All PHY
+ * configuration has to be done through the TSEC1 MIIM regs
+ */
+int uec_read_phy_reg(struct eth_device *dev, int mii_id, int regnum)
{
- uec_private_t *ugeth = (uec_private_t *) dev->priv;
+ struct uec_priv *ugeth = (struct uec_priv *)dev->priv;
uec_mii_t *ug_regs;
- enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ enum enet_tbi_mii_reg mii_reg = (enum enet_tbi_mii_reg)regnum;
u32 tmp_reg;
u16 value;
-
#if defined(CONFIG_BITBANGMII)
u32 i = 0;
for (i = 0; i < ARRAY_SIZE(bitbang_phy_port); i++) {
if (strncmp(dev->name, bitbang_phy_port[i],
- sizeof(dev->name)) == 0) {
+ sizeof(dev->name)) == 0) {
(void)bb_miiphy_read(NULL, mii_id, regnum, &value);
- return (value);
+ return value;
}
}
#endif /* CONFIG_BITBANGMII */
ug_regs = ugeth->uec_mii_regs;
- /* Setting up the MII Mangement Address Register */
- tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32)mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
out_be32 (&ug_regs->miimadd, tmp_reg);
/* clear MII management command cycle */
/* Wait till MII management write is complete */
while ((in_be32 (&ug_regs->miimind)) &
- (MIIMIND_NOT_VALID | MIIMIND_BUSY));
+ (MIIMIND_NOT_VALID | MIIMIND_BUSY))
+ ;
/* Read MII management status */
- value = (u16) in_be32 (&ug_regs->miimstat);
+	value = (u16)in_be32(&ug_regs->miimstat);
if (value == 0xffff)
ugphy_vdbg
("read wrong value : mii_id %d,mii_reg %d, base %08x",
- mii_id, mii_reg, (u32) & (ug_regs->miimcfg));
+ mii_id, mii_reg, (u32)&ug_regs->miimcfg);
- return (value);
+ return value;
}
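/*
 * Hook-up sketch; the assignments below are illustrative of how the
 * driver's PHY setup code is expected to wire these accessors in (the
 * actual init path lives elsewhere in the driver): the generic helpers
 * that follow reach the MIIM registers only through the mdio_read and
 * mdio_write members of struct uec_mii_info.
 *
 *	mii_info->dev = dev;
 *	mii_info->mdio_read = uec_read_phy_reg;
 *	mii_info->mdio_write = uec_write_phy_reg;
 */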
-void mii_clear_phy_interrupt (struct uec_mii_info *mii_info)
+void mii_clear_phy_interrupt(struct uec_mii_info *mii_info)
{
if (mii_info->phyinfo->ack_interrupt)
- mii_info->phyinfo->ack_interrupt (mii_info);
+ mii_info->phyinfo->ack_interrupt(mii_info);
}
-void mii_configure_phy_interrupt (struct uec_mii_info *mii_info,
- u32 interrupts)
+void mii_configure_phy_interrupt(struct uec_mii_info *mii_info,
+ u32 interrupts)
{
mii_info->interrupts = interrupts;
if (mii_info->phyinfo->config_intr)
- mii_info->phyinfo->config_intr (mii_info);
+ mii_info->phyinfo->config_intr(mii_info);
}
/* Writes MII_ADVERTISE with the appropriate values, after
* sanitizing advertise to make sure only supported features
* are advertised
*/
-static void config_genmii_advert (struct uec_mii_info *mii_info)
+static void config_genmii_advert(struct uec_mii_info *mii_info)
{
u32 advertise;
u16 adv;
uec_phy_write(mii_info, MII_ADVERTISE, adv);
}
-static void genmii_setup_forced (struct uec_mii_info *mii_info)
+static void genmii_setup_forced(struct uec_mii_info *mii_info)
{
u16 ctrl;
u32 features = mii_info->phyinfo->features;
| SUPPORTED_10baseT_Full))
break;
default: /* Unsupported speed! */
- ugphy_err ("%s: Bad speed!", mii_info->dev->name);
+ ugphy_err("%s: Bad speed!", mii_info->dev->name);
break;
}
}
/* Enable and Restart Autonegotiation */
-static void genmii_restart_aneg (struct uec_mii_info *mii_info)
+static void genmii_restart_aneg(struct uec_mii_info *mii_info)
{
u16 ctl;
uec_phy_write(mii_info, MII_BMCR, ctl);
}
-static int gbit_config_aneg (struct uec_mii_info *mii_info)
+static int gbit_config_aneg(struct uec_mii_info *mii_info)
{
u16 adv;
u32 advertise;
if (mii_info->autoneg) {
/* Configure the ADVERTISE register */
- config_genmii_advert (mii_info);
+ config_genmii_advert(mii_info);
advertise = mii_info->advertising;
adv = uec_phy_read(mii_info, MII_CTRL1000);
uec_phy_write(mii_info, MII_CTRL1000, adv);
/* Start/Restart aneg */
- genmii_restart_aneg (mii_info);
- } else
- genmii_setup_forced (mii_info);
+ genmii_restart_aneg(mii_info);
+ } else {
+ genmii_setup_forced(mii_info);
+ }
return 0;
}
-static int marvell_config_aneg (struct uec_mii_info *mii_info)
+static int marvell_config_aneg(struct uec_mii_info *mii_info)
{
- /* The Marvell PHY has an errata which requires
+ /*
+ * The Marvell PHY has an errata which requires
* that certain registers get written in order
- * to restart autonegotiation */
+ * to restart autonegotiation
+ */
uec_phy_write(mii_info, MII_BMCR, BMCR_RESET);
uec_phy_write(mii_info, 0x1d, 0x1f);
uec_phy_write(mii_info, 0x1e, 0);
uec_phy_write(mii_info, 0x1e, 0x100);
- gbit_config_aneg (mii_info);
+ gbit_config_aneg(mii_info);
return 0;
}
-static int genmii_config_aneg (struct uec_mii_info *mii_info)
+static int genmii_config_aneg(struct uec_mii_info *mii_info)
{
if (mii_info->autoneg) {
- /* Speed up the common case, if link is already up, speed and
- duplex match, skip auto neg as it already matches */
+ /*
+ * Speed up the common case, if link is already up, speed and
+ * duplex match, skip auto neg as it already matches
+ */
if (!genmii_read_status(mii_info) && mii_info->link)
if (mii_info->duplex == DUPLEX_FULL &&
mii_info->speed == SPEED_100)
ADVERTISED_100baseT_Full)
return 0;
- config_genmii_advert (mii_info);
- genmii_restart_aneg (mii_info);
- } else
- genmii_setup_forced (mii_info);
+ config_genmii_advert(mii_info);
+ genmii_restart_aneg(mii_info);
+ } else {
+ genmii_setup_forced(mii_info);
+ }
return 0;
}
-static int genmii_update_link (struct uec_mii_info *mii_info)
+static int genmii_update_link(struct uec_mii_info *mii_info)
{
u16 status;
* (ie - we're capable and it's not done)
*/
status = uec_phy_read(mii_info, MII_BMSR);
- if ((status & BMSR_LSTATUS) && (status & BMSR_ANEGCAPABLE)
- && !(status & BMSR_ANEGCOMPLETE)) {
+ if ((status & BMSR_LSTATUS) && (status & BMSR_ANEGCAPABLE) &&
+ !(status & BMSR_ANEGCOMPLETE)) {
int i = 0;
while (!(status & BMSR_ANEGCOMPLETE)) {
return 0;
}
-static int genmii_read_status (struct uec_mii_info *mii_info)
+static int genmii_read_status(struct uec_mii_info *mii_info)
{
u16 status;
int err;
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link (mii_info);
+ /* Update the link, but return if there was an error */
+ err = genmii_update_link(mii_info);
if (err)
return err;
static int bcm_init(struct uec_mii_info *mii_info)
{
struct eth_device *edev = mii_info->dev;
- uec_private_t *uec = edev->priv;
+ struct uec_priv *uec = edev->priv;
gbit_config_aneg(mii_info);
- if ((uec->uec_info->enet_interface_type ==
- PHY_INTERFACE_MODE_RGMII_RXID) &&
- (uec->uec_info->speed == SPEED_1000)) {
+ if (uec->uec_info->enet_interface_type ==
+ PHY_INTERFACE_MODE_RGMII_RXID &&
+ uec->uec_info->speed == SPEED_1000) {
u16 val;
int cnt = 50;
uec_phy_write(mii_info, 0x18, val);
}
- return 0;
+ return 0;
}
static int uec_marvell_init(struct uec_mii_info *mii_info)
{
struct eth_device *edev = mii_info->dev;
- uec_private_t *uec = edev->priv;
+ struct uec_priv *uec = edev->priv;
phy_interface_t iface = uec->uec_info->enet_interface_type;
int speed = uec->uec_info->speed;
- if ((speed == SPEED_1000) &&
- (iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ if (speed == SPEED_1000 &&
+ (iface == PHY_INTERFACE_MODE_RGMII_ID ||
iface == PHY_INTERFACE_MODE_RGMII_RXID ||
iface == PHY_INTERFACE_MODE_RGMII_TXID)) {
int temp;
return 0;
}
-static int marvell_read_status (struct uec_mii_info *mii_info)
+static int marvell_read_status(struct uec_mii_info *mii_info)
{
u16 status;
int err;
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link (mii_info);
+ /* Update the link, but return if there was an error */
+ err = genmii_update_link(mii_info);
if (err)
return err;
- /* If the link is up, read the speed and duplex */
- /* If we aren't autonegotiating, assume speeds
- * are as set */
+ /*
+	 * If the link is up, read the speed and duplex.
+ * If we aren't autonegotiating, assume speeds
+ * are as set
+ */
if (mii_info->autoneg && mii_info->link) {
int speed;
return 0;
}
-static int marvell_ack_interrupt (struct uec_mii_info *mii_info)
+static int marvell_ack_interrupt(struct uec_mii_info *mii_info)
{
/* Clear the interrupts by reading the reg */
uec_phy_read(mii_info, MII_M1011_IEVENT);
return 0;
}
-static int marvell_config_intr (struct uec_mii_info *mii_info)
+static int marvell_config_intr(struct uec_mii_info *mii_info)
{
if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
uec_phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
else
uec_phy_write(mii_info, MII_M1011_IMASK,
- MII_M1011_IMASK_CLEAR);
+ MII_M1011_IMASK_CLEAR);
return 0;
}
-static int dm9161_init (struct uec_mii_info *mii_info)
+static int dm9161_init(struct uec_mii_info *mii_info)
{
/* Reset the PHY */
uec_phy_write(mii_info, MII_BMCR, uec_phy_read(mii_info, MII_BMCR) |
uec_phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
- config_genmii_advert (mii_info);
+ config_genmii_advert(mii_info);
/* Start/restart aneg */
- genmii_config_aneg (mii_info);
+ genmii_config_aneg(mii_info);
return 0;
}
-static int dm9161_config_aneg (struct uec_mii_info *mii_info)
+static int dm9161_config_aneg(struct uec_mii_info *mii_info)
{
return 0;
}
-static int dm9161_read_status (struct uec_mii_info *mii_info)
+static int dm9161_read_status(struct uec_mii_info *mii_info)
{
u16 status;
int err;
/* Update the link, but return if there was an error */
- err = genmii_update_link (mii_info);
+ err = genmii_update_link(mii_info);
if (err)
return err;
- /* If the link is up, read the speed and duplex
- If we aren't autonegotiating assume speeds are as set */
+ /*
+	 * If the link is up, read the speed and duplex.
+ * If we aren't autonegotiating assume speeds are as set
+ */
if (mii_info->autoneg && mii_info->link) {
status = uec_phy_read(mii_info, MII_DM9161_SCSR);
if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
return 0;
}
-static int dm9161_ack_interrupt (struct uec_mii_info *mii_info)
+static int dm9161_ack_interrupt(struct uec_mii_info *mii_info)
{
/* Clear the interrupt by reading the reg */
uec_phy_read(mii_info, MII_DM9161_INTR);
return 0;
}
-static int dm9161_config_intr (struct uec_mii_info *mii_info)
+static int dm9161_config_intr(struct uec_mii_info *mii_info)
{
if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
uec_phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
return 0;
}
-static void dm9161_close (struct uec_mii_info *mii_info)
+static void dm9161_close(struct uec_mii_info *mii_info)
{
}
-static int fixed_phy_aneg (struct uec_mii_info *mii_info)
+static int fixed_phy_aneg(struct uec_mii_info *mii_info)
{
mii_info->autoneg = 0; /* Turn off auto negotiation for fixed phy */
return 0;
}
-static int fixed_phy_read_status (struct uec_mii_info *mii_info)
+static int fixed_phy_read_status(struct uec_mii_info *mii_info)
{
int i = 0;
for (i = 0; i < ARRAY_SIZE(fixed_phy_port); i++) {
if (strncmp(mii_info->dev->name, fixed_phy_port[i].name,
- strlen(mii_info->dev->name)) == 0) {
+ strlen(mii_info->dev->name)) == 0) {
mii_info->speed = fixed_phy_port[i].speed;
mii_info->duplex = fixed_phy_port[i].duplex;
mii_info->link = 1; /* Link is always UP */
return 0;
}
-static int smsc_config_aneg (struct uec_mii_info *mii_info)
+static int smsc_config_aneg(struct uec_mii_info *mii_info)
{
return 0;
}
-static int smsc_read_status (struct uec_mii_info *mii_info)
+static int smsc_read_status(struct uec_mii_info *mii_info)
{
u16 status;
int err;
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link (mii_info);
+ /* Update the link, but return if there was an error */
+ err = genmii_update_link(mii_info);
if (err)
return err;
- /* If the link is up, read the speed and duplex */
- /* If we aren't autonegotiating, assume speeds
- * are as set */
+ /*
+	 * If the link is up, read the speed and duplex.
+ * If we aren't autonegotiating, assume speeds
+ * are as set
+ */
if (mii_info->autoneg && mii_info->link) {
int val;
val = (status & 0x1c) >> 2;
switch (val) {
- case 1:
- mii_info->duplex = DUPLEX_HALF;
- mii_info->speed = SPEED_10;
- break;
- case 5:
- mii_info->duplex = DUPLEX_FULL;
- mii_info->speed = SPEED_10;
- break;
- case 2:
- mii_info->duplex = DUPLEX_HALF;
- mii_info->speed = SPEED_100;
- break;
- case 6:
- mii_info->duplex = DUPLEX_FULL;
- mii_info->speed = SPEED_100;
- break;
+ case 1:
+ mii_info->duplex = DUPLEX_HALF;
+ mii_info->speed = SPEED_10;
+ break;
+ case 5:
+ mii_info->duplex = DUPLEX_FULL;
+ mii_info->speed = SPEED_10;
+ break;
+ case 2:
+ mii_info->duplex = DUPLEX_HALF;
+ mii_info->speed = SPEED_100;
+ break;
+ case 6:
+ mii_info->duplex = DUPLEX_FULL;
+ mii_info->speed = SPEED_100;
+ break;
}
mii_info->pause = 0;
}
NULL
};
-u16 uec_phy_read(struct uec_mii_info *mii_info, u16 regnum)
+static u16 uec_phy_read(struct uec_mii_info *mii_info, u16 regnum)
{
- return mii_info->mdio_read (mii_info->dev, mii_info->mii_id, regnum);
+ return mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
}
-void uec_phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val)
+static void uec_phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val)
{
- mii_info->mdio_write (mii_info->dev, mii_info->mii_id, regnum, val);
+ mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val);
}
/* Use the PHY ID registers to determine what type of PHY is attached
* to device dev. return a struct phy_info structure describing that PHY
*/
-struct phy_info *uec_get_phy_info (struct uec_mii_info *mii_info)
+struct phy_info *uec_get_phy_info(struct uec_mii_info *mii_info)
{
u16 phy_reg;
u32 phy_ID;
int i;
- struct phy_info *theInfo = NULL;
+ struct phy_info *info = NULL;
/* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = uec_phy_read(mii_info, MII_PHYSID1);
for (i = 0; phy_info[i]; i++)
if (phy_info[i]->phy_id ==
(phy_ID & phy_info[i]->phy_id_mask)) {
- theInfo = phy_info[i];
+ info = phy_info[i];
break;
}
/* This shouldn't happen, as we have generic PHY support */
- if (theInfo == NULL) {
- ugphy_info ("UEC: PHY id %x is not supported!", phy_ID);
+ if (!info) {
+ ugphy_info("UEC: PHY id %x is not supported!", phy_ID);
return NULL;
- } else {
- ugphy_info ("UEC: PHY is %s (%x)", theInfo->name, phy_ID);
}
+ ugphy_info("UEC: PHY is %s (%x)", info->name, phy_ID);
- return theInfo;
+ return info;
}
void marvell_phy_interface_mode(struct eth_device *dev, phy_interface_t type,
- int speed)
+ int speed)
{
- uec_private_t *uec = (uec_private_t *) dev->priv;
+ struct uec_priv *uec = (struct uec_priv *)dev->priv;
struct uec_mii_info *mii_info;
u16 status;
if (!uec->mii_info) {
- printf ("%s: the PHY not initialized\n", __FUNCTION__);
+ printf("%s: the PHY not initialized\n", __func__);
return;
}
mii_info = uec->mii_info;
/* now the B2 will correctly report autoneg completion status */
}
-void change_phy_interface_mode (struct eth_device *dev,
- phy_interface_t type, int speed)
+void change_phy_interface_mode(struct eth_device *dev,
+ phy_interface_t type, int speed)
{
#ifdef CONFIG_PHY_MODE_NEED_CHANGE
- marvell_phy_interface_mode (dev, type, speed);
+ marvell_phy_interface_mode(dev, type, speed);
#endif
}
#ifndef __UEC_PHY_H__
#define __UEC_PHY_H__
+#include <linux/bitops.h>
+
#define MII_end ((u32)-2)
#define MII_read ((u32)-1)
#define MII_DM9161_INTR_LINK_CHANGE 0x0004
#define MII_DM9161_INTR_INIT 0x0000
#define MII_DM9161_INTR_STOP \
-(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
- | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+ (MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK | \
+ MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
/* DM9161 10BT Configuration/Status */
#define MII_DM9161_10BTCSR 0x12
#define DUPLEX_HALF 0x00
#define DUPLEX_FULL 0x01
-/* Indicates what features are supported by the interface. */
-#define SUPPORTED_10baseT_Half (1 << 0)
-#define SUPPORTED_10baseT_Full (1 << 1)
-#define SUPPORTED_100baseT_Half (1 << 2)
-#define SUPPORTED_100baseT_Full (1 << 3)
-#define SUPPORTED_1000baseT_Half (1 << 4)
-#define SUPPORTED_1000baseT_Full (1 << 5)
-#define SUPPORTED_Autoneg (1 << 6)
-#define SUPPORTED_TP (1 << 7)
-#define SUPPORTED_AUI (1 << 8)
-#define SUPPORTED_MII (1 << 9)
-#define SUPPORTED_FIBRE (1 << 10)
-#define SUPPORTED_BNC (1 << 11)
-#define SUPPORTED_10000baseT_Full (1 << 12)
-
-#define ADVERTISED_10baseT_Half (1 << 0)
-#define ADVERTISED_10baseT_Full (1 << 1)
-#define ADVERTISED_100baseT_Half (1 << 2)
-#define ADVERTISED_100baseT_Full (1 << 3)
-#define ADVERTISED_1000baseT_Half (1 << 4)
-#define ADVERTISED_1000baseT_Full (1 << 5)
-#define ADVERTISED_Autoneg (1 << 6)
-#define ADVERTISED_TP (1 << 7)
-#define ADVERTISED_AUI (1 << 8)
-#define ADVERTISED_MII (1 << 9)
-#define ADVERTISED_FIBRE (1 << 10)
-#define ADVERTISED_BNC (1 << 11)
-#define ADVERTISED_10000baseT_Full (1 << 12)
-
/* Taken from mii_if_info and sungem_phy.h */
struct uec_mii_info {
/* Information about the PHY type */
void *priv;
/* Provided by ethernet driver */
- int (*mdio_read) (struct eth_device * dev, int mii_id, int reg);
- void (*mdio_write) (struct eth_device * dev, int mii_id, int reg,
- int val);
+ int (*mdio_read)(struct eth_device *dev, int mii_id, int reg);
+ void (*mdio_write)(struct eth_device *dev, int mii_id, int reg,
+ int val);
};
/* struct phy_info: a structure which defines attributes for a PHY
u32 features;
/* Called to initialize the PHY */
- int (*init) (struct uec_mii_info * mii_info);
+ int (*init)(struct uec_mii_info *mii_info);
/* Called to suspend the PHY for power */
- int (*suspend) (struct uec_mii_info * mii_info);
+ int (*suspend)(struct uec_mii_info *mii_info);
/* Reconfigures autonegotiation (or disables it) */
- int (*config_aneg) (struct uec_mii_info * mii_info);
+ int (*config_aneg)(struct uec_mii_info *mii_info);
/* Determines the negotiated speed and duplex */
- int (*read_status) (struct uec_mii_info * mii_info);
+ int (*read_status)(struct uec_mii_info *mii_info);
/* Clears any pending interrupts */
- int (*ack_interrupt) (struct uec_mii_info * mii_info);
+ int (*ack_interrupt)(struct uec_mii_info *mii_info);
/* Enables or disables interrupts */
- int (*config_intr) (struct uec_mii_info * mii_info);
+ int (*config_intr)(struct uec_mii_info *mii_info);
/* Clears up any memory if needed */
- void (*close) (struct uec_mii_info * mii_info);
+ void (*close)(struct uec_mii_info *mii_info);
};
-struct phy_info *uec_get_phy_info (struct uec_mii_info *mii_info);
-void uec_write_phy_reg (struct eth_device *dev, int mii_id, int regnum,
- int value);
-int uec_read_phy_reg (struct eth_device *dev, int mii_id, int regnum);
-void mii_clear_phy_interrupt (struct uec_mii_info *mii_info);
-void mii_configure_phy_interrupt (struct uec_mii_info *mii_info,
- u32 interrupts);
+struct phy_info *uec_get_phy_info(struct uec_mii_info *mii_info);
+void uec_write_phy_reg(struct eth_device *dev, int mii_id, int regnum,
+ int value);
+int uec_read_phy_reg(struct eth_device *dev, int mii_id, int regnum);
+void mii_clear_phy_interrupt(struct uec_mii_info *mii_info);
+void mii_configure_phy_interrupt(struct uec_mii_info *mii_info,
+ u32 interrupts);
+void change_phy_interface_mode(struct eth_device *dev,
+ phy_interface_t type, int speed);
#endif /* __UEC_PHY_H__ */