author		roy zang <tie-fei.zang@freescale.com>	2006-12-01 12:09:33 +0800
committer	Zang Tiefei <roy@bus.ap.freescale.net>	2006-12-01 12:09:33 +0800
commit		e0b369e4f71b11dd5716903dd6258028cf07e6e8 (patch)
tree		615986f2b3088072c49d74e1086650e185087698 /drivers/qe
parent		ee311214e0d216f904feea269599d0934bf71f23 (diff)
parent		dd520bf314c7add4183c5191692180f576f96b60 (diff)
Fix the conflicts when merging 'master' into hpc2.
The conflicts are due to a new mpc7448 p3m7448 board in the main tree.

Merge branch 'master' into hpc2

Conflicts:
	MAKEALL
	cpu/74xx_7xx/cpu.c
	cpu/74xx_7xx/cpu_init.c
	cpu/74xx_7xx/speed.c
Diffstat (limited to 'drivers/qe')
-rw-r--r--	drivers/qe/Makefile	43
-rw-r--r--	drivers/qe/qe.c		254
-rw-r--r--	drivers/qe/qe.h		237
-rw-r--r--	drivers/qe/uccf.c	404
-rw-r--r--	drivers/qe/uccf.h	130
-rw-r--r--	drivers/qe/uec.c	1266
-rw-r--r--	drivers/qe/uec.h	716
-rw-r--r--	drivers/qe/uec_phy.c	607
-rw-r--r--	drivers/qe/uec_phy.h	259
9 files changed, 3916 insertions, 0 deletions
diff --git a/drivers/qe/Makefile b/drivers/qe/Makefile
new file mode 100644
index 0000000..4844181
--- /dev/null
+++ b/drivers/qe/Makefile
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2006 Freescale Semiconductor, Inc.
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB := $(obj)qe.a
+
+COBJS := qe.o uccf.o uec.o uec_phy.o
+
+SRCS := $(COBJS:.o=.c)
+OBJS := $(addprefix $(obj),$(COBJS))
+
+all: $(LIB)
+
+$(LIB): $(obj).depend $(OBJS)
+ $(AR) $(ARFLAGS) $@ $(OBJS)
+
+#########################################################################
+
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
diff --git a/drivers/qe/qe.c b/drivers/qe/qe.c
new file mode 100644
index 0000000..5f20962
--- /dev/null
+++ b/drivers/qe/qe.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "common.h"
+#include "asm/errno.h"
+#include "asm/io.h"
+#include "asm/immap_qe.h"
+#include "qe.h"
+
+#if defined(CONFIG_QE)
+qe_map_t *qe_immr = NULL;
+static qe_snum_t snums[QE_NUM_OF_SNUM];
+
+void qe_issue_cmd(uint cmd, uint sbc, u8 mcn, u32 cmd_data)
+{
+ u32 cecr;
+
+ if (cmd == QE_RESET) {
+ out_be32(&qe_immr->cp.cecr,(u32) (cmd | QE_CR_FLG));
+ } else {
+ out_be32(&qe_immr->cp.cecdr, cmd_data);
+ out_be32(&qe_immr->cp.cecr, (sbc | QE_CR_FLG |
+ ((u32) mcn<<QE_CR_PROTOCOL_SHIFT) | cmd));
+ }
+ /* Wait for the QE_CR_FLG to clear */
+ do {
+ cecr = in_be32(&qe_immr->cp.cecr);
+ } while (cecr & QE_CR_FLG);
+
+ return;
+}
+
+uint qe_muram_alloc(uint size, uint align)
+{
+ DECLARE_GLOBAL_DATA_PTR;
+
+ uint retloc;
+ uint align_mask, off;
+ uint savebase;
+
+ align_mask = align - 1;
+ savebase = gd->mp_alloc_base;
+
+ if ((off = (gd->mp_alloc_base & align_mask)) != 0)
+ gd->mp_alloc_base += (align - off);
+
+ if ((off = size & align_mask) != 0)
+ size += (align - off);
+
+ if ((gd->mp_alloc_base + size) >= gd->mp_alloc_top) {
+ gd->mp_alloc_base = savebase;
+ printf("%s: ran out of ram.\n", __FUNCTION__);
+ }
+
+ retloc = gd->mp_alloc_base;
+ gd->mp_alloc_base += size;
+
+ memset((void *)&qe_immr->muram[retloc], 0, size);
+
+ __asm__ __volatile__("sync");
+
+ return retloc;
+}
+
+void *qe_muram_addr(uint offset)
+{
+ return (void *)&qe_immr->muram[offset];
+}
+
+static void qe_sdma_init(void)
+{
+ volatile sdma_t *p;
+ uint sdma_buffer_base;
+
+ p = (volatile sdma_t *)&qe_immr->sdma;
+
+ /* All DMA transactions go over bus 1 */
+ out_be32(&p->sdaqr, 0);
+ out_be32(&p->sdaqmr, 0);
+
+ /* Allocate 2KB temporary buffer for sdma */
+ sdma_buffer_base = qe_muram_alloc(2048, 64);
+ out_be32(&p->sdwbcr, sdma_buffer_base & QE_SDEBCR_BA_MASK);
+
+ /* Clear sdma status */
+ out_be32(&p->sdsr, 0x03000000);
+
+ /* Enable global mode on bus 1, and 2KB buffer size */
+ out_be32(&p->sdmr, QE_SDMR_GLB_1_MSK | (0x3 << QE_SDMR_CEN_SHIFT));
+}
+
+static u8 thread_snum[QE_NUM_OF_SNUM] = {
+ 0x04, 0x05, 0x0c, 0x0d,
+ 0x14, 0x15, 0x1c, 0x1d,
+ 0x24, 0x25, 0x2c, 0x2d,
+ 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xa8, 0xa9,
+ 0xb8, 0xb9, 0xc8, 0xc9,
+ 0xd8, 0xd9, 0xe8, 0xe9
+};
+
+static void qe_snums_init(void)
+{
+ int i;
+
+ for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+ snums[i].state = QE_SNUM_STATE_FREE;
+ snums[i].num = thread_snum[i];
+ }
+}
+
+int qe_get_snum(void)
+{
+ int snum = -EBUSY;
+ int i;
+
+ for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+ if (snums[i].state == QE_SNUM_STATE_FREE) {
+ snums[i].state = QE_SNUM_STATE_USED;
+ snum = snums[i].num;
+ break;
+ }
+ }
+
+ return snum;
+}
+
+void qe_put_snum(u8 snum)
+{
+ int i;
+
+ for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+ if (snums[i].num == snum) {
+ snums[i].state = QE_SNUM_STATE_FREE;
+ break;
+ }
+ }
+}
+
+void qe_init(uint qe_base)
+{
+ DECLARE_GLOBAL_DATA_PTR;
+
+ /* Init the QE IMMR base */
+ qe_immr = (qe_map_t *)qe_base;
+
+ gd->mp_alloc_base = QE_DATAONLY_BASE;
+ gd->mp_alloc_top = gd->mp_alloc_base + QE_DATAONLY_SIZE;
+
+ qe_sdma_init();
+ qe_snums_init();
+}
+
+void qe_reset(void)
+{
+ qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+ (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+
+void qe_assign_page(uint snum, uint para_ram_base)
+{
+ u32 cecr;
+
+ out_be32(&qe_immr->cp.cecdr, para_ram_base);
+ out_be32(&qe_immr->cp.cecr, ((u32) snum<<QE_CR_ASSIGN_PAGE_SNUM_SHIFT)
+ | QE_CR_FLG | QE_ASSIGN_PAGE);
+
+ /* Wait for the QE_CR_FLG to clear */
+ do {
+ cecr = in_be32(&qe_immr->cp.cecr);
+ } while (cecr & QE_CR_FLG );
+
+ return;
+}
+
+/*
+ * brg: 0~15 as BRG1~BRG16
+ * rate: baud rate
+ * The BRG input clock comes from the BRGCLK (an internal clock generated
+ * from the QE clock; it is one-half of the QE clock). If the clock source
+ * needs to come from a CLKn pin instead, we have to change this function.
+ */
+
+#define BRG_CLK (gd->brg_clk)
+
+int qe_set_brg(uint brg, uint rate)
+{
+ DECLARE_GLOBAL_DATA_PTR;
+ volatile uint *bp;
+ u32 divisor;
+ int div16 = 0;
+
+ if (brg >= QE_NUM_OF_BRGS)
+ return -EINVAL;
+ bp = (uint *)&qe_immr->brg.brgc1;
+ bp += brg;
+
+ divisor = (BRG_CLK / rate);
+ if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+ div16 = 1;
+ divisor /= 16;
+ }
+
+ *bp = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
+ __asm__ __volatile__("sync");
+
+ if (div16) {
+ *bp |= QE_BRGC_DIV16;
+ __asm__ __volatile__("sync");
+ }
+
+ return 0;
+}
+
+/* Set ethernet MII clock master
+*/
+int qe_set_mii_clk_src(int ucc_num)
+{
+ u32 cmxgcr;
+
+ /* check if the UCC number is in range. */
+ if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
+ printf("%s: ucc num not in ranges\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ cmxgcr = in_be32(&qe_immr->qmx.cmxgcr);
+ cmxgcr &= ~QE_CMXGCR_MII_ENET_MNG_MASK;
+ cmxgcr |= (ucc_num <<QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ out_be32(&qe_immr->qmx.cmxgcr, cmxgcr);
+
+ return 0;
+}
+
+#endif /* CONFIG_QE */
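
For reference, a minimal usage sketch of the qe.c API added above. This is not part of the commit; the IMMR base address, the baud rate, and the call order are placeholder assumptions only.

	/*
	 * Hypothetical bring-up sequence using the qe.c API above.
	 * 0xe0100000 and 115200 are placeholder values.
	 */
	static void example_qe_setup(void)
	{
		qe_init(0xe0100000);	/* map the QE IMMR, init SDMA and SNUMs */
		qe_reset();		/* issue the QE_RESET host command */

		/* Program BRG1 (index 0) for a 115200 baud rate */
		if (qe_set_brg(0, 115200))
			printf("BRG setup failed\n");
	}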
diff --git a/drivers/qe/qe.h b/drivers/qe/qe.h
new file mode 100644
index 0000000..f7f8ed0
--- /dev/null
+++ b/drivers/qe/qe.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QE_H__
+#define __QE_H__
+
+#include "common.h"
+
+#define QE_NUM_OF_SNUM 28
+#define QE_NUM_OF_BRGS 16
+#define UCC_MAX_NUM 8
+
+#define QE_DATAONLY_BASE (uint)(128)
+#define QE_DATAONLY_SIZE ((uint)(0xc000) - QE_DATAONLY_BASE)
+
+/* QE threads SNUM
+*/
+typedef enum qe_snum_state {
+ QE_SNUM_STATE_USED, /* used */
+ QE_SNUM_STATE_FREE /* free */
+} qe_snum_state_e;
+
+typedef struct qe_snum {
+ u8 num; /* snum */
+ qe_snum_state_e state; /* state */
+} qe_snum_t;
+
+/* QE RISC allocation
+*/
+typedef enum qe_risc_allocation {
+ QE_RISC_ALLOCATION_RISC1 = 1, /* RISC 1 */
+ QE_RISC_ALLOCATION_RISC2 = 2, /* RISC 2 */
+ QE_RISC_ALLOCATION_RISC1_AND_RISC2 = 3 /* RISC 1 or RISC 2 */
+} qe_risc_allocation_e;
+
+/* QE CECR commands for UCC fast.
+*/
+#define QE_CR_FLG 0x00010000
+#define QE_RESET 0x80000000
+#define QE_INIT_TX_RX 0x00000000
+#define QE_INIT_RX 0x00000001
+#define QE_INIT_TX 0x00000002
+#define QE_ENTER_HUNT_MODE 0x00000003
+#define QE_STOP_TX 0x00000004
+#define QE_GRACEFUL_STOP_TX 0x00000005
+#define QE_RESTART_TX 0x00000006
+#define QE_SWITCH_COMMAND 0x00000007
+#define QE_SET_GROUP_ADDRESS 0x00000008
+#define QE_INSERT_CELL 0x00000009
+#define QE_ATM_TRANSMIT 0x0000000a
+#define QE_CELL_POOL_GET 0x0000000b
+#define QE_CELL_POOL_PUT 0x0000000c
+#define QE_IMA_HOST_CMD 0x0000000d
+#define QE_ATM_MULTI_THREAD_INIT 0x00000011
+#define QE_ASSIGN_PAGE 0x00000012
+#define QE_START_FLOW_CONTROL 0x00000014
+#define QE_STOP_FLOW_CONTROL 0x00000015
+#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016
+#define QE_GRACEFUL_STOP_RX 0x0000001a
+#define QE_RESTART_RX 0x0000001b
+
+/* QE CECR Sub Block Code - sub block code of QE command.
+*/
+#define QE_CR_SUBBLOCK_INVALID 0x00000000
+#define QE_CR_SUBBLOCK_USB 0x03200000
+#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000
+#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000
+#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000
+#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000
+#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000
+#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000
+#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000
+#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000
+#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000
+#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000
+#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000
+#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000
+#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000
+#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000
+#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000
+#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000
+#define QE_CR_SUBBLOCK_MCC1 0x03800000
+#define QE_CR_SUBBLOCK_MCC2 0x03a00000
+#define QE_CR_SUBBLOCK_MCC3 0x03000000
+#define QE_CR_SUBBLOCK_IDMA1 0x02800000
+#define QE_CR_SUBBLOCK_IDMA2 0x02a00000
+#define QE_CR_SUBBLOCK_IDMA3 0x02c00000
+#define QE_CR_SUBBLOCK_IDMA4 0x02e00000
+#define QE_CR_SUBBLOCK_HPAC 0x01e00000
+#define QE_CR_SUBBLOCK_SPI1 0x01400000
+#define QE_CR_SUBBLOCK_SPI2 0x01600000
+#define QE_CR_SUBBLOCK_RAND 0x01c00000
+#define QE_CR_SUBBLOCK_TIMER 0x01e00000
+#define QE_CR_SUBBLOCK_GENERAL 0x03c00000
+
+/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command.
+*/
+#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */
+#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00
+#define QE_CR_PROTOCOL_ATM_POS 0x0A
+#define QE_CR_PROTOCOL_ETHERNET 0x0C
+#define QE_CR_PROTOCOL_L2_SWITCH 0x0D
+#define QE_CR_PROTOCOL_SHIFT 6
+
+/* QE ASSIGN PAGE command
+*/
+#define QE_CR_ASSIGN_PAGE_SNUM_SHIFT 17
+
+/* Communication Direction.
+*/
+typedef enum comm_dir {
+ COMM_DIR_NONE = 0,
+ COMM_DIR_RX = 1,
+ COMM_DIR_TX = 2,
+ COMM_DIR_RX_AND_TX = 3
+} comm_dir_e;
+
+/* Clocks and BRG's
+*/
+typedef enum qe_clock {
+ QE_CLK_NONE = 0,
+ QE_BRG1, /* Baud Rate Generator 1 */
+ QE_BRG2, /* Baud Rate Generator 2 */
+ QE_BRG3, /* Baud Rate Generator 3 */
+ QE_BRG4, /* Baud Rate Generator 4 */
+ QE_BRG5, /* Baud Rate Generator 5 */
+ QE_BRG6, /* Baud Rate Generator 6 */
+ QE_BRG7, /* Baud Rate Generator 7 */
+ QE_BRG8, /* Baud Rate Generator 8 */
+ QE_BRG9, /* Baud Rate Generator 9 */
+ QE_BRG10, /* Baud Rate Generator 10 */
+ QE_BRG11, /* Baud Rate Generator 11 */
+ QE_BRG12, /* Baud Rate Generator 12 */
+ QE_BRG13, /* Baud Rate Generator 13 */
+ QE_BRG14, /* Baud Rate Generator 14 */
+ QE_BRG15, /* Baud Rate Generator 15 */
+ QE_BRG16, /* Baud Rate Generator 16 */
+ QE_CLK1, /* Clock 1 */
+ QE_CLK2, /* Clock 2 */
+ QE_CLK3, /* Clock 3 */
+ QE_CLK4, /* Clock 4 */
+ QE_CLK5, /* Clock 5 */
+ QE_CLK6, /* Clock 6 */
+ QE_CLK7, /* Clock 7 */
+ QE_CLK8, /* Clock 8 */
+ QE_CLK9, /* Clock 9 */
+ QE_CLK10, /* Clock 10 */
+ QE_CLK11, /* Clock 11 */
+ QE_CLK12, /* Clock 12 */
+ QE_CLK13, /* Clock 13 */
+ QE_CLK14, /* Clock 14 */
+ QE_CLK15, /* Clock 15 */
+ QE_CLK16, /* Clock 16 */
+ QE_CLK17, /* Clock 17 */
+ QE_CLK18, /* Clock 18 */
+ QE_CLK19, /* Clock 19 */
+ QE_CLK20, /* Clock 20 */
+ QE_CLK21, /* Clock 21 */
+ QE_CLK22, /* Clock 22 */
+ QE_CLK23, /* Clock 23 */
+ QE_CLK24, /* Clock 24 */
+ QE_CLK_DUMMY
+} qe_clock_e;
+
+/* QE CMXGCR register
+*/
+#define QE_CMXGCR_MII_ENET_MNG_MASK 0x00007000
+#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12
+
+/* QE CMXUCR registers
+ */
+#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F
+
+/* QE BRG configuration register
+*/
+#define QE_BRGC_ENABLE 0x00010000
+#define QE_BRGC_DIVISOR_SHIFT 1
+#define QE_BRGC_DIVISOR_MAX 0xFFF
+#define QE_BRGC_DIV16 1
+
+/* QE SDMA registers
+*/
+#define QE_SDSR_BER1 0x02000000
+#define QE_SDSR_BER2 0x01000000
+
+#define QE_SDMR_GLB_1_MSK 0x80000000
+#define QE_SDMR_ADR_SEL 0x20000000
+#define QE_SDMR_BER1_MSK 0x02000000
+#define QE_SDMR_BER2_MSK 0x01000000
+#define QE_SDMR_EB1_MSK 0x00800000
+#define QE_SDMR_ER1_MSK 0x00080000
+#define QE_SDMR_ER2_MSK 0x00040000
+#define QE_SDMR_CEN_MASK 0x0000E000
+#define QE_SDMR_SBER_1 0x00000200
+#define QE_SDMR_SBER_2 0x00000200
+#define QE_SDMR_EB1_PR_MASK 0x000000C0
+#define QE_SDMR_ER1_PR 0x00000008
+
+#define QE_SDMR_CEN_SHIFT 13
+#define QE_SDMR_EB1_PR_SHIFT 6
+
+#define QE_SDTM_MSNUM_SHIFT 24
+
+#define QE_SDEBCR_BA_MASK 0x01FFFFFF
+
+void qe_config_iopin(u8 port, u8 pin, int dir, int open_drain, int assign);
+void qe_issue_cmd(uint cmd, uint sbc, u8 mcn, u32 cmd_data);
+uint qe_muram_alloc(uint size, uint align);
+void *qe_muram_addr(uint offset);
+int qe_get_snum(void);
+void qe_put_snum(u8 snum);
+void qe_init(uint qe_base);
+void qe_reset(void);
+void qe_assign_page(uint snum, uint para_ram_base);
+int qe_set_brg(uint brg, uint rate);
+int qe_set_mii_clk_src(int ucc_num);
+
+#endif /* __QE_H__ */
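
To make the CECR encoding above concrete, here is a small illustration of how qe_issue_cmd() (from qe.c above) combines these definitions. It is not part of the commit, and the choice of UCC3 / Ethernet is arbitrary.

	/* Gracefully stop transmission on the UCC fast 3 sub-block. */
	static void example_graceful_stop_tx_ucc3(void)
	{
		qe_issue_cmd(QE_GRACEFUL_STOP_TX,		/* opcode */
			     QE_CR_SUBBLOCK_UCCFAST3,		/* sub-block code */
			     (u8)QE_CR_PROTOCOL_ETHERNET,	/* protocol (MCN) */
			     0);				/* no command data */
		/*
		 * qe_issue_cmd() writes
		 *   QE_CR_SUBBLOCK_UCCFAST3 | QE_CR_FLG |
		 *   (QE_CR_PROTOCOL_ETHERNET << QE_CR_PROTOCOL_SHIFT) |
		 *   QE_GRACEFUL_STOP_TX
		 * to CECR, then polls CECR until QE_CR_FLG clears.
		 */
	}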
diff --git a/drivers/qe/uccf.c b/drivers/qe/uccf.c
new file mode 100644
index 0000000..c5477e0
--- /dev/null
+++ b/drivers/qe/uccf.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "common.h"
+#include "malloc.h"
+#include "asm/errno.h"
+#include "asm/io.h"
+#include "asm/immap_qe.h"
+#include "qe.h"
+#include "uccf.h"
+
+#if defined(CONFIG_QE)
+void ucc_fast_transmit_on_demand(ucc_fast_private_t *uccf)
+{
+ out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+}
+
+u32 ucc_fast_get_qe_cr_subblock(int ucc_num)
+{
+ switch (ucc_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+ case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+ case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+ case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+ case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+ case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+ case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+ case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+
+static void ucc_get_cmxucr_reg(int ucc_num, volatile u32 **p_cmxucr,
+ u8 *reg_num, u8 *shift)
+{
+ switch (ucc_num) {
+ case 0: /* UCC1 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr1);
+ *reg_num = 1;
+ *shift = 16;
+ break;
+ case 2: /* UCC3 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr1);
+ *reg_num = 1;
+ *shift = 0;
+ break;
+ case 4: /* UCC5 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr2);
+ *reg_num = 2;
+ *shift = 16;
+ break;
+ case 6: /* UCC7 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr2);
+ *reg_num = 2;
+ *shift = 0;
+ break;
+ case 1: /* UCC2 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr3);
+ *reg_num = 3;
+ *shift = 16;
+ break;
+ case 3: /* UCC4 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr3);
+ *reg_num = 3;
+ *shift = 0;
+ break;
+ case 5: /* UCC6 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr4);
+ *reg_num = 4;
+ *shift = 16;
+ break;
+ case 7: /* UCC8 */
+ *p_cmxucr = &(qe_immr->qmx.cmxucr4);
+ *reg_num = 4;
+ *shift = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+static int ucc_set_clk_src(int ucc_num, qe_clock_e clock, comm_dir_e mode)
+{
+ volatile u32 *p_cmxucr = NULL;
+ u8 reg_num = 0;
+ u8 shift = 0;
+ u32 clockBits;
+ u32 clockMask;
+ int source = -1;
+
+ /* check if the UCC number is in range. */
+ if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+ return -EINVAL;
+
+ if (! ((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
+ printf("%s: bad comm mode type passed\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ ucc_get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+
+ switch (reg_num) {
+ case 1:
+ switch (clock) {
+ case QE_BRG1: source = 1; break;
+ case QE_BRG2: source = 2; break;
+ case QE_BRG7: source = 3; break;
+ case QE_BRG8: source = 4; break;
+ case QE_CLK9: source = 5; break;
+ case QE_CLK10: source = 6; break;
+ case QE_CLK11: source = 7; break;
+ case QE_CLK12: source = 8; break;
+ case QE_CLK15: source = 9; break;
+ case QE_CLK16: source = 10; break;
+ default: source = -1; break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5: source = 1; break;
+ case QE_BRG6: source = 2; break;
+ case QE_BRG7: source = 3; break;
+ case QE_BRG8: source = 4; break;
+ case QE_CLK13: source = 5; break;
+ case QE_CLK14: source = 6; break;
+ case QE_CLK19: source = 7; break;
+ case QE_CLK20: source = 8; break;
+ case QE_CLK15: source = 9; break;
+ case QE_CLK16: source = 10; break;
+ default: source = -1; break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9: source = 1; break;
+ case QE_BRG10: source = 2; break;
+ case QE_BRG15: source = 3; break;
+ case QE_BRG16: source = 4; break;
+ case QE_CLK3: source = 5; break;
+ case QE_CLK4: source = 6; break;
+ case QE_CLK17: source = 7; break;
+ case QE_CLK18: source = 8; break;
+ case QE_CLK7: source = 9; break;
+ case QE_CLK8: source = 10; break;
+ case QE_CLK16: source = 11; break;
+ default: source = -1; break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13: source = 1; break;
+ case QE_BRG14: source = 2; break;
+ case QE_BRG15: source = 3; break;
+ case QE_BRG16: source = 4; break;
+ case QE_CLK5: source = 5; break;
+ case QE_CLK6: source = 6; break;
+ case QE_CLK21: source = 7; break;
+ case QE_CLK22: source = 8; break;
+ case QE_CLK7: source = 9; break;
+ case QE_CLK8: source = 10; break;
+ case QE_CLK16: source = 11; break;
+ default: source = -1; break;
+ }
+ break;
+ default:
+ source = -1;
+ break;
+ }
+
+ if (source == -1) {
+ printf("%s: Bad combination of clock and UCC\n", __FUNCTION__);
+ return -ENOENT;
+ }
+
+ clockBits = (u32) source;
+ clockMask = QE_CMXUCR_TX_CLK_SRC_MASK;
+ if (mode == COMM_DIR_RX) {
+ clockBits <<= 4; /* Rx field is 4 bits to left of Tx field */
+ clockMask <<= 4; /* Rx field is 4 bits to left of Tx field */
+ }
+ clockBits <<= shift;
+ clockMask <<= shift;
+
+ out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clockMask) | clockBits);
+
+ return 0;
+}
+
+static uint ucc_get_reg_baseaddr(int ucc_num)
+{
+ uint base = 0;
+
+ /* check if the UCC number is in range */
+ if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
+ printf("%s: the UCC num not in ranges\n", __FUNCTION__);
+ return 0;
+ }
+
+ switch (ucc_num) {
+ case 0: base = 0x00002000; break;
+ case 1: base = 0x00003000; break;
+ case 2: base = 0x00002200; break;
+ case 3: base = 0x00003200; break;
+ case 4: base = 0x00002400; break;
+ case 5: base = 0x00003400; break;
+ case 6: base = 0x00002600; break;
+ case 7: base = 0x00003600; break;
+ default: break;
+ }
+
+ base = (uint)qe_immr + base;
+ return base;
+}
+
+void ucc_fast_enable(ucc_fast_private_t *uccf, comm_dir_e mode)
+{
+ ucc_fast_t *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr = in_be32(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+ out_be32(&uf_regs->gumr, gumr);
+}
+
+void ucc_fast_disable(ucc_fast_private_t *uccf, comm_dir_e mode)
+{
+ ucc_fast_t *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr = in_be32(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+ out_be32(&uf_regs->gumr, gumr);
+}
+
+int ucc_fast_init(ucc_fast_info_t *uf_info, ucc_fast_private_t **uccf_ret)
+{
+ ucc_fast_private_t *uccf;
+ ucc_fast_t *uf_regs;
+
+ if (!uf_info)
+ return -EINVAL;
+
+ if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printf("%s: Illagal UCC number!\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ uccf = (ucc_fast_private_t *)malloc(sizeof(ucc_fast_private_t));
+ if (!uccf) {
+ printf("%s: No memory for UCC fast data structure!\n",
+ __FUNCTION__);
+ return -ENOMEM;
+ }
+ memset(uccf, 0, sizeof(ucc_fast_private_t));
+
+ /* Save fast UCC structure */
+ uccf->uf_info = uf_info;
+ uccf->uf_regs = (ucc_fast_t *)ucc_get_reg_baseaddr(uf_info->ucc_num);
+
+ if (uccf->uf_regs == NULL) {
+ printf("%s: No memory map for UCC fast controller!\n",
+ __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ uccf->enabled_tx = 0;
+ uccf->enabled_rx = 0;
+
+ uf_regs = uccf->uf_regs;
+ uccf->p_ucce = (u32 *) &(uf_regs->ucce);
+ uccf->p_uccm = (u32 *) &(uf_regs->uccm);
+
+ /* Init GUEMR register: both Rx and Tx of this UCC use the Fast protocol */
+ out_8(&uf_regs->guemr, UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_FAST_RX
+ | UCC_GUEMR_MODE_FAST_TX);
+
+ /* Set GUMR: disable both Rx and Tx, select the Ethernet protocol */
+ out_be32(&uf_regs->gumr, UCC_FAST_GUMR_ETH);
+
+ /* Set up the Gigabit Ethernet virtual FIFO parameters */
+ if (uf_info->eth_type == GIGA_ETH) {
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(UCC_GETH_UTFS_GIGA_INIT,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+ /* Allocate memory for Rx Virtual Fifo */
+ uccf->ucc_fast_rx_virtual_fifo_base_offset =
+ qe_muram_alloc(UCC_GETH_URFS_GIGA_INIT +
+ UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+ /* utfb, urfb are offsets from MURAM base */
+ out_be32(&uf_regs->utfb,
+ uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ out_be32(&uf_regs->urfb,
+ uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ /* Set Virtual Fifo registers */
+ out_be16(&uf_regs->urfs, UCC_GETH_URFS_GIGA_INIT);
+ out_be16(&uf_regs->urfet, UCC_GETH_URFET_GIGA_INIT);
+ out_be16(&uf_regs->urfset, UCC_GETH_URFSET_GIGA_INIT);
+ out_be16(&uf_regs->utfs, UCC_GETH_UTFS_GIGA_INIT);
+ out_be16(&uf_regs->utfet, UCC_GETH_UTFET_GIGA_INIT);
+ out_be16(&uf_regs->utftt, UCC_GETH_UTFTT_GIGA_INIT);
+ }
+
+ /* Set up the Fast Ethernet virtual FIFO parameters */
+ if (uf_info->eth_type == FAST_ETH) {
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(UCC_GETH_UTFS_INIT,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+ /* Allocate memory for Rx Virtual Fifo */
+ uccf->ucc_fast_rx_virtual_fifo_base_offset =
+ qe_muram_alloc(UCC_GETH_URFS_INIT +
+ UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+ /* utfb, urfb are offsets from MURAM base */
+ out_be32(&uf_regs->utfb,
+ uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ out_be32(&uf_regs->urfb,
+ uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ /* Set Virtual Fifo registers */
+ out_be16(&uf_regs->urfs, UCC_GETH_URFS_INIT);
+ out_be16(&uf_regs->urfet, UCC_GETH_URFET_INIT);
+ out_be16(&uf_regs->urfset, UCC_GETH_URFSET_INIT);
+ out_be16(&uf_regs->utfs, UCC_GETH_UTFS_INIT);
+ out_be16(&uf_regs->utfet, UCC_GETH_UTFET_INIT);
+ out_be16(&uf_regs->utftt, UCC_GETH_UTFTT_INIT);
+ }
+
+ /* Rx clock routing */
+ if (uf_info->rx_clock != QE_CLK_NONE) {
+ if (ucc_set_clk_src(uf_info->ucc_num,
+ uf_info->rx_clock, COMM_DIR_RX)) {
+ printf("%s: Illegal value for parameter 'RxClock'.\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* Tx clock routing */
+ if (uf_info->tx_clock != QE_CLK_NONE) {
+ if (ucc_set_clk_src(uf_info->ucc_num,
+ uf_info->tx_clock, COMM_DIR_TX)) {
+ printf("%s: Illegal value for parameter 'TxClock'.\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* Clear the interrupt mask register to disable all interrupts */
+ out_be32(&uf_regs->uccm, 0x0);
+
+ /* Write '1' to clear all events */
+ out_be32(&uf_regs->ucce, 0xffffffff);
+
+ *uccf_ret = uccf;
+ return 0;
+}
+#endif /* CONFIG_QE */
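
A worked example of the bit placement performed by ucc_set_clk_src() above (the UCC/clock pairing is arbitrary, not something the commit configures):

	/*
	 * Routing QE_CLK9 to the UCC1 receiver, i.e.
	 * ucc_set_clk_src(0, QE_CLK9, COMM_DIR_RX):
	 *
	 *   ucc_get_cmxucr_reg(0, ...)  ->  CMXUCR1, reg_num = 1, shift = 16
	 *   reg_num 1 with QE_CLK9      ->  source = 5
	 *   COMM_DIR_RX                 ->  bits and mask shifted left by 4
	 *
	 *   clockBits = (5 << 4) << 16                         = 0x00500000
	 *   clockMask = (QE_CMXUCR_TX_CLK_SRC_MASK << 4) << 16 = 0x00F00000
	 *
	 * CMXUCR1 is then read-modify-written with this mask and value.
	 */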
diff --git a/drivers/qe/uccf.h b/drivers/qe/uccf.h
new file mode 100644
index 0000000..1ff9e1d
--- /dev/null
+++ b/drivers/qe/uccf.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __UCCF_H__
+#define __UCCF_H__
+
+#include "common.h"
+#include "qe.h"
+
+/* Fast or Giga ethernet
+*/
+typedef enum enet_type {
+ FAST_ETH,
+ GIGA_ETH,
+} enet_type_e;
+
+/* General UCC Extended Mode Register
+*/
+#define UCC_GUEMR_MODE_MASK_RX 0x02
+#define UCC_GUEMR_MODE_MASK_TX 0x01
+#define UCC_GUEMR_MODE_FAST_RX 0x02
+#define UCC_GUEMR_MODE_FAST_TX 0x01
+#define UCC_GUEMR_MODE_SLOW_RX 0x00
+#define UCC_GUEMR_MODE_SLOW_TX 0x00
+#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 must be set 1 */
+
+/* General UCC FAST Mode Register
+*/
+#define UCC_FAST_GUMR_TCI 0x20000000
+#define UCC_FAST_GUMR_TRX 0x10000000
+#define UCC_FAST_GUMR_TTX 0x08000000
+#define UCC_FAST_GUMR_CDP 0x04000000
+#define UCC_FAST_GUMR_CTSP 0x02000000
+#define UCC_FAST_GUMR_CDS 0x01000000
+#define UCC_FAST_GUMR_CTSS 0x00800000
+#define UCC_FAST_GUMR_TXSY 0x00020000
+#define UCC_FAST_GUMR_RSYN 0x00010000
+#define UCC_FAST_GUMR_RTSM 0x00002000
+#define UCC_FAST_GUMR_REVD 0x00000400
+#define UCC_FAST_GUMR_ENR 0x00000020
+#define UCC_FAST_GUMR_ENT 0x00000010
+
+/* GUMR [MODE] bit maps
+*/
+#define UCC_FAST_GUMR_HDLC 0x00000000
+#define UCC_FAST_GUMR_QMC 0x00000002
+#define UCC_FAST_GUMR_UART 0x00000004
+#define UCC_FAST_GUMR_BISYNC 0x00000008
+#define UCC_FAST_GUMR_ATM 0x0000000a
+#define UCC_FAST_GUMR_ETH 0x0000000c
+
+/* Transmit On Demand (UTORD)
+*/
+#define UCC_SLOW_TOD 0x8000
+#define UCC_FAST_TOD 0x8000
+
+/* Fast Ethernet (10/100 Mbps)
+*/
+#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size */
+#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size */
+#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT 128
+
+/* Gigabit Ethernet (1000 Mbps)
+*/
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
+
+/* UCC fast alignment
+*/
+#define UCC_FAST_RX_ALIGN 4
+#define UCC_FAST_MRBLR_ALIGNMENT 4
+#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT 8
+
+/* Sizes
+*/
+#define UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD 8
+
+/* UCC fast structure.
+*/
+typedef struct ucc_fast_info {
+ int ucc_num;
+ qe_clock_e rx_clock;
+ qe_clock_e tx_clock;
+ enet_type_e eth_type;
+} ucc_fast_info_t;
+
+typedef struct ucc_fast_private {
+ ucc_fast_info_t *uf_info;
+ ucc_fast_t *uf_regs; /* a pointer to memory map of UCC regs */
+ u32 *p_ucce; /* a pointer to the event register */
+ u32 *p_uccm; /* a pointer to the mask register */
+ int enabled_tx; /* whether UCC is enabled for Tx (ENT) */
+ int enabled_rx; /* whether UCC is enabled for Rx (ENR) */
+ u32 ucc_fast_tx_virtual_fifo_base_offset;
+ u32 ucc_fast_rx_virtual_fifo_base_offset;
+} ucc_fast_private_t;
+
+void ucc_fast_transmit_on_demand(ucc_fast_private_t *uccf);
+u32 ucc_fast_get_qe_cr_subblock(int ucc_num);
+void ucc_fast_enable(ucc_fast_private_t *uccf, comm_dir_e mode);
+void ucc_fast_disable(ucc_fast_private_t *uccf, comm_dir_e mode);
+int ucc_fast_init(ucc_fast_info_t *uf_info, ucc_fast_private_t **uccf_ret);
+
+#endif /* __UCCF_H__ */
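
A minimal usage sketch of the ucc_fast API declared above (not part of the commit; the UCC number, clock routing and Ethernet type are placeholder choices):

	static ucc_fast_info_t example_uf_info = {
		.ucc_num  = 2,			/* UCC3 */
		.rx_clock = QE_CLK9,
		.tx_clock = QE_CLK10,
		.eth_type = GIGA_ETH,
	};

	static int example_uccf_bringup(void)
	{
		ucc_fast_private_t *uccf;

		if (ucc_fast_init(&example_uf_info, &uccf))
			return -1;

		/* Enable both directions once the MAC side has been configured */
		ucc_fast_enable(uccf, COMM_DIR_RX_AND_TX);
		return 0;
	}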
diff --git a/drivers/qe/uec.c b/drivers/qe/uec.c
new file mode 100644
index 0000000..f640c81
--- /dev/null
+++ b/drivers/qe/uec.c
@@ -0,0 +1,1266 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "common.h"
+#include "net.h"
+#include "malloc.h"
+#include "asm/errno.h"
+#include "asm/io.h"
+#include "asm/immap_qe.h"
+#include "qe.h"
+#include "uccf.h"
+#include "uec.h"
+#include "uec_phy.h"
+
+#if defined(CONFIG_QE)
+
+#ifdef CONFIG_UEC_ETH1
+static uec_info_t eth1_uec_info = {
+ .uf_info = {
+ .ucc_num = CFG_UEC1_UCC_NUM,
+ .rx_clock = CFG_UEC1_RX_CLK,
+ .tx_clock = CFG_UEC1_TX_CLK,
+ .eth_type = CFG_UEC1_ETH_TYPE,
+ },
+ .num_threads_tx = UEC_NUM_OF_THREADS_4,
+ .num_threads_rx = UEC_NUM_OF_THREADS_4,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .tx_bd_ring_len = 16,
+ .rx_bd_ring_len = 16,
+ .phy_address = CFG_UEC1_PHY_ADDR,
+ .enet_interface = CFG_UEC1_INTERFACE_MODE,
+};
+#endif
+#ifdef CONFIG_UEC_ETH2
+static uec_info_t eth2_uec_info = {
+ .uf_info = {
+ .ucc_num = CFG_UEC2_UCC_NUM,
+ .rx_clock = CFG_UEC2_RX_CLK,
+ .tx_clock = CFG_UEC2_TX_CLK,
+ .eth_type = CFG_UEC2_ETH_TYPE,
+ },
+ .num_threads_tx = UEC_NUM_OF_THREADS_4,
+ .num_threads_rx = UEC_NUM_OF_THREADS_4,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .tx_bd_ring_len = 16,
+ .rx_bd_ring_len = 16,
+ .phy_address = CFG_UEC2_PHY_ADDR,
+ .enet_interface = CFG_UEC2_INTERFACE_MODE,
+};
+#endif
+
+static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
+{
+ uec_t *uec_regs;
+ u32 maccfg1;
+
+ if (!uec) {
+ printf("%s: uec not initial\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ uec_regs = uec->uec_regs;
+
+ maccfg1 = in_be32(&uec_regs->maccfg1);
+
+ if (mode & COMM_DIR_TX) {
+ maccfg1 |= MACCFG1_ENABLE_TX;
+ out_be32(&uec_regs->maccfg1, maccfg1);
+ uec->mac_tx_enabled = 1;
+ }
+
+ if (mode & COMM_DIR_RX) {
+ maccfg1 |= MACCFG1_ENABLE_RX;
+ out_be32(&uec_regs->maccfg1, maccfg1);
+ uec->mac_rx_enabled = 1;
+ }
+
+ return 0;
+}
+
+static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
+{
+ uec_t *uec_regs;
+ u32 maccfg1;
+
+ if (!uec) {
+ printf("%s: uec not initial\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ uec_regs = uec->uec_regs;
+
+ maccfg1 = in_be32(&uec_regs->maccfg1);
+
+ if (mode & COMM_DIR_TX) {
+ maccfg1 &= ~MACCFG1_ENABLE_TX;
+ out_be32(&uec_regs->maccfg1, maccfg1);
+ uec->mac_tx_enabled = 0;
+ }
+
+ if (mode & COMM_DIR_RX) {
+ maccfg1 &= ~MACCFG1_ENABLE_RX;
+ out_be32(&uec_regs->maccfg1, maccfg1);
+ uec->mac_rx_enabled = 0;
+ }
+
+ return 0;
+}
+
+static int uec_graceful_stop_tx(uec_private_t *uec)
+{
+ ucc_fast_t *uf_regs;
+ u32 cecr_subblock;
+ u32 ucce;
+
+ if (!uec || !uec->uccf) {
+ printf("%s: No handle passed.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ uf_regs = uec->uccf->uf_regs;
+
+ /* Clear the grace stop event */
+ out_be32(&uf_regs->ucce, UCCE_GRA);
+
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+
+ /* Wait for command to complete */
+ do {
+ ucce = in_be32(&uf_regs->ucce);
+ } while (! (ucce & UCCE_GRA));
+
+ uec->grace_stopped_tx = 1;
+
+ return 0;
+}
+
+static int uec_graceful_stop_rx(uec_private_t *uec)
+{
+ u32 cecr_subblock;
+ u8 ack;
+
+ if (!uec) {
+ printf("%s: No handle passed.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (!uec->p_rx_glbl_pram) {
+ printf("%s: No init rx global parameter\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Clear acknowledge bit */
+ ack = uec->p_rx_glbl_pram->rxgstpack;
+ ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+ uec->p_rx_glbl_pram->rxgstpack = ack;
+
+ /* Keep issuing cmd and checking ack bit until it is asserted */
+ do {
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+ ack = uec->p_rx_glbl_pram->rxgstpack;
+ } while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));
+
+ uec->grace_stopped_rx = 1;
+
+ return 0;
+}
+
+static int uec_restart_tx(uec_private_t *uec)
+{
+ u32 cecr_subblock;
+
+ if (!uec || !uec->uec_info) {
+ printf("%s: No handle passed.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+
+ uec->grace_stopped_tx = 0;
+
+ return 0;
+}
+
+static int uec_restart_rx(uec_private_t *uec)
+{
+ u32 cecr_subblock;
+
+ if (!uec || !uec->uec_info) {
+ printf("%s: No handle passed.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+
+ uec->grace_stopped_rx = 0;
+
+ return 0;
+}
+
+static int uec_open(uec_private_t *uec, comm_dir_e mode)
+{
+ ucc_fast_private_t *uccf;
+
+ if (!uec || !uec->uccf) {
+ printf("%s: No handle passed.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ uccf = uec->uccf;
+
+ /* check if the UCC number is in range. */
+ if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ printf("%s: ucc_num out of range.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Enable MAC */
+ uec_mac_enable(uec, mode);
+
+ /* Enable UCC fast */
+ ucc_fast_enable(uccf, mode);
+
+ /* RISC microcode start */
+ if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
+ uec_restart_tx(uec);
+ }
+ if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
+ uec_restart_rx(uec);
+ }
+
+ return 0;
+}
+
+static int uec_stop(uec_private_t *uec, comm_dir_e mode)
+{
+ ucc_fast_private_t *uccf;
+
+ if (!uec || !uec->uccf) {
+ printf("%s: No handle passed.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ uccf = uec->uccf;
+
+ /* check if the UCC number is in range. */
+ if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ printf("%s: ucc_num out of range.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ /* Stop any transmissions */
+ if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
+ uec_graceful_stop_tx(uec);
+ }
+ /* Stop any receptions */
+ if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
+ uec_graceful_stop_rx(uec);
+ }
+
+ /* Disable the UCC fast */
+ ucc_fast_disable(uec->uccf, mode);
+
+ /* Disable the MAC */
+ uec_mac_disable(uec, mode);
+
+ return 0;
+}
+
+static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
+{
+ uec_t *uec_regs;
+ u32 maccfg2;
+
+ if (!uec) {
+ printf("%s: uec not initial\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ uec_regs = uec->uec_regs;
+
+ if (duplex == DUPLEX_HALF) {
+ maccfg2 = in_be32(&uec_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_FDX;
+ out_be32(&uec_regs->maccfg2, maccfg2);
+ }
+
+ if (duplex == DUPLEX_FULL) {
+ maccfg2 = in_be32(&uec_regs->maccfg2);
+ maccfg2 |= MACCFG2_FDX;
+ out_be32(&uec_regs->maccfg2, maccfg2);
+ }
+
+ return 0;
+}
+
+static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode)
+{
+ enet_interface_e enet_if_mode;
+ uec_info_t *uec_info;
+ uec_t *uec_regs;
+ u32 upsmr;
+ u32 maccfg2;
+
+ if (!uec) {
+ printf("%s: uec not initial\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ uec_info = uec->uec_info;
+ uec_regs = uec->uec_regs;
+ enet_if_mode = if_mode;
+
+ maccfg2 = in_be32(&uec_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+
+ upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
+ upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);
+
+ switch (enet_if_mode) {
+ case ENET_100_MII:
+ case ENET_10_MII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ break;
+ case ENET_1000_GMII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ break;
+ case ENET_1000_TBI:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ upsmr |= UPSMR_TBIM;
+ break;
+ case ENET_1000_RTBI:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ upsmr |= (UPSMR_RPM | UPSMR_TBIM);
+ break;
+ case ENET_1000_RGMII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ upsmr |= UPSMR_RPM;
+ break;
+ case ENET_100_RGMII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ upsmr |= UPSMR_RPM;
+ break;
+ case ENET_10_RGMII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ upsmr |= (UPSMR_RPM | UPSMR_R10M);
+ break;
+ case ENET_100_RMII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ upsmr |= UPSMR_RMM;
+ break;
+ case ENET_10_RMII:
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ upsmr |= (UPSMR_R10M | UPSMR_RMM);
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ out_be32(&uec_regs->maccfg2, maccfg2);
+ out_be32(&uec->uccf->uf_regs->upsmr, upsmr);
+
+ return 0;
+}
+
+static int init_mii_management_configuration(uec_t *uec_regs)
+{
+ int timeout = 0x1000;
+ u32 miimcfg = 0;
+
+ miimcfg = in_be32(&uec_regs->miimcfg);
+ miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
+ out_be32(&uec_regs->miimcfg, miimcfg);
+
+ /* Wait until the bus is free */
+ while ((in_be32(&uec_regs->miimind) & MIIMIND_BUSY) && timeout--);
+ if (timeout <= 0) {
+ printf("%s: The MII Bus is stuck!", __FUNCTION__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int init_phy(struct eth_device *dev)
+{
+ uec_private_t *uec;
+ uec_t *uec_regs;
+ struct uec_mii_info *mii_info;
+ struct phy_info *curphy;
+ int err;
+
+ uec = (uec_private_t *)dev->priv;
+ uec_regs = uec->uec_regs;
+
+ uec->oldlink = 0;
+ uec->oldspeed = 0;
+ uec->oldduplex = -1;
+
+ mii_info = malloc(sizeof(*mii_info));
+ if (!mii_info) {
+ printf("%s: Could not allocate mii_info", dev->name);
+ return -ENOMEM;
+ }
+ memset(mii_info, 0, sizeof(*mii_info));
+
+ mii_info->speed = SPEED_1000;
+ mii_info->duplex = DUPLEX_FULL;
+ mii_info->pause = 0;
+ mii_info->link = 1;
+
+ mii_info->advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ mii_info->autoneg = 1;
+ mii_info->mii_id = uec->uec_info->phy_address;
+ mii_info->dev = dev;
+
+ mii_info->mdio_read = &read_phy_reg;
+ mii_info->mdio_write = &write_phy_reg;
+
+ uec->mii_info = mii_info;
+
+ if (init_mii_management_configuration(uec_regs)) {
+ printf("%s: The MII Bus is stuck!", dev->name);
+ err = -1;
+ goto bus_fail;
+ }
+
+ /* get info for this PHY */
+ curphy = get_phy_info(uec->mii_info);
+ if (!curphy) {
+ printf("%s: No PHY found", dev->name);
+ err = -1;
+ goto no_phy;
+ }
+
+ mii_info->phyinfo = curphy;
+
+ /* Run the commands which initialize the PHY */
+ if (curphy->init) {
+ err = curphy->init(uec->mii_info);
+ if (err)
+ goto phy_init_fail;
+ }
+
+ return 0;
+
+phy_init_fail:
+no_phy:
+bus_fail:
+ free(mii_info);
+ return err;
+}
+
+static void adjust_link(struct eth_device *dev)
+{
+ uec_private_t *uec = (uec_private_t *)dev->priv;
+ uec_t *uec_regs;
+ struct uec_mii_info *mii_info = uec->mii_info;
+
+ extern void change_phy_interface_mode(struct eth_device *dev,
+ enet_interface_e mode);
+ uec_regs = uec->uec_regs;
+
+ if (mii_info->link) {
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (mii_info->duplex != uec->oldduplex) {
+ if (!(mii_info->duplex)) {
+ uec_set_mac_duplex(uec, DUPLEX_HALF);
+ printf("%s: Half Duplex\n", dev->name);
+ } else {
+ uec_set_mac_duplex(uec, DUPLEX_FULL);
+ printf("%s: Full Duplex\n", dev->name);
+ }
+ uec->oldduplex = mii_info->duplex;
+ }
+
+ if (mii_info->speed != uec->oldspeed) {
+ switch (mii_info->speed) {
+ case 1000:
+ break;
+ case 100:
+ printf ("switching to rgmii 100\n");
+ /* change phy to rgmii 100 */
+ change_phy_interface_mode(dev,
+ ENET_100_RGMII);
+ /* change the MAC interface mode */
+ uec_set_mac_if_mode(uec,ENET_100_RGMII);
+ break;
+ case 10:
+ printf ("switching to rgmii 10\n");
+ /* change phy to rgmii 10 */
+ change_phy_interface_mode(dev,
+ ENET_10_RGMII);
+ /* change the MAC interface mode */
+ uec_set_mac_if_mode(uec,ENET_10_RGMII);
+ break;
+ default:
+ printf("%s: Ack,Speed(%d)is illegal\n",
+ dev->name, mii_info->speed);
+ break;
+ }
+
+ printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
+ uec->oldspeed = mii_info->speed;
+ }
+
+ if (!uec->oldlink) {
+ printf("%s: Link is up\n", dev->name);
+ uec->oldlink = 1;
+ }
+
+ } else { /* if (mii_info->link) */
+ if (uec->oldlink) {
+ printf("%s: Link is down\n", dev->name);
+ uec->oldlink = 0;
+ uec->oldspeed = 0;
+ uec->oldduplex = -1;
+ }
+ }
+}
+
+static void phy_change(struct eth_device *dev)
+{
+ uec_private_t *uec = (uec_private_t *)dev->priv;
+ uec_t *uec_regs;
+ int result = 0;
+
+ uec_regs = uec->uec_regs;
+
+ /* Delay 5s to give the PHY a chance to change the register state */
+ udelay(5000000);
+
+ /* Update the link, speed, duplex */
+ result = uec->mii_info->phyinfo->read_status(uec->mii_info);
+
+ /* Adjust the interface according to speed */
+ if ((0 == result) || (uec->mii_info->link == 0)) {
+ adjust_link(dev);
+ }
+}
+
+static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
+{
+ uec_t *uec_regs;
+ u32 mac_addr1;
+ u32 mac_addr2;
+
+ if (!uec) {
+ printf("%s: uec not initial\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ uec_regs = uec->uec_regs;
+
+ /* For a station address of 0x12345678ABCD, write
+ 0xCDAB7856 to MACSTNADDR1 and
+ 0x34120000 to MACSTNADDR2 */
+
+ mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
+ (mac_addr[3] << 8) | (mac_addr[2]);
+ out_be32(&uec_regs->macstnaddr1, mac_addr1);
+
+ mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
+ out_be32(&uec_regs->macstnaddr2, mac_addr2);
+
+ return 0;
+}
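	/*
	 * Worked example (editorial illustration, not part of the commit)
	 * for uec_set_mac_address() above, using the station address
	 * already quoted in its comment:
	 *
	 *   mac_addr[] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD }
	 *
	 *   mac_addr1 = (0xCD << 24) | (0xAB << 16) | (0x78 << 8) | 0x56
	 *             = 0xCDAB7856   ->  MACSTNADDR1
	 *   mac_addr2 = ((0x34 << 24) | (0x12 << 16)) & 0xffff0000
	 *             = 0x34120000   ->  MACSTNADDR2
	 */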
+
+static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
+ int *threads_num_ret)
+{
+ int num_threads_numerica;
+
+ switch (threads_num) {
+ case UEC_NUM_OF_THREADS_1:
+ num_threads_numerica = 1;
+ break;
+ case UEC_NUM_OF_THREADS_2:
+ num_threads_numerica = 2;
+ break;
+ case UEC_NUM_OF_THREADS_4:
+ num_threads_numerica = 4;
+ break;
+ case UEC_NUM_OF_THREADS_6:
+ num_threads_numerica = 6;
+ break;
+ case UEC_NUM_OF_THREADS_8:
+ num_threads_numerica = 8;
+ break;
+ default:
+ printf("%s: Bad number of threads value.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ *threads_num_ret = num_threads_numerica;
+
+ return 0;
+}
+
+static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
+{
+ uec_info_t *uec_info;
+ u32 end_bd;
+ u8 bmrx = 0;
+ int i;
+
+ uec_info = uec->uec_info;
+
+ /* Alloc global Tx parameter RAM page */
+ uec->tx_glbl_pram_offset = qe_muram_alloc(
+ sizeof(uec_tx_global_pram_t),
+ UEC_TX_GLOBAL_PRAM_ALIGNMENT);
+ uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
+ qe_muram_addr(uec->tx_glbl_pram_offset);
+
+ /* Zero the global Tx parameter RAM */
+ memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));
+
+ /* Init global Tx parameter RAM */
+
+ /* TEMODER, RMON statistics disable, one Tx queue */
+ out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);
+
+ /* SQPTR */
+ uec->send_q_mem_reg_offset = qe_muram_alloc(
+ sizeof(uec_send_queue_qd_t),
+ UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
+ qe_muram_addr(uec->send_q_mem_reg_offset);
+ out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);
+
+ /* Setup the table with TxBDs ring */
+ end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
+ * SIZEOFBD;
+ out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
+ (u32)(uec->p_tx_bd_ring));
+ out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
+ end_bd);
+
+ /* Scheduler Base Pointer: we have only one Tx queue, so it is not needed */
+ out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);
+
+ /* TxRMON Base Pointer, TxRMON disable, we don't need it */
+ out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);
+
+ /* TSTATE, global snooping, big endian, the CSB bus selected */
+ bmrx = BMR_INIT_VALUE;
+ out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));
+
+ /* IPH_Offset */
+ for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
+ out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
+ }
+
+ /* VTAG table */
+ for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
+ out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
+ }
+
+ /* TQPTR */
+ uec->thread_dat_tx_offset = qe_muram_alloc(
+ num_threads_tx * sizeof(uec_thread_data_tx_t) +
+ 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);
+
+ uec->p_thread_data_tx = (uec_thread_data_tx_t *)
+ qe_muram_addr(uec->thread_dat_tx_offset);
+ out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
+}
+
+static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
+{
+ u8 bmrx = 0;
+ int i;
+ uec_82xx_address_filtering_pram_t *p_af_pram;
+
+ /* Allocate global Rx parameter RAM page */
+ uec->rx_glbl_pram_offset = qe_muram_alloc(
+ sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
+ uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
+ qe_muram_addr(uec->rx_glbl_pram_offset);
+
+ /* Zero Global Rx parameter RAM */
+ memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));
+
+ /* Init global Rx parameter RAM */
+ /* REMODER: extended feature mode disabled, VLAN disabled,
+ lossless flow control disabled, receive firmware statistics disabled,
+ extended address parsing mode disabled, one Rx queue,
+ dynamic maximum/minimum frame length disabled, IP checksum check
+ disabled, IP address alignment disabled
+ */
+ out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);
+
+ /* RQPTR */
+ uec->thread_dat_rx_offset = qe_muram_alloc(
+ num_threads_rx * sizeof(uec_thread_data_rx_t),
+ UEC_THREAD_DATA_ALIGNMENT);
+ uec->p_thread_data_rx = (uec_thread_data_rx_t *)
+ qe_muram_addr(uec->thread_dat_rx_offset);
+ out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);
+
+ /* Type_or_Len */
+ out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);
+
+ /* RxRMON base pointer, we don't need it */
+ out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);
+
+ /* IntCoalescingPTR, we don't need it, no interrupt */
+ out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);
+
+ /* RSTATE, global snooping, big endian, the CSB bus selected */
+ bmrx = BMR_INIT_VALUE;
+ out_8(&uec->p_rx_glbl_pram->rstate, bmrx);
+
+ /* MRBLR */
+ out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);
+
+ /* RBDQPTR */
+ uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
+ sizeof(uec_rx_bd_queues_entry_t) + \
+ sizeof(uec_rx_prefetched_bds_t),
+ UEC_RX_BD_QUEUES_ALIGNMENT);
+ uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
+ qe_muram_addr(uec->rx_bd_qs_tbl_offset);
+
+ /* Zero it */
+ memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
+ sizeof(uec_rx_prefetched_bds_t));
+ out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
+ out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
+ (u32)uec->p_rx_bd_ring);
+
+ /* MFLR */
+ out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
+ /* MINFLR */
+ out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
+ /* MAXD1 */
+ out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
+ /* MAXD2 */
+ out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
+ /* ECAM_PTR */
+ out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
+ /* L2QT */
+ out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
+ /* L3QT */
+ for (i = 0; i < 8; i++) {
+ out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
+ }
+
+ /* VLAN_TYPE */
+ out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
+ /* TCI */
+ out_be16(&uec->p_rx_glbl_pram->vlantci, 0);
+
+ /* Clear PQ2 style address filtering hash table */
+ p_af_pram = (uec_82xx_address_filtering_pram_t *) \
+ uec->p_rx_glbl_pram->addressfiltering;
+
+ p_af_pram->iaddr_h = 0;
+ p_af_pram->iaddr_l = 0;
+ p_af_pram->gaddr_h = 0;
+ p_af_pram->gaddr_l = 0;
+}
+
+static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
+ int thread_tx, int thread_rx)
+{
+ uec_init_cmd_pram_t *p_init_enet_param;
+ u32 init_enet_param_offset;
+ uec_info_t *uec_info;
+ int i;
+ int snum;
+ u32 init_enet_offset;
+ u32 entry_val;
+ u32 command;
+ u32 cecr_subblock;
+
+ uec_info = uec->uec_info;
+
+ /* Allocate init enet command parameter */
+ uec->init_enet_param_offset = qe_muram_alloc(
+ sizeof(uec_init_cmd_pram_t), 4);
+ init_enet_param_offset = uec->init_enet_param_offset;
+ uec->p_init_enet_param = (uec_init_cmd_pram_t *)
+ qe_muram_addr(uec->init_enet_param_offset);
+
+ /* Zero init enet command struct */
+ memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));
+
+ /* Init the command struct */
+ p_init_enet_param = uec->p_init_enet_param;
+ p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
+ p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
+ p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
+ p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
+ p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
+ p_init_enet_param->largestexternallookupkeysize = 0;
+
+ p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
+ << ENET_INIT_PARAM_RGF_SHIFT;
+ p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
+ << ENET_INIT_PARAM_TGF_SHIFT;
+
+ /* Init Rx global parameter pointer */
+ p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
+ (u32)uec_info->riscRx;
+
+ /* Init Rx threads */
+ for (i = 0; i < (thread_rx + 1); i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ printf("%s can not get snum\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ if (i==0) {
+ init_enet_offset = 0;
+ } else {
+ init_enet_offset = qe_muram_alloc(
+ sizeof(uec_thread_rx_pram_t),
+ UEC_THREAD_RX_PRAM_ALIGNMENT);
+ }
+
+ entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
+ init_enet_offset | (u32)uec_info->riscRx;
+ p_init_enet_param->rxthread[i] = entry_val;
+ }
+
+ /* Init Tx global parameter pointer */
+ p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
+ (u32)uec_info->riscTx;
+
+ /* Init Tx threads */
+ for (i = 0; i < thread_tx; i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ printf("%s can not get snum\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
+ UEC_THREAD_TX_PRAM_ALIGNMENT);
+
+ entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
+ init_enet_offset | (u32)uec_info->riscTx;
+ p_init_enet_param->txthread[i] = entry_val;
+ }
+
+ __asm__ __volatile__("sync");
+
+ /* Issue QE command */
+ command = QE_INIT_TX_RX;
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ uec->uec_info->uf_info.ucc_num);
+ qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ init_enet_param_offset);
+
+ return 0;
+}
+
+static int uec_startup(uec_private_t *uec)
+{
+ uec_info_t *uec_info;
+ ucc_fast_info_t *uf_info;
+ ucc_fast_private_t *uccf;
+ ucc_fast_t *uf_regs;
+ uec_t *uec_regs;
+ int num_threads_tx;
+ int num_threads_rx;
+ u32 utbipar;
+ enet_interface_e enet_interface;
+ u32 length;
+ u32 align;
+ qe_bd_t *bd;
+ u8 *buf;
+ int i;
+
+ if (!uec || !uec->uec_info) {
+ printf("%s: uec or uec_info not initial\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ uec_info = uec->uec_info;
+ uf_info = &(uec_info->uf_info);
+
+ /* Check if Rx BD ring len is illegal */
+ if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
+ (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
+ printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Check if Tx BD ring len is illegal */
+ if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
+ printf("%s: Tx BD ring length must not be smaller than 2.\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Check if MRBLR is illegal */
+ if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
+ printf("%s: max rx buffer length must be mutliple of 128.\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Both Rx and Tx are stopped */
+ uec->grace_stopped_rx = 1;
+ uec->grace_stopped_tx = 1;
+
+ /* Init UCC fast */
+ if (ucc_fast_init(uf_info, &uccf)) {
+ printf("%s: failed to init ucc fast\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ /* Save uccf */
+ uec->uccf = uccf;
+
+ /* Convert the Tx threads number */
+ if (uec_convert_threads_num(uec_info->num_threads_tx,
+ &num_threads_tx)) {
+ return -EINVAL;
+ }
+
+ /* Convert the Rx threads number */
+ if (uec_convert_threads_num(uec_info->num_threads_rx,
+ &num_threads_rx)) {
+ return -EINVAL;
+ }
+
+ uf_regs = uccf->uf_regs;
+
+ /* The UEC registers follow the UCC fast registers */
+ uec_regs = (uec_t *)(&uf_regs->ucc_eth);
+
+ /* Save the UEC register pointer to UEC private struct */
+ uec->uec_regs = uec_regs;
+
+ /* Init UPSMR, enable hardware statistics (UCC) */
+ out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);
+
+ /* Init MACCFG1, flow control disable, disable Tx and Rx */
+ out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);
+
+ /* Init MACCFG2, length check, MAC PAD and CRC enable */
+ out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);
+
+ /* Setup MAC interface mode */
+ uec_set_mac_if_mode(uec, uec_info->enet_interface);
+
+ /* Setup MII master clock source */
+ qe_set_mii_clk_src(uec_info->uf_info.ucc_num);
+
+ /* Setup UTBIPAR */
+ utbipar = in_be32(&uec_regs->utbipar);
+ utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
+ enet_interface = uec->uec_info->enet_interface;
+ if (enet_interface == ENET_1000_TBI ||
+ enet_interface == ENET_1000_RTBI) {
+ utbipar |= (uec_info->phy_address + uec_info->uf_info.ucc_num)
+ << UTBIPAR_PHY_ADDRESS_SHIFT;
+ } else {
+ utbipar |= (0x10 + uec_info->uf_info.ucc_num)
+ << UTBIPAR_PHY_ADDRESS_SHIFT;
+ }
+
+ out_be32(&uec_regs->utbipar, utbipar);
+
+ /* Allocate Tx BDs */
+ length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
+ UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
+ UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
+ UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
+ length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ }
+
+ align = UEC_TX_BD_RING_ALIGNMENT;
+ uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
+ if (uec->tx_bd_ring_offset != 0) {
+ uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
+ & ~(align - 1));
+ } else {
+ printf("%s: Tx BD ring allocation failed\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ /* Zero all Tx BDs */
+ memset((void *)(uec->tx_bd_ring_offset), 0, length + align);
+
+ /* Allocate Rx BDs */
+ length = uec_info->rx_bd_ring_len * SIZEOFBD;
+ align = UEC_RX_BD_RING_ALIGNMENT;
+ uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
+ if (uec->rx_bd_ring_offset != 0) {
+ uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
+ & ~(align - 1));
+ } else {
+ printf("%s: Rx BD ring allocation failed\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ /* Zero all Rx BDs */
+ memset((void *)(uec->rx_bd_ring_offset), 0, length + align);
+
+ /* Allocate Rx buffer */
+ length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
+ align = UEC_RX_DATA_BUF_ALIGNMENT;
+ uec->rx_buf_offset = (u32)malloc(length + align);
+ if (uec->rx_buf_offset != 0) {
+ uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
+ & ~(align - 1));
+ } else {
+ printf("%s: Rx buffer allocation failed\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ /* Zero the whole Rx buffer area */
+ memset((void *)(uec->rx_buf_offset), 0, length + align);
+
+ /* Init TxBD ring */
+ bd = (qe_bd_t *)uec->p_tx_bd_ring;
+ uec->txBd = bd;
+
+ for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
+ BD_DATA_CLEAR(bd);
+ BD_STATUS_SET(bd, 0);
+ BD_LENGTH_SET(bd, 0);
+ bd++;
+ }
+ BD_STATUS_SET((--bd), TxBD_WRAP);
+
+ /* Init RxBD ring */
+ bd = (qe_bd_t *)uec->p_rx_bd_ring;
+ uec->rxBd = bd;
+ buf = uec->p_rx_buf;
+ for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
+ BD_DATA_SET(bd, buf);
+ BD_LENGTH_SET(bd, 0);
+ BD_STATUS_SET(bd, RxBD_EMPTY);
+ buf += MAX_RXBUF_LEN;
+ bd++;
+ }
+ BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);
+
+ /* Init global Tx parameter RAM */
+ uec_init_tx_parameter(uec, num_threads_tx);
+
+ /* Init global Rx parameter RAM */
+ uec_init_rx_parameter(uec, num_threads_rx);
+
+ /* Init ethernet Tx and Rx parameter command */
+ if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
+ num_threads_rx)) {
+ printf("%s issue init enet cmd failed\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int uec_init(struct eth_device* dev, bd_t *bd)
+{
+ uec_private_t *uec;
+ int err;
+
+ uec = (uec_private_t *)dev->priv;
+
+ if (uec->the_first_run == 0) {
+ /* Set up the MAC address */
+ if (dev->enetaddr[0] & 0x01) {
+ printf("%s: MacAddress is multcast address\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ uec_set_mac_address(uec, dev->enetaddr);
+ uec->the_first_run = 1;
+ }
+
+ err = uec_open(uec, COMM_DIR_RX_AND_TX);
+ if (err) {
+ printf("%s: cannot enable UEC device\n", dev->name);
+ return err;
+ }
+
+ return 0;
+}
+
+static void uec_halt(struct eth_device* dev)
+{
+ uec_private_t *uec = (uec_private_t *)dev->priv;
+ uec_stop(uec, COMM_DIR_RX_AND_TX);
+}
+
+static int uec_send(struct eth_device* dev, volatile void *buf, int len)
+{
+ uec_private_t *uec;
+ ucc_fast_private_t *uccf;
+ volatile qe_bd_t *bd;
+ volatile u16 status;
+ int i;
+ int result = 0;
+
+ uec = (uec_private_t *)dev->priv;
+ uccf = uec->uccf;
+ bd = uec->txBd;
+
+ /* Find an empty TxBD */
+ for (i = 0; BD_STATUS(bd) & TxBD_READY; i++) {
+ if (i > 0x100000) {
+ printf("%s: tx buffer not ready\n", dev->name);
+ return result;
+ }
+ }
+
+ /* Init TxBD */
+ BD_DATA_SET(bd, buf);
+ BD_LENGTH_SET(bd, len);
+ status = BD_STATUS(bd);
+ status &= BD_WRAP;
+ status |= (TxBD_READY | TxBD_LAST);
+ BD_STATUS_SET(bd, status);
+
+ /* Tell UCC to transmit the buffer */
+ ucc_fast_transmit_on_demand(uccf);
+
+ /* Wait for buffer to be transmitted */
+ status = BD_STATUS(bd);
+ for (i = 0; status & TxBD_READY; i++) {
+ if (i > 0x100000) {
+ printf("%s: tx error\n", dev->name);
+ return result;
+ }
+ status = BD_STATUS(bd);
+ }
+
+ /* OK, the buffer has been transmitted */
+ BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
+ uec->txBd = bd;
+ result = 1;
+
+ return result;
+}
+
+static int uec_recv(struct eth_device* dev)
+{
+ uec_private_t *uec = dev->priv;
+ volatile qe_bd_t *bd;
+ volatile u16 status;
+ u16 len;
+ u8 *data;
+
+ bd = uec->rxBd;
+ status = BD_STATUS(bd);
+
+ while (!(status & RxBD_EMPTY)) {
+ if (!(status & RxBD_ERROR)) {
+ data = BD_DATA(bd);
+ len = BD_LENGTH(bd);
+ NetReceive(data, len);
+ } else {
+ printf("%s: Rx error\n", dev->name);
+ }
+ status &= BD_CLEAN;
+ BD_LENGTH_SET(bd, 0);
+ BD_STATUS_SET(bd, status | RxBD_EMPTY);
+ BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
+ status = BD_STATUS(bd);
+ }
+ uec->rxBd = bd;
+
+ return 1;
+}
+
+int uec_initialize(int index)
+{
+ struct eth_device *dev;
+ int i;
+ uec_private_t *uec;
+ uec_info_t *uec_info;
+ int err;
+
+ dev = (struct eth_device *)malloc(sizeof(struct eth_device));
+ if (!dev)
+ return -ENOMEM;
+ memset(dev, 0, sizeof(struct eth_device));
+
+ /* Allocate the UEC private struct */
+ uec = (uec_private_t *)malloc(sizeof(uec_private_t));
+ if (!uec) {
+ return -ENOMEM;
+ }
+ memset(uec, 0, sizeof(uec_private_t));
+
+ /* Init UEC private struct; these settings come from board.h */
+ if (index == 0) {
+#ifdef CONFIG_UEC_ETH1
+ uec_info = &eth1_uec_info;
+#endif
+ } else if (index == 1) {
+#ifdef CONFIG_UEC_ETH2
+ uec_info = &eth2_uec_info;
+#endif
+ } else {
+ printf("%s: index is illegal.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ uec->uec_info = uec_info;
+
+ sprintf(dev->name, "FSL UEC%d", index);
+ dev->iobase = 0;
+ dev->priv = (void *)uec;
+ dev->init = uec_init;
+ dev->halt = uec_halt;
+ dev->send = uec_send;
+ dev->recv = uec_recv;
+
+ /* Clear the ethernet address */
+ for (i = 0; i < 6; i++)
+ dev->enetaddr[i] = 0;
+
+ eth_register(dev);
+
+ err = uec_startup(uec);
+ if (err) {
+ printf("%s: Cannot configure net device, aborting.",dev->name);
+ return err;
+ }
+
+ err = init_phy(dev);
+ if (err) {
+ printf("%s: Cannot initialize PHY, aborting.\n", dev->name);
+ return err;
+ }
+
+ phy_change(dev);
+
+ return 1;
+}
+#endif /* CONFIG_QE */
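For orientation: uec_initialize() above is the driver's entry point, registering one eth_device per UEC. A minimal sketch of a call site follows; it relies only on the CONFIG_UEC_ETH1/CONFIG_UEC_ETH2 switches used above, while the board_uec_init() wrapper name is an illustrative assumption rather than part of this patch.

	/* Illustrative sketch, not part of this patch: bring up each
	 * configured UEC.  Only uec_initialize() comes from the driver;
	 * board_uec_init() is a hypothetical hook name. */
	extern int uec_initialize(int index);

	static void board_uec_init(void)
	{
	#ifdef CONFIG_UEC_ETH1
		uec_initialize(0);		/* registers "FSL UEC0" */
	#endif
	#ifdef CONFIG_UEC_ETH2
		uec_initialize(1);		/* registers "FSL UEC1" */
	#endif
	}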
diff --git a/drivers/qe/uec.h b/drivers/qe/uec.h
new file mode 100644
index 0000000..0495026
--- /dev/null
+++ b/drivers/qe/uec.h
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __UEC_H__
+#define __UEC_H__
+
+#define MAX_TX_THREADS 8
+#define MAX_RX_THREADS 8
+#define MAX_TX_QUEUES 8
+#define MAX_RX_QUEUES 8
+#define MAX_PREFETCHED_BDS 4
+#define MAX_IPH_OFFSET_ENTRY 8
+#define MAX_ENET_INIT_PARAM_ENTRIES_RX 9
+#define MAX_ENET_INIT_PARAM_ENTRIES_TX 8
+
+/* UEC UPSMR (Protocol Specific Mode Register)
+ */
+#define UPSMR_ECM 0x04000000 /* Enable CAM Miss */
+#define UPSMR_HSE 0x02000000 /* Hardware Statistics Enable */
+#define UPSMR_PRO 0x00400000 /* Promiscuous */
+#define UPSMR_CAP 0x00200000 /* CAM polarity */
+#define UPSMR_RSH 0x00100000 /* Receive Short Frames */
+#define UPSMR_RPM 0x00080000 /* Reduced Pin Mode interfaces */
+#define UPSMR_R10M 0x00040000 /* RGMII/RMII 10 Mode */
+#define UPSMR_RLPB 0x00020000 /* RMII Loopback Mode */
+#define UPSMR_TBIM 0x00010000 /* Ten-bit Interface Mode */
+#define UPSMR_RMM 0x00001000 /* RMII/RGMII Mode */
+#define UPSMR_CAM 0x00000400 /* CAM Address Matching */
+#define UPSMR_BRO 0x00000200 /* Broadcast Address */
+#define UPSMR_RES1 0x00002000 /* Reserved field - must be 1 */
+
+#define UPSMR_INIT_VALUE (UPSMR_HSE | UPSMR_RES1)
+
+/* UEC MACCFG1 (MAC Configuration 1 Register)
+ */
+#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control Rx */
+#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Enable Rx Sync */
+#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
+#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Enable Tx Sync */
+#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
+
+#define MACCFG1_INIT_VALUE (0)
+
+/* UEC MACCFG2 (MAC Configuration 2 Register)
+ */
+#define MACCFG2_PREL 0x00007000
+#define MACCFG2_PREL_SHIFT (31 - 19)
+#define MACCFG2_PREL_MASK 0x0000f000
+#define MACCFG2_SRP 0x00000080
+#define MACCFG2_STP 0x00000040
+#define MACCFG2_RESERVED_1 0x00000020 /* must be set */
+#define MACCFG2_LC 0x00000010 /* Length Check */
+#define MACCFG2_MPE 0x00000008
+#define MACCFG2_FDX 0x00000001 /* Full Duplex */
+#define MACCFG2_FDX_MASK 0x00000001
+#define MACCFG2_PAD_CRC 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100
+#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200
+#define MACCFG2_INTERFACE_MODE_MASK 0x00000300
+
+#define MACCFG2_INIT_VALUE (MACCFG2_PREL | MACCFG2_RESERVED_1 | \
+ MACCFG2_LC | MACCFG2_PAD_CRC | MACCFG2_FDX)
+
+/* UEC Event Register
+*/
+#define UCCE_MPD 0x80000000
+#define UCCE_SCAR 0x40000000
+#define UCCE_GRA 0x20000000
+#define UCCE_CBPR 0x10000000
+#define UCCE_BSY 0x08000000
+#define UCCE_RXC 0x04000000
+#define UCCE_TXC 0x02000000
+#define UCCE_TXE 0x01000000
+#define UCCE_TXB7 0x00800000
+#define UCCE_TXB6 0x00400000
+#define UCCE_TXB5 0x00200000
+#define UCCE_TXB4 0x00100000
+#define UCCE_TXB3 0x00080000
+#define UCCE_TXB2 0x00040000
+#define UCCE_TXB1 0x00020000
+#define UCCE_TXB0 0x00010000
+#define UCCE_RXB7 0x00008000
+#define UCCE_RXB6 0x00004000
+#define UCCE_RXB5 0x00002000
+#define UCCE_RXB4 0x00001000
+#define UCCE_RXB3 0x00000800
+#define UCCE_RXB2 0x00000400
+#define UCCE_RXB1 0x00000200
+#define UCCE_RXB0 0x00000100
+#define UCCE_RXF7 0x00000080
+#define UCCE_RXF6 0x00000040
+#define UCCE_RXF5 0x00000020
+#define UCCE_RXF4 0x00000010
+#define UCCE_RXF3 0x00000008
+#define UCCE_RXF2 0x00000004
+#define UCCE_RXF1 0x00000002
+#define UCCE_RXF0 0x00000001
+
+#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 | \
+ UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
+#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 | \
+ UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
+#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 | \
+ UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
+#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY | \
+ UCCE_RXC | UCCE_TXC | UCCE_TXE)
+
+/* UEC TEMODER Register
+*/
+#define TEMODER_SCHEDULER_ENABLE 0x2000
+#define TEMODER_IP_CHECKSUM_GENERATE 0x0400
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200
+#define TEMODER_RMON_STATISTICS 0x0100
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15)
+
+#define TEMODER_INIT_VALUE 0xc000
+
+/* UEC REMODER Register
+*/
+#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000
+#define REMODER_RX_EXTENDED_FEATURES 0x80000000
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9)
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10)
+#define REMODER_RX_QOS_MODE_SHIFT (31-15)
+#define REMODER_RMON_STATISTICS 0x00001000
+#define REMODER_RX_EXTENDED_FILTERING 0x00000800
+#define REMODER_NUM_OF_QUEUES_SHIFT (31-23)
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004
+#define REMODER_IP_CHECKSUM_CHECK 0x00000002
+#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001
+
+#define REMODER_INIT_VALUE 0
+
+/* BMRx - Bus Mode Register */
+#define BMR_GLB 0x20
+#define BMR_BO_BE 0x10
+#define BMR_DTB_SECONDARY_BUS 0x02
+#define BMR_BDB_SECONDARY_BUS 0x01
+
+#define BMR_SHIFT 24
+#define BMR_INIT_VALUE (BMR_GLB | BMR_BO_BE)
+
+/* UEC UCCS (Ethernet Status Register)
+ */
+#define UCCS_BPR 0x02
+#define UCCS_PAU 0x02
+#define UCCS_MPD 0x01
+
+/* UEC MIIMCFG (MII Management Configuration Register)
+ */
+#define MIIMCFG_RESET_MANAGEMENT 0x80000000
+#define MIIMCFG_NO_PREAMBLE 0x00000010
+#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31)
+#define MIIMCFG_CLOCK_DIVIDE_MASK 0x0000000f
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007
+
+#define MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE \
+ MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10
+
+/* UEC MIIMCOM (MII Management Command Register)
+ */
+#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
+#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
+
+/* UEC MIIMADD (MII Management Address Register)
+ */
+#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23)
+#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31)
+
+/* UEC MIIMCON (MII Management Control Register)
+ */
+#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31)
+#define MIIMCON_PHY_STATUS_SHIFT (31 - 31)
+
+/* UEC MIIMIND (MII Management Indicator Register)
+ */
+#define MIIMIND_NOT_VALID 0x00000004
+#define MIIMIND_SCAN 0x00000002
+#define MIIMIND_BUSY 0x00000001
+
+/* UEC UTBIPAR (Ten Bit Interface Physical Address Register)
+ */
+#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31)
+#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f
+
+/* UEC UESCR (Ethernet Statistics Control Register)
+ */
+#define UESCR_AUTOZ 0x8000
+#define UESCR_CLRCNT 0x4000
+#define UESCR_MAXCOV_SHIFT (15 - 7)
+#define UESCR_SCOV_SHIFT (15 - 15)
+
+/****** Tx data struct collection ******/
+/* Tx thread data, each Tx thread has one of these structs.
+*/
+typedef struct uec_thread_data_tx {
+ u8 res0[136];
+} __attribute__ ((packed)) uec_thread_data_tx_t;
+
+/* Tx thread parameter, each Tx thread has one of these structs.
+*/
+typedef struct uec_thread_tx_pram {
+ u8 res0[64];
+} __attribute__ ((packed)) uec_thread_tx_pram_t;
+
+/* Send queue queue-descriptor, each Tx queue has one of these QDs
+*/
+typedef struct uec_send_queue_qd {
+ u32 bd_ring_base; /* pointer to BD ring base address */
+ u8 res0[0x8];
+ u32 last_bd_completed_address; /* last entry in BD ring */
+ u8 res1[0x30];
+} __attribute__ ((packed)) uec_send_queue_qd_t;
+
+/* Send queue memory region */
+typedef struct uec_send_queue_mem_region {
+ uec_send_queue_qd_t sqqd[MAX_TX_QUEUES];
+} __attribute__ ((packed)) uec_send_queue_mem_region_t;
+
+/* Scheduler struct
+*/
+typedef struct uec_scheduler {
+ u16 cpucount0; /* CPU packet counter */
+ u16 cpucount1; /* CPU packet counter */
+ u16 cecount0; /* QE packet counter */
+ u16 cecount1; /* QE packet counter */
+ u16 cpucount2; /* CPU packet counter */
+ u16 cpucount3; /* CPU packet counter */
+ u16 cecount2; /* QE packet counter */
+ u16 cecount3; /* QE packet counter */
+ u16 cpucount4; /* CPU packet counter */
+ u16 cpucount5; /* CPU packet counter */
+ u16 cecount4; /* QE packet counter */
+ u16 cecount5; /* QE packet counter */
+ u16 cpucount6; /* CPU packet counter */
+ u16 cpucount7; /* CPU packet counter */
+ u16 cecount6; /* QE packet counter */
+ u16 cecount7; /* QE packet counter */
+ u32 weightstatus[MAX_TX_QUEUES]; /* accumulated weight factor */
+ u32 rtsrshadow; /* temporary variable handled by QE */
+ u32 time; /* temporary variable handled by QE */
+ u32 ttl; /* temporary variable handled by QE */
+ u32 mblinterval; /* max burst length interval */
+ u16 nortsrbytetime; /* normalized value of byte time in tsr units */
+ u8 fracsiz;
+ u8 res0[1];
+ u8 strictpriorityq; /* Strict Priority Mask register */
+ u8 txasap; /* Transmit ASAP register */
+ u8 extrabw; /* Extra BandWidth register */
+ u8 oldwfqmask; /* temporary variable handled by QE */
+ u8 weightfactor[MAX_TX_QUEUES]; /* weight factor for queues */
+ u32 minw; /* temporary variable handled by QE */
+ u8 res1[0x70-0x64];
+} __attribute__ ((packed)) uec_scheduler_t;
+
+/* Tx firmware counters
+*/
+typedef struct uec_tx_firmware_statistics_pram {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to tx collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error tx */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer;
+ u32 txpkts256; /* total packets (including bad) 256~511 B */
+ u32 txpkts512; /* total packets (including bad) 512~1023 B */
+ u32 txpkts1024; /* total packets (including bad) 1024~1518 B */
+ u32 txpktsjumbo; /* total packets (including bad) >1024 B */
+} __attribute__ ((packed)) uec_tx_firmware_statistics_pram_t;
+
+/* Tx global parameter table
+*/
+typedef struct uec_tx_global_pram {
+ u16 temoder;
+ u8 res0[0x38-0x02];
+ u32 sqptr;
+ u32 schedulerbasepointer;
+ u32 txrmonbaseptr;
+ u32 tstate;
+ u8 iphoffset[MAX_IPH_OFFSET_ENTRY];
+ u32 vtagtable[0x8];
+ u32 tqptr;
+ u8 res2[0x80-0x74];
+} __attribute__ ((packed)) uec_tx_global_pram_t;
+
+
+/****** Rx data struct collection ******/
+/* Rx thread data, each Rx thread has one of these structs.
+*/
+typedef struct uec_thread_data_rx {
+ u8 res0[40];
+} __attribute__ ((packed)) uec_thread_data_rx_t;
+
+/* Rx thread parameter, each Rx thread has one of these structs.
+*/
+typedef struct uec_thread_rx_pram {
+ u8 res0[128];
+} __attribute__ ((packed)) uec_thread_rx_pram_t;
+
+/* Rx firmware counters
+*/
+typedef struct uec_rx_firmware_statistics_pram {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) 256~511 B */
+ u32 pkts512; /* total frames (including bad) 512~1023 B */
+ u32 pkts1024; /* total frames (including bad) 1024~1518 B */
+ u32 pktsjumbo; /* total frames (including bad) >1024 B */
+ u32 frlossinmacer;
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan;
+ u32 replacevlan;
+ u32 insertvlan;
+} __attribute__ ((packed)) uec_rx_firmware_statistics_pram_t;
+
+/* Rx interrupt coalescing entry, each Rx queue has one of these entries.
+*/
+typedef struct uec_rx_interrupt_coalescing_entry {
+ u32 maxvalue;
+ u32 counter;
+} __attribute__ ((packed)) uec_rx_interrupt_coalescing_entry_t;
+
+typedef struct uec_rx_interrupt_coalescing_table {
+ uec_rx_interrupt_coalescing_entry_t entry[MAX_RX_QUEUES];
+} __attribute__ ((packed)) uec_rx_interrupt_coalescing_table_t;
+
+/* RxBD queue entry, each Rx queue has one of these entries.
+*/
+typedef struct uec_rx_bd_queues_entry {
+ u32 bdbaseptr; /* BD base pointer */
+ u32 bdptr; /* BD pointer */
+ u32 externalbdbaseptr; /* external BD base pointer */
+ u32 externalbdptr; /* external BD pointer */
+} __attribute__ ((packed)) uec_rx_bd_queues_entry_t;
+
+/* Rx global parameter table
+*/
+typedef struct uec_rx_global_pram {
+ u32 remoder; /* ethernet mode reg. */
+ u32 rqptr; /* base pointer to the Rx Queues */
+ u32 res0[0x1];
+ u8 res1[0x20-0xC];
+ u16 typeorlen;
+ u8 res2[0x1];
+ u8 rxgstpack; /* ack on GRACEFUL STOP RX command */
+ u32 rxrmonbaseptr; /* Rx RMON statistics base */
+ u8 res3[0x30-0x28];
+ u32 intcoalescingptr; /* Interrupt coalescing table pointer */
+ u8 res4[0x36-0x34];
+ u8 rstate;
+ u8 res5[0x46-0x37];
+ u16 mrblr; /* max receive buffer length reg. */
+ u32 rbdqptr; /* RxBD parameter table description */
+ u16 mflr; /* max frame length reg. */
+ u16 minflr; /* min frame length reg. */
+ u16 maxd1; /* max dma1 length reg. */
+ u16 maxd2; /* max dma2 length reg. */
+ u32 ecamptr; /* external CAM address */
+ u32 l2qt; /* VLAN priority mapping table. */
+ u32 l3qt[0x8]; /* IP priority mapping table. */
+ u16 vlantype; /* vlan type */
+ u16 vlantci; /* default vlan tci */
+ u8 addressfiltering[64];/* address filtering data structure */
+ u32 exfGlobalParam; /* extended filtering global parameters */
+ u8 res6[0x100-0xC4]; /* Initialize to zero */
+} __attribute__ ((packed)) uec_rx_global_pram_t;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
+
+
+/****** UEC common ******/
+/* UCC statistics - hardware counters
+*/
+typedef struct uec_hardware_statistics {
+ u32 tx64;
+ u32 tx127;
+ u32 tx255;
+ u32 rx64;
+ u32 rx127;
+ u32 rx255;
+ u32 txok;
+ u16 txcf;
+ u32 tmca;
+ u32 tbca;
+ u32 rxfok;
+ u32 rxbok;
+ u32 rbyt;
+ u32 rmca;
+ u32 rbca;
+} __attribute__ ((packed)) uec_hardware_statistics_t;
+
+/* InitEnet command parameter
+*/
+typedef struct uec_init_cmd_pram {
+ u8 resinit0;
+ u8 resinit1;
+ u8 resinit2;
+ u8 resinit3;
+ u16 resinit4;
+ u8 res1[0x1];
+ u8 largestexternallookupkeysize;
+ u32 rgftgfrxglobal;
+ u32 rxthread[MAX_ENET_INIT_PARAM_ENTRIES_RX]; /* rx threads */
+ u8 res2[0x38 - 0x30];
+ u32 txglobal; /* tx global */
+ u32 txthread[MAX_ENET_INIT_PARAM_ENTRIES_TX]; /* tx threads */
+ u8 res3[0x1];
+} __attribute__ ((packed)) uec_init_cmd_pram_t;
+
+#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT 24
+
+#define ENET_INIT_PARAM_MAGIC_RES_INIT0 0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x0400
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM
+*/
+typedef struct uec_82xx_enet_address {
+ u8 res1[0x2];
+ u16 h; /* address (MSB) */
+ u16 m; /* address */
+ u16 l; /* address (LSB) */
+} __attribute__ ((packed)) uec_82xx_enet_address_t;
+
+/* structure representing 82xx Address Filtering PRAM
+*/
+typedef struct uec_82xx_address_filtering_pram {
+ u32 iaddr_h; /* individual address filter, high */
+ u32 iaddr_l; /* individual address filter, low */
+ u32 gaddr_h; /* group address filter, high */
+ u32 gaddr_l; /* group address filter, low */
+ uec_82xx_enet_address_t taddr;
+ uec_82xx_enet_address_t paddr[4];
+ u8 res0[0x40-0x38];
+} __attribute__ ((packed)) uec_82xx_address_filtering_pram_t;
+
+/* Buffer Descriptor
+*/
+typedef struct buffer_descriptor {
+ u16 status;
+ u16 len;
+ u32 data;
+} __attribute__ ((packed)) qe_bd_t, *p_bd_t;
+
+#define SIZEOFBD sizeof(qe_bd_t)
+
+/* Common BD flags
+*/
+#define BD_WRAP 0x2000
+#define BD_INT 0x1000
+#define BD_LAST 0x0800
+#define BD_CLEAN 0x3000
+
+/* TxBD status flags
+*/
+#define TxBD_READY 0x8000
+#define TxBD_PADCRC 0x4000
+#define TxBD_WRAP BD_WRAP
+#define TxBD_INT BD_INT
+#define TxBD_LAST BD_LAST
+#define TxBD_TXCRC 0x0400
+#define TxBD_DEF 0x0200
+#define TxBD_PP 0x0100
+#define TxBD_LC 0x0080
+#define TxBD_RL 0x0040
+#define TxBD_RC 0x003C
+#define TxBD_UNDERRUN 0x0002
+#define TxBD_TRUNC 0x0001
+
+#define TxBD_ERROR (TxBD_UNDERRUN | TxBD_TRUNC)
+
+/* RxBD status flags
+*/
+#define RxBD_EMPTY 0x8000
+#define RxBD_OWNER 0x4000
+#define RxBD_WRAP BD_WRAP
+#define RxBD_INT BD_INT
+#define RxBD_LAST BD_LAST
+#define RxBD_FIRST 0x0400
+#define RxBD_CMR 0x0200
+#define RxBD_MISS 0x0100
+#define RxBD_BCAST 0x0080
+#define RxBD_MCAST 0x0040
+#define RxBD_LG 0x0020
+#define RxBD_NO 0x0010
+#define RxBD_SHORT 0x0008
+#define RxBD_CRCERR 0x0004
+#define RxBD_OVERRUN 0x0002
+#define RxBD_IPCH 0x0001
+
+#define RxBD_ERROR (RxBD_LG | RxBD_NO | RxBD_SHORT | \
+ RxBD_CRCERR | RxBD_OVERRUN)
+
+/* BD access macros
+*/
+#define BD_STATUS(_bd) (((p_bd_t)(_bd))->status)
+#define BD_STATUS_SET(_bd, _val) (((p_bd_t)(_bd))->status = _val)
+#define BD_LENGTH(_bd) (((p_bd_t)(_bd))->len)
+#define BD_LENGTH_SET(_bd, _val) (((p_bd_t)(_bd))->len = _val)
+#define BD_DATA_CLEAR(_bd) (((p_bd_t)(_bd))->data = 0)
+#define BD_IS_DATA(_bd) (((p_bd_t)(_bd))->data)
+#define BD_DATA(_bd) ((u8 *)(((p_bd_t)(_bd))->data))
+#define BD_DATA_SET(_bd, _data) (((p_bd_t)(_bd))->data = (u32)(_data))
+#define BD_ADVANCE(_bd,_status,_base) \
+ (((_status) & BD_WRAP) ? (_bd) = ((p_bd_t)(_base)) : ++(_bd))
+
+/* Rx Prefetched BDs
+*/
+typedef struct uec_rx_prefetched_bds {
+ qe_bd_t bd[MAX_PREFETCHED_BDS]; /* prefetched bd */
+} __attribute__ ((packed)) uec_rx_prefetched_bds_t;
+
+/* Alignments
+ */
+#define UEC_RX_GLOBAL_PRAM_ALIGNMENT 64
+#define UEC_TX_GLOBAL_PRAM_ALIGNMENT 64
+#define UEC_THREAD_RX_PRAM_ALIGNMENT 128
+#define UEC_THREAD_TX_PRAM_ALIGNMENT 64
+#define UEC_THREAD_DATA_ALIGNMENT 256
+#define UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
+#define UEC_SCHEDULER_ALIGNMENT 4
+#define UEC_TX_STATISTICS_ALIGNMENT 4
+#define UEC_RX_STATISTICS_ALIGNMENT 4
+#define UEC_RX_INTERRUPT_COALESCING_ALIGNMENT 4
+#define UEC_RX_BD_QUEUES_ALIGNMENT 8
+#define UEC_RX_PREFETCHED_BDS_ALIGNMENT 128
+#define UEC_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4
+#define UEC_RX_BD_RING_ALIGNMENT 32
+#define UEC_TX_BD_RING_ALIGNMENT 32
+#define UEC_MRBLR_ALIGNMENT 128
+#define UEC_RX_BD_RING_SIZE_ALIGNMENT 4
+#define UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
+#define UEC_RX_DATA_BUF_ALIGNMENT 64
+
+#define UEC_VLAN_PRIORITY_MAX 8
+#define UEC_IP_PRIORITY_MAX 64
+#define UEC_TX_VTAG_TABLE_ENTRY_MAX 8
+#define UEC_RX_BD_RING_SIZE_MIN 8
+#define UEC_TX_BD_RING_SIZE_MIN 2
+
+/* Ethernet speed
+*/
+typedef enum enet_speed {
+ ENET_SPEED_10BT, /* 10 Base T */
+ ENET_SPEED_100BT, /* 100 Base T */
+ ENET_SPEED_1000BT /* 1000 Base T */
+} enet_speed_e;
+
+/* Ethernet Address Type.
+*/
+typedef enum enet_addr_type {
+ ENET_ADDR_TYPE_INDIVIDUAL,
+ ENET_ADDR_TYPE_GROUP,
+ ENET_ADDR_TYPE_BROADCAST
+} enet_addr_type_e;
+
+/* TBI / MII Set Register
+*/
+typedef enum enet_tbi_mii_reg {
+ ENET_TBI_MII_CR = 0x00,
+ ENET_TBI_MII_SR = 0x01,
+ ENET_TBI_MII_ANA = 0x04,
+ ENET_TBI_MII_ANLPBPA = 0x05,
+ ENET_TBI_MII_ANEX = 0x06,
+ ENET_TBI_MII_ANNPT = 0x07,
+ ENET_TBI_MII_ANLPANP = 0x08,
+ ENET_TBI_MII_EXST = 0x0F,
+ ENET_TBI_MII_JD = 0x10,
+ ENET_TBI_MII_TBICON = 0x11
+} enet_tbi_mii_reg_e;
+
+/* UEC number of threads
+*/
+typedef enum uec_num_of_threads {
+ UEC_NUM_OF_THREADS_1 = 0x1, /* 1 */
+ UEC_NUM_OF_THREADS_2 = 0x2, /* 2 */
+ UEC_NUM_OF_THREADS_4 = 0x0, /* 4 */
+ UEC_NUM_OF_THREADS_6 = 0x3, /* 6 */
+ UEC_NUM_OF_THREADS_8 = 0x4 /* 8 */
+} uec_num_of_threads_e;
+
+/* UEC ethernet interface type
+*/
+typedef enum enet_interface {
+ ENET_10_MII,
+ ENET_10_RMII,
+ ENET_10_RGMII,
+ ENET_100_MII,
+ ENET_100_RMII,
+ ENET_100_RGMII,
+ ENET_1000_GMII,
+ ENET_1000_RGMII,
+ ENET_1000_TBI,
+ ENET_1000_RTBI
+} enet_interface_e;
+
+/* UEC initialization info struct
+*/
+typedef struct uec_info {
+ ucc_fast_info_t uf_info;
+ uec_num_of_threads_e num_threads_tx;
+ uec_num_of_threads_e num_threads_rx;
+ qe_risc_allocation_e riscTx;
+ qe_risc_allocation_e riscRx;
+ u16 rx_bd_ring_len;
+ u16 tx_bd_ring_len;
+ u8 phy_address;
+ enet_interface_e enet_interface;
+} uec_info_t;
+
+/* UEC driver initialized info
+*/
+#define MAX_RXBUF_LEN 1536
+#define MAX_FRAME_LEN 1518
+#define MIN_FRAME_LEN 64
+#define MAX_DMA1_LEN 1520
+#define MAX_DMA2_LEN 1520
+
+/* UEC driver private struct
+*/
+typedef struct uec_private {
+ uec_info_t *uec_info;
+ ucc_fast_private_t *uccf;
+ struct eth_device *dev;
+ uec_t *uec_regs;
+ /* enet init command parameter */
+ uec_init_cmd_pram_t *p_init_enet_param;
+ u32 init_enet_param_offset;
+ /* Rx and Tx parameter */
+ uec_rx_global_pram_t *p_rx_glbl_pram;
+ u32 rx_glbl_pram_offset;
+ uec_tx_global_pram_t *p_tx_glbl_pram;
+ u32 tx_glbl_pram_offset;
+ uec_send_queue_mem_region_t *p_send_q_mem_reg;
+ u32 send_q_mem_reg_offset;
+ uec_thread_data_tx_t *p_thread_data_tx;
+ u32 thread_dat_tx_offset;
+ uec_thread_data_rx_t *p_thread_data_rx;
+ u32 thread_dat_rx_offset;
+ uec_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
+ u32 rx_bd_qs_tbl_offset;
+ /* BDs specific */
+ u8 *p_tx_bd_ring;
+ u32 tx_bd_ring_offset;
+ u8 *p_rx_bd_ring;
+ u32 rx_bd_ring_offset;
+ u8 *p_rx_buf;
+ u32 rx_buf_offset;
+ volatile qe_bd_t *txBd;
+ volatile qe_bd_t *rxBd;
+ /* Status */
+ int mac_tx_enabled;
+ int mac_rx_enabled;
+ int grace_stopped_tx;
+ int grace_stopped_rx;
+ int the_first_run;
+ /* PHY specific */
+ struct uec_mii_info *mii_info;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+} uec_private_t;
+
+#endif /* __UEC_H__ */
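The BD access macros and RxBD flags above are the interface uec.c uses to walk its descriptor rings. As a reading aid, here is a hedged sketch of that pattern, mirroring the loop in uec_recv(); drain_rx_ring() and process_frame() are hypothetical names used only for illustration.

	#include "uec.h"	/* qe_bd_t, BD_* macros, RxBD_* flags (uccf.h needed first) */

	extern void process_frame(u8 *data, u16 len);	/* hypothetical consumer */

	/* Sketch: hand every filled RxBD to process_frame(), then give the BD
	 * back to the QE with the EMPTY bit set, preserving the WRAP/INT bits. */
	static void drain_rx_ring(volatile qe_bd_t *bd, u8 *ring_base)
	{
		u16 status = BD_STATUS(bd);

		while (!(status & RxBD_EMPTY)) {
			if (!(status & RxBD_ERROR))
				process_frame(BD_DATA(bd), BD_LENGTH(bd));

			BD_LENGTH_SET(bd, 0);
			BD_STATUS_SET(bd, (status & BD_CLEAN) | RxBD_EMPTY);
			BD_ADVANCE(bd, status, ring_base);
			status = BD_STATUS(bd);
		}
	}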
diff --git a/drivers/qe/uec_phy.c b/drivers/qe/uec_phy.c
new file mode 100644
index 0000000..76fd388
--- /dev/null
+++ b/drivers/qe/uec_phy.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 2005 Freescale Semiconductor, Inc.
+ *
+ * Author: Shlomi Gridish
+ *
+ * Description: UCC GETH Driver -- PHY handling
+ * Driver for UEC on QE
+ * Based on 8260_io/fcc_enet.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "common.h"
+#include "net.h"
+#include "malloc.h"
+#include "asm/errno.h"
+#include "asm/immap_qe.h"
+#include "asm/io.h"
+#include "qe.h"
+#include "uccf.h"
+#include "uec.h"
+#include "uec_phy.h"
+#include "miiphy.h"
+
+#if defined(CONFIG_QE)
+
+#define UEC_VERBOSE_DEBUG
+#define ugphy_printk(format, arg...) \
+ printf(format "\n", ## arg)
+
+#define ugphy_dbg(format, arg...) \
+ ugphy_printk(format , ## arg)
+#define ugphy_err(format, arg...) \
+ ugphy_printk(format , ## arg)
+#define ugphy_info(format, arg...) \
+ ugphy_printk(format , ## arg)
+#define ugphy_warn(format, arg...) \
+ ugphy_printk(format , ## arg)
+
+#ifdef UEC_VERBOSE_DEBUG
+#define ugphy_vdbg ugphy_dbg
+#else
+#define ugphy_vdbg(ugeth, fmt, args...) do { } while (0)
+#endif /* UEC_VERBOSE_DEBUG */
+
+static void config_genmii_advert (struct uec_mii_info *mii_info);
+static void genmii_setup_forced (struct uec_mii_info *mii_info);
+static void genmii_restart_aneg (struct uec_mii_info *mii_info);
+static int gbit_config_aneg (struct uec_mii_info *mii_info);
+static int genmii_config_aneg (struct uec_mii_info *mii_info);
+static int genmii_update_link (struct uec_mii_info *mii_info);
+static int genmii_read_status (struct uec_mii_info *mii_info);
+u16 phy_read (struct uec_mii_info *mii_info, u16 regnum);
+void phy_write (struct uec_mii_info *mii_info, u16 regnum, u16 val);
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns. All PHY */
+/* configuration has to be done through the UEC MIIM regs */
+void write_phy_reg (struct eth_device *dev, int mii_id, int regnum, int value)
+{
+ uec_private_t *ugeth = (uec_private_t *) dev->priv;
+ uec_t *ug_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+
+ ug_regs = ugeth->uec_regs;
+
+ /* Stop the MII management read cycle */
+ out_be32 (&ug_regs->miimcom, 0);
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32 (&ug_regs->miimadd, tmp_reg);
+
+ /* Setting up the MII Management Control Register with the value */
+ out_be32 (&ug_regs->miimcon, (u32) value);
+
+ /* Wait till MII management write is complete */
+ while ((in_be32 (&ug_regs->miimind)) & MIIMIND_BUSY);
+
+ udelay (100000);
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value. Clears miimcom first. All PHY */
+/* configuration has to be done through the UEC MIIM regs */
+int read_phy_reg (struct eth_device *dev, int mii_id, int regnum)
+{
+ uec_private_t *ugeth = (uec_private_t *) dev->priv;
+ uec_t *ug_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+ u16 value;
+
+ ug_regs = ugeth->uec_regs;
+
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32 (&ug_regs->miimadd, tmp_reg);
+
+ /* Perform an MII management read cycle */
+ out_be32 (&ug_regs->miimcom, 0);
+ out_be32 (&ug_regs->miimcom, MIIMCOM_READ_CYCLE);
+
+ /* Wait till MII management read is complete */
+ while ((in_be32 (&ug_regs->miimind)) &
+ (MIIMIND_NOT_VALID | MIIMIND_BUSY));
+
+ udelay (100000);
+
+ /* Read MII management status */
+ value = (u16) in_be32 (&ug_regs->miimstat);
+ if (value == 0xffff)
+ ugphy_warn
+ ("read wrong value : mii_id %d,mii_reg %d, base %08x",
+ mii_id, mii_reg, (u32) & (ug_regs->miimcfg));
+
+ return (value);
+}
+
+void mii_clear_phy_interrupt (struct uec_mii_info *mii_info)
+{
+ if (mii_info->phyinfo->ack_interrupt)
+ mii_info->phyinfo->ack_interrupt (mii_info);
+}
+
+void mii_configure_phy_interrupt (struct uec_mii_info *mii_info,
+ u32 interrupts)
+{
+ mii_info->interrupts = interrupts;
+ if (mii_info->phyinfo->config_intr)
+ mii_info->phyinfo->config_intr (mii_info);
+}
+
+/* Writes MII_ADVERTISE with the appropriate values, after
+ * sanitizing advertise to make sure only supported features
+ * are advertised
+ */
+static void config_genmii_advert (struct uec_mii_info *mii_info)
+{
+ u32 advertise;
+ u16 adv;
+
+ /* Only allow advertising what this PHY supports */
+ mii_info->advertising &= mii_info->phyinfo->features;
+ advertise = mii_info->advertising;
+
+ /* Setup standard advertisement */
+ adv = phy_read (mii_info, PHY_ANAR);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write (mii_info, PHY_ANAR, adv);
+}
+
+static void genmii_setup_forced (struct uec_mii_info *mii_info)
+{
+ u16 ctrl;
+ u32 features = mii_info->phyinfo->features;
+
+ ctrl = phy_read (mii_info, PHY_BMCR);
+
+ ctrl &= ~(PHY_BMCR_DPLX | PHY_BMCR_100_MBPS |
+ PHY_BMCR_1000_MBPS | PHY_BMCR_AUTON);
+ ctrl |= PHY_BMCR_RESET;
+
+ switch (mii_info->speed) {
+ case SPEED_1000:
+ if (features & (SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full)) {
+ ctrl |= PHY_BMCR_1000_MBPS;
+ break;
+ }
+ mii_info->speed = SPEED_100;
+ /* fall through */
+ case SPEED_100:
+ if (features & (SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full)) {
+ ctrl |= PHY_BMCR_100_MBPS;
+ break;
+ }
+ mii_info->speed = SPEED_10;
+ /* fall through */
+ case SPEED_10:
+ if (features & (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full))
+ break;
+ default: /* Unsupported speed! */
+ ugphy_err ("%s: Bad speed!", mii_info->dev->name);
+ break;
+ }
+
+ phy_write (mii_info, PHY_BMCR, ctrl);
+}
+
+/* Enable and Restart Autonegotiation */
+static void genmii_restart_aneg (struct uec_mii_info *mii_info)
+{
+ u16 ctl;
+
+ ctl = phy_read (mii_info, PHY_BMCR);
+ ctl |= (PHY_BMCR_AUTON | PHY_BMCR_RST_NEG);
+ phy_write (mii_info, PHY_BMCR, ctl);
+}
+
+static int gbit_config_aneg (struct uec_mii_info *mii_info)
+{
+ u16 adv;
+ u32 advertise;
+
+ if (mii_info->autoneg) {
+ /* Configure the ADVERTISE register */
+ config_genmii_advert (mii_info);
+ advertise = mii_info->advertising;
+
+ adv = phy_read (mii_info, MII_1000BASETCONTROL);
+ adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
+ MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+ phy_write (mii_info, MII_1000BASETCONTROL, adv);
+
+ /* Start/Restart aneg */
+ genmii_restart_aneg (mii_info);
+ } else
+ genmii_setup_forced (mii_info);
+
+ return 0;
+}
+
+static int marvell_config_aneg (struct uec_mii_info *mii_info)
+{
+ /* The Marvell PHY has an erratum which requires
+ * that certain registers get written in order
+ * to restart autonegotiation */
+ phy_write (mii_info, PHY_BMCR, PHY_BMCR_RESET);
+
+ phy_write (mii_info, 0x1d, 0x1f);
+ phy_write (mii_info, 0x1e, 0x200c);
+ phy_write (mii_info, 0x1d, 0x5);
+ phy_write (mii_info, 0x1e, 0);
+ phy_write (mii_info, 0x1e, 0x100);
+
+ gbit_config_aneg (mii_info);
+
+ return 0;
+}
+
+static int genmii_config_aneg (struct uec_mii_info *mii_info)
+{
+ if (mii_info->autoneg) {
+ config_genmii_advert (mii_info);
+ genmii_restart_aneg (mii_info);
+ } else
+ genmii_setup_forced (mii_info);
+
+ return 0;
+}
+
+static int genmii_update_link (struct uec_mii_info *mii_info)
+{
+ u16 status;
+
+ /* Do a fake read */
+ phy_read (mii_info, PHY_BMSR);
+
+ /* Read link and autonegotiation status */
+ status = phy_read (mii_info, PHY_BMSR);
+ if ((status & PHY_BMSR_LS) == 0)
+ mii_info->link = 0;
+ else
+ mii_info->link = 1;
+
+ /* If we are autonegotiating, and not done,
+ * return an error */
+ if (mii_info->autoneg && !(status & PHY_BMSR_AUTN_COMP))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int genmii_read_status (struct uec_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link (mii_info);
+ if (err)
+ return err;
+
+ if (mii_info->autoneg) {
+ status = phy_read (mii_info, PHY_ANLPAR);
+
+ if (status & (PHY_ANLPAR_10FD | PHY_ANLPAR_TXFD))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ if (status & (PHY_ANLPAR_TXFD | PHY_ANLPAR_TX))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+ mii_info->pause = 0;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+static int marvell_read_status (struct uec_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link (mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+
+ status = phy_read (mii_info, MII_M1011_PHY_SPEC_STATUS);
+
+ /* Get the duplexity */
+ if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ /* Get the speed */
+ speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
+ switch (speed) {
+ case MII_M1011_PHY_SPEC_STATUS_1000:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_M1011_PHY_SPEC_STATUS_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ mii_info->pause = 0;
+ }
+
+ return 0;
+}
+
+static int marvell_ack_interrupt (struct uec_mii_info *mii_info)
+{
+ /* Clear the interrupts by reading the reg */
+ phy_read (mii_info, MII_M1011_IEVENT);
+
+ return 0;
+}
+
+static int marvell_config_intr (struct uec_mii_info *mii_info)
+{
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write (mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ else
+ phy_write (mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+
+ return 0;
+}
+
+static int dm9161_init (struct uec_mii_info *mii_info)
+{
+ /* Reset the PHY */
+ phy_write (mii_info, PHY_BMCR, phy_read (mii_info, PHY_BMCR) |
+ PHY_BMCR_RESET);
+ /* PHY and MAC connect */
+ phy_write (mii_info, PHY_BMCR, phy_read (mii_info, PHY_BMCR) &
+ ~PHY_BMCR_ISO);
+#ifdef CONFIG_RMII_MODE
+ phy_write (mii_info, MII_DM9161_SCR, MII_DM9161_SCR_RMII_INIT);
+#else
+ phy_write (mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
+#endif
+ config_genmii_advert (mii_info);
+ /* Start/restart aneg */
+ genmii_config_aneg (mii_info);
+ /* Delay to wait for aneg to complete */
+ udelay (3000000);
+
+ return 0;
+}
+
+static int dm9161_config_aneg (struct uec_mii_info *mii_info)
+{
+ return 0;
+}
+
+static int dm9161_read_status (struct uec_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ /* Update the link, but return if there was an error */
+ err = genmii_update_link (mii_info);
+ if (err)
+ return err;
+ /* If the link is up, read the speed and duplex
+ If we aren't autonegotiating assume speeds are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ status = phy_read (mii_info, MII_DM9161_SCSR);
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int dm9161_ack_interrupt (struct uec_mii_info *mii_info)
+{
+ /* Clear the interrupt by reading the reg */
+ phy_read (mii_info, MII_DM9161_INTR);
+
+ return 0;
+}
+
+static int dm9161_config_intr (struct uec_mii_info *mii_info)
+{
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write (mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
+ else
+ phy_write (mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
+
+ return 0;
+}
+
+static void dm9161_close (struct uec_mii_info *mii_info)
+{
+}
+
+static struct phy_info phy_info_dm9161 = {
+ .phy_id = 0x0181b880,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Davicom DM9161E",
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_dm9161a = {
+ .phy_id = 0x0181b8a0,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Davicom DM9161A",
+ .features = MII_BASIC_FEATURES,
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_marvell = {
+ .phy_id = 0x01410c00,
+ .phy_id_mask = 0xffffff00,
+ .name = "Marvell 88E11x1",
+ .features = MII_GBIT_FEATURES,
+ .config_aneg = &marvell_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+};
+
+static struct phy_info phy_info_genmii = {
+ .phy_id = 0x00000000,
+ .phy_id_mask = 0x00000000,
+ .name = "Generic MII",
+ .features = MII_BASIC_FEATURES,
+ .config_aneg = genmii_config_aneg,
+ .read_status = genmii_read_status,
+};
+
+static struct phy_info *phy_info[] = {
+ &phy_info_dm9161,
+ &phy_info_dm9161a,
+ &phy_info_marvell,
+ &phy_info_genmii,
+ NULL
+};
+
+u16 phy_read (struct uec_mii_info *mii_info, u16 regnum)
+{
+ return mii_info->mdio_read (mii_info->dev, mii_info->mii_id, regnum);
+}
+
+void phy_write (struct uec_mii_info *mii_info, u16 regnum, u16 val)
+{
+ mii_info->mdio_write (mii_info->dev, mii_info->mii_id, regnum, val);
+}
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev. Return a struct phy_info structure describing that PHY.
+ */
+struct phy_info *get_phy_info (struct uec_mii_info *mii_info)
+{
+ u16 phy_reg;
+ u32 phy_ID;
+ int i;
+ struct phy_info *theInfo = NULL;
+
+ /* Grab the bits from PHYIR1, and put them in the upper half */
+ phy_reg = phy_read (mii_info, PHY_PHYIDR1);
+ phy_ID = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = phy_read (mii_info, PHY_PHYIDR2);
+ phy_ID |= (phy_reg & 0xffff);
+
+ /* loop through all the known PHY types, and find one that */
+ /* matches the ID we read from the PHY. */
+ for (i = 0; phy_info[i]; i++)
+ if (phy_info[i]->phy_id ==
+ (phy_ID & phy_info[i]->phy_id_mask)) {
+ theInfo = phy_info[i];
+ break;
+ }
+
+ /* This shouldn't happen, as we have generic PHY support */
+ if (theInfo == NULL) {
+ ugphy_info ("UEC: PHY id %x is not supported!", phy_ID);
+ return NULL;
+ } else {
+ ugphy_info ("UEC: PHY is %s (%x)", theInfo->name, phy_ID);
+ }
+
+ return theInfo;
+}
+
+void marvell_phy_interface_mode (struct eth_device *dev,
+ enet_interface_e mode)
+{
+ uec_private_t *uec = (uec_private_t *) dev->priv;
+ struct uec_mii_info *mii_info;
+
+ if (!uec->mii_info) {
+ printf ("%s: the PHY not intialized\n", __FUNCTION__);
+ return;
+ }
+ mii_info = uec->mii_info;
+
+ if (mode == ENET_100_RGMII) {
+ phy_write (mii_info, 0x00, 0x9140);
+ phy_write (mii_info, 0x1d, 0x001f);
+ phy_write (mii_info, 0x1e, 0x200c);
+ phy_write (mii_info, 0x1d, 0x0005);
+ phy_write (mii_info, 0x1e, 0x0000);
+ phy_write (mii_info, 0x1e, 0x0100);
+ phy_write (mii_info, 0x09, 0x0e00);
+ phy_write (mii_info, 0x04, 0x01e1);
+ phy_write (mii_info, 0x00, 0x9140);
+ phy_write (mii_info, 0x00, 0x1000);
+ udelay (100000);
+ phy_write (mii_info, 0x00, 0x2900);
+ phy_write (mii_info, 0x14, 0x0cd2);
+ phy_write (mii_info, 0x00, 0xa100);
+ phy_write (mii_info, 0x09, 0x0000);
+ phy_write (mii_info, 0x1b, 0x800b);
+ phy_write (mii_info, 0x04, 0x05e1);
+ phy_write (mii_info, 0x00, 0xa100);
+ phy_write (mii_info, 0x00, 0x2100);
+ udelay (1000000);
+ } else if (mode == ENET_10_RGMII) {
+ phy_write (mii_info, 0x14, 0x8e40);
+ phy_write (mii_info, 0x1b, 0x800b);
+ phy_write (mii_info, 0x14, 0x0c82);
+ phy_write (mii_info, 0x00, 0x8100);
+ udelay (1000000);
+ }
+}
+
+void change_phy_interface_mode (struct eth_device *dev, enet_interface_e mode)
+{
+#ifdef CONFIG_PHY_MODE_NEED_CHANGE
+ marvell_phy_interface_mode (dev, mode);
+#endif
+}
+#endif /* CONFIG_QE */
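get_phy_info() above matches the masked PHY ID against the phy_info[] table, with the zero-mask phy_info_genmii entry acting as a catch-all. As a sketch of how another PHY would be supported inside this file, an entry like the following could be added ahead of the generic one; the ID, mask and name here are invented for illustration, and the callbacks simply reuse the generic helpers already defined above.

	/* Illustrative only: a hypothetical 10/100 PHY reusing the generic
	 * helpers in this file.  It would be listed in phy_info[] before
	 * &phy_info_genmii, whose zero mask matches every ID. */
	static struct phy_info phy_info_example = {
		.phy_id = 0x01234560,		/* made-up OUI/model */
		.phy_id_mask = 0x0ffffff0,
		.name = "Example 10/100 PHY",
		.features = MII_BASIC_FEATURES,
		.config_aneg = genmii_config_aneg,
		.read_status = genmii_read_status,
	};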
diff --git a/drivers/qe/uec_phy.h b/drivers/qe/uec_phy.h
new file mode 100644
index 0000000..9bd926d
--- /dev/null
+++ b/drivers/qe/uec_phy.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2005 Freescale Semiconductor, Inc.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description: UCC ethernet driver -- PHY handling
+ * Driver for UEC on QE
+ * Based on 8260_io/fcc_enet.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __UEC_PHY_H__
+#define __UEC_PHY_H__
+
+#define MII_end ((u32)-2)
+#define MII_read ((u32)-1)
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define UGETH_AN_TIMEOUT 2000
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL 0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1 0x17
+#define MII_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK 0x19
+#define MII_CIS8201_IMASK_IEN 0x8000
+#define MII_CIS8201_IMASK_SPEED 0x4000
+#define MII_CIS8201_IMASK_LINK 0x2000
+#define MII_CIS8201_IMASK_DUPLEX 0x1000
+#define MII_CIS8201_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT 0x1a
+#define MII_CIS8201_ISTAT_STATUS 0x8000
+#define MII_CIS8201_ISTAT_SPEED 0x4000
+#define MII_CIS8201_ISTAT_LINK 0x2000
+#define MII_CIS8201_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT 0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MII_CIS8201_AUXCONSTAT_100 0x0008
+
+/* 88E1011 PHY Status Register */
+#define MII_M1011_PHY_SPEC_STATUS 0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
+#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
+
+#define MII_M1011_IEVENT 0x13
+#define MII_M1011_IEVENT_CLEAR 0x0000
+
+#define MII_M1011_IMASK 0x12
+#define MII_M1011_IMASK_INIT 0x6400
+#define MII_M1011_IMASK_CLEAR 0x0000
+
+#define MII_DM9161_SCR 0x10
+#define MII_DM9161_SCR_INIT 0x0610
+#define MII_DM9161_SCR_RMII_INIT 0x0710
+
+/* DM9161 Specified Configuration and Status Register */
+#define MII_DM9161_SCSR 0x11
+#define MII_DM9161_SCSR_100F 0x8000
+#define MII_DM9161_SCSR_100H 0x4000
+#define MII_DM9161_SCSR_10F 0x2000
+#define MII_DM9161_SCSR_10H 0x1000
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR 0x15
+#define MII_DM9161_INTR_PEND 0x8000
+#define MII_DM9161_INTR_DPLX_MASK 0x0800
+#define MII_DM9161_INTR_SPD_MASK 0x0400
+#define MII_DM9161_INTR_LINK_MASK 0x0200
+#define MII_DM9161_INTR_MASK 0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MII_DM9161_INTR_SPD_CHANGE 0x0008
+#define MII_DM9161_INTR_LINK_CHANGE 0x0004
+#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_STOP \
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR 0x12
+#define MII_DM9161_10BTCSR_INIT 0x7800
+
+#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_MII)
+
+#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full)
+
+#define MII_READ_COMMAND 0x00000001
+
+#define MII_INTERRUPT_DISABLED 0x0
+#define MII_INTERRUPT_ENABLED 0x1
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF 0x00
+#define DUPLEX_FULL 0x01
+
+/* Indicates what features are supported by the interface. */
+#define SUPPORTED_10baseT_Half (1 << 0)
+#define SUPPORTED_10baseT_Full (1 << 1)
+#define SUPPORTED_100baseT_Half (1 << 2)
+#define SUPPORTED_100baseT_Full (1 << 3)
+#define SUPPORTED_1000baseT_Half (1 << 4)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_Autoneg (1 << 6)
+#define SUPPORTED_TP (1 << 7)
+#define SUPPORTED_AUI (1 << 8)
+#define SUPPORTED_MII (1 << 9)
+#define SUPPORTED_FIBRE (1 << 10)
+#define SUPPORTED_BNC (1 << 11)
+#define SUPPORTED_10000baseT_Full (1 << 12)
+
+#define ADVERTISED_10baseT_Half (1 << 0)
+#define ADVERTISED_10baseT_Full (1 << 1)
+#define ADVERTISED_100baseT_Half (1 << 2)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_1000baseT_Half (1 << 4)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_Autoneg (1 << 6)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_AUI (1 << 8)
+#define ADVERTISED_MII (1 << 9)
+#define ADVERTISED_FIBRE (1 << 10)
+#define ADVERTISED_BNC (1 << 11)
+#define ADVERTISED_10000baseT_Full (1 << 12)
+
+/* Advertisement control register. */
+#define ADVERTISE_SLCT 0x001f /* Selector bits */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
+#define ADVERTISE_RESV 0x1c00 /* Unused... */
+#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
+#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
+#define ADVERTISE_NPAGE 0x8000 /* Next page bit */
+
+#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
+ ADVERTISE_CSMA)
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+
+/* Taken from mii_if_info and sungem_phy.h */
+struct uec_mii_info {
+ /* Information about the PHY type */
+ /* And management functions */
+ struct phy_info *phyinfo;
+
+ struct eth_device *dev;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* The most recently read link state */
+ int link;
+
+ /* Enabled Interrupts */
+ u32 interrupts;
+
+ u32 advertising;
+ int autoneg;
+ int mii_id;
+
+ /* private data pointer */
+ /* For use by PHYs to maintain extra state */
+ void *priv;
+
+ /* Provided by ethernet driver */
+ int (*mdio_read) (struct eth_device * dev, int mii_id, int reg);
+ void (*mdio_write) (struct eth_device * dev, int mii_id, int reg,
+ int val);
+};
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * id will contain a number which represents the PHY. During
+ * startup, the driver will poll the PHY to find out what its
+ * UID--as defined by registers 2 and 3--is. The 32-bit result
+ * gotten from the PHY will be ANDed with phy_id_mask to
+ * discard any bits which may change based on revision numbers
+ * unimportant to functionality
+ *
+ * There are several callbacks which take a uec_mii_info structure.
+ * Each PHY must declare config_aneg and read_status.
+ */
+struct phy_info {
+ u32 phy_id;
+ char *name;
+ unsigned int phy_id_mask;
+ u32 features;
+
+ /* Called to initialize the PHY */
+ int (*init) (struct uec_mii_info * mii_info);
+
+ /* Called to suspend the PHY for power */
+ int (*suspend) (struct uec_mii_info * mii_info);
+
+ /* Reconfigures autonegotiation (or disables it) */
+ int (*config_aneg) (struct uec_mii_info * mii_info);
+
+ /* Determines the negotiated speed and duplex */
+ int (*read_status) (struct uec_mii_info * mii_info);
+
+ /* Clears any pending interrupts */
+ int (*ack_interrupt) (struct uec_mii_info * mii_info);
+
+ /* Enables or disables interrupts */
+ int (*config_intr) (struct uec_mii_info * mii_info);
+
+ /* Clears up any memory if needed */
+ void (*close) (struct uec_mii_info * mii_info);
+};
+
+struct phy_info *get_phy_info (struct uec_mii_info *mii_info);
+void write_phy_reg (struct eth_device *dev, int mii_id, int regnum,
+ int value);
+int read_phy_reg (struct eth_device *dev, int mii_id, int regnum);
+void mii_clear_phy_interrupt (struct uec_mii_info *mii_info);
+void mii_configure_phy_interrupt (struct uec_mii_info *mii_info,
+ u32 interrupts);
+#endif /* __UEC_PHY_H__ */
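This header expects the MAC driver to fill a struct uec_mii_info with the MDIO accessors declared above and then identify the PHY with get_phy_info(). The sketch below shows that wiring under stated assumptions; example_init_phy() is a hypothetical name, and the driver's real implementation is init_phy() in uec.c (called from uec_initialize()).

	/* Sketch only -- assumes common.h, malloc.h, asm/errno.h, uec.h and
	 * uec_phy.h are already included.  example_init_phy() is hypothetical;
	 * the real code lives in uec.c. */
	static int example_init_phy(struct eth_device *dev, int phy_addr)
	{
		struct uec_mii_info *mii_info;

		mii_info = malloc(sizeof(struct uec_mii_info));
		if (!mii_info)
			return -ENOMEM;
		memset(mii_info, 0, sizeof(struct uec_mii_info));

		mii_info->dev = dev;
		mii_info->mii_id = phy_addr;
		mii_info->autoneg = 1;
		mii_info->mdio_read = read_phy_reg;	/* declared in this header */
		mii_info->mdio_write = write_phy_reg;

		mii_info->phyinfo = get_phy_info(mii_info);
		if (!mii_info->phyinfo)
			return -ENODEV;

		((uec_private_t *)dev->priv)->mii_info = mii_info;
		return 0;
	}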