1 From e3aece79d5003b6879298b05551e113117d5cdd8 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sat, 27 Jun 2015 13:13:36 +0200
4 Subject: [PATCH 63/76] arm: mediatek: add SDK ethernet
6 Signed-off-by: John Crispin <blogic@openwrt.org>
8 drivers/net/ethernet/Kconfig | 1 +
9 drivers/net/ethernet/Makefile | 1 +
10 drivers/net/ethernet/raeth/Kconfig | 415 ++
11 drivers/net/ethernet/raeth/Makefile | 67 +
12 drivers/net/ethernet/raeth/Makefile.release | 60 +
13 drivers/net/ethernet/raeth/csr_netlink.h | 27 +
14 drivers/net/ethernet/raeth/dvt/pkt_gen.c | 88 +
15 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c | 138 +
16 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c | 191 +
17 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c | 1527 +++++
18 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h | 75 +
19 drivers/net/ethernet/raeth/ethtool_readme.txt | 44 +
20 drivers/net/ethernet/raeth/mcast.c | 187 +
21 drivers/net/ethernet/raeth/mii_mgr.c | 603 ++
22 drivers/net/ethernet/raeth/ra2882ethreg.h | 1985 +++++++
23 drivers/net/ethernet/raeth/ra_ethtool.c | 515 ++
24 drivers/net/ethernet/raeth/ra_ethtool.h | 13 +
25 drivers/net/ethernet/raeth/ra_ioctl.h | 102 +
26 drivers/net/ethernet/raeth/ra_mac.c | 2645 +++++++++
27 drivers/net/ethernet/raeth/ra_mac.h | 57 +
28 drivers/net/ethernet/raeth/ra_netlink.c | 142 +
29 drivers/net/ethernet/raeth/ra_netlink.h | 10 +
30 drivers/net/ethernet/raeth/ra_qos.c | 655 +++
31 drivers/net/ethernet/raeth/ra_qos.h | 18 +
32 drivers/net/ethernet/raeth/ra_rfrw.c | 66 +
33 drivers/net/ethernet/raeth/ra_rfrw.h | 6 +
34 drivers/net/ethernet/raeth/raether.c | 6401 +++++++++++++++++++++
35 drivers/net/ethernet/raeth/raether.h | 126 +
36 drivers/net/ethernet/raeth/raether_hwlro.c | 347 ++
37 drivers/net/ethernet/raeth/raether_pdma.c | 1121 ++++
38 drivers/net/ethernet/raeth/raether_qdma.c | 1407 +++++
39 drivers/net/ethernet/raeth/raether_qdma_mt7623.c | 1020 ++++
40 drivers/net/ethernet/raeth/smb_hook.c | 17 +
41 drivers/net/ethernet/raeth/smb_nf.c | 177 +
42 drivers/net/ethernet/raeth/sync_write.h | 103 +
43 35 files changed, 20357 insertions(+)
44 create mode 100644 drivers/net/ethernet/raeth/Kconfig
45 create mode 100644 drivers/net/ethernet/raeth/Makefile
46 create mode 100644 drivers/net/ethernet/raeth/Makefile.release
47 create mode 100644 drivers/net/ethernet/raeth/csr_netlink.h
48 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen.c
49 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
50 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
51 create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
52 create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
53 create mode 100644 drivers/net/ethernet/raeth/ethtool_readme.txt
54 create mode 100644 drivers/net/ethernet/raeth/mcast.c
55 create mode 100644 drivers/net/ethernet/raeth/mii_mgr.c
56 create mode 100644 drivers/net/ethernet/raeth/ra2882ethreg.h
57 create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.c
58 create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.h
59 create mode 100644 drivers/net/ethernet/raeth/ra_ioctl.h
60 create mode 100644 drivers/net/ethernet/raeth/ra_mac.c
61 create mode 100644 drivers/net/ethernet/raeth/ra_mac.h
62 create mode 100644 drivers/net/ethernet/raeth/ra_netlink.c
63 create mode 100644 drivers/net/ethernet/raeth/ra_netlink.h
64 create mode 100644 drivers/net/ethernet/raeth/ra_qos.c
65 create mode 100644 drivers/net/ethernet/raeth/ra_qos.h
66 create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.c
67 create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.h
68 create mode 100644 drivers/net/ethernet/raeth/raether.c
69 create mode 100644 drivers/net/ethernet/raeth/raether.h
70 create mode 100755 drivers/net/ethernet/raeth/raether_hwlro.c
71 create mode 100755 drivers/net/ethernet/raeth/raether_pdma.c
72 create mode 100644 drivers/net/ethernet/raeth/raether_qdma.c
73 create mode 100644 drivers/net/ethernet/raeth/raether_qdma_mt7623.c
74 create mode 100644 drivers/net/ethernet/raeth/smb_hook.c
75 create mode 100644 drivers/net/ethernet/raeth/smb_nf.c
76 create mode 100644 drivers/net/ethernet/raeth/sync_write.h
78 --- a/drivers/net/ethernet/Kconfig
79 +++ b/drivers/net/ethernet/Kconfig
80 @@ -17,6 +17,7 @@ config MDIO
84 +source "drivers/net/ethernet/raeth/Kconfig"
85 source "drivers/net/ethernet/3com/Kconfig"
86 source "drivers/net/ethernet/adaptec/Kconfig"
87 source "drivers/net/ethernet/aeroflex/Kconfig"
88 --- a/drivers/net/ethernet/Makefile
89 +++ b/drivers/net/ethernet/Makefile
90 @@ -84,3 +84,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
91 obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
92 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
93 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
94 +obj-$(CONFIG_RAETH) += raeth/
96 +++ b/drivers/net/ethernet/raeth/Kconfig
107 + tristate "Ralink GMAC"
109 + This driver supports Ralink gigabit ethernet family of
114 + default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
117 +config RAETH_SCATTER_GATHER_RX_DMA
119 + default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
124 + prompt "Network BottomHalves"
126 + default RA_NETWORK_WORKQUEUE_BH
128 + config RA_NETWORK_TASKLET_BH
131 + config RA_NETWORK_WORKQUEUE_BH
139 +#config TASKLET_WORKQUEUE_SW
140 +# bool "Tasklet and Workqueue switch"
141 +# depends on RA_NETWORK_TASKLET_BH
143 +config RAETH_SKB_RECYCLE_2K
144 + bool "SKB Recycling"
147 +config RAETH_SPECIAL_TAG
148 + bool "Ralink Special Tag (0x810x)"
149 + depends on RAETH && RT_3052_ESW
151 +#config RAETH_JUMBOFRAME
152 +# bool "Jumbo Frame up to 4K bytes"
153 +# depends on RAETH && !(RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_MT7628)
155 +config RAETH_CHECKSUM_OFFLOAD
156 + bool "TCP/UDP/IP checksum offload"
158 + depends on RAETH && !RALINK_RT2880
161 +# bool "When TX ring is full, inform kernel stop transmit and stop RX handler"
165 +#config RAETH_8023AZ_EEE
166 +# bool "Enable Embedded Switch EEE"
168 +# depends on RAETH && (RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628)
173 + bool "32bytes TX/RX description"
175 + depends on RAETH && (RALINK_MT7620 || RALINK_MT7621)
177 + At this moment, you cannot enable 32B description with Multiple RX ring at the same time.
180 + bool "LRO (Large Receive Offload)"
182 + depends on RAETH && (RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
185 + bool "HW LRO (Large Receive Offload)"
189 +config RAETH_HW_LRO_DBG
190 + bool "HW LRO Debug"
192 + depends on RAETH_HW_LRO
194 +config RAETH_HW_LRO_AUTO_ADJ_DBG
195 + bool "HW LRO Auto Adjustment Debug"
197 + depends on RAETH_HW_LRO
199 +config RAETH_HW_LRO_REASON_DBG
200 + bool "HW LRO Flush Reason Debug"
202 + depends on RAETH_HW_LRO
204 +config RAETH_HW_VLAN_TX
205 + bool "Transmit VLAN HW (DoubleVLAN is not supported)"
206 + depends on RAETH && !(RALINK_RT5350 || RALINK_MT7628)
208 + Please disable HW_VLAN_TX if you need double vlan
210 +config RAETH_HW_VLAN_RX
211 + bool "Receive VLAN HW (DoubleVLAN is not supported)"
212 + depends on RAETH && RALINK_MT7621
214 + Please disable HW_VLAN_RX if you need double vlan
217 + bool "TSOV4 (TCP Segmentation Offload)"
218 + depends on (RAETH_HW_VLAN_TX && (RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620))||((RALINK_MT7621 || ARCH_MT7623) &&(RAETH_HW_VLAN_TX || RAETH_GMAC2 ))
221 + bool "TSOV6 (TCP Segmentation Offload)"
222 + depends on RAETH_TSO
224 +config RAETH_RW_PDMAPTR_FROM_VAR
226 + default y if RALINK_RT6855A || RALINK_MT7620
230 + bool "Samba Speedup Module"
233 +config SPLICE_NET_SUPPORT
234 + default y if MTK_SMB_HOOK
235 + depends on MTK_SMB_HOOK
241 + depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
243 +config RAETH_PDMA_DVT
245 + depends on RAETH_DVT
247 +config RAETH_PDMA_LEGACY_MODE
248 + bool "PDMA legacy mode"
249 + depends on RAETH_PDMA_DVT
252 +# bool "QoS Feature"
253 +# depends on RAETH && !RALINK_RT2880 && !RALINK_MT7620 && !RALINK_MT7621 && !RAETH_TSO
257 + depends on RAETH_QOS
258 + default DSCP_QOS_DSCP
260 +config RAETH_QOS_DSCP_BASED
262 + depends on RAETH_QOS
264 +config RAETH_QOS_VPRI_BASED
266 + depends on RAETH_QOS
271 + bool "Choose QDMA instead of PDMA"
273 + depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
275 +config RAETH_QDMATX_QDMARX
276 + bool "Choose QDMA RX instead of PDMA RX"
278 + depends on RAETH_QDMA && !RALINK_MT7621
283 + prompt "GMAC is connected to"
285 + default GE1_RGMII_FORCE_1000
287 +config GE1_MII_FORCE_100
288 + bool "MII_FORCE_100 (10/100M Switch)"
289 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
292 + bool "MII_AN (100Phy)"
293 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
295 +config GE1_RVMII_FORCE_100
296 + bool "RvMII_FORCE_100 (CPU)"
297 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
299 +config GE1_RGMII_FORCE_1000
300 + bool "RGMII_FORCE_1000 (GigaSW, CPU)"
301 + depends on (RALINK_RT2880 || RALINK_RT3883)
304 +config GE1_RGMII_FORCE_1000
305 + bool "RGMII_FORCE_1000 (GigaSW, CPU)"
306 + depends on (RALINK_MT7621 || ARCH_MT7623)
309 +config GE1_TRGMII_FORCE_1200
310 + bool "TRGMII_FORCE_1200 (GigaSW, CPU)"
311 + depends on (RALINK_MT7621)
314 +config GE1_TRGMII_FORCE_2000
315 + bool "TRGMII_FORCE_2000 (GigaSW, CPU, for MT7623 and MT7683)"
316 + depends on (ARCH_MT7623)
319 +config GE1_TRGMII_FORCE_2600
320 + bool "TRGMII_FORCE_2600 (GigaSW, CPU, MT7623 only)"
321 + depends on (ARCH_MT7623)
325 + bool "RGMII_AN (GigaPhy)"
326 + depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
328 +config GE1_RGMII_NONE
329 + bool "NONE (NO CONNECT)"
330 + depends on (RALINK_MT7621 || ARCH_MT7623)
337 + depends on RAETH_QDMA && (ARCH_MT7623)
341 + bool "Ralink Embedded Switch"
343 + depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628 || ARCH_MT7623)
345 +config LAN_WAN_SUPPORT
346 + bool "LAN/WAN Partition"
347 + depends on RAETH && (RAETH_ROUTER || RT_3052_ESW)
349 +config ETH_MEMORY_OPTIMIZATION
350 + bool "Ethernet memory optimization"
351 + depends on RALINK_MT7628
353 +config ETH_ONE_PORT_ONLY
354 + bool "One Port Only"
355 + depends on RALINK_MT7628
358 + prompt "Switch Board Layout Type"
359 + depends on LAN_WAN_SUPPORT || P5_RGMII_TO_MAC_MODE || GE1_RGMII_FORCE_1000 || GE1_TRGMII_FORCE_1200 || GE2_RGMII_FORCE_1000
369 +config RALINK_VISTA_BASIC
370 + bool 'Vista Basic Logo for IC+ 175C'
371 + depends on LAN_WAN_SUPPORT && (RALINK_RT2880 || RALINK_RT3883)
373 +config ESW_DOUBLE_VLAN_TAG
375 + default y if RT_3052_ESW
377 +config RAETH_HAS_PORT4
378 + bool "Port 4 Support"
379 + depends on RAETH && RALINK_MT7620
381 + prompt "Target Mode"
382 + depends on RAETH_HAS_PORT4
383 + default P4_RGMII_TO_MAC_MODE
385 + config P4_MAC_TO_PHY_MODE
386 + bool "Giga_Phy (RGMII)"
387 + config GE_RGMII_MT7530_P0_AN
388 + bool "GE_RGMII_MT7530_P0_AN (MT7530 Internal GigaPhy)"
389 + config GE_RGMII_MT7530_P4_AN
390 + bool "GE_RGMII_MT7530_P4_AN (MT7530 Internal GigaPhy)"
391 + config P4_RGMII_TO_MAC_MODE
392 + bool "Giga_SW/iNIC (RGMII)"
393 + config P4_MII_TO_MAC_MODE
394 + bool "External_CPU (MII_RvMII)"
395 + config P4_RMII_TO_MAC_MODE
396 + bool "External_CPU (RvMII_MII)"
399 +config MAC_TO_GIGAPHY_MODE_ADDR2
400 + hex "Port4 Phy Address"
402 + depends on P4_MAC_TO_PHY_MODE
404 +config RAETH_HAS_PORT5
405 + bool "Port 5 Support"
406 + depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620)
408 + prompt "Target Mode"
409 + depends on RAETH_HAS_PORT5
410 + default P5_RGMII_TO_MAC_MODE
412 + config P5_MAC_TO_PHY_MODE
413 + bool "Giga_Phy (RGMII)"
414 + config P5_RGMII_TO_MAC_MODE
415 + bool "Giga_SW/iNIC (RGMII)"
416 + config P5_RGMII_TO_MT7530_MODE
417 + bool "MT7530 Giga_SW (RGMII)"
418 + depends on RALINK_MT7620
419 + config P5_MII_TO_MAC_MODE
420 + bool "External_CPU (MII_RvMII)"
421 + config P5_RMII_TO_MAC_MODE
422 + bool "External_CPU (RvMII_MII)"
425 +config MAC_TO_GIGAPHY_MODE_ADDR
426 + hex "GE1 Phy Address"
428 + depends on GE1_MII_AN || GE1_RGMII_AN
430 +config MAC_TO_GIGAPHY_MODE_ADDR
431 + hex "Port5 Phy Address"
433 + depends on P5_MAC_TO_PHY_MODE
436 + bool "GMAC2 Support"
437 + depends on RAETH && (RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
440 + prompt "GMAC2 is connected to"
441 + depends on RAETH_GMAC2
442 + default GE2_RGMII_AN
444 +config GE2_MII_FORCE_100
445 + bool "MII_FORCE_100 (10/100M Switch)"
446 + depends on RAETH_GMAC2
449 + bool "MII_AN (100Phy)"
450 + depends on RAETH_GMAC2
452 +config GE2_RVMII_FORCE_100
453 + bool "RvMII_FORCE_100 (CPU)"
454 + depends on RAETH_GMAC2
456 +config GE2_RGMII_FORCE_1000
457 + bool "RGMII_FORCE_1000 (GigaSW, CPU)"
458 + depends on RAETH_GMAC2
462 + bool "RGMII_AN (External GigaPhy)"
463 + depends on RAETH_GMAC2
465 +config GE2_INTERNAL_GPHY
466 + bool "RGMII_AN (Internal GigaPhy)"
467 + depends on RAETH_GMAC2
468 + select LAN_WAN_SUPPORT
472 +config GE_RGMII_INTERNAL_P0_AN
474 + depends on GE2_INTERNAL_GPHY
475 + default y if WAN_AT_P0
477 +config GE_RGMII_INTERNAL_P4_AN
479 + depends on GE2_INTERNAL_GPHY
480 + default y if WAN_AT_P4
482 +config MAC_TO_GIGAPHY_MODE_ADDR2
484 + default 0 if GE_RGMII_INTERNAL_P0_AN
485 + default 4 if GE_RGMII_INTERNAL_P4_AN
486 + depends on GE_RGMII_INTERNAL_P0_AN || GE_RGMII_INTERNAL_P4_AN
488 +config MAC_TO_GIGAPHY_MODE_ADDR2
489 + hex "GE2 Phy Address"
491 + depends on GE2_MII_AN || GE2_RGMII_AN
496 +default y if GE1_MII_FORCE_100 || GE2_MII_FORCE_100 || GE1_RVMII_FORCE_100 || GE2_RVMII_FORCE_100
499 +config MAC_TO_MAC_MODE
501 +default y if GE1_RGMII_FORCE_1000 || GE2_RGMII_FORCE_1000
502 +depends on (RALINK_RT2880 || RALINK_RT3883)
507 +default y if GE1_RGMII_AN || GE2_RGMII_AN
512 +default y if GE1_MII_AN || GE2_MII_AN
514 +++ b/drivers/net/ethernet/raeth/Makefile
516 +obj-$(CONFIG_RAETH) += raeth.o
517 +raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
519 +ifeq ($(CONFIG_MTK_SMB_HOOK),y)
522 +smb-objs := smb_nf.o
525 +#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING
527 +ifeq ($(CONFIG_RAETH_QOS),y)
528 +raeth-objs += ra_qos.o
531 +ifeq ($(CONFIG_RAETH_QDMA),y)
532 +raeth-objs += raether_qdma.o
535 +ifneq ($(CONFIG_RAETH_QDMA),y)
536 +raeth-objs += raether_pdma.o
539 +raeth-objs += raether.o
541 +ifeq ($(CONFIG_ETHTOOL),y)
542 +raeth-objs += ra_ethtool.o
545 +ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
546 +raeth-objs += mcast.o
549 +ifeq ($(CONFIG_RAETH_NETLINK),y)
550 +raeth-objs += ra_netlink.o
553 +ifeq ($(CONFIG_RAETH_PDMA_DVT),y)
554 +raeth-objs += dvt/raether_pdma_dvt.o
555 +obj-m += dvt/pkt_gen.o
556 +obj-m += dvt/pkt_gen_udp_frag.o
557 +obj-m += dvt/pkt_gen_tcp_frag.o
560 +ifeq ($(CONFIG_RAETH_HW_LRO),y)
561 +raeth-objs += raether_hwlro.o
564 +ifeq ($(CONFIG_RAETH_GMAC2),y)
565 +EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
568 +ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
569 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
572 +ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
573 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
576 +ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
577 +EXTRA_CFLAGS += -DWORKQUEUE_BH
580 +ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
581 +EXTRA_CFLAGS += -DTASKLET_WORKQUEUE_SW
584 +++ b/drivers/net/ethernet/raeth/Makefile.release
586 +obj-$(CONFIG_RAETH) += raeth.o
587 +raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
589 +ifeq ($(CONFIG_MTK_SMB_HOOK),y)
592 +smb-objs := smb_nf.o
595 +#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING
597 +ifeq ($(CONFIG_RAETH_QOS),y)
598 +raeth-objs += ra_qos.o
601 +ifeq ($(CONFIG_RAETH_QDMA),y)
602 +raeth-objs += raether_qdma.o
605 +ifneq ($(CONFIG_RAETH_QDMA),y)
606 +raeth-objs += raether_pdma.o
609 +raeth-objs += raether.o
611 +ifeq ($(CONFIG_ETHTOOL),y)
612 +raeth-objs += ra_ethtool.o
615 +ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
616 +raeth-objs += mcast.o
619 +ifeq ($(CONFIG_RAETH_NETLINK),y)
620 +raeth-objs += ra_netlink.o
623 +ifeq ($(CONFIG_RAETH_HW_LRO),y)
624 +raeth-objs += raether_hwlro.o
627 +ifeq ($(CONFIG_RAETH_GMAC2),y)
628 +EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
631 +ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
632 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
635 +ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
636 +EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
639 +ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
640 +EXTRA_CFLAGS += -DWORKQUEUE_BH
643 +ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
644 +EXTRA_CFLAGS += -DTASKLET_WORKQUEUE_SW
647 +++ b/drivers/net/ethernet/raeth/csr_netlink.h
649 +#ifndef CSR_NETLINK_H
650 +#define CSR_NETLINK_H
652 +#define CSR_NETLINK 30
657 +#define RALINK_CSR_GROUP 2882
659 +typedef struct rt2880_csr_msg {
662 + unsigned long address;
663 + unsigned long default_value;
664 + unsigned long reserved_bits; /* 1 : not reserved, 0 : reserved */
665 + unsigned long write_mask;
666 + unsigned long write_value;
670 +int csr_msg_send(CSR_MSG* msg);
671 +int csr_msg_recv(void);
673 +// static CSR_MSG input_csr_msg;
677 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen.c
679 +//#include <linux/config.h>
680 +#include <linux/version.h>
681 +#include <linux/module.h>
682 +#include <linux/skbuff.h>
683 +#include <linux/kernel.h>
684 +#include <linux/init.h>
685 +#include <linux/netfilter.h>
686 +#include <linux/netdevice.h>
687 +#include <linux/types.h>
688 +#include <asm/uaccess.h>
689 +#include <linux/moduleparam.h>
691 +char *ifname="eth3";
693 +static int32_t PktGenInitMod(void)
696 + struct net_dev *dev;
697 + struct sk_buff *skb;
700 + unsigned char pkt[]={
701 + //0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // dest bcast mac
702 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest macA
703 + //0x00, 0x30, 0xdb, 0x02, 0x02, 0x01, // dest macB
704 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
705 + 0x81, 0x00, // vlan tag
706 + //0x81, 0x10, // vlan tag
707 + //0x87, 0x39, // do not learn
708 + //0xc1, 0x03, // vlan tag SA=0, VID=2, LV=1
709 + 0x00, 0x03, // pri=0, vlan=3
710 + 0x08, 0x00, // eth type=ip
711 + 0x45, 0x00, 0x00, 0x30, 0x12, 0x34, 0x40, 0x00, 0xff, 0x06,
712 + 0x40, 0x74, 0x0a, 0x0a, 0x1e, 0x0a, 0x0a, 0x0a, 0x1e, 0x0b,
713 + 0x00, 0x1e, 0x00, 0x28, 0x00, 0x1c, 0x81, 0x06, 0x00, 0x00,
714 + 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
715 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
717 + skb = alloc_skb(256, GFP_ATOMIC);
719 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
720 + if((dev=dev_get_by_name(&init_net,ifname))){
722 + if((dev=dev_get_by_name(ifname))){
728 + skb_put(skb,sizeof(pkt));
729 + memcpy(skb->data, pkt, sizeof(pkt));
731 + printk("send pkt(len=%d) to %s\n", skb->len, skb->dev->name);
734 + for(i=0;i<sizeof(pkt);i++){
738 + printk("%02X-",skb->data[i]);
741 + dev_queue_xmit(skb);
743 + printk("interface %s not found\n",ifname);
750 +static void PktGenCleanupMod(void)
754 +module_init(PktGenInitMod);
755 +module_exit(PktGenCleanupMod);
756 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
757 +MODULE_PARM (ifname, "s");
759 +module_param (ifname, charp, 0);
762 +MODULE_DESCRIPTION("Ralink PktGen Module");
763 +MODULE_AUTHOR("Steven Liu");
764 +MODULE_LICENSE("Proprietary");
765 +MODULE_PARM_DESC (ifname, "interface name");
768 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
770 +//#include <linux/config.h>
771 +#include <linux/version.h>
772 +#include <linux/module.h>
773 +#include <linux/skbuff.h>
774 +#include <linux/kernel.h>
775 +#include <linux/init.h>
776 +#include <linux/netfilter.h>
777 +#include <linux/netdevice.h>
778 +#include <linux/types.h>
779 +#include <asm/uaccess.h>
780 +#include <linux/moduleparam.h>
782 +char *ifname="eth3";
785 +static int32_t PktGenInitMod(void)
787 + unsigned char pkt_1[]={
788 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
789 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
790 + 0x08, 0x00, // type: ip
791 + 0x45, 0x00, 0x00, 0x34, // ip: ..., total len (0x034 = 52)
792 + 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
793 + 0x80, 0x06, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
794 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
795 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
796 + 0x0d, 0xd5, //tcp src port
797 + 0x13, 0x89, //tcp dst port
798 + 0x40, 0xf5, 0x15, 0x04, //tcp sequence number
799 + 0xf6, 0x4f, 0x1e, 0x31, //tcp ack number
800 + 0x50, 0x10, 0xfc, 0x00, //tcp flags, win size
801 + 0xf1, 0xfe, 0x00, 0x00, //tcp checksum (0xf1fe)
802 + 0x01, 0x02, 0x03, 0x04, 0x05, //payload (12 bytes)
803 + 0x06, 0x07, 0x08, 0x09, 0x0a,
807 + unsigned char pkt_2[]={
808 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
809 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
810 + 0x08, 0x00, // type: ip
811 + 0x45, 0x00, 0x00, 0x20, // ip: ..., total len (0x020 = 32)
812 + 0xa1, 0x78, 0x00, 0x04, // ip: id, frag, frag offset (32)
813 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
814 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
815 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
816 + 0x11, 0x12, 0x13, 0x14, 0x15, //payload (12 bytes)
817 + 0x16, 0x17, 0x18, 0x19, 0x1a,
821 + struct net_dev *dev;
822 + struct sk_buff *skb_1;
823 + struct sk_buff *skb_2;
826 + skb_1 = alloc_skb(256, GFP_ATOMIC);
827 + skb_2 = alloc_skb(256, GFP_ATOMIC);
832 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
833 + if((dev=dev_get_by_name(&init_net,ifname))){
835 + if((dev=dev_get_by_name(ifname))){
839 + skb_put(skb_1,sizeof(pkt_1));
840 + memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
842 + printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
845 + for(i=0;i<sizeof(pkt_1);i++){
849 + printk("%02X-",skb_1->data[i]);
852 + dev_queue_xmit(skb_1);
854 + printk("interface %s not found\n",ifname);
861 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
862 + if((dev=dev_get_by_name(&init_net,ifname))){
864 + if((dev=dev_get_by_name(ifname))){
868 + skb_put(skb_2,sizeof(pkt_2));
869 + memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
871 + printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
874 + for(i=0;i<sizeof(pkt_2);i++){
878 + printk("%02X-",skb_2->data[i]);
881 + dev_queue_xmit(skb_2);
883 + printk("interface %s not found\n",ifname);
891 +static void PktGenCleanupMod(void)
895 +module_init(PktGenInitMod);
896 +module_exit(PktGenCleanupMod);
897 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
898 +MODULE_PARM (ifname, "s");
900 +module_param (ifname, charp, 0);
903 +MODULE_DESCRIPTION("Ralink PktGen Module");
904 +MODULE_AUTHOR("Steven Liu");
905 +MODULE_LICENSE("Proprietary");
906 +MODULE_PARM_DESC (ifname, "interface name");
909 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
911 +//#include <linux/config.h>
912 +#include <linux/version.h>
913 +#include <linux/module.h>
914 +#include <linux/skbuff.h>
915 +#include <linux/kernel.h>
916 +#include <linux/init.h>
917 +#include <linux/netfilter.h>
918 +#include <linux/netdevice.h>
919 +#include <linux/types.h>
920 +#include <asm/uaccess.h>
921 +#include <linux/moduleparam.h>
923 +char *ifname="eth3";
926 +static int32_t PktGenInitMod(void)
929 + unsigned char pkt_0[]={
930 +// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
931 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
932 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
933 + 0x08, 0x00, // type: ip
934 + 0x45, 0x00, 0x00, 0x26, // ip: ..., total len (0x026 = 38)
935 +// 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
936 + 0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
937 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
938 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
939 +// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
940 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
941 + 0xca, 0x7b, //udp src port
942 + 0x13, 0x89, //udp dst port
943 + 0x00, 0x12, //udp len (0x01c = 18)
944 + 0x2f, 0x96, //udp checksum (0x2f96)
945 + 0x01, 0x02, 0x03, 0x04, 0x05, //payload (10 bytes)
946 + 0x06, 0x07, 0x08, 0x09, 0x0a
950 + unsigned char pkt_1[]={
951 +// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
952 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
953 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
954 + 0x08, 0x00, // type: ip
955 + 0x45, 0x00, 0x00, 0x24, // ip: ..., total len (0x024 = 36)
956 + 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
957 +// 0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
958 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
959 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
960 +// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
961 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
962 + 0xca, 0x7b, //udp src port
963 + 0x13, 0x89, //udp dst port
964 + 0x00, 0x1a, //udp len (0x01a = 26)
965 + 0x2f, 0x96, //udp checksum (0x2f96)
966 + 0x01, 0x02, 0x03, 0x04, 0x05, //payload (8 bytes)
970 + unsigned char pkt_2[]={
971 +// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
972 + 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
973 + 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
974 + 0x08, 0x00, // type: ip
975 + 0x45, 0x00, 0x00, 0x1e, // ip: ..., total len (0x01e = 30)
976 + 0xa1, 0x78, 0x00, 0x02, // ip: id, frag, frag offset (16)
977 + 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
978 + 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
979 +// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
980 + 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
981 + 0x11, 0x12, 0x13, 0x14, 0x15, //payload (10 bytes)
982 + 0x16, 0x17, 0x18, 0x19, 0x1a
985 + struct net_dev *dev;
986 +// struct sk_buff *skb_0;
987 + struct sk_buff *skb_1;
988 + struct sk_buff *skb_2;
991 +// skb_0 = alloc_skb(256, GFP_ATOMIC);
992 + skb_1 = alloc_skb(256, GFP_ATOMIC);
993 + skb_2 = alloc_skb(256, GFP_ATOMIC);
997 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
998 + if((dev=dev_get_by_name(&init_net,ifname))){
1000 + if((dev=dev_get_by_name(ifname))){
1004 + skb_put(skb_0,sizeof(pkt_0));
1005 + memcpy(skb_0->data, pkt_0, sizeof(pkt_0));
1007 + printk("send pkt(len=%d) to %s\n", skb_0->len, skb_0->dev->name);
1010 + for(i=0;i<sizeof(pkt_0);i++){
1014 + printk("%02X-",skb_0->data[i]);
1017 + dev_queue_xmit(skb_0);
1019 + printk("interface %s not found\n",ifname);
1025 +/* send packet 1 */
1026 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1027 + if((dev=dev_get_by_name(&init_net,ifname))){
1029 + if((dev=dev_get_by_name(ifname))){
1033 + skb_put(skb_1,sizeof(pkt_1));
1034 + memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
1036 + printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
1039 + for(i=0;i<sizeof(pkt_1);i++){
1043 + printk("%02X-",skb_1->data[i]);
1046 + dev_queue_xmit(skb_1);
1048 + printk("interface %s not found\n",ifname);
1054 +/* send packet 2 */
1055 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1056 + if((dev=dev_get_by_name(&init_net,ifname))){
1058 + if((dev=dev_get_by_name(ifname))){
1062 + skb_put(skb_2,sizeof(pkt_2));
1063 + memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
1065 + printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
1068 + for(i=0;i<sizeof(pkt_2);i++){
1072 + printk("%02X-",skb_2->data[i]);
1075 + dev_queue_xmit(skb_2);
1077 + printk("interface %s not found\n",ifname);
1085 +static void PktGenCleanupMod(void)
1089 +module_init(PktGenInitMod);
1090 +module_exit(PktGenCleanupMod);
1091 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
1092 +MODULE_PARM (ifname, "s");
1094 +module_param (ifname, charp, 0);
1097 +MODULE_DESCRIPTION("Ralink PktGen Module");
1098 +MODULE_AUTHOR("Steven Liu");
1099 +MODULE_LICENSE("Proprietary");
1100 +MODULE_PARM_DESC (ifname, "interface name");
1103 +++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
1105 +#include <linux/module.h>
1106 +#include <linux/version.h>
1107 +#include <linux/kernel.h>
1108 +#include <linux/types.h>
1109 +#include <linux/pci.h>
1110 +#include <linux/init.h>
1111 +#include <linux/skbuff.h>
1112 +#include <linux/if_vlan.h>
1113 +#include <linux/if_ether.h>
1114 +#include <linux/fs.h>
1115 +#include <asm/uaccess.h>
1116 +#include <asm/rt2880/surfboardint.h>
1117 +#if defined(CONFIG_RAETH_TSO)
1118 +#include <linux/tcp.h>
1119 +#include <net/ipv6.h>
1120 +#include <linux/ip.h>
1121 +#include <net/ip.h>
1122 +#include <net/tcp.h>
1123 +#include <linux/in.h>
1124 +#include <linux/ppp_defs.h>
1125 +#include <linux/if_pppox.h>
1127 +#if defined(CONFIG_RAETH_LRO)
1128 +#include <linux/inet_lro.h>
1130 +#include <linux/delay.h>
1131 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1132 +#include <linux/sched.h>
1135 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
1136 +#include <asm/rt2880/rt_mmap.h>
1138 +#include <linux/libata-compat.h>
1141 +#include "../ra2882ethreg.h"
1142 +#include "../raether.h"
1143 +#include "../ra_mac.h"
1144 +#include "../ra_ioctl.h"
1145 +#include "../ra_rfrw.h"
1146 +#ifdef CONFIG_RAETH_NETLINK
1147 +#include "../ra_netlink.h"
1149 +#if defined(CONFIG_RAETH_QOS)
1150 +#include "../ra_qos.h"
1152 +#include "raether_pdma_dvt.h"
1154 +/* Global variables */
1155 +static unsigned int g_pdma_dvt_show_config;
1156 +static unsigned int g_pdma_dvt_rx_test_config;
1157 +static unsigned int g_pdma_dvt_tx_test_config;
1158 +static unsigned int g_pdma_dvt_debug_test_config;
1159 +static unsigned int g_pdma_dvt_lro_test_config;
1161 +unsigned int g_pdma_dev_lanport = 0;
1162 +unsigned int g_pdma_dev_wanport = 0;
1164 +void skb_dump(struct sk_buff *sk)
1168 + printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
1169 + sk->dev ? sk->dev->name : "ip stack", sk->len, sk->truesize,
1170 + skb_headroom(sk), skb_tailroom(sk));
1172 + /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) { */
1173 + /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) { */
1174 + for (i = (unsigned int)sk->head; i <= (unsigned int)sk->data + 60; i++) {
1175 + if ((i % 20) == 0)
1177 + if (i == (unsigned int)sk->data)
1179 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 21)
1180 + if (i == (unsigned int)sk->transport_header)
1182 + if (i == (unsigned int)sk->network_header)
1184 + if (i == (unsigned int)sk->mac_header)
1187 + if (i == (unsigned int)sk->h.raw)
1189 + if (i == (unsigned int)sk->nh.raw)
1191 + if (i == (unsigned int)sk->mac.raw)
1194 + printk("%02X-", *((unsigned char *)i));
1195 + if (i == (unsigned int)sk->tail)
1201 +#if defined(CONFIG_RAETH_HW_LRO)
1202 +/* PDMA LRO test functions start */
1203 +int pdma_lro_disable_dvt(void)
1205 + unsigned int regVal = 0;
1207 + printk("pdma_lro_disable_dvt()\n");
1209 + /* 1. Invalid LRO ring1~3 */
1210 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1211 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1212 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1214 + /* 2. Polling relinquish */
1215 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1218 + /* 3. Disable LRO */
1219 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1220 + regVal &= ~(PDMA_LRO_EN);
1221 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1224 + /* 4. Disable non-lro multiple rx */
1225 + SET_PDMA_NON_LRO_MULTI_EN(0);
1227 + /* 5.1. Set GDM1 to ring0 */
1228 + SET_GDM_PID1_RXID_SEL(0);
1229 + /* 5.2. Set GDM2 to ring0 */
1230 + SET_GDM_PID2_RXID_SEL(0);
1236 +int pdma_lro_force_aggre_dvt(void)
1238 + unsigned int regVal = 0;
1241 + printk("pdma_lro_force_aggre_dvt()\n");
1243 +/* pdma rx ring1 */
1244 + /* 1. Set RX ring mode to force port */
1245 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
1247 + /* 2. Configure lro ring */
1248 + /* 2.1 set src/destination TCP ports */
1249 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
1250 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
1251 + /* 2.2 set src/destination IPs */
1252 + str_to_ip(&ip, "10.10.10.3");
1253 + sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
1254 + str_to_ip(&ip, "10.10.10.100");
1255 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1256 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1258 + /* 2.3 Valid LRO ring */
1259 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1261 + /* 2.4 Set AGE timer */
1262 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
1264 + /* 2.5 Set max AGG timer */
1265 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
1267 + /* 2.6 Set max LRO agg count */
1268 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
1270 + /* 3. IPv4 checksum update enable */
1271 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1273 +	/* 4. Polling relinquish */
1274 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1277 + /* 5. Enable LRO */
1278 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1279 + regVal |= PDMA_LRO_EN;
1280 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1285 +int pdma_lro_auto_aggre_dvt(void)
1287 + unsigned int regVal = 0;
1290 + printk("pdma_lro_auto_aggre_dvt()\n");
1292 + /* 1.1 Set my IP_1 */
1293 + str_to_ip(&ip, "10.10.10.254");
1294 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1295 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1296 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1297 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1298 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
1300 + /* 1.2 Set my IP_2 */
1301 + str_to_ip(&ip, "10.10.20.254");
1302 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1303 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
1304 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
1305 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
1306 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1308 + /* 1.3 Set my IP_3 */
1309 + sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
1310 + sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
1311 + sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
1312 + sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
1313 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1315 + /* 1.4 Set my IP_4 */
1316 + sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
1317 + sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
1318 + sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
1319 + sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
1320 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1322 + /* 2.1 Set RX ring1~3 to auto-learn modes */
1323 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1324 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1325 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1327 + /* 2.2 Valid LRO ring */
1328 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1329 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1330 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1331 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1333 + /* 2.3 Set AGE timer */
1334 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
1335 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, 0);
1336 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, 0);
1338 + /* 2.4 Set max AGG timer */
1339 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
1340 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, 0);
1341 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, 0);
1343 + /* 2.5 Set max LRO agg count */
1344 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
1345 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
1346 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
1348 + /* 3.0 IPv6 LRO enable */
1349 + SET_PDMA_LRO_IPV6_EN(1);
1351 +	/* 3.1 IPv4 checksum update enable */
1352 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1354 +	/* 3.2 switch priority comparison to byte count mode */
1355 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1357 + /* 3.3 bandwidth threshold setting */
1358 + SET_PDMA_LRO_BW_THRESHOLD(0);
1360 + /* 3.4 auto-learn score delta setting */
1361 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1363 + /* 3.5 Set ALT timer to 20us: (unit: 20us) */
1364 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
1365 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
1366 + SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
1368 +	/* 4. Polling relinquish */
1369 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1372 + /* 5. Enable LRO */
1373 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1374 + regVal |= PDMA_LRO_EN;
1375 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1380 +int pdma_lro_auto_ipv6_dvt(void)
1382 + unsigned int regVal = 0;
1384 + printk("pdma_lro_auto_ipv6_dvt()\n");
1386 + /* 1. Set my IP */
1387 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0x20010238);
1388 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0x08000000);
1389 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0x00000000);
1390 + sysRegWrite(LRO_RX_RING1_DIP_DW0, 0x00000254);
1392 + /* 2.1 Set RX ring1~3 to auto-learn modes */
1393 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1394 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1395 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1397 + /* 2.2 Valid LRO ring */
1398 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1399 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1400 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1402 + /* 2.3 Set AGE timer */
1403 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1404 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1405 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1407 + /* 3.0 IPv6 LRO enable */
1408 + SET_PDMA_LRO_IPV6_EN(1);
1410 +	/* 3.1 IPv4 checksum update enable */
1411 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1413 +	/* 3.2 switch priority comparison to byte count mode */
1414 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1416 + /* 3.3 bandwidth threshold setting */
1417 + SET_PDMA_LRO_BW_THRESHOLD(0);
1419 + /* 3.4 auto-learn score delta setting */
1420 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1422 + /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1423 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1424 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1425 + SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1427 + /* 3.7 Set max AGG timer: 10 msec. */
1428 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1430 +	/* 4. Polling relinquish */
1431 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1434 + /* 5. Enable LRO */
1435 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1436 + regVal |= PDMA_LRO_EN;
1437 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1442 +int pdma_lro_auto_myIP_dvt(void)
1444 + unsigned int regVal = 0;
1447 + printk("pdma_lro_auto_myIP_dvt()\n");
1449 + /* 1.1 Set my IP_1 */
1450 + str_to_ip(&ip, "10.10.10.254");
1451 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1452 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1453 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1454 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1455 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
1456 + /* 1.2 Set my IP_2 */
1457 + str_to_ip(&ip, "10.10.20.254");
1458 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1459 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
1460 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
1461 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
1462 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1463 + /* 1.3 Set my IP_3 */
1464 + sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
1465 + sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
1466 + sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
1467 + sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
1468 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1469 + /* 1.4 Set my IP_4 */
1470 + sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
1471 + sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
1472 + sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
1473 + sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
1474 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1476 + /* 2.1 Set RX ring1~3 to auto-learn modes */
1477 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1478 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1479 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1481 + /* 2.2 Valid LRO ring */
1482 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1483 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1484 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1485 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1487 + /* 2.3 Set AGE timer */
1488 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1489 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1490 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1492 + /* 3.0 IPv6 LRO enable */
1493 + SET_PDMA_LRO_IPV6_EN(1);
1495 +	/* 3.1 IPv4 checksum update enable */
1496 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1498 +	/* 3.2 switch priority comparison to byte count mode */
1499 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1501 + /* 3.3 bandwidth threshold setting */
1502 + SET_PDMA_LRO_BW_THRESHOLD(0);
1504 + /* 3.4 auto-learn score delta setting */
1505 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1507 + /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1508 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1509 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1510 + SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1512 + /* 3.7 Set max AGG timer: 10 msec. */
1513 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1515 +	/* 4. Polling relinquish */
1516 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1519 + /* 5. Enable LRO */
1520 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1521 + regVal |= PDMA_LRO_EN;
1522 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1527 +int pdma_lro_dly_int_dvt(int index)
1529 + unsigned int regVal = 0;
1532 + printk("pdma_lro_dly_int_dvt(%d)\n", index);
1535 + /* 1.1 Set my IP_1 */
1536 + /* str_to_ip( &ip, "10.10.10.254" ); */
1537 + str_to_ip(&ip, "10.10.10.100");
1538 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1539 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1540 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1541 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1543 + /* 1.1 set src/destination TCP ports */
1544 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
1545 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
1546 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 3423);
1547 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 2301);
1548 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 3423);
1549 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 2301);
1550 + /* 1.2 set src/destination IPs */
1551 + str_to_ip(&ip, "10.10.10.3");
1552 + sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
1553 + str_to_ip(&ip, "10.10.10.100");
1554 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1555 + str_to_ip(&ip, "10.10.10.3");
1556 + sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
1557 + str_to_ip(&ip, "10.10.10.100");
1558 + sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
1559 + str_to_ip(&ip, "10.10.10.3");
1560 + sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
1561 + str_to_ip(&ip, "10.10.10.100");
1562 + sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
1563 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1564 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1565 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1569 + /* 1.2 Disable DLY_INT for lro ring */
1570 + SET_PDMA_LRO_DLY_INT_EN(0);
1572 + /* 1.2 Enable DLY_INT for lro ring */
1573 + SET_PDMA_LRO_DLY_INT_EN(1);
1576 + /* 1.3 LRO ring DLY_INT setting */
1578 + sysRegWrite(LRO_RX1_DLY_INT, DELAY_INT_INIT);
1579 + } else if (index == 2) {
1580 + sysRegWrite(LRO_RX2_DLY_INT, DELAY_INT_INIT);
1581 + } else if (index == 3) {
1582 + sysRegWrite(LRO_RX3_DLY_INT, DELAY_INT_INIT);
1585 + /* 2.1 Set RX rings to auto-learn modes */
1586 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1587 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1588 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1590 + /* 2.0 set rx ring mode */
1591 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
1592 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_FORCE_PORT);
1593 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_FORCE_PORT);
1595 + /* 2.1 IPv4 force port mode */
1596 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
1597 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
1598 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
1601 + /* 2.2 Valid LRO ring */
1602 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1603 + if ((index == 0) || (index == 1)) {
1604 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1605 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1606 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1607 + } else if (index == 2) {
1608 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1609 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1610 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1612 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1613 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1614 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1617 + /* 2.3 Set AGE timer */
1618 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1619 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1620 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1622 + /* 3.1 IPv4 checksum update enable */
1623 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1625 +	/* 3.2 switch priority comparison to byte count mode */
1626 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1628 + /* 3.3 bandwidth threshold setting */
1629 + SET_PDMA_LRO_BW_THRESHOLD(0);
1631 + /* 3.4 auto-learn score delta setting */
1632 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1634 + /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1635 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1636 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1637 + SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1639 + /* 3.7 Set max AGG timer */
1640 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1642 +	/* 4. Polling relinquish */
1643 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1646 + /* 5. Enable LRO */
1647 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1648 + regVal |= PDMA_LRO_EN;
1649 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1654 +int pdma_lro_dly_int0_dvt(void)
1656 + return pdma_lro_dly_int_dvt(0);
1659 +int pdma_lro_dly_int1_dvt(void)
1661 + return pdma_lro_dly_int_dvt(1);
1664 +int pdma_lro_dly_int2_dvt(void)
1666 + return pdma_lro_dly_int_dvt(2);
1669 +int pdma_lro_dly_int3_dvt(void)
1671 + return pdma_lro_dly_int_dvt(3);
1674 +#endif /* CONFIG_RAETH_HW_LRO */
1676 +#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1677 +int pdma_gdm_rxid_config(void)
1679 + unsigned int regVal = 0;
1681 + printk("pdma_gdm_rxid_config()\n");
1683 + /* 1. Set RX ring1~3 to pse modes */
1684 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
1685 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_PSE_MODE);
1686 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_PSE_MODE);
1688 + /* 2. Enable non-lro multiple rx */
1689 + SET_PDMA_NON_LRO_MULTI_EN(1);
1694 +int pdma_non_lro_portid_dvt(void)
1696 + unsigned int regVal = 0;
1698 + printk("pdma_non_lro_portid_dvt()\n");
1700 + /* 1. Set GDM1 to ring3 */
1701 + SET_GDM_PID1_RXID_SEL(3);
1703 + /* 2. Set GDM2 to ring1 */
1704 + SET_GDM_PID2_RXID_SEL(1);
1707 + /* 3. Set priority rule: pid */
1708 + SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
1710 + /* PDMA multi-rx enable */
1711 + pdma_gdm_rxid_config();
1716 +int pdma_non_lro_stag_dvt(void)
1718 + unsigned int regVal = 0;
1720 + printk("pdma_non_lro_stag_dvt()\n");
1722 + /* 1. Set STAG4 to ring0 */
1723 + GDM_STAG_RXID_SEL(4, 0);
1724 + /* 2. Set STAG3 to ring1 */
1725 + GDM_STAG_RXID_SEL(3, 1);
1726 + /* 3. Set STAG2 to ring2 */
1727 + GDM_STAG_RXID_SEL(2, 2);
1728 + /* 4. Set STAG1 to ring3 */
1729 + GDM_STAG_RXID_SEL(1, 3);
1731 + /* 5. Set priority rule: stag/pid */
1732 + SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
1734 + /* PDMA multi-rx enable */
1735 + pdma_gdm_rxid_config();
1740 +int pdma_non_lro_vlan_dvt(void)
1742 + unsigned int regVal = 0;
1744 + printk("pdma_non_lro_vlan_dvt()\n");
1746 + /* 1. Set vlan priority=3 to ring1 */
1747 + SET_GDM_VLAN_PRI_RXID_SEL(3, 1);
1748 + /* 2. Set vlan priority=2 to ring2 */
1749 + SET_GDM_VLAN_PRI_RXID_SEL(2, 2);
1750 + /* 3. Set vlan priority=1 to ring3 */
1751 + SET_GDM_VLAN_PRI_RXID_SEL(1, 3);
1752 + /* 4. Set vlan priority=0 to ring3 */
1753 + SET_GDM_VLAN_PRI_RXID_SEL(0, 3);
1755 + /* 1. Set vlan priority=4 to ring1 */
1756 + SET_GDM_VLAN_PRI_RXID_SEL(4, 1);
1757 + /* 2. Set vlan priority=5 to ring2 */
1758 + SET_GDM_VLAN_PRI_RXID_SEL(5, 2);
1759 + /* 3. Set vlan priority=6 to ring3 */
1760 + SET_GDM_VLAN_PRI_RXID_SEL(6, 3);
1761 + /* 4. Set vlan priority=7 to ring3 */
1762 + SET_GDM_VLAN_PRI_RXID_SEL(7, 3);
1764 + /* 4. Set priority rule: vlan > pid */
1765 + SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_PID);
1767 + /* PDMA multi-rx enable */
1768 + pdma_gdm_rxid_config();
1773 +int pdma_non_lro_tcpack_dvt(void)
1775 + unsigned int regVal = 0;
1777 + printk("pdma_non_lro_tcpack_dvt()\n");
1779 + /* 1. Enable TCP ACK with zero payload check */
1780 + SET_GDM_TCP_ACK_WZPC(1);
1781 + /* 2. Set TCP ACK to ring3 */
1782 + SET_GDM_TCP_ACK_RXID_SEL(3);
1784 + /* 3. Set priority rule: ack > pid */
1785 + SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_PID);
1787 + /* PDMA multi-rx enable */
1788 + pdma_gdm_rxid_config();
1793 +int pdma_non_lro_pri1_dvt(void)
1795 + unsigned int regVal = 0;
1797 + printk("pdma_non_lro_pri1_dvt()\n");
1799 + /* 1. Set GDM1 to ring0 */
1800 + SET_GDM_PID1_RXID_SEL(0);
1802 + /* 2.1 Disable TCP ACK with zero payload check */
1803 + SET_GDM_TCP_ACK_WZPC(0);
1804 + /* 2.2 Set TCP ACK to ring1 */
1805 + SET_GDM_TCP_ACK_RXID_SEL(1);
1807 + /* 3. Set vlan priority=1 to ring2 */
1808 + SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
1810 + /* 4. Set priority rule: vlan > ack > pid */
1811 + SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_ACK_PID);
1813 + /* PDMA multi-rx enable */
1814 + pdma_gdm_rxid_config();
1819 +int pdma_non_lro_pri2_dvt(void)
1821 + unsigned int regVal = 0;
1823 + printk("pdma_non_lro_pri2_dvt()\n");
1825 + /* 1. Set GDM1 to ring0 */
1826 + SET_GDM_PID1_RXID_SEL(0);
1828 + /* 2.1 Disable TCP ACK with zero payload check */
1829 + SET_GDM_TCP_ACK_WZPC(0);
1830 + /* 2.2 Set TCP ACK to ring1 */
1831 + SET_GDM_TCP_ACK_RXID_SEL(1);
1833 + /* 3. Set vlan priority=1 to ring2 */
1834 + SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
1836 + /* 4. Set priority rule: ack > vlan > pid */
1837 + SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_VLAN_PID);
1839 + /* PDMA multi-rx enable */
1840 + pdma_gdm_rxid_config();
1844 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
1845 +const static PDMA_LRO_DVT_FUNC pdma_dvt_lro_func[] = {
1846 +#if defined(CONFIG_RAETH_HW_LRO)
1847 + [0] = pdma_lro_disable_dvt, /* PDMA_TEST_LRO_DISABLE */
1848 + [1] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_PORT */
1849 + [2] = pdma_lro_auto_aggre_dvt, /* PDMA_TEST_LRO_AUTO_LEARN */
1850 + [3] = pdma_lro_auto_ipv6_dvt, /* PDMA_TEST_LRO_AUTO_IPV6 */
1851 + [4] = pdma_lro_auto_myIP_dvt, /* PDMA_TEST_LRO_AUTO_MYIP */
1852 + [5] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_AGGREGATE */
1853 +#endif /* CONFIG_RAETH_HW_LRO */
1854 +#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1855 + [6] = pdma_non_lro_portid_dvt, /* PDMA_TEST_NON_LRO_PORT_ID */
1856 + [7] = pdma_non_lro_stag_dvt, /* PDMA_TEST_NON_LRO_STAG */
1857 + [8] = pdma_non_lro_vlan_dvt, /* PDMA_TEST_NON_LRO_VLAN */
1858 + [9] = pdma_non_lro_tcpack_dvt, /* PDMA_TEST_NON_LRO_TCP_ACK */
1859 + [10] = pdma_non_lro_pri1_dvt, /* PDMA_TEST_NON_LRO_PRI1 */
1860 + [11] = pdma_non_lro_pri2_dvt, /* PDMA_TEST_NON_LRO_PRI2 */
1861 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
1862 +#if defined(CONFIG_RAETH_HW_LRO)
1863 + [12] = pdma_lro_dly_int0_dvt, /* PDMA_TEST_LRO_DLY_INT0 */
1864 + [13] = pdma_lro_dly_int1_dvt, /* PDMA_TEST_LRO_DLY_INT1 */
1865 + [14] = pdma_lro_dly_int2_dvt, /* PDMA_TEST_LRO_DLY_INT2 */
1866 + [15] = pdma_lro_dly_int3_dvt, /* PDMA_TEST_LRO_DLY_INT3 */
1867 +#endif /* CONFIG_RAETH_HW_LRO */
1870 +/* PDMA LRO test functions end */
1872 +#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1873 +void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
1874 + int rx_dma_owner_idx0)
1876 + if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
1877 + if (rx_ring_no == 1) {
1878 + printk("------- rt2880_eth_recv (ring1) --------\n");
1879 + printk("rx_info1=0x%x\n",
1881 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1883 + printk("rx_info2=0x%x\n",
1885 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1887 + printk("rx_info3=0x%x\n",
1889 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1891 + printk("rx_info4=0x%x\n",
1893 + &ei_local->rx_ring1[rx_dma_owner_idx0].
1895 + printk("-------------------------------\n");
1896 + } else if (rx_ring_no == 2) {
1897 + printk("------- rt2880_eth_recv (ring2) --------\n");
1898 + printk("rx_info1=0x%x\n",
1900 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1902 + printk("rx_info2=0x%x\n",
1904 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1906 + printk("rx_info3=0x%x\n",
1908 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1910 + printk("rx_info4=0x%x\n",
1912 + &ei_local->rx_ring2[rx_dma_owner_idx0].
1914 + printk("-------------------------------\n");
1915 + } else if (rx_ring_no == 3) {
1916 + printk("------- rt2880_eth_recv (ring3) --------\n");
1917 + printk("rx_info1=0x%x\n",
1919 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1921 + printk("rx_info2=0x%x\n",
1923 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1925 + printk("rx_info3=0x%x\n",
1927 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1929 + printk("rx_info4=0x%x\n",
1931 + &ei_local->rx_ring3[rx_dma_owner_idx0].
1933 + printk("-------------------------------\n");
1937 + printk("------- rt2880_eth_recv (ring0) --------\n");
1938 + printk("rx_info1=0x%x\n",
1940 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1942 + printk("rx_info2=0x%x\n",
1944 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1946 + printk("rx_info3=0x%x\n",
1948 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1950 + printk("rx_info4=0x%x\n",
1952 + &ei_local->rx_ring0[rx_dma_owner_idx0].
1954 + printk("-------------------------------\n");
1958 + if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
1959 + (pdma_dvt_get_lro_test_config()==PDMA_TEST_LRO_FORCE_PORT)) {
1960 + if (rx_ring_no == 1) {
1961 + printk("------- rt2880_eth_recv (ring1) --------\n");
1962 + printk("rx_info1.PDP0=0x%x\n",
1963 + ei_local->rx_ring1[rx_dma_owner_idx0].
1965 + printk("rx_info2.DDONE_bit=0x%x\n",
1966 + ei_local->rx_ring1[rx_dma_owner_idx0].
1967 + rxd_info2.DDONE_bit);
1968 + printk("rx_info2.LS0=0x%x\n",
1969 + ei_local->rx_ring1[rx_dma_owner_idx0].
1971 + printk("rx_info2.PLEN0=0x%x\n",
1972 + ei_local->rx_ring1[rx_dma_owner_idx0].
1974 + printk("rx_info2.TAG=0x%x\n",
1975 + ei_local->rx_ring1[rx_dma_owner_idx0].
1977 +#if defined(CONFIG_ARCH_MT7623)
1978 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
1979 + ei_local->rx_ring1[rx_dma_owner_idx0].
1980 + rxd_info2.LRO_AGG_CNT);
1981 + printk("rx_info2.REV=0x%x\n",
1982 + ei_local->rx_ring1[rx_dma_owner_idx0].
1985 + printk("rx_info2.LS1=0x%x\n",
1986 + ei_local->rx_ring1[rx_dma_owner_idx0].
1988 +#endif /* CONFIG_RAETH_HW_LRO */
1989 + printk("rx_info2.PLEN1=0x%x\n",
1990 + ei_local->rx_ring1[rx_dma_owner_idx0].
1992 + printk("rx_info3.TPID=0x%x\n",
1993 + ei_local->rx_ring1[rx_dma_owner_idx0].
1995 + printk("rx_info3.VID=0x%x\n",
1996 + ei_local->rx_ring1[rx_dma_owner_idx0].
1998 + printk("rx_info4.IP6=0x%x\n",
1999 + ei_local->rx_ring1[rx_dma_owner_idx0].
2001 + printk("rx_info4.IP4=0x%x\n",
2002 + ei_local->rx_ring1[rx_dma_owner_idx0].
2004 + printk("rx_info4.IP4F=0x%x\n",
2005 + ei_local->rx_ring1[rx_dma_owner_idx0].
2007 + printk("rx_info4.TACK=0x%x\n",
2008 + ei_local->rx_ring1[rx_dma_owner_idx0].
2010 + printk("rx_info4.L4VLD=0x%x\n",
2011 + ei_local->rx_ring1[rx_dma_owner_idx0].
2013 + printk("rx_info4.L4F=0x%x\n",
2014 + ei_local->rx_ring1[rx_dma_owner_idx0].
2016 + printk("rx_info4.SPORT=0x%x\n",
2017 + ei_local->rx_ring1[rx_dma_owner_idx0].
2019 + printk("rx_info4.CRSN=0x%x\n",
2020 + ei_local->rx_ring1[rx_dma_owner_idx0].
2022 + printk("rx_info4.FOE_Entry=0x%x\n",
2023 + ei_local->rx_ring1[rx_dma_owner_idx0].
2024 + rxd_info4.FOE_Entry);
2025 + printk("-------------------------------\n");
2026 + } else if (rx_ring_no == 2) {
2027 + printk("------- rt2880_eth_recv (ring2) --------\n");
2028 + printk("rx_info1.PDP0=0x%x\n",
2029 + ei_local->rx_ring2[rx_dma_owner_idx0].
2031 + printk("rx_info2.DDONE_bit=0x%x\n",
2032 + ei_local->rx_ring2[rx_dma_owner_idx0].
2033 + rxd_info2.DDONE_bit);
2034 + printk("rx_info2.LS0=0x%x\n",
2035 + ei_local->rx_ring2[rx_dma_owner_idx0].
2037 + printk("rx_info2.PLEN0=0x%x\n",
2038 + ei_local->rx_ring2[rx_dma_owner_idx0].
2040 + printk("rx_info2.TAG=0x%x\n",
2041 + ei_local->rx_ring2[rx_dma_owner_idx0].
2043 +#if defined(CONFIG_ARCH_MT7623)
2044 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2045 + ei_local->rx_ring2[rx_dma_owner_idx0].
2046 + rxd_info2.LRO_AGG_CNT);
2047 + printk("rx_info2.REV=0x%x\n",
2048 + ei_local->rx_ring2[rx_dma_owner_idx0].
2051 + printk("rx_info2.LS1=0x%x\n",
2052 + ei_local->rx_ring2[rx_dma_owner_idx0].
2054 +#endif /* CONFIG_RAETH_HW_LRO */
2055 + printk("rx_info2.PLEN1=0x%x\n",
2056 + ei_local->rx_ring2[rx_dma_owner_idx0].
2058 + printk("rx_info3.TPID=0x%x\n",
2059 + ei_local->rx_ring2[rx_dma_owner_idx0].
2061 + printk("rx_info3.VID=0x%x\n",
2062 + ei_local->rx_ring2[rx_dma_owner_idx0].
2064 + printk("rx_info4.IP6=0x%x\n",
2065 + ei_local->rx_ring2[rx_dma_owner_idx0].
2067 + printk("rx_info4.IP4=0x%x\n",
2068 + ei_local->rx_ring2[rx_dma_owner_idx0].
2070 + printk("rx_info4.IP4F=0x%x\n",
2071 + ei_local->rx_ring2[rx_dma_owner_idx0].
2073 + printk("rx_info4.TACK=0x%x\n",
2074 + ei_local->rx_ring2[rx_dma_owner_idx0].
2076 + printk("rx_info4.L4VLD=0x%x\n",
2077 + ei_local->rx_ring2[rx_dma_owner_idx0].
2079 + printk("rx_info4.L4F=0x%x\n",
2080 + ei_local->rx_ring2[rx_dma_owner_idx0].
2082 + printk("rx_info4.SPORT=0x%x\n",
2083 + ei_local->rx_ring2[rx_dma_owner_idx0].
2085 + printk("rx_info4.CRSN=0x%x\n",
2086 + ei_local->rx_ring2[rx_dma_owner_idx0].
2088 + printk("rx_info4.FOE_Entry=0x%x\n",
2089 + ei_local->rx_ring2[rx_dma_owner_idx0].
2090 + rxd_info4.FOE_Entry);
2091 + printk("-------------------------------\n");
2092 + } else if (rx_ring_no == 3) {
2093 + printk("------- rt2880_eth_recv (ring3) --------\n");
2094 + printk("rx_info1.PDP0=0x%x\n",
2095 + ei_local->rx_ring3[rx_dma_owner_idx0].
2097 + printk("rx_info2.DDONE_bit=0x%x\n",
2098 + ei_local->rx_ring3[rx_dma_owner_idx0].
2099 + rxd_info2.DDONE_bit);
2100 + printk("rx_info2.LS0=0x%x\n",
2101 + ei_local->rx_ring3[rx_dma_owner_idx0].
2103 + printk("rx_info2.PLEN0=0x%x\n",
2104 + ei_local->rx_ring3[rx_dma_owner_idx0].
2106 + printk("rx_info2.TAG=0x%x\n",
2107 + ei_local->rx_ring3[rx_dma_owner_idx0].
2109 +#if defined(CONFIG_ARCH_MT7623)
2110 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2111 + ei_local->rx_ring3[rx_dma_owner_idx0].
2112 + rxd_info2.LRO_AGG_CNT);
2113 + printk("rx_info2.REV=0x%x\n",
2114 + ei_local->rx_ring3[rx_dma_owner_idx0].
2117 + printk("rx_info2.LS1=0x%x\n",
2118 + ei_local->rx_ring3[rx_dma_owner_idx0].
2120 +#endif /* CONFIG_RAETH_HW_LRO */
2121 + printk("rx_info2.PLEN1=0x%x\n",
2122 + ei_local->rx_ring3[rx_dma_owner_idx0].
2124 + printk("rx_info3.TPID=0x%x\n",
2125 + ei_local->rx_ring3[rx_dma_owner_idx0].
2127 + printk("rx_info3.VID=0x%x\n",
2128 + ei_local->rx_ring3[rx_dma_owner_idx0].
2130 + printk("rx_info4.IP6=0x%x\n",
2131 + ei_local->rx_ring3[rx_dma_owner_idx0].
2133 + printk("rx_info4.IP4=0x%x\n",
2134 + ei_local->rx_ring3[rx_dma_owner_idx0].
2136 + printk("rx_info4.IP4F=0x%x\n",
2137 + ei_local->rx_ring3[rx_dma_owner_idx0].
2139 + printk("rx_info4.TACK=0x%x\n",
2140 + ei_local->rx_ring3[rx_dma_owner_idx0].
2142 + printk("rx_info4.L4VLD=0x%x\n",
2143 + ei_local->rx_ring3[rx_dma_owner_idx0].
2145 + printk("rx_info4.L4F=0x%x\n",
2146 + ei_local->rx_ring3[rx_dma_owner_idx0].
2148 + printk("rx_info4.SPORT=0x%x\n",
2149 + ei_local->rx_ring3[rx_dma_owner_idx0].
2151 + printk("rx_info4.CRSN=0x%x\n",
2152 + ei_local->rx_ring3[rx_dma_owner_idx0].
2154 + printk("rx_info4.FOE_Entry=0x%x\n",
2155 + ei_local->rx_ring3[rx_dma_owner_idx0].
2156 + rxd_info4.FOE_Entry);
2157 + printk("-------------------------------\n");
2161 + printk("------- rt2880_eth_recv (ring0) --------\n");
2162 + printk("rx_info1.PDP0=0x%x\n",
2163 + ei_local->rx_ring0[rx_dma_owner_idx0].
2165 + printk("rx_info2.DDONE_bit=0x%x\n",
2166 + ei_local->rx_ring0[rx_dma_owner_idx0].
2167 + rxd_info2.DDONE_bit);
2168 + printk("rx_info2.LS0=0x%x\n",
2169 + ei_local->rx_ring0[rx_dma_owner_idx0].
2171 + printk("rx_info2.PLEN0=0x%x\n",
2172 + ei_local->rx_ring0[rx_dma_owner_idx0].
2174 + printk("rx_info2.TAG=0x%x\n",
2175 + ei_local->rx_ring0[rx_dma_owner_idx0].
2177 + printk("rx_info2.LS1=0x%x\n",
2178 + ei_local->rx_ring0[rx_dma_owner_idx0].
2180 + printk("rx_info2.PLEN1=0x%x\n",
2181 + ei_local->rx_ring0[rx_dma_owner_idx0].
2183 + printk("rx_info3.TPID=0x%x\n",
2184 + ei_local->rx_ring0[rx_dma_owner_idx0].
2186 + printk("rx_info3.VID=0x%x\n",
2187 + ei_local->rx_ring0[rx_dma_owner_idx0].
2189 + printk("rx_info4.IP6=0x%x\n",
2190 + ei_local->rx_ring0[rx_dma_owner_idx0].
2192 + printk("rx_info4.IP4=0x%x\n",
2193 + ei_local->rx_ring0[rx_dma_owner_idx0].
2195 + printk("rx_info4.IP4F=0x%x\n",
2196 + ei_local->rx_ring0[rx_dma_owner_idx0].
2198 + printk("rx_info4.TACK=0x%x\n",
2199 + ei_local->rx_ring0[rx_dma_owner_idx0].
2201 + printk("rx_info4.L4VLD=0x%x\n",
2202 + ei_local->rx_ring0[rx_dma_owner_idx0].
2204 + printk("rx_info4.L4F=0x%x\n",
2205 + ei_local->rx_ring0[rx_dma_owner_idx0].
2207 + printk("rx_info4.SPORT=0x%x\n",
2208 + ei_local->rx_ring0[rx_dma_owner_idx0].
2210 + printk("rx_info4.CRSN=0x%x\n",
2211 + ei_local->rx_ring0[rx_dma_owner_idx0].
2213 + printk("rx_info4.FOE_Entry=0x%x\n",
2214 + ei_local->rx_ring0[rx_dma_owner_idx0].
2215 + rxd_info4.FOE_Entry);
2216 + printk("-------------------------------\n");
2220 + if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_FORCE_AGGREGATE) {
2221 + if (rx_ring_no == 1) {
2222 + printk("PASS!!! => RING1: rxd_info1.PDP0=0x%x\n",
2223 + ei_local->rx_ring1[rx_dma_owner_idx0].
2225 + skb_dump(ei_local->netrx1_skbuf[rx_dma_owner_idx0]);
2226 + pdma_dvt_reset_config();
2232 +int pdma_dvt_show_ctrl(int par1, int par2)
2235 + g_pdma_dvt_show_config = 0;
2237 + g_pdma_dvt_show_config |= (1 << par2);
2242 +int pdma_dvt_test_rx_ctrl(int par1, int par2)
2245 + g_pdma_dvt_rx_test_config = 0;
2247 + g_pdma_dvt_rx_test_config |= (1 << par2);
2252 +int pdma_dvt_test_tx_ctrl(int par1, int par2)
2255 + g_pdma_dvt_tx_test_config = 0;
2257 + g_pdma_dvt_tx_test_config |= (1 << par2);
2262 +int pdma_dvt_test_debug_ctrl(int par1, int par2)
2265 + g_pdma_dvt_debug_test_config = 0;
2267 + g_pdma_dvt_debug_test_config |= (1 << par2);
2272 +int pdma_dvt_test_lro_ctrl(int par1, int par2)
2274 + g_pdma_dvt_lro_test_config = par2;
2276 +#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
2277 + if (pdma_dvt_lro_func[par2])
2278 + (*pdma_dvt_lro_func[par2]) ();
2279 +#endif /* #if defined (CONFIG_RAETH_HW_LRO) */
2284 +unsigned int pdma_dvt_get_show_config()
2286 + return g_pdma_dvt_show_config;
2289 +unsigned int pdma_dvt_get_rx_test_config()
2291 + return g_pdma_dvt_rx_test_config;
2294 +unsigned int pdma_dvt_get_tx_test_config()
2296 + return g_pdma_dvt_tx_test_config;
2299 +unsigned int pdma_dvt_get_debug_test_config()
2301 + return g_pdma_dvt_debug_test_config;
2304 +unsigned int pdma_dvt_get_lro_test_config()
2306 + return g_pdma_dvt_lro_test_config;
2309 +void pdma_dvt_reset_config()
2311 + g_pdma_dvt_show_config = 0;
2312 + g_pdma_dvt_rx_test_config = 0;
2313 + g_pdma_dvt_tx_test_config = 0;
2314 + g_pdma_dvt_lro_test_config = 0;
2317 +void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0)
2320 + unsigned int udf = 0;
2323 + if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
2324 + printk("------- rt2880_eth_recv --------\n");
2325 + printk("rx_info1=0x%x\n",
2326 + *(unsigned int *)&ei_local->
2327 + rx_ring0[rx_dma_owner_idx0].rxd_info1);
2328 + printk("rx_info2=0x%x\n",
2329 + *(unsigned int *)&ei_local->
2330 + rx_ring0[rx_dma_owner_idx0].rxd_info2);
2331 + printk("rx_info3=0x%x\n",
2332 + *(unsigned int *)&ei_local->
2333 + rx_ring0[rx_dma_owner_idx0].rxd_info3);
2334 + printk("rx_info4=0x%x\n",
2335 + *(unsigned int *)&ei_local->
2336 + rx_ring0[rx_dma_owner_idx0].rxd_info4);
2337 + printk("-------------------------------\n");
2339 + if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
2340 + pdma_dvt_get_rx_test_config()) {
2342 + udf = ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6 << 5 |
2343 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4 << 4 |
2344 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F << 3 |
2345 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK << 2 |
2346 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD << 1 |
2347 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F;
2349 + printk("------- rt2880_eth_recv --------\n");
2350 + printk("rx_info1.PDP0=0x%x\n",
2351 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info1.PDP0);
2352 + printk("rx_info2.DDONE_bit=0x%x\n",
2353 + ei_local->rx_ring0[rx_dma_owner_idx0].
2354 + rxd_info2.DDONE_bit);
2355 + printk("rx_info2.LS0=0x%x\n",
2356 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS0);
2357 + printk("rx_info2.PLEN0=0x%x\n",
2358 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN0);
2359 + printk("rx_info2.TAG=0x%x\n",
2360 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG);
2361 +#if defined(CONFIG_ARCH_MT7623)
2362 + printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2363 + ei_local->rx_ring0[rx_dma_owner_idx0].
2364 + rxd_info2.LRO_AGG_CNT);
2366 + printk("rx_info2.LS1=0x%x\n",
2367 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS1);
2368 +#endif /* CONFIG_ARCH_MT7623 */
2369 + printk("rx_info2.PLEN1=0x%x\n",
2370 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN1);
2371 + printk("rx_info3.TPID=0x%x\n",
2372 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.TPID);
2373 + printk("rx_info3.VID=0x%x\n",
2374 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID);
2376 + printk("rx_info4.UDF=0x%x\n", udf);
2378 + printk("rx_info4.IP6=0x%x\n",
2379 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6);
2380 + printk("rx_info4.IP4=0x%x\n",
2381 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4);
2382 + printk("rx_info4.IP4F=0x%x\n",
2383 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F);
2384 + printk("rx_info4.TACK=0x%x\n",
2385 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK);
2386 + printk("rx_info4.L4VLD=0x%x\n",
2387 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD);
2388 + printk("rx_info4.L4F=0x%x\n",
2389 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F);
2390 + printk("rx_info4.SPORT=0x%x\n",
2391 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP);
2392 + printk("rx_info4.CRSN=0x%x\n",
2393 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.CRSN);
2394 + printk("rx_info4.FOE_Entry=0x%x\n",
2395 + ei_local->rx_ring0[rx_dma_owner_idx0].
2396 + rxd_info4.FOE_Entry);
2397 + printk("-------------------------------\n");
2399 + if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV6)) {
2400 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6) {
2401 + printk("PASS!!! => rx_info4.IP6=0x%x\n",
2402 + ei_local->rx_ring0[rx_dma_owner_idx0].
2404 + pdma_dvt_reset_config();
2406 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4)) {
2407 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4) {
2408 + printk("PASS!!! => rx_info4.IP4=0x%x\n",
2409 + ei_local->rx_ring0[rx_dma_owner_idx0].
2411 + pdma_dvt_reset_config();
2413 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4F)) {
2414 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F) {
2415 + printk("PASS!!! => rx_info4.IP4F=0x%x\n",
2416 + ei_local->rx_ring0[rx_dma_owner_idx0].
2418 + pdma_dvt_reset_config();
2420 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4VLD)) {
2421 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD) {
2422 + printk("PASS!!! => rx_info4.L4VLD=0x%x\n",
2423 + ei_local->rx_ring0[rx_dma_owner_idx0].
2425 + pdma_dvt_reset_config();
2427 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4F)) {
2428 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F) {
2429 + printk("PASS!!! => rx_info4.L4F=0x%x\n",
2430 + ei_local->rx_ring0[rx_dma_owner_idx0].
2432 + pdma_dvt_reset_config();
2434 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_SPORT)) {
2435 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP == 1) {
2436 + g_pdma_dev_lanport++;
2437 + } else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP ==
2439 + g_pdma_dev_wanport++;
2441 + if (g_pdma_dev_lanport && g_pdma_dev_wanport) {
2443 + ("PASS!!! => g_pdma_dev_lanport=0x%x, g_pdma_dev_wanport=0x%x",
2444 + g_pdma_dev_lanport, g_pdma_dev_wanport);
2446 + g_pdma_dev_lanport = 0;
2447 + g_pdma_dev_wanport = 0;
2448 + pdma_dvt_reset_config();
2450 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_OFF)) {
2451 + if (!ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID) {
2452 + printk("PASS!!! => rxd_info3.VID=0x%x\n",
2453 + ei_local->rx_ring0[rx_dma_owner_idx0].
2455 + pdma_dvt_reset_config();
2457 + } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_ON)) {
2458 + printk("RX data: (PDP0=%x)\n",
2459 + (unsigned int)ei_local->
2460 + netrx0_skbuf[rx_dma_owner_idx0]->data);
2462 + skb_dump(ei_local->netrx0_skbuf[rx_dma_owner_idx0]);
2464 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID &&
2465 + ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG) {
2466 + printk("PASS!!! => rxd_info2.TAG=0x%x\n",
2467 + ei_local->rx_ring0[rx_dma_owner_idx0].
2469 + printk("PASS!!! => rxd_info3.VID=0x%x\n",
2470 + ei_local->rx_ring0[rx_dma_owner_idx0].
2472 + pdma_dvt_reset_config();
2477 +void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
2478 + unsigned long tx_cpu_owner_idx0)
2480 + if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
2481 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xE007; /* VLAN_TAG = 0x1E007 */
2482 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
2483 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0x0000; /* VLAN_TAG = 0x10000 */
2484 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
2485 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xFFFF; /* VLAN_TAG = 0x1FFFF */
2489 +void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
2490 + unsigned long tx_cpu_owner_idx0)
2492 + if (PDMA_TEST_RX_UDF == pdma_dvt_get_rx_test_config()) {
2493 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
2494 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF = 0x2F;
2496 + if (pdma_dvt_get_show_config() & PDMA_SHOW_TX_DESC) {
2497 + printk("------- rt2880_eth_send --------\n");
2498 + printk("tx_info1=%x\n",
2499 + *(unsigned int *)&ei_local->
2500 + tx_ring0[tx_cpu_owner_idx0].txd_info1);
2501 + printk("tx_info2=%x\n",
2502 + *(unsigned int *)&ei_local->
2503 + tx_ring0[tx_cpu_owner_idx0].txd_info2);
2504 + printk("tx_info3=%x\n",
2505 + *(unsigned int *)&ei_local->
2506 + tx_ring0[tx_cpu_owner_idx0].txd_info3);
2507 + printk("tx_info4=%x\n",
2508 + *(unsigned int *)&ei_local->
2509 + tx_ring0[tx_cpu_owner_idx0].txd_info4);
2510 + printk("--------------------------------\n");
2512 + if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_TX_DESC) ||
2513 + pdma_dvt_get_tx_test_config()) {
2514 + printk("------- rt2880_eth_send --------\n");
2515 + printk("tx_info1.SDP0=%x\n",
2516 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0);
2517 + printk("tx_info2.DDONE_bit=%x\n",
2518 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2519 + txd_info2.DDONE_bit);
2520 + printk("tx_info2.LS0_bit=%x\n",
2521 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit);
2522 + printk("tx_info2.SDL0=%x\n",
2523 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0);
2524 + printk("tx_info2.BURST_bit=%x\n",
2525 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2526 + txd_info2.BURST_bit);
2527 + printk("tx_info2.LS1_bit=%x\n",
2528 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit);
2529 + printk("tx_info2.SDL1=%x\n",
2530 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1);
2531 + printk("tx_info3.SDP1=%x\n",
2532 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1);
2533 + printk("tx_info4.TUI_CO=%x\n",
2534 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO);
2535 + printk("tx_info4.TSO=%x\n",
2536 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO);
2537 + printk("tx_info4.FPORT=%x\n",
2538 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT);
2539 + printk("tx_info4.UDF=%x\n",
2540 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF);
2541 + printk("tx_info4.RESV=%x\n",
2542 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.RESV);
2543 + printk("tx_info4.VLAN_TAG=%x\n",
2544 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2545 + txd_info4.VLAN_TAG);
2546 + printk("--------------------------------\n");
2548 + if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_LAN_SPORT)) {
2549 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 1) {
2550 + printk("PASS!!! => txd_info4.FPORT=0x%x\n",
2551 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2553 + pdma_dvt_reset_config();
2555 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_WAN_SPORT)) {
2556 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 2) {
2557 + printk("PASS!!! => txd_info4.FPORT=0x%x\n",
2558 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2560 + pdma_dvt_reset_config();
2562 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
2563 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2564 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2565 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2566 + txd_info4.VLAN_TAG);
2567 + /* pdma_dvt_reset_config(); */
2569 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_OFF)) {
2570 + if (!ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2571 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2572 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2573 + txd_info4.VLAN_TAG);
2574 + pdma_dvt_reset_config();
2576 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
2577 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2578 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2579 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2580 + txd_info4.VLAN_TAG);
2581 + /* pdma_dvt_reset_config(); */
2583 + } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
2584 + if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2585 + printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2586 + ei_local->tx_ring0[tx_cpu_owner_idx0].
2587 + txd_info4.VLAN_TAG);
2588 + /* pdma_dvt_reset_config(); */
2593 +void raeth_pdma_lro_dly_int_dvt(void)
2595 + unsigned int reg_int_val;
2597 + reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
2599 + if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT0) {
2600 + if ((reg_int_val & RX_DLY_INT)) {
2601 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2602 + pdma_dvt_reset_config();
2604 + } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT1) {
2605 + if ((reg_int_val & RING1_RX_DLY_INT)) {
2606 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2607 + pdma_dvt_reset_config();
2609 + } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT2) {
2610 + if ((reg_int_val & RING2_RX_DLY_INT)) {
2611 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2612 + pdma_dvt_reset_config();
2614 + } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT3) {
2615 + if ((reg_int_val & RING3_RX_DLY_INT)) {
2616 + printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2617 + pdma_dvt_reset_config();
2622 +void pdma_dvt_set_dma_mode(void)
2624 +#if defined(CONFIG_RAETH_PDMA_LEGACY_MODE)
2625 + unsigned int regVal;
2626 + regVal = sysRegRead(ADMA_LRO_CTRL_DW3);
2627 + regVal &= ~(BIT(15));
2628 + sysRegWrite(ADMA_LRO_CTRL_DW3, regVal);
2629 +#endif /* CONFIG_RAETH_PDMA_LEGACY_MODE */
2633 +++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
2635 +/* Show controls */
2636 +#define PDMA_SHOW_RX_DESC (1 << 1)
2637 +#define PDMA_SHOW_TX_DESC (1 << 2)
2638 +#define PDMA_SHOW_DETAIL_RX_DESC (1 << 3)
2639 +#define PDMA_SHOW_DETAIL_TX_DESC (1 << 4)
2641 +/* Rx test controls */
2642 +#define PDMA_TEST_RX_UDF (1 << 1)
2643 +#define PDMA_TEST_RX_IPV6 (1 << 2)
2644 +#define PDMA_TEST_RX_IPV4 (1 << 3)
2645 +#define PDMA_TEST_RX_IPV4F (1 << 4)
2646 +#define PDMA_TEST_RX_L4VLD (1 << 5)
2647 +#define PDMA_TEST_RX_L4F (1 << 6)
2648 +#define PDMA_TEST_RX_SPORT (1 << 7)
2649 +#define PDMA_TEST_RX_VID_ON (1 << 8)
2650 +#define PDMA_TEST_RX_VID_OFF (1 << 9)
2652 +/* Tx test controls */
2653 +#define PDMA_TEST_TX_LAN_SPORT (1 << 1)
2654 +#define PDMA_TEST_TX_WAN_SPORT (1 << 2)
2655 +#define PDMA_TEST_TX_VLAN_ON (1 << 3)
2656 +#define PDMA_TEST_TX_VLAN_OFF (1 << 4)
2657 +#define PDMA_TEST_TX_VLAN_ZERO (1 << 5)
2658 +#define PDMA_TEST_TX_VLAN_MAX (1 << 6)
2659 +#define PDMA_TEST_TX_PDMA_LPK (1 << 31)
2661 +/* Debug controls */
2662 +#define PDMA_TEST_TSO_DEBUG (1 << 1)
2664 +/* LRO test controls */
2665 +typedef int (*PDMA_LRO_DVT_FUNC) (void);
2667 +#define PDMA_TEST_LRO_DISABLE (0)
2668 +#define PDMA_TEST_LRO_FORCE_PORT (1)
2669 +#define PDMA_TEST_LRO_AUTO_LEARN (2)
2670 +#define PDMA_TEST_LRO_AUTO_IPV6 (3)
2671 +#define PDMA_TEST_LRO_AUTO_MYIP (4)
2672 +#define PDMA_TEST_LRO_FORCE_AGGREGATE (5)
2673 +#define PDMA_TEST_NON_LRO_PORT_ID (6)
2674 +#define PDMA_TEST_NON_LRO_STAG (7)
2675 +#define PDMA_TEST_NON_LRO_VLAN (8)
2676 +#define PDMA_TEST_NON_LRO_TCP_ACK (9)
2677 +#define PDMA_TEST_NON_LRO_PRI1 (10)
2678 +#define PDMA_TEST_NON_LRO_PRI2 (11)
2679 +#define PDMA_TEST_LRO_DLY_INT0 (12)
2680 +#define PDMA_TEST_LRO_DLY_INT1 (13)
2681 +#define PDMA_TEST_LRO_DLY_INT2 (14)
2682 +#define PDMA_TEST_LRO_DLY_INT3 (15)
2684 +void skb_dump(struct sk_buff *sk);
2686 +int pdma_dvt_show_ctrl(int par1, int par2);
2687 +int pdma_dvt_test_rx_ctrl(int par1, int par2);
2688 +int pdma_dvt_test_tx_ctrl(int par1, int par2);
2689 +int pdma_dvt_test_debug_ctrl(int par1, int par2);
2690 +int pdma_dvt_test_lro_ctrl(int par1, int par2);
2692 +unsigned int pdma_dvt_get_show_config(void);
2693 +unsigned int pdma_dvt_get_rx_test_config(void);
2694 +unsigned int pdma_dvt_get_tx_test_config(void);
2695 +unsigned int pdma_dvt_get_debug_test_config(void);
2696 +unsigned int pdma_dvt_get_lro_test_config(void);
2697 +void pdma_dvt_reset_config(void);
2699 +void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0);
2700 +void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
2701 + unsigned long tx_cpu_owner_idx0);
2702 +void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
2703 + unsigned long tx_cpu_owner_idx0);
2705 +void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
2706 + int rx_dma_owner_idx0);
2707 +void raeth_pdma_lro_dly_int_dvt(void);
2708 +void pdma_dvt_set_dma_mode(void);
2711 +++ b/drivers/net/ethernet/raeth/ethtool_readme.txt
2714 +Ethtool readme for selecting different PHY address.
2716 +Before doing any ethtool command you should make sure the current PHY
2717 +address is expected. The default PHY address is 1(port 1).
2719 +You can change the current PHY address to X (0~4) with the following command:
2720 +# echo X > /proc/rt2880/gmac
2722 +The ethtool command also shows the current PHY address, as follows.
2726 + Supported ports: [ TP MII ]
2727 + Supported link modes: 10baseT/Half 10baseT/Full
2728 + 100baseT/Half 100baseT/Full
2729 + Supports auto-negotiation: Yes
2730 + Advertised link modes: 10baseT/Half 10baseT/Full
2731 + 100baseT/Half 100baseT/Full
2732 + Advertised auto-negotiation: No
2737 + Transceiver: internal
2738 + Auto-negotiation: off
2739 + Current message level: 0x00000000 (0)
2743 +The "PHYAD" field shows the current PHY address.
2749 +# echo 1 > /proc/rt2880/gmac # change phy address to 1
2753 +# echo 0 > /proc/rt2880/gmac # change phy address to 0
2758 +++ b/drivers/net/ethernet/raeth/mcast.c
2760 +#include <linux/config.h>
2761 +#include <linux/version.h>
2762 +#include <linux/module.h>
2763 +#include <linux/skbuff.h>
2764 +#include <linux/kernel.h>
2765 +#include <linux/init.h>
2766 +#include <linux/types.h>
2767 +#include <linux/netdevice.h>
2768 +#include <linux/if_vlan.h>
2771 +#define MAX_MCAST_ENTRY 16
2772 +#define AGEING_TIME 5 //Unit: Sec
2773 +#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2], \
2774 + ((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
2776 +//#define MCAST_DEBUG
2778 +#define MCAST_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
2780 +#define MCAST_PRINT(fmt, args...) { }
2784 + uint8_t src_mac[6];
2785 + uint8_t dst_mac[6];
2788 + uint32_t use_count;
2789 + unsigned long ageout;
2792 +mcast_entry mcast_tbl[MAX_MCAST_ENTRY];
2793 +atomic_t mcast_entry_num=ATOMIC_INIT(0);
2794 +DECLARE_MUTEX(mtbl_lock);
2796 +uint32_t inline is_multicast_pkt(uint8_t *mac)
2798 + if(mac[0]==0x01 && mac[1]==0x00 && mac[2]==0x5E) {
2805 +int32_t inline mcast_entry_get(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2809 + for(i=0;i<MAX_MCAST_ENTRY;i++) {
2810 + if( (mcast_tbl[i].vlan_id == vlan_id) &&
2811 + memcmp(mcast_tbl[i].src_mac,src_mac, 6)==0 &&
2812 + memcmp(mcast_tbl[i].dst_mac, dst_mac, 6)==0 &&
2813 + mcast_tbl[i].valid == 1) {
2820 +int inline __add_mcast_entry(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2824 + // use empty or ageout entry
2825 + for(i=0;i<MAX_MCAST_ENTRY;i++) {
2826 + if( mcast_tbl[i].valid==0 ||
2827 + time_after(jiffies, mcast_tbl[i].ageout)) {
2829 + if(mcast_tbl[i].valid==0) {
2830 + atomic_inc(&mcast_entry_num);
2832 + mcast_tbl[i].vlan_id = vlan_id;
2833 + memcpy(mcast_tbl[i].src_mac, src_mac, 6);
2834 + memcpy(mcast_tbl[i].dst_mac, dst_mac, 6);
2835 + mcast_tbl[i].valid=1;
2836 + mcast_tbl[i].use_count=1;
2837 + mcast_tbl[i].ageout=jiffies + AGEING_TIME * HZ;
2843 + MCAST_PRINT("RAETH: Multicast Table is FULL!!\n");
2847 +int inline mcast_entry_ins(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2849 + int entry_num=0, ret=0;
2852 + if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
2853 + mcast_tbl[entry_num].use_count++;
2854 + mcast_tbl[entry_num].ageout=jiffies + AGEING_TIME * HZ;
2855 + MCAST_PRINT("%s: Update %0X:%0X:%0X:%0X:%0X:%0X's use_count=%d\n" \
2856 + ,__FUNCTION__, MAC_ARG(dst_mac), mcast_tbl[entry_num].use_count);
2858 + }else { //if entry not found, create new entry.
2859 + MCAST_PRINT("%s: Create new entry %0X:%0X:%0X:%0X:%0X:%0X\n", \
2860 + __FUNCTION__, MAC_ARG(dst_mac));
2861 + ret = __add_mcast_entry(vlan_id, src_mac,dst_mac);
2873 + * 1: entry not found
2875 +int inline mcast_entry_del(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2880 + if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
2881 + if((--mcast_tbl[entry_num].use_count)==0) {
2882 + MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X (entry_num=%d)\n", \
2883 + __FUNCTION__, MAC_ARG(dst_mac), entry_num);
2884 + mcast_tbl[entry_num].valid=0;
2885 + atomic_dec(&mcast_entry_num);
2890 + /* this multicast packet was not sent by myself, just ignore it */
2901 +int32_t mcast_rx(struct sk_buff * skb)
2903 + struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data-ETH_HLEN);
2905 + /* if we do not send multicast packet before,
2906 + * we don't need to check re-inject multicast packet.
2908 + if (atomic_read(&mcast_entry_num)==0) {
2913 + if(is_multicast_pkt(eth->h_dest)) {
2914 + MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__, \
2915 + MAC_ARG(eth->h_dest));
2917 + if(ntohs(eth->h_vlan_proto)==0x8100) {
2918 + return mcast_entry_del(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
2920 + return mcast_entry_del(0, eth->h_source, eth->h_dest);
2928 +int32_t mcast_tx(struct sk_buff *skb)
2930 + struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data);
2933 + if(is_multicast_pkt(eth->h_dest)) {
2934 + MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__,\
2935 + MAC_ARG(eth->h_dest));
2937 + if(ntohs(eth->h_vlan_proto)==0x8100) {
2938 + mcast_entry_ins(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
2940 + mcast_entry_ins(0, eth->h_source, eth->h_dest);
2948 +++ b/drivers/net/ethernet/raeth/mii_mgr.c
2950 +#include <linux/module.h>
2951 +#include <linux/version.h>
2952 +#include <linux/netdevice.h>
2954 +#include <linux/kernel.h>
2955 +#include <linux/sched.h>
2956 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
2957 +#include <asm/rt2880/rt_mmap.h>
2960 +#include "ra2882ethreg.h"
2961 +#include "raether.h"
2964 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
2965 +#define PHY_CONTROL_0 0xC0
2966 +#define PHY_CONTROL_1 0xC4
2967 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2968 +#define MDIO_PHY_CONTROL_1 (RALINK_ETH_SW_BASE + PHY_CONTROL_1)
2970 +#define GPIO_MDIO_BIT (1<<7)
2971 +#define GPIO_PURPOSE_SELECT 0x60
2972 +#define GPIO_PRUPOSE (RALINK_SYSCTL_BASE + GPIO_PURPOSE_SELECT)
2974 +#elif defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
2976 +#define PHY_CONTROL_0 0x7004
2977 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2978 +#define enable_mdio(x)
2980 +#elif defined (CONFIG_RALINK_MT7620)
2982 +#define PHY_CONTROL_0 0x7004
2983 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2984 +#define enable_mdio(x)
2986 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
2988 +#define PHY_CONTROL_0 0x0004
2989 +#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2990 +#define enable_mdio(x)
2993 +#define PHY_CONTROL_0 0x00
2994 +#define PHY_CONTROL_1 0x04
2995 +#define MDIO_PHY_CONTROL_0 (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_0)
2996 +#define MDIO_PHY_CONTROL_1 (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_1)
2997 +#define enable_mdio(x)
3000 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3001 +void enable_mdio(int enable)
3003 +#if !defined (CONFIG_P5_MAC_TO_PHY_MODE) && !defined(CONFIG_GE1_RGMII_AN) && !defined(CONFIG_GE2_RGMII_AN) && \
3004 + !defined (CONFIG_GE1_MII_AN) && !defined (CONFIG_GE2_MII_AN) && !defined (CONFIG_RALINK_MT7628)
3005 + u32 data = sysRegRead(GPIO_PRUPOSE);
3007 + data &= ~GPIO_MDIO_BIT;
3009 + data |= GPIO_MDIO_BIT;
3010 + sysRegWrite(GPIO_PRUPOSE, data);
3015 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
3017 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3019 + u32 volatile status = 0;
3021 + unsigned long volatile t_start = jiffies;
3022 + u32 volatile data = 0;
3024 + /* We enable the MDIO GPIO purpose register, and disable it on exit. */
3027 + // make sure previous read operation is complete
3029 + // 0 : Read/write operation complete
3030 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3034 + else if (time_after(jiffies, t_start + 5*HZ)) {
3036 + printk("\n MDIO Read operation is ongoing !!\n");
3041 + data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
3042 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3044 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3045 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3048 + // make sure read operation is complete
3049 + t_start = jiffies;
3051 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3052 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3053 + *read_data = (u32)(status & 0x0000FFFF);
3058 + else if (time_after(jiffies, t_start+5*HZ)) {
3060 + printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3066 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3068 + unsigned long volatile t_start=jiffies;
3069 + u32 volatile data;
3073 + // make sure previous write operation is complete
3075 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3079 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3081 + printk("\n MDIO Write operation ongoing\n");
3086 + data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
3087 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3089 + sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3090 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3092 + t_start = jiffies;
3094 + // make sure write operation is complete
3096 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3101 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3103 + printk("\n MDIO Write operation Time Out\n");
3108 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
3110 +u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3112 + u32 volatile status = 0;
3114 + unsigned long volatile t_start = jiffies;
3115 + u32 volatile data = 0;
3117 + /* We enable the MDIO GPIO purpose register, and disable it on exit. */
3120 + // make sure previous read operation is complete
3122 + // 0 : Read/write operation complete
3123 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3127 + else if (time_after(jiffies, t_start + 5*HZ)) {
3129 + printk("\n MDIO Read operation is ongoing !!\n");
3134 + data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
3135 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3137 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3138 + //printk("\n Set Command [0x%08X] = [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0, data);
3141 + // make sure read operation is complete
3142 + t_start = jiffies;
3144 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3145 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3146 + *read_data = (u32)(status & 0x0000FFFF);
3151 + else if (time_after(jiffies, t_start+5*HZ)) {
3153 + printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3159 +u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3161 + unsigned long volatile t_start=jiffies;
3162 + u32 volatile data;
3166 + // make sure previous write operation is complete
3168 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3172 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3174 + printk("\n MDIO Write operation ongoing\n");
3179 + data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
3180 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3182 + sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3183 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3185 + t_start = jiffies;
3187 + // make sure write operation is complete
3189 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3194 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3196 + printk("\n MDIO Write operation Time Out\n");
3202 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3204 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
3207 + u32 an_status = 0;
3211 + an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
3213 + *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
3215 + //phase1: write page address phase
3216 + if(__mii_mgr_write(phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) {
3217 + //phase2: write address & read low word phase
3218 + if(__mii_mgr_read(phy_addr, (phy_register >> 2) & 0xF, &low_word)) {
3219 + //phase3: write address & read high word phase
3220 + if(__mii_mgr_read(phy_addr, (0x1 << 4), &high_word)) {
3221 + *read_data = (high_word << 16) | (low_word & 0xFFFF);
3223 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3230 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3235 + if(__mii_mgr_read(phy_addr, phy_register, read_data)) {
3243 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3245 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
3246 + u32 an_status = 0;
3248 + if(phy_addr == 31)
3250 + an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
3252 + *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
3254 + //phase1: write page address phase
3255 + if(__mii_mgr_write(phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) {
3256 + //phase2: write address & read low word phase
3257 + if(__mii_mgr_write(phy_addr, ((phy_register >> 2) & 0xF), write_data & 0xFFFF)) {
3258 + //phase3: write address & read high word phase
3259 + if(__mii_mgr_write(phy_addr, (0x1 << 4), write_data >> 16)) {
3261 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3268 + *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3273 + if(__mii_mgr_write(phy_addr, phy_register, write_data)) {
3281 +u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr)
3284 + unsigned long volatile t_start = jiffies;
3285 + u32 volatile data = 0;
3290 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3294 + else if (time_after(jiffies, t_start + 5*HZ)) {
3296 + printk("\n MDIO Read operation is ongoing !!\n");
3300 + data = (dev_addr << 25) | (port_num << 20) | (0x00 << 18) | (0x00 << 16) | reg_addr;
3301 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3303 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3305 + t_start = jiffies;
3307 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3312 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3314 + printk("\n MDIO Write operation Time Out\n");
3322 +u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data)
3324 + u32 volatile status = 0;
3326 + unsigned long volatile t_start = jiffies;
3327 + u32 volatile data = 0;
3329 + // set address first
3330 + mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
3336 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3340 + else if (time_after(jiffies, t_start + 5*HZ)) {
3342 + printk("\n MDIO Read operation is ongoing !!\n");
3346 + data = (dev_addr << 25) | (port_num << 20) | (0x03 << 18) | (0x00 << 16) | reg_addr;
3347 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3349 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3350 + t_start = jiffies;
3352 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3353 + *read_data = (sysRegRead(MDIO_PHY_CONTROL_0) & 0x0000FFFF);
3357 + else if (time_after(jiffies, t_start+5*HZ)) {
3359 + printk("\n Set Operation: MDIO Read operation is ongoing and Time Out!!\n");
3362 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3367 +u32 mii_mgr_write_cl45 (u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
3370 + unsigned long volatile t_start = jiffies;
3371 + u32 volatile data = 0;
3373 + // set address first
3374 + mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
3379 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3383 + else if (time_after(jiffies, t_start + 5*HZ)) {
3385 + printk("\n MDIO Read operation is ongoing !!\n");
3390 + data = (dev_addr << 25) | (port_num << 20) | (0x01 << 18) | (0x00 << 16) | write_data;
3391 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3393 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3395 + t_start = jiffies;
3398 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3403 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3405 + printk("\n MDIO Write operation Time Out\n");
3412 +#else // not rt6855
3414 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3416 + u32 volatile status = 0;
3418 + unsigned long volatile t_start = jiffies;
3419 +#if !defined (CONFIG_RALINK_RT3052) && !defined (CONFIG_RALINK_RT3352) && !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
3420 + u32 volatile data = 0;
3423 + /* We enable mdio gpio purpose register, and disable it when exit. */
3426 + // make sure previous read operation is complete
3428 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3429 + // rd_rdy: read operation is complete
3430 + if(!( sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)))
3432 + // 0 : Read/write operation complete
3433 + if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3438 + else if (time_after(jiffies, t_start + 5*HZ)) {
3440 + printk("\n MDIO Read operation is ongoing !!\n");
3445 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3446 + sysRegWrite(MDIO_PHY_CONTROL_0 , (1<<14) | (phy_register << 8) | (phy_addr));
3448 + data = (phy_addr << 24) | (phy_register << 16);
3449 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3451 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3453 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3456 + // make sure read operation is complete
3457 + t_start = jiffies;
3459 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3460 + if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)) {
3461 + status = sysRegRead(MDIO_PHY_CONTROL_1);
3462 + *read_data = (u32)(status >>16);
3468 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3469 + status = sysRegRead(MDIO_PHY_CONTROL_0);
3470 + *read_data = (u32)(status & 0x0000FFFF);
3476 + else if (time_after(jiffies, t_start+5*HZ)) {
3478 + printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3485 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3487 + unsigned long volatile t_start=jiffies;
3488 + u32 volatile data;
3492 + // make sure previous write operation is complete
3494 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3495 + if (!(sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)))
3497 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3502 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3504 + printk("\n MDIO Write operation ongoing\n");
3509 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3510 + data = ((write_data & 0xFFFF) << 16);
3511 + data |= (phy_register << 8) | (phy_addr);
3513 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3515 + data = (1<<30) | (phy_addr << 24) | (phy_register << 16) | write_data;
3516 + sysRegWrite(MDIO_PHY_CONTROL_0, data);
3518 + sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3520 + //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3522 + t_start = jiffies;
3524 + // make sure write operation is complete
3526 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3527 + if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)) //wt_done ?= 1
3529 + if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3535 + else if (time_after(jiffies, t_start + 5 * HZ)) {
3537 + printk("\n MDIO Write operation Time Out\n");
3551 +EXPORT_SYMBOL(mii_mgr_write);
3552 +EXPORT_SYMBOL(mii_mgr_read);
3554 +++ b/drivers/net/ethernet/raeth/ra2882ethreg.h
3556 +#ifndef RA2882ETHREG_H
3557 +#define RA2882ETHREG_H
3559 +#include <linux/mii.h> // for struct mii_if_info in ra2882ethreg.h
3560 +#include <linux/version.h> /* check linux version for 2.4 and 2.6 compatibility */
3561 +#include <linux/interrupt.h> /* for "struct tasklet_struct" in linux-3.10.14 */
3562 +#if defined (CONFIG_HW_SFQ)
3563 +#include <linux/ip.h>
3564 +#include <linux/ipv6.h>
3566 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
3567 +#include <asm/rt2880/rt_mmap.h>
3569 +#include "raether.h"
3571 +#ifdef WORKQUEUE_BH
3572 +#include <linux/workqueue.h>
3573 +#endif // WORKQUEUE_BH //
3574 +#ifdef CONFIG_RAETH_LRO
3575 +#include <linux/inet_lro.h>
3578 +#define MAX_PACKET_SIZE 1514
3579 +#define MIN_PACKET_SIZE 60
3580 +#define MAX_TXD_LEN 0x3fff
3582 +#if defined (CONFIG_ARCH_MT7623)
3583 +#define phys_to_bus(a) (a)
3585 +#define phys_to_bus(a) (a & 0x1FFFFFFF)
3591 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
3592 +#define BIT(x) ((1 << x))
3594 +/* bits range: for example BITS(16,23) = 0xFF0000
3595 + * ==> (BIT(m)-1) = 0x0000FFFF ~(BIT(m)-1) => 0xFFFF0000
3596 + * ==> (BIT(n+1)-1) = 0x00FFFFFF
3598 +#define BITS(m,n) (~(BIT(m)-1) & ((BIT(n) - 1) | BIT(n)))
3600 +#define ETHER_ADDR_LEN 6
3602 +/* PHY Vendor ID list */
3604 +#define EV_ICPLUS_PHY_ID0 0x0243
3605 +#define EV_ICPLUS_PHY_ID1 0x0D90
3606 +#define EV_MARVELL_PHY_ID0 0x0141
3607 +#define EV_MARVELL_PHY_ID1 0x0CC2
3608 +#define EV_VTSS_PHY_ID0 0x0007
3609 +#define EV_VTSS_PHY_ID1 0x0421
3614 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3615 + defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
3616 + defined (CONFIG_ARCH_MT7623)
3618 +#define RX_COHERENT BIT(31)
3619 +#define RX_DLY_INT BIT(30)
3620 +#define TX_COHERENT BIT(29)
3621 +#define TX_DLY_INT BIT(28)
3622 +#define RING3_RX_DLY_INT BIT(27)
3623 +#define RING2_RX_DLY_INT BIT(26)
3624 +#define RING1_RX_DLY_INT BIT(25)
3626 +#define ALT_RPLC_INT3 BIT(23)
3627 +#define ALT_RPLC_INT2 BIT(22)
3628 +#define ALT_RPLC_INT1 BIT(21)
3630 +#define RX_DONE_INT3 BIT(19)
3631 +#define RX_DONE_INT2 BIT(18)
3632 +#define RX_DONE_INT1 BIT(17)
3633 +#define RX_DONE_INT0 BIT(16)
3635 +#define TX_DONE_INT3 BIT(3)
3636 +#define TX_DONE_INT2 BIT(2)
3637 +#define TX_DONE_INT1 BIT(1)
3638 +#define TX_DONE_INT0 BIT(0)
3640 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3641 +#define RLS_COHERENT BIT(29)
3642 +#define RLS_DLY_INT BIT(28)
3643 +#define RLS_DONE_INT BIT(0)
3647 +//#define CNT_PPE_AF BIT(31)
3648 +//#define CNT_GDM_AF BIT(29)
3649 +#define PSE_P2_FC BIT(26)
3650 +#define GDM_CRC_DROP BIT(25)
3651 +#define PSE_BUF_DROP BIT(24)
3652 +#define GDM_OTHER_DROP BIT(23)
3653 +#define PSE_P1_FC BIT(22)
3654 +#define PSE_P0_FC BIT(21)
3655 +#define PSE_FQ_EMPTY BIT(20)
3656 +#define GE1_STA_CHG BIT(18)
3657 +#define TX_COHERENT BIT(17)
3658 +#define RX_COHERENT BIT(16)
3660 +#define TX_DONE_INT3 BIT(11)
3661 +#define TX_DONE_INT2 BIT(10)
3662 +#define TX_DONE_INT1 BIT(9)
3663 +#define TX_DONE_INT0 BIT(8)
3664 +#define RX_DONE_INT1 RX_DONE_INT0
3665 +#define RX_DONE_INT0 BIT(2)
3666 +#define TX_DLY_INT BIT(1)
3667 +#define RX_DLY_INT BIT(0)
3670 +#define FE_INT_ALL (TX_DONE_INT3 | TX_DONE_INT2 | \
3671 + TX_DONE_INT1 | TX_DONE_INT0 | \
3672 + RX_DONE_INT0 | RX_DONE_INT1 | \
3673 + RX_DONE_INT2 | RX_DONE_INT3)
3675 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3676 +#define QFE_INT_ALL (RLS_DONE_INT | RX_DONE_INT0 | RX_DONE_INT1)
3677 +#define QFE_INT_DLY_INIT (RLS_DLY_INT | RX_DLY_INT)
3679 +#define NUM_QDMA_PAGE 512
3680 +#define QDMA_PAGE_SIZE 2048
3685 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3686 +#define PORT0_QUEUE_FULL BIT(14) //port0 queue full
3687 +#define PORT1_QUEUE_FULL BIT(15) //port1 queue full
3688 +#define PORT2_QUEUE_FULL BIT(16) //port2 queue full
3689 +#define PORT3_QUEUE_FULL BIT(17) //port3 queue full
3690 +#define PORT4_QUEUE_FULL BIT(18) //port4 queue full
3691 +#define PORT5_QUEUE_FULL BIT(19) //port5 queue full
3692 +#define PORT6_QUEUE_FULL BIT(20) //port6 queue full
3693 +#define SHARED_QUEUE_FULL BIT(23) //shared queue full
3694 +#define QUEUE_EXHAUSTED BIT(24) //global queue is used up and all packets are dropped
3695 +#define BC_STROM BIT(25) //the device is undergoing broadcast storm
3696 +#define PORT_ST_CHG BIT(26) //Port status change
3697 +#define UNSECURED_ALERT BIT(27) //Intruder alert
3698 +#define ABNORMAL_ALERT BIT(28) //Abnormal
3700 +#define ESW_ISR (RALINK_ETH_SW_BASE + 0x00)
3701 +#define ESW_IMR (RALINK_ETH_SW_BASE + 0x04)
3702 +#define ESW_INT_ALL (PORT_ST_CHG)
3704 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3705 + defined (CONFIG_RALINK_MT7620)
3706 +#define MIB_INT BIT(25)
3707 +#define ACL_INT BIT(24)
3708 +#define P5_LINK_CH BIT(5)
3709 +#define P4_LINK_CH BIT(4)
3710 +#define P3_LINK_CH BIT(3)
3711 +#define P2_LINK_CH BIT(2)
3712 +#define P1_LINK_CH BIT(1)
3713 +#define P0_LINK_CH BIT(0)
3715 +#define RX_GOCT_CNT BIT(4)
3716 +#define RX_GOOD_CNT BIT(6)
3717 +#define TX_GOCT_CNT BIT(17)
3718 +#define TX_GOOD_CNT BIT(19)
3720 +#define MSK_RX_GOCT_CNT BIT(4)
3721 +#define MSK_RX_GOOD_CNT BIT(6)
3722 +#define MSK_TX_GOCT_CNT BIT(17)
3723 +#define MSK_TX_GOOD_CNT BIT(19)
3724 +#define MSK_CNT_INT_ALL (MSK_RX_GOCT_CNT | MSK_RX_GOOD_CNT | MSK_TX_GOCT_CNT | MSK_TX_GOOD_CNT)
3725 +//#define MSK_CNT_INT_ALL (MSK_RX_GOOD_CNT | MSK_TX_GOOD_CNT)
3728 +#define ESW_IMR (RALINK_ETH_SW_BASE + 0x7000 + 0x8)
3729 +#define ESW_ISR (RALINK_ETH_SW_BASE + 0x7000 + 0xC)
3730 +#define ESW_INT_ALL (P0_LINK_CH | P1_LINK_CH | P2_LINK_CH | P3_LINK_CH | P4_LINK_CH | P5_LINK_CH | ACL_INT | MIB_INT)
3731 +#define ESW_AISR (RALINK_ETH_SW_BASE + 0x8)
3732 +#define ESW_P0_IntSn (RALINK_ETH_SW_BASE + 0x4004)
3733 +#define ESW_P1_IntSn (RALINK_ETH_SW_BASE + 0x4104)
3734 +#define ESW_P2_IntSn (RALINK_ETH_SW_BASE + 0x4204)
3735 +#define ESW_P3_IntSn (RALINK_ETH_SW_BASE + 0x4304)
3736 +#define ESW_P4_IntSn (RALINK_ETH_SW_BASE + 0x4404)
3737 +#define ESW_P5_IntSn (RALINK_ETH_SW_BASE + 0x4504)
3738 +#define ESW_P6_IntSn (RALINK_ETH_SW_BASE + 0x4604)
3739 +#define ESW_P0_IntMn (RALINK_ETH_SW_BASE + 0x4008)
3740 +#define ESW_P1_IntMn (RALINK_ETH_SW_BASE + 0x4108)
3741 +#define ESW_P2_IntMn (RALINK_ETH_SW_BASE + 0x4208)
3742 +#define ESW_P3_IntMn (RALINK_ETH_SW_BASE + 0x4308)
3743 +#define ESW_P4_IntMn (RALINK_ETH_SW_BASE + 0x4408)
3744 +#define ESW_P5_IntMn (RALINK_ETH_SW_BASE + 0x4508)
3745 +#define ESW_P6_IntMn (RALINK_ETH_SW_BASE + 0x4608)
3747 +#if defined (CONFIG_RALINK_MT7620)
3748 +#define ESW_P7_IntSn (RALINK_ETH_SW_BASE + 0x4704)
3749 +#define ESW_P7_IntMn (RALINK_ETH_SW_BASE + 0x4708)
3753 +#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x7000)
3755 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3757 +#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x0000)
3759 +#define P5_LINK_CH BIT(5)
3760 +#define P4_LINK_CH BIT(4)
3761 +#define P3_LINK_CH BIT(3)
3762 +#define P2_LINK_CH BIT(2)
3763 +#define P1_LINK_CH BIT(1)
3764 +#define P0_LINK_CH BIT(0)
3767 +#endif // CONFIG_RALINK_RT3052 || CONFIG_RALINK_RT3352 || CONFIG_RALINK_RT5350 || defined (CONFIG_RALINK_MT7628)//
3769 +#define RX_BUF_ALLOC_SIZE 2000
3770 +#define FASTPATH_HEADROOM 64
3772 +#define ETHER_BUFFER_ALIGN 32 ///// Align on a cache line
3774 +#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
3775 + ((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
3776 + ~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr))
3778 +#ifdef CONFIG_PSEUDO_SUPPORT
3779 +typedef struct _PSEUDO_ADAPTER {
3780 + struct net_device *RaethDev;
3781 + struct net_device *PseudoDev;
3782 + struct net_device_stats stat;
3783 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
3784 + struct mii_if_info mii_info;
3787 +} PSEUDO_ADAPTER, PPSEUDO_ADAPTER;
3789 +#define MAX_PSEUDO_ENTRY 1
3794 +/* Register Categories Definition */
3795 +#define RAFRAMEENGINE_OFFSET 0x0000
3796 +#define RAGDMA_OFFSET 0x0020
3797 +#define RAPSE_OFFSET 0x0040
3798 +#define RAGDMA2_OFFSET 0x0060
3799 +#define RACDMA_OFFSET 0x0080
3800 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3801 + defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
3802 + defined (CONFIG_ARCH_MT7623)
3804 +#define RAPDMA_OFFSET 0x0800
3805 +#define SDM_OFFSET 0x0C00
3807 +#define RAPDMA_OFFSET 0x0100
3809 +#define RAPPE_OFFSET 0x0200
3810 +#define RACMTABLE_OFFSET 0x0400
3811 +#define RAPOLICYTABLE_OFFSET 0x1000
3814 +/* Register Map Detail */
3816 +#define SYSCFG1 (RALINK_SYSCTL_BASE + 0x14)
3818 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3821 +#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x000)
3822 +#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x004)
3823 +#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x008)
3824 +#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00C)
3826 +#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x010)
3827 +#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x014)
3828 +#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x018)
3829 +#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x01C)
3831 +#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x020)
3832 +#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x024)
3833 +#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x028)
3834 +#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x02C)
3836 +#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x030)
3837 +#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x034)
3838 +#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x038)
3839 +#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x03C)
3841 +#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x100)
3842 +#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x104)
3843 +#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x108)
3844 +#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10C)
3846 +#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x110)
3847 +#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x114)
3848 +#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x118)
3849 +#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x11C)
3851 +#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x200)
3852 +#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x204)
3853 +#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x208)
3854 +#define PDMA_RST_CFG (PDMA_RST_IDX)
3855 +#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20C)
3856 +#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x210)
3857 +#define INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x220)
3858 +#define FE_INT_STATUS (INT_STATUS)
3859 +#define INT_MASK (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x228)
3860 +#define FE_INT_ENABLE (INT_MASK)
3861 +#define PDMA_WRR (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
3862 +#define PDMA_SCH_CFG (PDMA_WRR)
3864 +#define SDM_CON (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x00) //Switch DMA configuration
3865 +#define SDM_RRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x04) //Switch DMA Rx Ring
3866 +#define SDM_TRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x08) //Switch DMA Tx Ring
3867 +#define SDM_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x0C) //Switch MAC address LSB
3868 +#define SDM_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10) //Switch MAC Address MSB
3869 +#define SDM_TPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x100) //Switch DMA Tx packet count
3870 +#define SDM_TBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x104) //Switch DMA Tx byte count
3871 +#define SDM_RPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x108) //Switch DMA rx packet count
3872 +#define SDM_RBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10C) //Switch DMA rx byte count
3873 +#define SDM_CS_ERR (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x110) //Switch DMA rx checksum error count
3875 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3876 + defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || \
3877 + defined (CONFIG_ARCH_MT7623)
3879 +/* Old FE with New PDMA */
3880 +#define PDMA_RELATED 0x0800
3882 +#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x000)
3883 +#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x004)
3884 +#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x008)
3885 +#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x00C)
3887 +#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x010)
3888 +#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x014)
3889 +#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x018)
3890 +#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x01C)
3892 +#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x020)
3893 +#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x024)
3894 +#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x028)
3895 +#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x02C)
3897 +#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x030)
3898 +#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x034)
3899 +#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x038)
3900 +#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x03C)
3902 +#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x100)
3903 +#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x104)
3904 +#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x108)
3905 +#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x10C)
3907 +#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x110)
3908 +#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x114)
3909 +#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x118)
3910 +#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x11C)
3912 +#define RX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x120)
3913 +#define RX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x124)
3914 +#define RX_CALC_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x128)
3915 +#define RX_DRX_IDX12 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x12C)
3917 +#define RX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x130)
3918 +#define RX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x134)
3919 +#define RX_CALC_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x138)
3920 +#define RX_DRX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x13C)
3922 +#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x200)
3923 +#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x204)
3924 +#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x208)
3925 +#define PDMA_RST_CFG (PDMA_RST_IDX)
3926 +#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x20C)
3927 +#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x210)
3928 +#define INT_STATUS (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x220)
3929 +#define FE_INT_STATUS (INT_STATUS)
3930 +#define INT_MASK (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x228)
3931 +#define FE_INT_ENABLE (INT_MASK)
3932 +#define SCH_Q01_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
3933 +#define SCH_Q23_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x284)
3935 +#define FE_GLO_CFG RALINK_FRAME_ENGINE_BASE + 0x00
3936 +#define FE_RST_GL RALINK_FRAME_ENGINE_BASE + 0x04
3937 +#define FE_INT_STATUS2 RALINK_FRAME_ENGINE_BASE + 0x08
3938 +#define FE_INT_ENABLE2 RALINK_FRAME_ENGINE_BASE + 0x0c
3939 +//#define FC_DROP_STA RALINK_FRAME_ENGINE_BASE + 0x18
3940 +#define FOE_TS_T RALINK_FRAME_ENGINE_BASE + 0x10
3942 +#if defined (CONFIG_RALINK_MT7620)
3943 +#define GDMA1_RELATED 0x0600
3944 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3945 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3946 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3947 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3948 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3949 +#define GDMA1_RELATED 0x0500
3950 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3951 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3952 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3953 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3955 +#define GDMA2_RELATED 0x1500
3956 +#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
3957 +#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
3958 +#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
3959 +#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
3961 +#define GDMA1_RELATED 0x0020
3962 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3963 +#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3964 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3965 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3966 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x10)
3968 +#define GDMA2_RELATED 0x0060
3969 +#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
3970 +#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
3971 +#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
3972 +#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
3973 +#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x10)
3976 +#if defined (CONFIG_RALINK_MT7620)
3977 +#define PSE_RELATED 0x0500
3978 +#define PSE_FQFC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
3979 +#define PSE_IQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
3980 +#define PSE_QUE_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
3982 +#define PSE_RELATED 0x0040
3983 +#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
3984 +#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
3985 +#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
3986 +#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x0C)
3987 +#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x10)
3988 +#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x14)
3989 +#define GDMA2_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x18)
3990 +#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x1C)
3994 +#if defined (CONFIG_RALINK_MT7620)
3995 +#define CDMA_RELATED 0x0400
3996 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
3997 +#define SMACCR0 (RALINK_ETH_SW_BASE + 0x3FE4)
3998 +#define SMACCR1 (RALINK_ETH_SW_BASE + 0x3FE8)
3999 +#define CKGCR (RALINK_ETH_SW_BASE + 0x3FF0)
4000 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4001 +#define CDMA_RELATED 0x0400
4002 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) //fake definition
4003 +#define CDMP_IG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4004 +#define CDMP_EG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
4006 +#define CDMA_RELATED 0x0080
4007 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4008 +#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
4009 +#define SMACCR0 (RALINK_ETH_SW_BASE + 0x30E4)
4010 +#define SMACCR1 (RALINK_ETH_SW_BASE + 0x30E8)
4011 +#define CKGCR (RALINK_ETH_SW_BASE + 0x30F0)
4014 +#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x100)
4017 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4018 +/*kurtis: add QDMA define*/
4020 +#define CLK_CFG_0 (RALINK_SYSCTL_BASE + 0x2C)
4021 +#define PAD_RGMII2_MDIO_CFG (RALINK_SYSCTL_BASE + 0x58)
4023 +#define QDMA_RELATED 0x1800
4024 +#define QTX_CFG_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x000)
4025 +#define QTX_SCH_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x004)
4026 +#define QTX_HEAD_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x008)
4027 +#define QTX_TAIL_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x00C)
4028 +#define QTX_CFG_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x010)
4029 +#define QTX_SCH_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x014)
4030 +#define QTX_HEAD_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x018)
4031 +#define QTX_TAIL_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x01C)
4032 +#define QTX_CFG_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x020)
4033 +#define QTX_SCH_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x024)
4034 +#define QTX_HEAD_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x028)
4035 +#define QTX_TAIL_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x02C)
4036 +#define QTX_CFG_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x030)
4037 +#define QTX_SCH_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x034)
4038 +#define QTX_HEAD_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x038)
4039 +#define QTX_TAIL_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x03C)
4040 +#define QTX_CFG_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x040)
4041 +#define QTX_SCH_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x044)
4042 +#define QTX_HEAD_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x048)
4043 +#define QTX_TAIL_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x04C)
4044 +#define QTX_CFG_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x050)
4045 +#define QTX_SCH_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x054)
4046 +#define QTX_HEAD_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x058)
4047 +#define QTX_TAIL_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x05C)
4048 +#define QTX_CFG_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x060)
4049 +#define QTX_SCH_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x064)
4050 +#define QTX_HEAD_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x068)
4051 +#define QTX_TAIL_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x06C)
4052 +#define QTX_CFG_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x070)
4053 +#define QTX_SCH_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x074)
4054 +#define QTX_HEAD_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x078)
4055 +#define QTX_TAIL_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x07C)
4056 +#define QTX_CFG_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x080)
4057 +#define QTX_SCH_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x084)
4058 +#define QTX_HEAD_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x088)
4059 +#define QTX_TAIL_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x08C)
4060 +#define QTX_CFG_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x090)
4061 +#define QTX_SCH_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x094)
4062 +#define QTX_HEAD_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x098)
4063 +#define QTX_TAIL_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x09C)
4064 +#define QTX_CFG_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A0)
4065 +#define QTX_SCH_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A4)
4066 +#define QTX_HEAD_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A8)
4067 +#define QTX_TAIL_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0AC)
4068 +#define QTX_CFG_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B0)
4069 +#define QTX_SCH_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B4)
4070 +#define QTX_HEAD_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B8)
4071 +#define QTX_TAIL_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0BC)
4072 +#define QTX_CFG_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C0)
4073 +#define QTX_SCH_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C4)
4074 +#define QTX_HEAD_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C8)
4075 +#define QTX_TAIL_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0CC)
4076 +#define QTX_CFG_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D0)
4077 +#define QTX_SCH_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D4)
4078 +#define QTX_HEAD_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D8)
4079 +#define QTX_TAIL_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0DC)
4080 +#define QTX_CFG_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E0)
4081 +#define QTX_SCH_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E4)
4082 +#define QTX_HEAD_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E8)
4083 +#define QTX_TAIL_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0EC)
4084 +#define QTX_CFG_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F0)
4085 +#define QTX_SCH_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F4)
4086 +#define QTX_HEAD_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F8)
4087 +#define QTX_TAIL_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0FC)
4088 +#define QRX_BASE_PTR_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x100)
4089 +#define QRX_MAX_CNT_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x104)
4090 +#define QRX_CRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x108)
4091 +#define QRX_DRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x10C)
4092 +#define QRX_BASE_PTR_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x110)
4093 +#define QRX_MAX_CNT_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x114)
4094 +#define QRX_CRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x118)
4095 +#define QRX_DRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x11C)
4096 +#if defined (CONFIG_ARCH_MT7623)
4097 +#define VQTX_TB_BASE_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x180)
4098 +#define VQTX_TB_BASE_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x184)
4099 +#define VQTX_TB_BASE_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x188)
4100 +#define VQTX_TB_BASE_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x18C)
4102 +#define QDMA_INFO (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x200)
4103 +#define QDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x204)
4104 +#define QDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x208)
4105 +#define QDMA_RST_CFG (QDMA_RST_IDX)
4106 +#define QDMA_DELAY_INT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x20C)
4107 +#define QDMA_FC_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x210)
4108 +#define QDMA_TX_SCH (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x214)
4109 +#define QDMA_INT_STS (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x218)
4110 +#define QFE_INT_STATUS (QDMA_INT_STS)
4111 +#define QDMA_INT_MASK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x21C)
4112 +#define QFE_INT_ENABLE (QDMA_INT_MASK)
4113 +#define QDMA_TRTCM (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220)
4114 +#define QDMA_DATA0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224)
4115 +#define QDMA_DATA1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x228)
4116 +#define QDMA_RED_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x22C)
4117 +#define QDMA_TEST (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x230)
4118 +#define QDMA_DMA (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x234)
4119 +#define QDMA_BMU (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x238)
4120 +#define QDMA_HRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x240)
4121 +#define QDMA_HRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x244)
4122 +#define QDMA_SRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x248)
4123 +#define QDMA_SRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x24C)
4124 +#define QTX_CTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x300)
4125 +#define QTX_DTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x304)
4126 +#define QTX_FWD_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x308)
4127 +#define QTX_CRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x310)
4128 +#define QTX_DRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x314)
4129 +#define QTX_RLS_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x318)
4130 +#define QDMA_FQ_HEAD (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x320)
4131 +#define QDMA_FQ_TAIL (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x324)
4132 +#define QDMA_FQ_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x328)
4133 +#define QDMA_FQ_BLEN (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x32C)
4134 +#define QTX_Q0MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x350)
4135 +#define QTX_Q1MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x354)
4136 +#define QTX_Q2MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x358)
4137 +#define QTX_Q3MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x35C)
4138 +#define QTX_Q0MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x360)
4139 +#define QTX_Q1MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x364)
4140 +#define QTX_Q2MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x368)
4141 +#define QTX_Q3MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x36C)
4144 +#endif/*MT7621 QDMA*/
4148 +/* 1. Frame Engine Global Registers */
4149 +#define MDIO_ACCESS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x00)
4150 +#define MDIO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x04)
4151 +#define FE_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x08)
4152 +#define FE_RST_GL (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x0C)
4153 +#define FE_INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x10)
4154 +#define FE_INT_ENABLE (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x14)
4155 +#define MDIO_CFG2 (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x18) //Original:FC_DROP_STA
4156 +#define FOC_TS_T (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x1C)
4159 +/* 2. GDMA Registers */
4160 +#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x00)
4161 +#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x04)
4162 +#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x08)
4163 +#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x0C)
4164 +#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x10)
4166 +#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x00)
4167 +#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x04)
4168 +#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x08)
4169 +#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x0C)
4170 +#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x10)
4173 +#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x00)
4174 +#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x04)
4175 +#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x08)
4176 +#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x0C)
4177 +#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x1f0)
4180 +#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x00)
4181 +#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x04)
4182 +/* skip PPPoE SID and VLAN ID definition */
4186 +#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00)
4187 +#define PDMA_RST_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x04)
4188 +#define PDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x08)
4190 +#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x0C)
4192 +#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10)
4193 +#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x14)
4194 +#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x18)
4195 +#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x1C)
4197 +#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20)
4198 +#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x24)
4199 +#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x28)
4200 +#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x2C)
4202 +#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
4203 +#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
4204 +#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
4205 +#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
4207 +#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x50)
4208 +#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x54)
4209 +#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x58)
4210 +#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x5C)
4212 +#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x30)
4213 +#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x34)
4214 +#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x38)
4215 +#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x3C)
4217 +#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
4218 +#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
4219 +#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
4220 +#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
4224 +#define DELAY_INT_INIT 0x84048404
4225 +#define FE_INT_DLY_INIT (TX_DLY_INT | RX_DLY_INT)
4228 +#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
4230 +/* 6. Counter and Meter Table */
4231 +#define PPE_AC_BCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x000) /* PPE Accounting Group 0 Byte Cnt */
4232 +#define PPE_AC_PCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x004) /* PPE Accounting Group 0 Packet Cnt */
4235 +#define PPE_MTR_CNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x200) /* 0 ~ 63 */
4237 +#define PPE_MTR_CNT63 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x2FC)
4239 +#define GDMA_TX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x300) /* Transmit good byte cnt for GEport */
4240 +#define GDMA_TX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x304) /* Transmit good pkt cnt for GEport */
4241 +#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x308) /* Transmit skip cnt for GEport */
4242 +#define GDMA_TX_COLCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x30C) /* Transmit collision cnt for GEport */
4244 +/* updated these address mappings to fit data sheet v0.26, by bobtseng, 2007.6.14 */
4245 +#define GDMA_RX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x320)
4246 +#define GDMA_RX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x324)
4247 +#define GDMA_RX_OERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x328)
4248 +#define GDMA_RX_FERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x32C)
4249 +#define GDMA_RX_SERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x330)
4250 +#define GDMA_RX_LERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x334)
4251 +#define GDMA_RX_CERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x338)
4252 +#define GDMA_RX_FCCNT1 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x33C)
4256 +/* LRO global control */
4257 +/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
4258 +#define LRO_ALT_REFRESH_TIMER (RALINK_FRAME_ENGINE_BASE+0x001C)
4260 +/* LRO auto-learn table info */
4261 +#define PDMA_FE_ALT_CF8 (RALINK_FRAME_ENGINE_BASE+0x0300)
4262 +#define PDMA_FE_ALT_SGL_CFC (RALINK_FRAME_ENGINE_BASE+0x0304)
4263 +#define PDMA_FE_ALT_SEQ_CFC (RALINK_FRAME_ENGINE_BASE+0x0308)
4266 +#define ADMA_LRO_CTRL_OFFSET 0x0980
4268 + * Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN, Bit [3]:MULTIPLE_RXD_PREFETCH_EN,
4269 + * Bit [4]:RXD_PREFETCH_EN, Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN,
4270 + * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode, Bit [22]:cr_lro_alt_rplc_mode,
4271 + * Bit [23]:cr_lro_l4_ctrl_psh_en, Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
4273 +#define ADMA_LRO_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x00)
4274 +/* Bits [31:0]:LRO_CPU_REASON */
4275 +#define ADMA_LRO_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x04)
4276 +/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
4277 +#define ADMA_LRO_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x08)
4279 + * Bits [7:0]:LRO_MAX_AGGREGATED_CNT, Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
4280 + * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
4282 +#define ADMA_LRO_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x0C)
4284 +/* LRO RX delay interrupt configurations */
4285 +#define LRO_RX1_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a70)
4286 +#define LRO_RX2_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a74)
4287 +#define LRO_RX3_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a78)
4289 +/* LRO auto-learn configurations */
4290 +#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET 0x0990
4291 +#define PDMA_LRO_ATL_OVERFLOW_ADJ (RALINK_FRAME_ENGINE_BASE+PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
4292 +#define LRO_ALT_SCORE_DELTA (RALINK_FRAME_ENGINE_BASE+0x0a4c)
4294 +/* LRO agg timer configurations */
4295 +#define LRO_MAX_AGG_TIME (RALINK_FRAME_ENGINE_BASE+0x0a5c)
4297 +/* LRO configurations of RX ring #0 */
4298 +#define LRO_RXRING0_OFFSET 0x0b00
4299 +#define LRO_RX_RING0_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x04)
4300 +#define LRO_RX_RING0_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x08)
4301 +#define LRO_RX_RING0_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x0C)
4302 +#define LRO_RX_RING0_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x10)
4303 +#define LRO_RX_RING0_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x28)
4304 +/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
4305 +#define LRO_RX_RING0_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x2C)
4306 +#define LRO_RX_RING0_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x30)
4307 +/* LRO configurations of RX ring #1 */
4308 +#define LRO_RXRING1_OFFSET 0x0b40
4309 +#define LRO_RX_RING1_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x00)
4310 +#define LRO_RX_RING1_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x04)
4311 +#define LRO_RX_RING1_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x08)
4312 +#define LRO_RX_RING1_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x0C)
4313 +#define LRO_RX_RING1_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x10)
4314 +#define LRO_RX_RING1_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x14)
4315 +#define LRO_RX_RING1_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x18)
4316 +#define LRO_RX_RING1_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x1C)
4317 +#define LRO_RX_RING1_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x20)
4318 +#define LRO_RX_RING1_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x24)
4319 +#define LRO_RX_RING1_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x28)
4320 +#define LRO_RX_RING1_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x2C)
4321 +#define LRO_RX_RING1_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x30)
4322 +#define LRO_RXRING2_OFFSET 0x0b80
4323 +#define LRO_RX_RING2_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x00)
4324 +#define LRO_RX_RING2_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x04)
4325 +#define LRO_RX_RING2_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x08)
4326 +#define LRO_RX_RING2_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x0C)
4327 +#define LRO_RX_RING2_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x10)
4328 +#define LRO_RX_RING2_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x14)
4329 +#define LRO_RX_RING2_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x18)
4330 +#define LRO_RX_RING2_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x1C)
4331 +#define LRO_RX_RING2_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x20)
4332 +#define LRO_RX_RING2_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x24)
4333 +#define LRO_RX_RING2_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x28)
4334 +#define LRO_RX_RING2_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x2C)
4335 +#define LRO_RX_RING2_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x30)
4336 +#define LRO_RXRING3_OFFSET 0x0bc0
4337 +#define LRO_RX_RING3_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x00)
4338 +#define LRO_RX_RING3_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x04)
4339 +#define LRO_RX_RING3_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x08)
4340 +#define LRO_RX_RING3_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x0C)
4341 +#define LRO_RX_RING3_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x10)
4342 +#define LRO_RX_RING3_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x14)
4343 +#define LRO_RX_RING3_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x18)
4344 +#define LRO_RX_RING3_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x1C)
4345 +#define LRO_RX_RING3_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x20)
4346 +#define LRO_RX_RING3_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x24)
4347 +#define LRO_RX_RING3_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x28)
4348 +#define LRO_RX_RING3_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x2C)
4349 +#define LRO_RX_RING3_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x30)
4351 +/* LRO RX ring mode */
4352 +#define PDMA_RX_NORMAL_MODE (0x0)
4353 +#define PDMA_RX_PSE_MODE (0x1)
4354 +#define PDMA_RX_FORCE_PORT (0x2)
4355 +#define PDMA_RX_AUTO_LEARN (0x3)
4357 +#define ADMA_RX_RING0 (0)
4358 +#define ADMA_RX_RING1 (1)
4359 +#define ADMA_RX_RING2 (2)
4360 +#define ADMA_RX_RING3 (3)
4362 +#define ADMA_RX_LEN0_MASK (0x3fff)
4363 +#define ADMA_RX_LEN1_MASK (0x3)
4365 +#define PDMA_LRO_EN BIT(0)
4366 +#define PDMA_LRO_IPV6_EN BIT(1)
4367 +#define PDMA_LRO_IPV4_CSUM_UPDATE_EN BIT(7)
4368 +#define PDMA_LRO_IPV4_CTRL_PUSH_EN BIT(23)
4369 +#define PDMA_LRO_RXD_PREFETCH_EN BITS(3,4)
4370 +#define PDMA_NON_LRO_MULTI_EN BIT(2)
4371 +#define PDMA_LRO_DLY_INT_EN BIT(5)
4372 +#define PDMA_LRO_FUSH_REQ BITS(26,28)
4373 +#define PDMA_LRO_RELINGUISH BITS(29,31)
4374 +#define PDMA_LRO_FREQ_PRI_ADJ BITS(16,19)
4375 +#define PDMA_LRO_TPUT_PRE_ADJ BITS(8,11)
4376 +#define PDMA_LRO_TPUT_PRI_ADJ BITS(12,15)
4377 +#define PDMA_LRO_ALT_SCORE_MODE BIT(21)
4378 +#define PDMA_LRO_RING_AGE1 BITS(22,31)
4379 +#define PDMA_LRO_RING_AGE2 BITS(0,5)
4380 +#define PDMA_LRO_RING_AGG BITS(10,25)
4381 +#define PDMA_LRO_RING_AGG_CNT1 BITS(26,31)
4382 +#define PDMA_LRO_RING_AGG_CNT2 BITS(0,1)
4383 +#define PDMA_LRO_ALT_TICK_TIMER BITS(16,20)
4384 +#define PDMA_LRO_LRO_MIN_RXD_SDL0 BITS(16,31)
4386 +#define PDMA_LRO_DLY_INT_EN_OFFSET (5)
4387 +#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET (8)
4388 +#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET (16)
4389 +#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET (16)
4390 +#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET (12)
4391 +#define PDMA_LRO_ALT_SCORE_MODE_OFFSET (21)
4392 +#define PDMA_LRO_FUSH_REQ_OFFSET (26)
4393 +#define PDMA_NON_LRO_MULTI_EN_OFFSET (2)
4394 +#define PDMA_LRO_IPV6_EN_OFFSET (1)
4395 +#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET (3)
4396 +#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
4397 +#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET (23)
4398 +#define PDMA_LRO_ALT_TICK_TIMER_OFFSET (16)
4400 +#define PDMA_LRO_TPUT_OVERFLOW_ADJ BITS(12,31)
4401 +#define PDMA_LRO_CNT_OVERFLOW_ADJ BITS(0,11)
4403 +#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET (12)
4404 +#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET (0)
4406 +#define PDMA_LRO_ALT_BYTE_CNT_MODE (0)
4407 +#define PDMA_LRO_ALT_PKT_CNT_MODE (1)
4409 +/* LRO_RX_RING1_CTRL_DW1 offsets */
4410 +#define PDMA_LRO_AGE_H_OFFSET (10)
4411 +#define PDMA_LRO_RING_AGE1_OFFSET (22)
4412 +#define PDMA_LRO_RING_AGG_CNT1_OFFSET (26)
4413 +/* LRO_RX_RING1_CTRL_DW2 offsets */
4414 +#define PDMA_RX_MODE_OFFSET (6)
4415 +#define PDMA_RX_PORT_VALID_OFFSET (8)
4416 +#define PDMA_RX_MYIP_VALID_OFFSET (9)
4417 +#define PDMA_LRO_RING_AGE2_OFFSET (0)
4418 +#define PDMA_LRO_RING_AGG_OFFSET (10)
4419 +#define PDMA_LRO_RING_AGG_CNT2_OFFSET (0)
4420 +/* LRO_RX_RING1_CTRL_DW3 offsets */
4421 +#define PDMA_LRO_AGG_CNT_H_OFFSET (6)
4422 +/* LRO_RX_RING1_STP_DTP_DW offsets */
4423 +#define PDMA_RX_TCP_SRC_PORT_OFFSET (16)
4424 +#define PDMA_RX_TCP_DEST_PORT_OFFSET (0)
4425 +/* LRO_RX_RING1_CTRL_DW0 offsets */
4426 +#define PDMA_RX_IPV4_FORCE_OFFSET (1)
4427 +#define PDMA_RX_IPV6_FORCE_OFFSET (0)
4429 +#define SET_ADMA_RX_LEN0(x) ((x)&ADMA_RX_LEN0_MASK)
4430 +#define SET_ADMA_RX_LEN1(x) ((x)&ADMA_RX_LEN1_MASK)
4432 +#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
4433 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
4435 + *addr |= ((x) & 0xff); \
4437 +#define SET_PDMA_LRO_FLUSH_REQ(x) \
4438 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4439 + *addr &= ~PDMA_LRO_FUSH_REQ; \
4440 + *addr |= ((x) & 0x7)<<PDMA_LRO_FUSH_REQ_OFFSET; \
4442 +#define SET_PDMA_LRO_IPV6_EN(x) \
4443 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4444 + *addr &= ~PDMA_LRO_IPV6_EN; \
4445 + *addr |= ((x) & 0x1)<<PDMA_LRO_IPV6_EN_OFFSET; \
4447 +#if defined(CONFIG_RAETH_HW_LRO_PREFETCH)
4448 +#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
4449 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4450 + *addr &= ~PDMA_LRO_RXD_PREFETCH_EN; \
4451 + *addr |= ((x) & 0x3)<<PDMA_LRO_RXD_PREFETCH_EN_OFFSET; \
4454 +#define SET_PDMA_LRO_RXD_PREFETCH_EN(x)
4456 +#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
4457 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4458 + *addr &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN; \
4459 + *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET; \
4461 +#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
4462 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4463 + *addr &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN; \
4464 + *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET; \
4466 +#define SET_PDMA_NON_LRO_MULTI_EN(x) \
4467 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4468 + *addr &= ~(PDMA_NON_LRO_MULTI_EN); \
4469 + *addr |= ((x) & 0x1)<<PDMA_NON_LRO_MULTI_EN_OFFSET; \
4471 +#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
4472 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4473 + *addr &= ~PDMA_LRO_FREQ_PRI_ADJ; \
4474 + *addr |= ((x) & 0xf)<<PDMA_LRO_FREQ_PRI_ADJ_OFFSET; \
4476 +#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
4477 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4478 + *addr &= ~PDMA_LRO_TPUT_PRE_ADJ; \
4479 + *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRE_ADJ_OFFSET; \
4481 +#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
4482 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4483 + *addr &= ~PDMA_LRO_TPUT_PRI_ADJ; \
4484 + *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRI_ADJ_OFFSET; \
4486 +#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
4487 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4488 + *addr &= ~PDMA_LRO_ALT_SCORE_MODE; \
4489 + *addr |= ((x) & 0x1)<<PDMA_LRO_ALT_SCORE_MODE_OFFSET; \
4491 +#define SET_PDMA_LRO_DLY_INT_EN(x) \
4492 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4493 + *addr &= ~PDMA_LRO_DLY_INT_EN; \
4494 + *addr |= ((x) & 0x1)<<PDMA_LRO_DLY_INT_EN_OFFSET; \
4496 +#define SET_PDMA_LRO_BW_THRESHOLD(x) \
4497 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW2; \
4500 +#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
4501 + { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
4502 + *addr &= ~PDMA_LRO_LRO_MIN_RXD_SDL0; \
4503 + *addr |= ((x) & 0xffff)<<PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET; \
4505 +#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
4506 + { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
4507 + *addr &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ; \
4508 + *addr |= ((x) & 0xfffff)<<PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET; \
4510 +#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
4511 + { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
4512 + *addr &= ~PDMA_LRO_CNT_OVERFLOW_ADJ; \
4513 + *addr |= ((x) & 0xfff)<<PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET; \
4515 +#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
4516 + { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
4517 + *addr &= ~PDMA_LRO_ALT_TICK_TIMER; \
4518 + *addr |= ((x) & 0x1f)<<PDMA_LRO_ALT_TICK_TIMER_OFFSET; \
4520 +#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
4521 + { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
4522 + *addr &= ~0xffff; \
4523 + *addr |= ((x) & 0xffff); \
4525 +#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
4526 + { volatile unsigned int *addr = (unsigned int*)LRO_MAX_AGG_TIME; \
4527 + *addr &= ~0xffff; \
4528 + *addr |= ((x) & 0xffff); \
4530 +#define SET_PDMA_RXRING_MODE(x,y) \
4531 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4532 + *addr &= ~(0x3<<PDMA_RX_MODE_OFFSET); \
4533 + *addr |= (y)<<PDMA_RX_MODE_OFFSET; \
4535 +#define SET_PDMA_RXRING_MYIP_VALID(x,y) \
4536 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4537 + *addr &= ~(0x1<<PDMA_RX_MYIP_VALID_OFFSET); \
4538 + *addr |= ((y)&0x1)<<PDMA_RX_MYIP_VALID_OFFSET; \
4540 +#define SET_PDMA_RXRING_VALID(x,y) \
4541 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4542 + *addr &= ~(0x1<<PDMA_RX_PORT_VALID_OFFSET); \
4543 + *addr |= ((y)&0x1)<<PDMA_RX_PORT_VALID_OFFSET; \
4545 +#define SET_PDMA_RXRING_TCP_SRC_PORT(x,y) \
4546 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
4547 + *addr &= ~(0xffff<<PDMA_RX_TCP_SRC_PORT_OFFSET); \
4548 + *addr |= (y)<<PDMA_RX_TCP_SRC_PORT_OFFSET; \
4550 +#define SET_PDMA_RXRING_TCP_DEST_PORT(x,y) \
4551 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
4552 + *addr &= ~(0xffff<<PDMA_RX_TCP_DEST_PORT_OFFSET); \
4553 + *addr |= (y)<<PDMA_RX_TCP_DEST_PORT_OFFSET; \
4555 +#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x,y) \
4556 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
4557 + *addr &= ~(0x1<<PDMA_RX_IPV4_FORCE_OFFSET); \
4558 + *addr |= (y)<<PDMA_RX_IPV4_FORCE_OFFSET; \
4560 +#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x,y) \
4561 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
4562 + *addr &= ~(0x1<<PDMA_RX_IPV6_FORCE_OFFSET); \
4563 + *addr |= (y)<<PDMA_RX_IPV6_FORCE_OFFSET; \
4565 +#define SET_PDMA_RXRING_AGE_TIME(x,y) \
4566 + { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
4567 + volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4568 + *addr1 &= ~PDMA_LRO_RING_AGE1; \
4569 + *addr2 &= ~PDMA_LRO_RING_AGE2; \
4570 + *addr1 |= ((y) & 0x3ff)<<PDMA_LRO_RING_AGE1_OFFSET; \
4571 + *addr2 |= (((y)>>PDMA_LRO_AGE_H_OFFSET) & 0x03f)<<PDMA_LRO_RING_AGE2_OFFSET; \
4573 +#define SET_PDMA_RXRING_AGG_TIME(x,y) \
4574 + { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4575 + *addr &= ~PDMA_LRO_RING_AGG; \
4576 + *addr |= ((y) & 0xffff)<<PDMA_LRO_RING_AGG_OFFSET; \
4578 +#define SET_PDMA_RXRING_MAX_AGG_CNT(x,y) \
4579 + { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING1_CTRL_DW2 + (((x)-1) << 6)); \
4580 + volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING1_CTRL_DW3 + (((x)-1) << 6)); \
4581 + *addr1 &= ~PDMA_LRO_RING_AGG_CNT1; \
4582 + *addr2 &= ~PDMA_LRO_RING_AGG_CNT2; \
4583 + *addr1 |= ((y) & 0x3f)<<PDMA_LRO_RING_AGG_CNT1_OFFSET; \
4584 + *addr2 |= (((y)>>PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03)<<PDMA_LRO_RING_AGG_CNT2_OFFSET; \
4587 +typedef struct _PDMA_LRO_AUTO_TLB_INFO0_ PDMA_LRO_AUTO_TLB_INFO0_T;
4588 +typedef struct _PDMA_LRO_AUTO_TLB_INFO1_ PDMA_LRO_AUTO_TLB_INFO1_T;
4589 +typedef struct _PDMA_LRO_AUTO_TLB_INFO2_ PDMA_LRO_AUTO_TLB_INFO2_T;
4590 +typedef struct _PDMA_LRO_AUTO_TLB_INFO3_ PDMA_LRO_AUTO_TLB_INFO3_T;
4591 +typedef struct _PDMA_LRO_AUTO_TLB_INFO4_ PDMA_LRO_AUTO_TLB_INFO4_T;
4592 +typedef struct _PDMA_LRO_AUTO_TLB_INFO5_ PDMA_LRO_AUTO_TLB_INFO5_T;
4593 +typedef struct _PDMA_LRO_AUTO_TLB_INFO6_ PDMA_LRO_AUTO_TLB_INFO6_T;
4594 +typedef struct _PDMA_LRO_AUTO_TLB_INFO7_ PDMA_LRO_AUTO_TLB_INFO7_T;
4595 +typedef struct _PDMA_LRO_AUTO_TLB_INFO8_ PDMA_LRO_AUTO_TLB_INFO8_T;
4597 +struct _PDMA_LRO_AUTO_TLB_INFO0_
4599 + unsigned int DTP : 16;
4600 + unsigned int STP : 16;
4602 +struct _PDMA_LRO_AUTO_TLB_INFO1_
4604 + unsigned int SIP0 : 32;
4606 +struct _PDMA_LRO_AUTO_TLB_INFO2_
4608 + unsigned int SIP1 : 32;
4610 +struct _PDMA_LRO_AUTO_TLB_INFO3_
4612 + unsigned int SIP2 : 32;
4614 +struct _PDMA_LRO_AUTO_TLB_INFO4_
4616 + unsigned int SIP3 : 32;
4618 +struct _PDMA_LRO_AUTO_TLB_INFO5_
4620 + unsigned int VLAN_VID0 : 32;
4622 +struct _PDMA_LRO_AUTO_TLB_INFO6_
4624 + unsigned int VLAN_VID1 : 16;
4625 + unsigned int VLAN_VID_VLD : 4;
4626 + unsigned int CNT : 12;
4628 +struct _PDMA_LRO_AUTO_TLB_INFO7_
4630 + unsigned int DW_LEN : 32;
4632 +struct _PDMA_LRO_AUTO_TLB_INFO8_
4634 + unsigned int DIP_ID : 2;
4635 + unsigned int IPV6 : 1;
4636 + unsigned int IPV4 : 1;
4637 + unsigned int RESV : 27;
4638 + unsigned int VALID : 1;
4640 +struct PDMA_LRO_AUTO_TLB_INFO {
4641 + PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
4642 + PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
4643 + PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
4644 + PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
4645 + PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
4646 + PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
4647 + PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
4648 + PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
4649 + PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
4652 +#if defined (CONFIG_HW_SFQ)
4653 +#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1980)
4654 +#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1984)
4655 +#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1988)
4656 +#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + 0x198C)
4657 +#define SFQ_OFFSET 0x1A80
4658 +#define VQTX_GLO (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET)
4659 +#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x0C)
4660 +#define VQTX_NUM (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x10)
4661 +#define VQTX_SCH (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x18)
4662 +#define VQTX_HASH_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x20)
4663 +#define VQTX_HASH_SD (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x24)
4664 +#define VQTX_VLD_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x30)
4665 +#define VQTX_MIB_IF (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x3C)
4666 +#define VQTX_MIB_PCNT (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x40)
4667 +#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x44)
4668 +#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x48)
4670 +#define VQTX_MIB_EN (1<<17)
4671 +#define VQTX_NUM_0 (4<<0)
4672 +#define VQTX_NUM_1 (4<<4)
4673 +#define VQTX_NUM_2 (4<<8)
4674 +#define VQTX_NUM_3 (4<<12)
4676 +/*=========================================
4677 + SFQ Table Format define
4678 +=========================================*/
4679 +typedef struct _SFQ_INFO1_ SFQ_INFO1_T;
4683 + unsigned int VQHPTR;
4685 +//-------------------------------------------------
4686 +typedef struct _SFQ_INFO2_ SFQ_INFO2_T;
4690 + unsigned int VQTPTR;
4692 +//-------------------------------------------------
4693 +typedef struct _SFQ_INFO3_ SFQ_INFO3_T;
4697 + unsigned int QUE_DEPTH:16;
4698 + unsigned int DEFICIT_CNT:16;
4700 +//-------------------------------------------------
4701 +typedef struct _SFQ_INFO4_ SFQ_INFO4_T;
4705 + unsigned int RESV;
4707 +//-------------------------------------------------
4709 +typedef struct _SFQ_INFO5_ SFQ_INFO5_T;
4713 + unsigned int PKT_CNT;
4715 +//-------------------------------------------------
4717 +typedef struct _SFQ_INFO6_ SFQ_INFO6_T;
4721 + unsigned int BYTE_CNT;
4723 +//-------------------------------------------------
4725 +typedef struct _SFQ_INFO7_ SFQ_INFO7_T;
4729 + unsigned int BYTE_CNT;
4731 +//-------------------------------------------------
4733 +typedef struct _SFQ_INFO8_ SFQ_INFO8_T;
4737 + unsigned int RESV;
4742 + SFQ_INFO1_T sfq_info1;
4743 + SFQ_INFO2_T sfq_info2;
4744 + SFQ_INFO3_T sfq_info3;
4745 + SFQ_INFO4_T sfq_info4;
4746 + SFQ_INFO5_T sfq_info5;
4747 + SFQ_INFO6_T sfq_info6;
4748 + SFQ_INFO7_T sfq_info7;
4749 + SFQ_INFO8_T sfq_info8;
4753 +#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
4754 +#define FE_GDM_RXID1_OFFSET (0x0130)
4755 +#define FE_GDM_RXID1 (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID1_OFFSET)
4756 +#define GDM_VLAN_PRI7_RXID_SEL BITS(30,31)
4757 +#define GDM_VLAN_PRI6_RXID_SEL BITS(28,29)
4758 +#define GDM_VLAN_PRI5_RXID_SEL BITS(26,27)
4759 +#define GDM_VLAN_PRI4_RXID_SEL BITS(24,25)
4760 +#define GDM_VLAN_PRI3_RXID_SEL BITS(22,23)
4761 +#define GDM_VLAN_PRI2_RXID_SEL BITS(20,21)
4762 +#define GDM_VLAN_PRI1_RXID_SEL BITS(18,19)
4763 +#define GDM_VLAN_PRI0_RXID_SEL BITS(16,17)
4764 +#define GDM_TCP_ACK_RXID_SEL BITS(4,5)
4765 +#define GDM_TCP_ACK_WZPC BIT(3)
4766 +#define GDM_RXID_PRI_SEL BITS(0,2)
4768 +#define FE_GDM_RXID2_OFFSET (0x0134)
4769 +#define FE_GDM_RXID2 (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID2_OFFSET)
4770 +#define GDM_STAG7_RXID_SEL BITS(30,31)
4771 +#define GDM_STAG6_RXID_SEL BITS(28,29)
4772 +#define GDM_STAG5_RXID_SEL BITS(26,27)
4773 +#define GDM_STAG4_RXID_SEL BITS(24,25)
4774 +#define GDM_STAG3_RXID_SEL BITS(22,23)
4775 +#define GDM_STAG2_RXID_SEL BITS(20,21)
4776 +#define GDM_STAG1_RXID_SEL BITS(18,19)
4777 +#define GDM_STAG0_RXID_SEL BITS(16,17)
4778 +#define GDM_PID2_RXID_SEL BITS(2,3)
4779 +#define GDM_PID1_RXID_SEL BITS(0,1)
4781 +#define GDM_PRI_PID (0)
4782 +#define GDM_PRI_VLAN_PID (1)
4783 +#define GDM_PRI_ACK_PID (2)
4784 +#define GDM_PRI_VLAN_ACK_PID (3)
4785 +#define GDM_PRI_ACK_VLAN_PID (4)
4787 +#define SET_GDM_VLAN_PRI_RXID_SEL(x,y) \
4788 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4789 + *addr &= ~(0x03 << (((x) << 1)+16)); \
4790 + *addr |= ((y) & 0x3) << (((x) << 1)+16); \
4792 +#define SET_GDM_TCP_ACK_RXID_SEL(x) \
4793 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4794 + *addr &= ~(GDM_TCP_ACK_RXID_SEL); \
4795 + *addr |= ((x) & 0x3) << 4; \
4797 +#define SET_GDM_TCP_ACK_WZPC(x) \
4798 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4799 + *addr &= ~(GDM_TCP_ACK_WZPC); \
4800 + *addr |= ((x) & 0x1) << 3; \
4802 +#define SET_GDM_RXID_PRI_SEL(x) \
4803 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4804 + *addr &= ~(GDM_RXID_PRI_SEL); \
4805 + *addr |= (x) & 0x7; \
4807 +#define GDM_STAG_RXID_SEL(x,y) \
4808 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4809 + *addr &= ~(0x03 << (((x) << 1)+16)); \
4810 + *addr |= ((y) & 0x3) << (((x) << 1)+16); \
4812 +#define SET_GDM_PID2_RXID_SEL(x) \
4813 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4814 + *addr &= ~(GDM_PID2_RXID_SEL); \
4815 + *addr |= ((x) & 0x3) << 2; \
4817 +#define SET_GDM_PID1_RXID_SEL(x) \
4818 + { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4819 + *addr &= ~(GDM_PID1_RXID_SEL); \
4820 + *addr |= ((x) & 0x3); \
4822 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
4823 +/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
4824 +#define PORT0_PKCOUNT (0xb01100e8)
4825 +#define PORT1_PKCOUNT (0xb01100ec)
4826 +#define PORT2_PKCOUNT (0xb01100f0)
4827 +#define PORT3_PKCOUNT (0xb01100f4)
4828 +#define PORT4_PKCOUNT (0xb01100f8)
4829 +#define PORT5_PKCOUNT (0xb01100fc)
4831 +#if defined (CONFIG_ARCH_MT7623)
4832 +#include "sync_write.h"
4833 +#define sysRegRead(phys) (*(volatile unsigned int *)((phys)))
4834 +#define sysRegWrite(phys, val) mt65xx_reg_sync_writel((val), (phys))
4836 +#define PHYS_TO_K1(physaddr) KSEG1ADDR(physaddr)
4837 +#define sysRegRead(phys) (*(volatile unsigned int *)PHYS_TO_K1(phys))
4838 +#define sysRegWrite(phys, val) ((*(volatile unsigned int *)PHYS_TO_K1(phys)) = (val))
4841 +#define u_long unsigned long
4842 +#define u32 unsigned int
4843 +#define u16 unsigned short
4846 +/* ====================================== */
4847 +#define GDM1_DISPAD BIT(18)
4848 +#define GDM1_DISCRC BIT(17)
4850 +//GDMA1 uni-cast frames destination port
4851 +#define GDM1_ICS_EN (0x1 << 22)
4852 +#define GDM1_TCS_EN (0x1 << 21)
4853 +#define GDM1_UCS_EN (0x1 << 20)
4854 +#define GDM1_JMB_EN (0x1 << 19)
4855 +#define GDM1_STRPCRC (0x1 << 16)
4856 +#define GDM1_UFRC_P_CPU (0 << 12)
4857 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4858 +#define GDM1_UFRC_P_PPE (4 << 12)
4860 +#define GDM1_UFRC_P_PPE (6 << 12)
4863 +//GDMA1 broad-cast MAC address frames
4864 +#define GDM1_BFRC_P_CPU (0 << 8)
4865 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4866 +#define GDM1_BFRC_P_PPE (4 << 8)
4868 +#define GDM1_BFRC_P_PPE (6 << 8)
4871 +//GDMA1 multi-cast MAC address frames
4872 +#define GDM1_MFRC_P_CPU (0 << 4)
4873 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4874 +#define GDM1_MFRC_P_PPE (4 << 4)
4876 +#define GDM1_MFRC_P_PPE (6 << 4)
4879 +//GDMA1 other MAC address frames destination port
4880 +#define GDM1_OFRC_P_CPU (0 << 0)
4881 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4882 +#define GDM1_OFRC_P_PPE (4 << 0)
4884 +#define GDM1_OFRC_P_PPE (6 << 0)
4887 +#if defined (CONFIG_RALINK_RT6856) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4888 +/* checksum generator registers are removed */
4889 +#define ICS_GEN_EN (0 << 2)
4890 +#define UCS_GEN_EN (0 << 1)
4891 +#define TCS_GEN_EN (0 << 0)
4893 +#define ICS_GEN_EN (1 << 2)
4894 +#define UCS_GEN_EN (1 << 1)
4895 +#define TCS_GEN_EN (1 << 0)
4899 +#define MDIO_CFG_GP1_FC_TX (1 << 11)
4900 +#define MDIO_CFG_GP1_FC_RX (1 << 10)
4902 +/* ====================================== */
4903 +/* ====================================== */
4904 +#define GP1_LNK_DWN BIT(9)
4905 +#define GP1_AN_FAIL BIT(8)
4906 +/* ====================================== */
4907 +/* ====================================== */
4908 +#define PSE_RESET BIT(0)
4909 +/* ====================================== */
4910 +#define PST_DRX_IDX3 BIT(19)
4911 +#define PST_DRX_IDX2 BIT(18)
4912 +#define PST_DRX_IDX1 BIT(17)
4913 +#define PST_DRX_IDX0 BIT(16)
4914 +#define PST_DTX_IDX3 BIT(3)
4915 +#define PST_DTX_IDX2 BIT(2)
4916 +#define PST_DTX_IDX1 BIT(1)
4917 +#define PST_DTX_IDX0 BIT(0)
4919 +#define RX_2B_OFFSET BIT(31)
4920 +#define DESC_32B_EN BIT(8)
4921 +#define TX_WB_DDONE BIT(6)
4922 +#define RX_DMA_BUSY BIT(3)
4923 +#define TX_DMA_BUSY BIT(1)
4924 +#define RX_DMA_EN BIT(2)
4925 +#define TX_DMA_EN BIT(0)
4927 +#define PDMA_BT_SIZE_4DWORDS (0<<4)
4928 +#define PDMA_BT_SIZE_8DWORDS (1<<4)
4929 +#define PDMA_BT_SIZE_16DWORDS (2<<4)
4930 +#define PDMA_BT_SIZE_32DWORDS (3<<4)
4932 +#define ADMA_RX_BT_SIZE_4DWORDS (0<<11)
4933 +#define ADMA_RX_BT_SIZE_8DWORDS (1<<11)
4934 +#define ADMA_RX_BT_SIZE_16DWORDS (2<<11)
4935 +#define ADMA_RX_BT_SIZE_32DWORDS (3<<11)
4940 +#define MACCFG_RXEN (1<<2)
4941 +#define MACCFG_TXEN (1<<3)
4942 +#define MACCFG_PROMISC (1<<18)
4943 +#define MACCFG_RXMCAST (1<<19)
4944 +#define MACCFG_FDUPLEX (1<<20)
4945 +#define MACCFG_PORTSEL (1<<27)
4946 +#define MACCFG_HBEATDIS (1<<28)
4949 +#define DMACTL_SR (1<<1) /* Start/Stop Receive */
4950 +#define DMACTL_ST (1<<13) /* Start/Stop Transmission Command */
4952 +#define DMACFG_SWR (1<<0) /* Software Reset */
4953 +#define DMACFG_BURST32 (32<<8)
4955 +#define DMASTAT_TS 0x00700000 /* Transmit Process State */
4956 +#define DMASTAT_RS 0x000e0000 /* Receive Process State */
4958 +#define MACCFG_INIT 0 //(MACCFG_FDUPLEX) // | MACCFG_PORTSEL)
4962 +/* Descriptor bits.
4964 +#define R_OWN 0x80000000 /* Own Bit */
4965 +#define RD_RER 0x02000000 /* Receive End Of Ring */
4966 +#define RD_LS 0x00000100 /* Last Descriptor */
4967 +#define RD_ES 0x00008000 /* Error Summary */
4968 +#define RD_CHAIN 0x01000000 /* Chained */
4971 +#define T_OWN 0x80000000 /* Own Bit */
4972 +#define TD_ES 0x00008000 /* Error Summary */
4975 +#define TD_LS 0x40000000 /* Last Segment */
4976 +#define TD_FS 0x20000000 /* First Segment */
4977 +#define TD_TER 0x08000000 /* Transmit End Of Ring */
4978 +#define TD_CHAIN 0x01000000 /* Chained */
4981 +#define TD_SET 0x08000000 /* Setup Packet */
4984 +#define POLL_DEMAND 1
4986 +#define RSTCTL (0x34)
4987 +#define RSTCTL_RSTENET1 (1<<19)
4988 +#define RSTCTL_RSTENET2 (1<<20)
4990 +#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG 0xff908000
4991 +#define INIT_VALUE_OF_PSE_FQFC_CFG 0x80504000
4992 +#define INIT_VALUE_OF_FORCE_100_FD 0x1001BC01
4993 +#define INIT_VALUE_OF_FORCE_1000_FD 0x1F01DC01
4995 +// Define Whole FE Reset Register
4996 +#define RSTCTRL (RALINK_SYSCTL_BASE + 0x34)
4997 +#define RT2880_AGPIOCFG_REG (RALINK_SYSCTL_BASE + 0x3C)
4999 +/*=========================================
5000 + PDMA RX Descriptor Format define
5001 +=========================================*/
5003 +//-------------------------------------------------
5004 +typedef struct _PDMA_RXD_INFO1_ PDMA_RXD_INFO1_T;
5006 +struct _PDMA_RXD_INFO1_
5008 + unsigned int PDP0;
5010 +//-------------------------------------------------
5011 +typedef struct _PDMA_RXD_INFO2_ PDMA_RXD_INFO2_T;
5013 +struct _PDMA_RXD_INFO2_
5015 +#if defined (CONFIG_ARCH_MT7623)
5016 + unsigned int PLEN1 : 2;
5017 + unsigned int LRO_AGG_CNT : 8;
5018 + unsigned int REV : 5;
5020 + unsigned int PLEN1 : 14;
5021 + unsigned int LS1 : 1;
5022 +#endif /* CONFIG_RAETH_HW_LRO */
5023 + unsigned int TAG : 1;
5024 + unsigned int PLEN0 : 14;
5025 + unsigned int LS0 : 1;
5026 + unsigned int DDONE_bit : 1;
5028 +//-------------------------------------------------
5029 +typedef struct _PDMA_RXD_INFO3_ PDMA_RXD_INFO3_T;
5031 +struct _PDMA_RXD_INFO3_
5033 + unsigned int VID:16;
5034 + unsigned int TPID:16;
5036 +//-------------------------------------------------
5037 +typedef struct _PDMA_RXD_INFO4_ PDMA_RXD_INFO4_T;
5039 +struct _PDMA_RXD_INFO4_
5041 +#if defined (CONFIG_RALINK_MT7620)
5042 + unsigned int FOE_Entry : 14;
5043 + unsigned int CRSN : 5;
5044 + unsigned int SPORT : 3;
5045 + unsigned int L4F : 1;
5046 + unsigned int L4VLD : 1;
5047 + unsigned int TACK : 1;
5048 + unsigned int IP4F : 1;
5049 + unsigned int IP4 : 1;
5050 + unsigned int IP6 : 1;
5051 + unsigned int UN_USE1 : 4;
5052 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5053 + unsigned int FOE_Entry : 14;
5054 + unsigned int CRSN : 5;
5055 + unsigned int SP : 4;
5056 + unsigned int L4F : 1;
5057 + unsigned int L4VLD : 1;
5058 + unsigned int TACK : 1;
5059 + unsigned int IP4F : 1;
5060 + unsigned int IP4 : 1;
5061 + unsigned int IP6 : 1;
5062 + unsigned int UN_USE1 : 3;
5064 + unsigned int FOE_Entry : 14;
5065 + unsigned int FVLD : 1;
5066 + unsigned int UN_USE1 : 1;
5067 + unsigned int AI : 8;
5068 + unsigned int SP : 3;
5069 + unsigned int AIS : 1;
5070 + unsigned int L4F : 1;
5071 + unsigned int IPF : 1;
5072 + unsigned int L4FVLD_bit : 1;
5073 + unsigned int IPFVLD_bit : 1;
5078 +struct PDMA_rxdesc {
5079 + PDMA_RXD_INFO1_T rxd_info1;
5080 + PDMA_RXD_INFO2_T rxd_info2;
5081 + PDMA_RXD_INFO3_T rxd_info3;
5082 + PDMA_RXD_INFO4_T rxd_info4;
5083 +#ifdef CONFIG_32B_DESC
5084 + unsigned int rxd_info5;
5085 + unsigned int rxd_info6;
5086 + unsigned int rxd_info7;
5087 + unsigned int rxd_info8;
5091 +/*=========================================
5092 + PDMA TX Descriptor Format define
5093 +=========================================*/
5094 +//-------------------------------------------------
5095 +typedef struct _PDMA_TXD_INFO1_ PDMA_TXD_INFO1_T;
5097 +struct _PDMA_TXD_INFO1_
5099 + unsigned int SDP0;
5101 +//-------------------------------------------------
5102 +typedef struct _PDMA_TXD_INFO2_ PDMA_TXD_INFO2_T;
5104 +struct _PDMA_TXD_INFO2_
5106 + unsigned int SDL1 : 14;
5107 + unsigned int LS1_bit : 1;
5108 + unsigned int BURST_bit : 1;
5109 + unsigned int SDL0 : 14;
5110 + unsigned int LS0_bit : 1;
5111 + unsigned int DDONE_bit : 1;
5113 +//-------------------------------------------------
5114 +typedef struct _PDMA_TXD_INFO3_ PDMA_TXD_INFO3_T;
5116 +struct _PDMA_TXD_INFO3_
5118 + unsigned int SDP1;
5120 +//-------------------------------------------------
5121 +typedef struct _PDMA_TXD_INFO4_ PDMA_TXD_INFO4_T;
5123 +struct _PDMA_TXD_INFO4_
5125 +#if defined (CONFIG_RALINK_MT7620)
5126 + unsigned int VPRI_VIDX : 8;
5127 + unsigned int SIDX : 4;
5128 + unsigned int INSP : 1;
5129 + unsigned int RESV : 2;
5130 + unsigned int UDF : 5;
5131 + unsigned int FP_BMAP : 8;
5132 + unsigned int TSO : 1;
5133 + unsigned int TUI_CO : 3;
5134 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5135 + unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
5136 + unsigned int RESV : 2;
5137 + unsigned int UDF : 6;
5138 + unsigned int FPORT : 3;
5139 + unsigned int TSO : 1;
5140 + unsigned int TUI_CO : 3;
5142 + unsigned int VPRI_VIDX : 8;
5143 + unsigned int SIDX : 4;
5144 + unsigned int INSP : 1;
5145 + unsigned int RESV : 1;
5146 + unsigned int UN_USE3 : 2;
5147 + unsigned int QN : 3;
5148 + unsigned int UN_USE2 : 1;
5149 + unsigned int UDF : 4;
5150 + unsigned int PN : 3;
5151 + unsigned int UN_USE1 : 1;
5152 + unsigned int TSO : 1;
5153 + unsigned int TUI_CO : 3;
5158 +struct PDMA_txdesc {
5159 + PDMA_TXD_INFO1_T txd_info1;
5160 + PDMA_TXD_INFO2_T txd_info2;
5161 + PDMA_TXD_INFO3_T txd_info3;
5162 + PDMA_TXD_INFO4_T txd_info4;
5163 +#ifdef CONFIG_32B_DESC
5164 + unsigned int txd_info5;
5165 + unsigned int txd_info6;
5166 + unsigned int txd_info7;
5167 + unsigned int txd_info8;
5172 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5173 +/*=========================================
5174 + QDMA TX Descriptor Format define
5175 +=========================================*/
5176 +//-------------------------------------------------
5177 +typedef struct _QDMA_TXD_INFO1_ QDMA_TXD_INFO1_T;
5179 +struct _QDMA_TXD_INFO1_
5183 +//-------------------------------------------------
5184 +typedef struct _QDMA_TXD_INFO2_ QDMA_TXD_INFO2_T;
5186 +struct _QDMA_TXD_INFO2_
5190 +//-------------------------------------------------
5191 +typedef struct _QDMA_TXD_INFO3_ QDMA_TXD_INFO3_T;
5193 +struct _QDMA_TXD_INFO3_
5195 + unsigned int QID : 4;
5196 +#if defined (CONFIG_HW_SFQ)
5197 + //unsigned int VQID : 10;
5198 + unsigned int PROT : 3;
5199 + unsigned int IPOFST : 7;
5201 + unsigned int RESV : 10;
5203 + unsigned int SWC_bit : 1;
5204 + unsigned int BURST_bit : 1;
5205 + unsigned int SDL : 14;
5206 + unsigned int LS_bit : 1;
5207 + unsigned int OWN_bit : 1;
5209 +//-------------------------------------------------
5210 +typedef struct _QDMA_TXD_INFO4_ QDMA_TXD_INFO4_T;
5212 +struct _QDMA_TXD_INFO4_
5214 + unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
5215 +#if defined (CONFIG_RALINK_MT7621)
5216 + unsigned int RESV : 2;
5217 + unsigned int UDF : 6;
5218 +#elif defined(CONFIG_ARCH_MT7623)
5219 + unsigned int VQID0 : 1;
5220 + unsigned int RESV : 7;
5222 + unsigned int FPORT : 3;
5223 + unsigned int TSO : 1;
5224 + unsigned int TUI_CO : 3;
5228 +struct QDMA_txdesc {
5229 + QDMA_TXD_INFO1_T txd_info1;
5230 + QDMA_TXD_INFO2_T txd_info2;
5231 + QDMA_TXD_INFO3_T txd_info3;
5232 + QDMA_TXD_INFO4_T txd_info4;
5233 +#ifdef CONFIG_32B_DESC
5234 + unsigned int txd_info5;
5235 + unsigned int txd_info6;
5236 + unsigned int txd_info7;
5237 + unsigned int txd_info8;
5242 +#if defined (CONFIG_ARCH_MT7623)
5243 +#define phys_to_bus(a) (a)
5245 +#define phys_to_bus(a) (a & 0x1FFFFFFF)
5248 +#define PHY_Enable_Auto_Nego 0x1000
5249 +#define PHY_Restart_Auto_Nego 0x0200
5251 +/* PHY_STAT_REG = 1; */
5252 +#define PHY_Auto_Neco_Comp 0x0020
5253 +#define PHY_Link_Status 0x0004
5255 +/* PHY_AUTO_NEGO_REG = 4; */
5256 +#define PHY_Cap_10_Half 0x0020
5257 +#define PHY_Cap_10_Full 0x0040
5258 +#define PHY_Cap_100_Half 0x0080
5259 +#define PHY_Cap_100_Full 0x0100
5261 +/* proc definition */
5263 +#if !defined (CONFIG_RALINK_RT6855) && !defined(CONFIG_RALINK_RT6855A) && \
5264 + !defined (CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7621) && \
5265 + !defined (CONFIG_ARCH_MT7623)
5266 +#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x4c)
5267 +#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x50)
5268 +#define PPE_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x54)
5269 +#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x58)
5272 +#define PROCREG_CONTROL_FILE "/var/run/procreg_control"
5273 +#if defined (CONFIG_RALINK_RT2880)
5274 +#define PROCREG_DIR "rt2880"
5275 +#elif defined (CONFIG_RALINK_RT3052)
5276 +#define PROCREG_DIR "rt3052"
5277 +#elif defined (CONFIG_RALINK_RT3352)
5278 +#define PROCREG_DIR "rt3352"
5279 +#elif defined (CONFIG_RALINK_RT5350)
5280 +#define PROCREG_DIR "rt5350"
5281 +#elif defined (CONFIG_RALINK_RT2883)
5282 +#define PROCREG_DIR "rt2883"
5283 +#elif defined (CONFIG_RALINK_RT3883)
5284 +#define PROCREG_DIR "rt3883"
5285 +#elif defined (CONFIG_RALINK_RT6855)
5286 +#define PROCREG_DIR "rt6855"
5287 +#elif defined (CONFIG_RALINK_MT7620)
5288 +#define PROCREG_DIR "mt7620"
5289 +#elif defined (CONFIG_RALINK_MT7621)
5290 +#define PROCREG_DIR "mt7621"
5291 +#elif defined (CONFIG_ARCH_MT7623)
5292 +#define PROCREG_DIR "mt7623"
5293 +#elif defined (CONFIG_RALINK_MT7628)
5294 +#define PROCREG_DIR "mt7628"
5295 +#elif defined (CONFIG_RALINK_RT6855A)
5296 +#define PROCREG_DIR "rt6855a"
5298 +#define PROCREG_DIR "rt2880"
5300 +#define PROCREG_SKBFREE "skb_free"
5301 +#define PROCREG_TXRING "tx_ring"
5302 +#define PROCREG_RXRING "rx_ring"
5303 +#define PROCREG_RXRING1 "rx_ring1"
5304 +#define PROCREG_RXRING2 "rx_ring2"
5305 +#define PROCREG_RXRING3 "rx_ring3"
5306 +#define PROCREG_NUM_OF_TXD "num_of_txd"
5307 +#define PROCREG_TSO_LEN "tso_len"
5308 +#define PROCREG_LRO_STATS "lro_stats"
5309 +#define PROCREG_HW_LRO_STATS "hw_lro_stats"
5310 +#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb"
5311 +#define PROCREG_GMAC "gmac"
5312 +#define PROCREG_GMAC2 "gmac2"
5313 +#define PROCREG_CP0 "cp0"
5314 +#define PROCREG_RAQOS "qos"
5315 +#define PROCREG_READ_VAL "regread_value"
5316 +#define PROCREG_WRITE_VAL "regwrite_value"
5317 +#define PROCREG_ADDR "reg_addr"
5318 +#define PROCREG_CTL "procreg_control"
5319 +#define PROCREG_RXDONE_INTR "rxdone_intr_count"
5320 +#define PROCREG_ESW_INTR "esw_intr_count"
5321 +#define PROCREG_ESW_CNT "esw_cnt"
5322 +#define PROCREG_SNMP "snmp"
5323 +#if defined (TASKLET_WORKQUEUE_SW)
5324 +#define PROCREG_SCHE "schedule"
5326 +#define PROCREG_QDMA "qdma"
5327 +#if defined(CONFIG_RAETH_PDMA_DVT)
5328 +#define PROCREG_PDMA_DVT "pdma_dvt"
5329 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
5330 +struct rt2880_reg_op_data {
5332 + unsigned int reg_addr;
5334 + unsigned int reg_value;
5337 +#ifdef CONFIG_RAETH_LRO
5338 +struct lro_counters {
5339 + u32 lro_aggregated;
5344 +struct lro_para_struct {
5345 + unsigned int lan_ip1;
5348 +#endif // CONFIG_RAETH_LRO //
5351 +#if defined (CONFIG_HW_SFQ)
5358 + uint16_t vlan_tag;
5359 + uint16_t vlan1_gap;
5361 + uint16_t vlan2_gap;
5363 + uint16_t vlan_layer;
5366 + uint32_t pppoe_gap;
5368 + uint16_t pppoe_sid;
5371 + uint16_t eth_type;
5373 + struct ipv6hdr ip6h;
5379 + uint32_t pkt_type;
5384 +typedef struct end_device
5387 + unsigned int tx_cpu_owner_idx0;
5388 + unsigned int rx_cpu_owner_idx0;
5389 + unsigned int fe_int_status;
5390 + unsigned int tx_full;
5392 +#if !defined (CONFIG_RAETH_QDMA)
5393 + unsigned int phy_tx_ring0;
5396 + struct sk_buff *free_skb[NUM_TX_DESC];
5397 + unsigned int tx_dma_ptr;
5398 + unsigned int tx_cpu_ptr;
5399 + unsigned int free_txd_num;
5400 + unsigned int free_txd_head;
5401 + unsigned int free_txd_tail;
5402 + struct QDMA_txdesc *txd_pool;
5403 + dma_addr_t phy_txd_pool;
5404 + unsigned int txd_pool_info[NUM_TX_DESC];
5405 + struct QDMA_txdesc *free_head;
5406 + unsigned int phy_free_head;
5407 + unsigned int *free_page_head;
5408 + unsigned int phy_free_page_head;
5409 + struct PDMA_rxdesc *qrx_ring;
5410 + unsigned int phy_qrx_ring;
5411 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
5412 + unsigned int phy_tx_ring0;
5416 + unsigned int phy_rx_ring0, phy_rx_ring1, phy_rx_ring2, phy_rx_ring3;
5418 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || \
5419 + defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || \
5420 + defined(CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || \
5421 + defined(CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
5422 + defined (CONFIG_ARCH_MT7623)
5423 + //send signal to user application to notify link status changed
5424 + struct work_struct kill_sig_wq;
5427 + struct work_struct reset_task;
5428 +#ifdef WORKQUEUE_BH
5429 + struct work_struct rx_wq;
5431 +#if defined (TASKLET_WORKQUEUE_SW)
5432 + struct work_struct rx_wq;
5434 + struct tasklet_struct rx_tasklet;
5435 + struct tasklet_struct tx_tasklet;
5436 +#endif // WORKQUEUE_BH //
5438 +#if defined(CONFIG_RAETH_QOS)
5439 + struct sk_buff * skb_free[NUM_TX_RINGS][NUM_TX_DESC];
5440 + unsigned int free_idx[NUM_TX_RINGS];
5442 + struct sk_buff* skb_free[NUM_TX_DESC];
5443 + unsigned int free_idx;
5446 + struct net_device_stats stat; /* The new statistics table. */
5447 + spinlock_t page_lock; /* Page register locks */
5448 + struct PDMA_txdesc *tx_ring0;
5449 +#if defined(CONFIG_RAETH_QOS)
5450 + struct PDMA_txdesc *tx_ring1;
5451 + struct PDMA_txdesc *tx_ring2;
5452 + struct PDMA_txdesc *tx_ring3;
5454 + struct PDMA_rxdesc *rx_ring0;
5455 + struct sk_buff *netrx0_skbuf[NUM_RX_DESC];
5456 +#if defined (CONFIG_RAETH_HW_LRO)
5457 + struct PDMA_rxdesc *rx_ring3;
5458 + struct sk_buff *netrx3_skbuf[NUM_RX_DESC];
5459 + struct PDMA_rxdesc *rx_ring2;
5460 + struct sk_buff *netrx2_skbuf[NUM_RX_DESC];
5461 + struct PDMA_rxdesc *rx_ring1;
5462 + struct sk_buff *netrx1_skbuf[NUM_RX_DESC];
5463 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
5464 + struct PDMA_rxdesc *rx_ring1;
5465 + struct sk_buff *netrx1_skbuf[NUM_RX_DESC];
5466 +#if defined(CONFIG_ARCH_MT7623)
5467 + struct PDMA_rxdesc *rx_ring2;
5468 + struct sk_buff *netrx2_skbuf[NUM_RX_DESC];
5469 + struct PDMA_rxdesc *rx_ring3;
5470 + struct sk_buff *netrx3_skbuf[NUM_RX_DESC];
5471 +#endif /* CONFIG_ARCH_MT7623 */
5473 +#ifdef CONFIG_RAETH_NAPI
5475 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5476 + struct napi_struct napi;
5479 +#ifdef CONFIG_PSEUDO_SUPPORT
5480 + struct net_device *PseudoDev;
5481 + unsigned int isPseudo;
5483 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
5484 + struct mii_if_info mii_info;
5486 +#ifdef CONFIG_RAETH_LRO
5487 + struct lro_counters lro_counters;
5488 + struct net_lro_mgr lro_mgr;
5489 + struct net_lro_desc lro_arr[8];
5491 +#ifdef CONFIG_RAETH_HW_VLAN_RX
5492 + struct vlan_group *vlgrp;
5494 +#if defined (CONFIG_RAETH_HW_LRO)
5495 + struct work_struct hw_lro_wq;
5496 + unsigned int hw_lro_pkt_interval[3];
5497 + unsigned int hw_lro_alpha; /* 0 < packet interval alpha <= 10 */
5498 + unsigned int hw_lro_fix_setting; /* 0: dynamical AGG/AGE time, 1: fixed AGG/AGE time */
5499 +#endif /* CONFIG_RAETH_HW_LRO */
5500 +} END_DEVICE, *pEND_DEVICE;
5503 +#define RAETH_VERSION "v3.1"
5507 +#define DMA_GLO_CFG PDMA_GLO_CFG
5509 +#if defined(CONFIG_RAETH_QDMATX_QDMARX)
5510 +#define GDMA1_FWD_PORT 0x5555
5511 +#define GDMA2_FWD_PORT 0x5555
5512 +#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
5513 +#define GDMA1_FWD_PORT 0x5555
5514 +#define GDMA2_FWD_PORT 0x5555
5516 +#define GDMA1_FWD_PORT 0x0000
5517 +#define GDMA2_FWD_PORT 0x0000
5520 +#if defined(CONFIG_RAETH_QDMATX_QDMARX)
5521 +#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
5522 +#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
5523 +#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
5524 +#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
5525 +#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
5527 +#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
5528 +#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
5530 +#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
5531 +#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
5532 +#define RAETH_FE_INT_STATUS FE_INT_STATUS
5533 +#define RAETH_FE_INT_ALL FE_INT_ALL
5534 +#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
5535 +#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
5536 +#define RAETH_FE_INT_SETTING RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3
5537 +#define QFE_INT_SETTING RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3
5538 +#define RAETH_TX_DLY_INT TX_DLY_INT
5539 +#define RAETH_TX_DONE_INT0 TX_DONE_INT0
5540 +#define RAETH_DLY_INT_CFG DLY_INT_CFG
5542 +++ b/drivers/net/ethernet/raeth/ra_ethtool.c
5544 +#include <linux/module.h>
5545 +#include <linux/version.h>
5547 +#include <linux/kernel.h>
5548 +#include <linux/sched.h>
5550 +#include <linux/netdevice.h>
5551 +#include <linux/etherdevice.h>
5552 +#include <linux/skbuff.h>
5553 +#include <linux/if_ether.h>
5554 +#include <linux/ethtool.h>
5556 +#include "ra2882ethreg.h"
5557 +#include "raether.h"
5558 +#include "ra_mac.h"
5559 +#include "ra_ethtool.h"
5561 +#define RAETHER_DRIVER_NAME "raether"
5562 +#define RA_NUM_STATS 4
5566 + const char str[ETH_GSTRING_LEN];
5567 +} ethtool_stats_keys[] = {
5574 +unsigned char get_current_phy_address(void)
5576 + struct net_device *cur_dev_p;
5577 + END_DEVICE *ei_local;
5579 + for(cur_dev_p=dev_base; cur_dev_p!=NULL; cur_dev_p=cur_dev_p->next){
5580 + if (strncmp(cur_dev_p->name, DEV_NAME /* "eth2" usually */, 4) == 0)
5584 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5585 + cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
5587 + cur_dev_p = dev_get_by_name(DEV_NAME);
5592 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5593 + ei_local = netdev_priv(cur_dev_p);
5595 + ei_local = cur_dev_p->priv;
5597 + return ei_local->mii_info.phy_id;
5600 +static u32 et_get_tx_csum(struct net_device *dev)
5602 + return (sysRegRead(GDMA1_FWD_CFG) & GDM1_DISCRC) ? 0 : 1; // a pitfall here, "0" means to enable.
5605 +static u32 et_get_rx_csum(struct net_device *dev)
5607 + return (sysRegRead(GDMA1_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
5610 +static int et_set_tx_csum(struct net_device *dev, u32 data)
5613 + //printk("et_set_tx_csum(): data = %d\n", data);
5615 + value = sysRegRead(GDMA1_FWD_CFG);
5617 + value |= GDM1_DISCRC;
5619 + value &= ~GDM1_DISCRC;
5621 + sysRegWrite(GDMA1_FWD_CFG, value);
5625 +static int et_set_rx_csum(struct net_device *dev, u32 data)
5628 + //printk("et_set_rx_csum(): data = %d\n", data);
5630 + value = sysRegRead(GDMA1_FWD_CFG);
5632 + value |= GDM1_STRPCRC;
5634 + value &= ~GDM1_STRPCRC;
5636 + sysRegWrite(GDMA1_FWD_CFG, value);
5641 +#define MII_CR_ADDR 0x00
5642 +#define MII_CR_MR_AUTONEG_ENABLE (1 << 12)
5643 +#define MII_CR_MR_RESTART_NEGOTIATION (1 << 9)
5645 +#define AUTO_NEGOTIATION_ADVERTISEMENT 0x04
5646 +#define AN_PAUSE (1 << 10)
5648 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5649 +static void et_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5653 + END_DEVICE *ei_local = dev->priv;
5655 + // get mii auto-negotiation register
5656 + mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5657 + epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; //get autonet_enable flag bit
5659 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5660 + epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
5661 + epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
5663 + //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5666 +static int et_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5670 + END_DEVICE *ei_local = dev->priv;
5672 + //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5675 + mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5676 + if(epause->autoneg)
5677 + mii_an_reg |= AN_PAUSE;
5679 + mii_an_reg &= ~AN_PAUSE;
5680 + mii_mgr_write(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
5683 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5684 + if(epause->tx_pause)
5685 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
5687 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
5688 + if(epause->rx_pause)
5689 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
5691 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
5692 + sysRegWrite(MDIO_CFG, mdio_cfg_reg);
5697 +static int et_nway_reset(struct net_device *dev)
5699 + END_DEVICE *ei_local = dev->priv;
5700 + return mii_nway_restart(&ei_local->mii_info);
5704 +static u32 et_get_link(struct net_device *dev)
5706 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5707 + END_DEVICE *ei_local = netdev_priv(dev);
5709 + END_DEVICE *ei_local = dev->priv;
5711 + return mii_link_ok(&ei_local->mii_info);
5714 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5715 +static int et_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5717 + END_DEVICE *ei_local = dev->priv;
5719 + rc = mii_ethtool_sset(&ei_local->mii_info, cmd);
5724 +static int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5726 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5727 + END_DEVICE *ei_local = netdev_priv(dev);
5729 + END_DEVICE *ei_local = dev->priv;
5731 + mii_ethtool_gset(&ei_local->mii_info, cmd);
5735 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5736 +static u32 et_get_msglevel(struct net_device *dev)
5741 +static void et_set_msglevel(struct net_device *dev, u32 datum)
5746 +static void et_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5748 +	//END_DEVICE *ei_local = dev->priv;
5749 +	strcpy(info->driver, RAETHER_DRIVER_NAME); /* NOTE(review): kernel convention for ethtool_drvinfo is strlcpy() into the fixed-size fields; plain strcpy is only safe while these literals stay shorter than the field sizes */
5750 +	strcpy(info->version, RAETH_VERSION);
5751 +	strcpy(info->bus_info, "n/a");
5752 +	info->n_stats = RA_NUM_STATS;
5753 +	info->eedump_len = 0;
5754 +	info->regdump_len = 0;
5757 +static int et_get_stats_count(struct net_device *dev)
5759 + return RA_NUM_STATS;
5762 +static void et_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
5764 +// END_DEVICE *ei_local = dev->priv;
5765 + data[0] = 0;//np->xstats.early_rx;
5766 + data[1] = 0;//np->xstats.tx_buf_mapped;
5767 + data[2] = 0;//np->xstats.tx_timeouts;
5768 + data[3] = 0;//np->xstats.rx_lost_in_ring;
5771 +static void et_get_strings(struct net_device *dev, u32 stringset, u8 *data)
5773 + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
5778 + * mii_mgr_read wrapper for mii.o ethtool
5780 +int mdio_read(struct net_device *dev, int phy_id, int location)
5782 + unsigned int result;
5783 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5784 + END_DEVICE *ei_local = netdev_priv(dev);
5786 + END_DEVICE *ei_local = dev->priv;
5788 + mii_mgr_read( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, &result);
5789 + //printk("\n%s mii.o query= phy_id:%d, address:%d retval:%x\n", dev->name, phy_id, location, result);
5790 + return (int)result;
5794 + * mii_mgr_write wrapper for mii.o ethtool
5796 +void mdio_write(struct net_device *dev, int phy_id, int location, int value)
5798 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5799 + END_DEVICE *ei_local = netdev_priv(dev);
5801 + END_DEVICE *ei_local = dev->priv;
5803 + //printk("mii.o write= phy_id:%d, address:%d value:%x\n", phy_id, location, value);
5804 + mii_mgr_write( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
5808 +struct ethtool_ops ra_ethtool_ops = {
5810 + .get_settings = et_get_settings,
5811 + .get_link = et_get_link,
5812 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5813 + .get_drvinfo = et_get_drvinfo,
5814 + .set_settings = et_set_settings,
5815 + .get_pauseparam = et_get_pauseparam,
5816 + .set_pauseparam = et_set_pauseparam,
5817 +// .get_rx_csum = et_get_rx_csum,
5818 +// .set_rx_csum = et_set_rx_csum,
5819 +// .get_tx_csum = et_get_tx_csum,
5820 +// .set_tx_csum = et_set_tx_csum,
5821 + .nway_reset = et_nway_reset,
5822 + .get_msglevel = et_get_msglevel,
5823 + .set_msglevel = et_set_msglevel,
5824 + .get_strings = et_get_strings,
5825 + .get_stats_count = et_get_stats_count,
5826 + .get_ethtool_stats = et_get_ethtool_stats,
5827 +/* .get_regs_len = et_get_regs_len,
5828 + .get_regs = et_get_regs,
5833 +#ifdef CONFIG_PSEUDO_SUPPORT
5835 + * We unable to re-use the Raether functions because it is hard to tell
5836 + * where the calling from is. From eth2 or eth3?
5838 + * These code size is around 950 bytes.
5840 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5841 +static void et_virt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5843 + // PSEUDO_ADAPTER *pseudo = dev->priv;
5844 + return et_get_drvinfo(dev, info);
5847 +static void et_virt_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5849 + int mii_an_reg, mdio_cfg_reg;
5850 + PSEUDO_ADAPTER *pseudo = dev->priv;
5852 + // get mii auto-negotiation register
5853 + mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5854 + epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; //get autonet_enable flag bit
5856 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5857 + epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
5858 + epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
5860 + //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5863 +static int et_virt_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5867 + PSEUDO_ADAPTER *pseudo = dev->priv;
5869 + //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5871 + mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5872 + if(epause->autoneg)
5873 + mii_an_reg |= AN_PAUSE;
5875 + mii_an_reg &= ~AN_PAUSE;
5876 + mii_mgr_write(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
5879 + mdio_cfg_reg = sysRegRead(MDIO_CFG);
5880 + if(epause->tx_pause)
5881 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
5883 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
5884 + if(epause->rx_pause)
5885 + mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
5887 + mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
5888 + sysRegWrite(MDIO_CFG, mdio_cfg_reg);
5893 +static u32 et_virt_get_tx_csum(struct net_device *dev)
5895 + return (sysRegRead(GDMA2_FWD_CFG) & GDM1_DISCRC) ? 0 : 1; // a pitfall here, "0" means to enable.
5898 +static u32 et_virt_get_rx_csum(struct net_device *dev)
5900 + return (sysRegRead(GDMA2_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
5903 +static int et_virt_set_tx_csum(struct net_device *dev, u32 data)
5906 +	//printk("et_set_tx_csum(): data = %d\n", data);
5907 +	value = sysRegRead(GDMA2_FWD_CFG);
5909 +		value |= GDM1_DISCRC;
5911 +		value &= ~GDM1_DISCRC;
5912 +	sysRegWrite(GDMA1_FWD_CFG, value); /* BUG(review): the value is read from GDMA2_FWD_CFG above but written back to GDMA1_FWD_CFG — looks like a copy-paste error from et_set_tx_csum(); presumably this should write GDMA2_FWD_CFG for the virtual (second GMAC) interface. Confirm against the frame-engine register map before fixing. */
5916 +static int et_virt_set_rx_csum(struct net_device *dev, u32 data)
5919 +	//printk("et_set_rx_csum(): data = %d\n", data);
5920 +	value = sysRegRead(GDMA2_FWD_CFG);
5922 +		value |= GDM1_STRPCRC;
5924 +		value &= ~GDM1_STRPCRC;
5925 +	sysRegWrite(GDMA1_FWD_CFG, value); /* BUG(review): same read-GDMA2/write-GDMA1 mismatch as et_virt_set_tx_csum() — the modified GDMA2_FWD_CFG value is written to GDMA1_FWD_CFG, silently clobbering GMAC1 config instead of updating GMAC2; presumably should be GDMA2_FWD_CFG. */
5929 +static int et_virt_nway_reset(struct net_device *dev)
5931 + PSEUDO_ADAPTER *pseudo = dev->priv;
5932 + return mii_nway_restart(&pseudo->mii_info);
5936 +static u32 et_virt_get_link(struct net_device *dev)
5938 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5939 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
5941 + PSEUDO_ADAPTER *pseudo = dev->priv;
5943 + return mii_link_ok(&pseudo->mii_info);
5946 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5947 +static int et_virt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5949 + PSEUDO_ADAPTER *pseudo = dev->priv;
5950 + int rc = mii_ethtool_sset(&pseudo->mii_info, cmd);
5955 +static int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5957 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5958 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
5960 + PSEUDO_ADAPTER *pseudo = dev->priv;
5962 + mii_ethtool_gset(&pseudo->mii_info, cmd);
5965 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5966 +static u32 et_virt_get_msglevel(struct net_device *dev)
5971 +static void et_virt_set_msglevel(struct net_device *dev, u32 datum)
5976 +static void et_virt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
5978 +// PSEUDO_ADAPTER *pseudo = dev->priv;
5979 + data[0] = 0;//np->xstats.early_rx;
5980 + data[1] = 0;//np->xstats.tx_buf_mapped;
5981 + data[2] = 0;//np->xstats.tx_timeouts;
5982 + data[3] = 0;//np->xstats.rx_lost_in_ring;
5985 +/* for virtual interface dedicated */
5986 +#define RA_VIRT_NUM_STATS 4
5988 + const char str[ETH_GSTRING_LEN];
5989 +} ethtool_stats_keys_2[] = {
5996 +static int et_virt_get_stats_count(struct net_device *dev)
5998 + return RA_VIRT_NUM_STATS;
6001 +static void et_virt_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6003 + memcpy(data, ethtool_stats_keys_2, sizeof(ethtool_stats_keys_2));
6007 +struct ethtool_ops ra_virt_ethtool_ops = {
6008 + .get_settings = et_virt_get_settings,
6009 + .get_link = et_virt_get_link,
6010 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
6011 + .get_drvinfo = et_virt_get_drvinfo,
6012 + .set_settings = et_virt_set_settings,
6013 + .get_pauseparam = et_virt_get_pauseparam,
6014 + .set_pauseparam = et_virt_set_pauseparam,
6015 + .get_rx_csum = et_virt_get_rx_csum,
6016 + .set_rx_csum = et_virt_set_rx_csum,
6017 + .get_tx_csum = et_virt_get_tx_csum,
6018 + .set_tx_csum = et_virt_set_tx_csum,
6019 + .nway_reset = et_virt_nway_reset,
6020 + .get_msglevel = et_virt_get_msglevel,
6021 + .set_msglevel = et_virt_set_msglevel,
6022 + .get_strings = et_virt_get_strings,
6023 + .get_stats_count = et_virt_get_stats_count,
6024 + .get_ethtool_stats = et_virt_get_ethtool_stats,
6025 +/* .get_regs_len = et_virt_get_regs_len,
6026 + .get_regs = et_virt_get_regs,
6031 +int mdio_virt_read(struct net_device *dev, int phy_id, int location)
6033 + unsigned int result;
6034 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6035 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
6037 + PSEUDO_ADAPTER *pseudo = dev->priv;
6039 + mii_mgr_read( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, &result);
6040 +// printk("%s mii.o query= phy_id:%d, address:%d retval:%d\n", dev->name, phy_id, location, result);
6041 + return (int)result;
6044 +void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value)
6046 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6047 + PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
6049 + PSEUDO_ADAPTER *pseudo = dev->priv;
6051 +// printk("mii.o write= phy_id:%d, address:%d value:%d\n", phy_id, location, value);
6052 + mii_mgr_write( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
6056 +#endif /* CONFIG_PSEUDO_SUPPORT */
6060 +++ b/drivers/net/ethernet/raeth/ra_ethtool.h
6062 +#ifndef RA_ETHTOOL_H
6063 +#define RA_ETHTOOL_H
6065 +/* ethtool related */
6066 +unsigned char get_current_phy_address(void);
6067 +int mdio_read(struct net_device *dev, int phy_id, int location);
6068 +void mdio_write(struct net_device *dev, int phy_id, int location, int value);
6070 +/* for pseudo interface */
6071 +int mdio_virt_read(struct net_device *dev, int phy_id, int location);
6072 +void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value);
6076 +++ b/drivers/net/ethernet/raeth/ra_ioctl.h
6078 +#ifndef _RAETH_IOCTL_H
6079 +#define _RAETH_IOCTL_H
6081 +/* ioctl commands */
6082 +#define RAETH_ESW_REG_READ 0x89F1
6083 +#define RAETH_ESW_REG_WRITE 0x89F2
6084 +#define RAETH_MII_READ 0x89F3
6085 +#define RAETH_MII_WRITE 0x89F4
6086 +#define RAETH_ESW_INGRESS_RATE 0x89F5
6087 +#define RAETH_ESW_EGRESS_RATE 0x89F6
6088 +#define RAETH_ESW_PHY_DUMP 0x89F7
6089 +#define RAETH_QDMA_REG_READ 0x89F8
6090 +#define RAETH_QDMA_REG_WRITE 0x89F9
6091 +#define RAETH_QDMA_QUEUE_MAPPING 0x89FA
6092 +#define RAETH_QDMA_READ_CPU_CLK 0x89FB
6093 +#define RAETH_MII_READ_CL45 0x89FC
6094 +#define RAETH_MII_WRITE_CL45 0x89FD
6095 +#if defined(CONFIG_HW_SFQ)
6096 +#define RAETH_QDMA_SFQ_WEB_ENABLE 0x89FE
6099 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6100 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
6101 + defined (CONFIG_ARCH_MT7623)
6103 +#define REG_ESW_WT_MAC_MFC 0x10
6104 +#define REG_ESW_ISC 0x18
6105 +#define REG_ESW_WT_MAC_ATA1 0x74
6106 +#define REG_ESW_WT_MAC_ATA2 0x78
6107 +#define REG_ESW_WT_MAC_ATWD 0x7C
6108 +#define REG_ESW_WT_MAC_ATC 0x80
6110 +#define REG_ESW_TABLE_TSRA1 0x84
6111 +#define REG_ESW_TABLE_TSRA2 0x88
6112 +#define REG_ESW_TABLE_ATRD 0x8C
6115 +#define REG_ESW_VLAN_VTCR 0x90
6116 +#define REG_ESW_VLAN_VAWD1 0x94
6117 +#define REG_ESW_VLAN_VAWD2 0x98
6120 +#define REG_ESW_VLAN_ID_BASE 0x100
6122 +//#define REG_ESW_VLAN_ID_BASE 0x50
6123 +#define REG_ESW_VLAN_MEMB_BASE 0x70
6124 +#define REG_ESW_TABLE_SEARCH 0x24
6125 +#define REG_ESW_TABLE_STATUS0 0x28
6126 +#define REG_ESW_TABLE_STATUS1 0x2C
6127 +#define REG_ESW_TABLE_STATUS2 0x30
6128 +#define REG_ESW_WT_MAC_AD0 0x34
6129 +#define REG_ESW_WT_MAC_AD1 0x38
6130 +#define REG_ESW_WT_MAC_AD2 0x3C
6133 +/* rt3052 embedded ethernet switch registers */
6134 +#define REG_ESW_VLAN_ID_BASE 0x50
6135 +#define REG_ESW_VLAN_MEMB_BASE 0x70
6136 +#define REG_ESW_TABLE_SEARCH 0x24
6137 +#define REG_ESW_TABLE_STATUS0 0x28
6138 +#define REG_ESW_TABLE_STATUS1 0x2C
6139 +#define REG_ESW_TABLE_STATUS2 0x30
6140 +#define REG_ESW_WT_MAC_AD0 0x34
6141 +#define REG_ESW_WT_MAC_AD1 0x38
6142 +#define REG_ESW_WT_MAC_AD2 0x3C
6146 +#if defined(CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6147 +#define REG_ESW_MAX 0x16C
6148 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6149 + defined (CONFIG_RALINK_MT7620)
6150 +#define REG_ESW_MAX 0x7FFFF
6151 +#else //RT305x, RT3350
6152 +#define REG_ESW_MAX 0xFC
6154 +#define REG_HQOS_MAX 0x3FFF
6157 +typedef struct rt3052_esw_reg {
6162 +typedef struct ralink_mii_ioctl_data {
6170 +} ra_mii_ioctl_data;
6172 +typedef struct rt335x_esw_reg {
6173 + unsigned int on_off;
6174 + unsigned int port;
6175 + unsigned int bw;/*Mbps*/
6181 +++ b/drivers/net/ethernet/raeth/ra_mac.c
6183 +#include <linux/module.h>
6184 +#include <linux/version.h>
6185 +#include <linux/kernel.h>
6186 +#include <linux/sched.h>
6187 +#include <linux/types.h>
6188 +#include <linux/fcntl.h>
6189 +#include <linux/interrupt.h>
6190 +#include <linux/ptrace.h>
6191 +#include <linux/ioport.h>
6192 +#include <linux/in.h>
6193 +#include <linux/slab.h>
6194 +#include <linux/string.h>
6195 +#include <linux/signal.h>
6196 +#include <linux/irq.h>
6197 +#include <linux/ctype.h>
6198 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,4)
6199 +#include <asm/system.h>
6200 +#include <linux/mca.h>
6202 +#include <asm/io.h>
6203 +#include <asm/bitops.h>
6204 +#include <asm/io.h>
6205 +#include <asm/dma.h>
6207 +#include <asm/rt2880/surfboardint.h> /* for cp0 reg access, added by bobtseng */
6209 +#include <linux/errno.h>
6210 +#include <linux/init.h>
6212 +#include <linux/netdevice.h>
6213 +#include <linux/etherdevice.h>
6214 +#include <linux/skbuff.h>
6216 +#include <linux/init.h>
6217 +#include <linux/module.h>
6218 +#include <linux/proc_fs.h>
6219 +#include <asm/uaccess.h>
6221 +#include <linux/seq_file.h>
6224 +#if defined(CONFIG_RAETH_LRO)
6225 +#include <linux/inet_lro.h>
6228 +#include "ra2882ethreg.h"
6229 +#include "raether.h"
6230 +#include "ra_mac.h"
6231 +#include "ra_ethtool.h"
6232 +#if defined(CONFIG_RAETH_PDMA_DVT)
6233 +#include "dvt/raether_pdma_dvt.h"
6234 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
6236 +extern struct net_device *dev_raether;
6238 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6239 + defined (CONFIG_RALINK_MT7620)
6240 +extern unsigned short p0_rx_good_cnt;
6241 +extern unsigned short p0_tx_good_cnt;
6242 +extern unsigned short p1_rx_good_cnt;
6243 +extern unsigned short p1_tx_good_cnt;
6244 +extern unsigned short p2_rx_good_cnt;
6245 +extern unsigned short p2_tx_good_cnt;
6246 +extern unsigned short p3_rx_good_cnt;
6247 +extern unsigned short p3_tx_good_cnt;
6248 +extern unsigned short p4_rx_good_cnt;
6249 +extern unsigned short p4_tx_good_cnt;
6250 +extern unsigned short p5_rx_good_cnt;
6251 +extern unsigned short p5_tx_good_cnt;
6252 +extern unsigned short p6_rx_good_cnt;
6253 +extern unsigned short p6_tx_good_cnt;
6255 +extern unsigned short p0_rx_byte_cnt;
6256 +extern unsigned short p1_rx_byte_cnt;
6257 +extern unsigned short p2_rx_byte_cnt;
6258 +extern unsigned short p3_rx_byte_cnt;
6259 +extern unsigned short p4_rx_byte_cnt;
6260 +extern unsigned short p5_rx_byte_cnt;
6261 +extern unsigned short p6_rx_byte_cnt;
6262 +extern unsigned short p0_tx_byte_cnt;
6263 +extern unsigned short p1_tx_byte_cnt;
6264 +extern unsigned short p2_tx_byte_cnt;
6265 +extern unsigned short p3_tx_byte_cnt;
6266 +extern unsigned short p4_tx_byte_cnt;
6267 +extern unsigned short p5_tx_byte_cnt;
6268 +extern unsigned short p6_tx_byte_cnt;
6270 +#if defined(CONFIG_RALINK_MT7620)
6271 +extern unsigned short p7_rx_good_cnt;
6272 +extern unsigned short p7_tx_good_cnt;
6273 +extern unsigned short p7_rx_byte_cnt;
6274 +extern unsigned short p7_tx_byte_cnt;
6280 +#if defined(CONFIG_RAETH_TSO)
6281 +int txd_cnt[MAX_SKB_FRAGS/2 + 1];
6285 +#if defined(CONFIG_RAETH_LRO)
6286 +#define MAX_AGGR 64
6288 +int lro_stats_cnt[MAX_AGGR + 1];
6289 +int lro_flush_cnt[MAX_AGGR + 1];
6290 +int lro_len_cnt1[16];
6291 +//int lro_len_cnt2[16];
6292 +int aggregated[MAX_DESC];
6293 +int lro_aggregated;
6301 +#if defined(CONFIG_RAETH_HW_LRO)
6302 +#define HW_LRO_RING_NUM 3
6303 +#define MAX_HW_LRO_AGGR 64
6304 +unsigned int hw_lro_agg_num_cnt[HW_LRO_RING_NUM][MAX_HW_LRO_AGGR + 1];
6305 +unsigned int hw_lro_agg_size_cnt[HW_LRO_RING_NUM][16];
6306 +unsigned int hw_lro_tot_agg_cnt[HW_LRO_RING_NUM];
6307 +unsigned int hw_lro_tot_flush_cnt[HW_LRO_RING_NUM];
6308 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
6309 +unsigned int hw_lro_agg_flush_cnt[HW_LRO_RING_NUM];
6310 +unsigned int hw_lro_age_flush_cnt[HW_LRO_RING_NUM];
6311 +unsigned int hw_lro_seq_flush_cnt[HW_LRO_RING_NUM];
6312 +unsigned int hw_lro_timestamp_flush_cnt[HW_LRO_RING_NUM];
6313 +unsigned int hw_lro_norule_flush_cnt[HW_LRO_RING_NUM];
6314 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
6315 +#endif /* CONFIG_RAETH_HW_LRO */
6317 +#if defined(CONFIG_RAETH_QDMA)
6318 +extern unsigned int M2Q_table[64];
6319 +extern struct QDMA_txdesc *free_head;
6321 +#if defined (CONFIG_ARCH_MT7623)
6322 +extern struct SFQ_table *sfq0;
6323 +extern struct SFQ_table *sfq1;
6324 +extern struct SFQ_table *sfq2;
6325 +extern struct SFQ_table *sfq3;
6328 +#if defined(CONFIG_USER_SNMPD)
6330 +static int ra_snmp_seq_show(struct seq_file *seq, void *v)
6332 +#if !defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7628)
6334 + seq_printf(seq, "rx counters: %x %x %x %x %x %x %x\n", sysRegRead(GDMA_RX_GBCNT0), sysRegRead(GDMA_RX_GPCNT0),sysRegRead(GDMA_RX_OERCNT0), sysRegRead(GDMA_RX_FERCNT0), sysRegRead(GDMA_RX_SERCNT0), sysRegRead(GDMA_RX_LERCNT0), sysRegRead(GDMA_RX_CERCNT0));
6336 + seq_printf(seq, "fc config: %x %x %x %x\n", sysRegRead(CDMA_FC_CFG), sysRegRead(GDMA1_FC_CFG), PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
6338 + seq_printf(seq, "scheduler: %x %x %x\n", sysRegRead(GDMA1_SCH_CFG), sysRegRead(GDMA2_SCH_CFG), sysRegRead(PDMA_SCH_CFG));
6341 + seq_printf(seq, "ports: %x %x %x %x %x %x\n", sysRegRead(PORT0_PKCOUNT), sysRegRead(PORT1_PKCOUNT), sysRegRead(PORT2_PKCOUNT), sysRegRead(PORT3_PKCOUNT), sysRegRead(PORT4_PKCOUNT), sysRegRead(PORT5_PKCOUNT));
6346 +static int ra_snmp_seq_open(struct inode *inode, struct file *file)
6348 + return single_open(file, ra_snmp_seq_show, NULL);
6351 +static const struct file_operations ra_snmp_seq_fops = {
6352 + .owner = THIS_MODULE,
6353 + .open = ra_snmp_seq_open,
6355 + .llseek = seq_lseek,
6356 + .release = single_release
6361 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_100PHY) || \
6362 + defined (CONFIG_P5_MAC_TO_PHY_MODE) || defined (CONFIG_RAETH_GMAC2)
6363 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6364 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
6365 + defined (CONFIG_ARCH_MT7623)
6366 +void enable_auto_negotiate(int unused)
6369 +#if !defined (CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
6370 + u32 addr = CONFIG_MAC_TO_GIGAPHY_MODE_ADDR;
6373 +#if defined (CONFIG_RALINK_MT7621)
6374 + //enable MDIO mode all the time
6375 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
6376 + regValue &= ~(0x3 << 12);
6377 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
6380 + /* FIXME: we don't know how to deal with PHY end addr */
6381 + regValue = sysRegRead(ESW_PHY_POLLING);
6382 + regValue |= (1<<31);
6383 + regValue &= ~(0x1f);
6384 + regValue &= ~(0x1f<<8);
6385 +#if defined (CONFIG_RALINK_MT7620)
6386 + regValue |= ((addr-1) << 0);//setup PHY address for auto polling (Start Addr).
6387 + regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
6388 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
6389 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)|| defined (CONFIG_GE_RGMII_INTERNAL_P4_AN) || defined (CONFIG_GE2_RGMII_AN)
6390 + regValue |= ((CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2-1)&0x1f << 0);//setup PHY address for auto polling (Start Addr).
6391 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
6393 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 0);//setup PHY address for auto polling (Start Addr).
6394 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
6397 + regValue |= (addr << 0);// setup PHY address for auto polling (start Addr).
6398 + regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
6401 + /*kurtis: AN is strange*/
6402 + sysRegWrite(ESW_PHY_POLLING, regValue);
6404 +#if defined (CONFIG_P4_MAC_TO_PHY_MODE)
6405 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;
6407 +#if defined (CONFIG_P5_MAC_TO_PHY_MODE)
6408 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x56330;
6411 +#elif defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT3883) || \
6412 + defined (CONFIG_RALINK_RT3052) || defined(CONFIG_RALINK_RT3352)
6414 +void enable_auto_negotiate(int ge)
6416 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
6417 + u32 regValue = sysRegRead(0xb01100C8);
6420 + regValue = (ge == 2)? sysRegRead(MDIO_CFG2) : sysRegRead(MDIO_CFG);
6423 + regValue &= 0xe0ff7fff; // clear auto polling related field:
6424 + // (MD_PHY1ADDR & GP1_FRC_EN).
6425 + regValue |= 0x20000000; // force to enable MDC/MDIO auto polling.
6427 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_GE2_MII_AN)
6429 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 24); // setup PHY address for auto polling.
6432 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_GE1_MII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
6434 + regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 24); // setup PHY address for auto polling.
6438 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
6439 + sysRegWrite(0xb01100C8, regValue);
6442 + sysRegWrite(MDIO_CFG2, regValue);
6444 + sysRegWrite(MDIO_CFG, regValue);
6449 +void ra2880stop(END_DEVICE *ei_local)
6451 + unsigned int regValue;
6452 + printk("ra2880stop()...");
6454 + regValue = sysRegRead(DMA_GLO_CFG);
6455 + regValue &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
6456 + sysRegWrite(DMA_GLO_CFG, regValue);
6459 + // printk("Done0x%x...\n", readreg(DMA_GLO_CFG));
6462 +void ei_irq_clear(void)
6464 + sysRegWrite(FE_INT_STATUS, 0xFFFFFFFF);
6467 +void rt2880_gmac_hard_reset(void)
6469 +#if !defined (CONFIG_RALINK_RT6855A)
6471 + sysRegWrite(RSTCTRL, RALINK_FE_RST);
6472 + sysRegWrite(RSTCTRL, 0);
6476 +void ra2880EnableInterrupt()
6478 + unsigned int regValue = sysRegRead(FE_INT_ENABLE);
6479 + RAETH_PRINT("FE_INT_ENABLE -- : 0x%08x\n", regValue);
6480 +// regValue |= (RX_DONE_INT0 | TX_DONE_INT0);
6482 + sysRegWrite(FE_INT_ENABLE, regValue);
6485 +void ra2880MacAddressSet(unsigned char p[6])
6487 + unsigned long regValue;
6489 + regValue = (p[0] << 8) | (p[1]);
6490 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6491 + sysRegWrite(SDM_MAC_ADRH, regValue);
6492 + printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRH));
6493 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
6494 + sysRegWrite(GDMA1_MAC_ADRH, regValue);
6495 + printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
6497 + /* To keep consistency between RT6855 and RT62806, GSW should keep the register. */
6498 + sysRegWrite(SMACCR1, regValue);
6499 + printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
6500 +#elif defined (CONFIG_RALINK_MT7620)
6501 + sysRegWrite(SMACCR1, regValue);
6502 + printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
6504 + sysRegWrite(GDMA1_MAC_ADRH, regValue);
6505 + printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
6508 + regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
6509 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6510 + sysRegWrite(SDM_MAC_ADRL, regValue);
6511 + printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRL));
6512 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
6513 + sysRegWrite(GDMA1_MAC_ADRL, regValue);
6514 + printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));
6516 + /* To keep consistency between RT6855 and RT62806, GSW should keep the register. */
6517 + sysRegWrite(SMACCR0, regValue);
6518 + printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
6519 +#elif defined (CONFIG_RALINK_MT7620)
6520 + sysRegWrite(SMACCR0, regValue);
6521 + printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
6523 + sysRegWrite(GDMA1_MAC_ADRL, regValue);
6524 + printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));
6530 +#ifdef CONFIG_PSEUDO_SUPPORT
6531 +void ra2880Mac2AddressSet(unsigned char p[6])
6533 + unsigned long regValue;
6535 + regValue = (p[0] << 8) | (p[1]);
6536 + sysRegWrite(GDMA2_MAC_ADRH, regValue);
6538 + regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
6539 + sysRegWrite(GDMA2_MAC_ADRL, regValue);
6541 + printk("GDMA2_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRH));
6542 + printk("GDMA2_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRL));
6548 + * ethtool_init - Called by raeth_probe to initialize the network device
6549 + * @dev: device pointer
6551 + * ethtool_init initializes dev->priv and sets it to the END_DEVICE structure
6554 +void ethtool_init(struct net_device *dev)
6556 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
6557 + END_DEVICE *ei_local = netdev_priv(dev);
6559 + // init mii structure
6560 + ei_local->mii_info.dev = dev;
6561 + ei_local->mii_info.mdio_read = mdio_read;
6562 + ei_local->mii_info.mdio_write = mdio_write;
6563 + ei_local->mii_info.phy_id_mask = 0x1f;
6564 + ei_local->mii_info.reg_num_mask = 0x1f;
6565 + ei_local->mii_info.supports_gmii = mii_check_gmii_support(&ei_local->mii_info);
6566 + // TODO: phy_id: 0~4
6567 + ei_local->mii_info.phy_id = 1;
6573 + * Routine Name : get_ring_usage(mode, i)
6574 + * Description: calculate ring usage for tx/rx rings
6575 + * Mode 1 : Tx Ring
6576 + * Mode 2 : Rx Ring
6578 +int get_ring_usage(int mode, int i)
6580 + unsigned long tx_ctx_idx, tx_dtx_idx, tx_usage;
6581 + unsigned long rx_calc_idx, rx_drx_idx, rx_usage;
6583 + struct PDMA_rxdesc* rxring;
6584 + struct PDMA_txdesc* txring;
6586 + END_DEVICE *ei_local = netdev_priv(dev_raether);
6590 + /* cpu point to the next descriptor of rx dma ring */
6591 + rx_calc_idx = *(unsigned long*)RX_CALC_IDX0;
6592 + rx_drx_idx = *(unsigned long*)RX_DRX_IDX0;
6593 + rxring = (struct PDMA_rxdesc*)RX_BASE_PTR0;
6595 + rx_usage = (rx_drx_idx - rx_calc_idx -1 + NUM_RX_DESC) % NUM_RX_DESC;
6596 + if ( rx_calc_idx == rx_drx_idx ) {
6597 + if ( rxring[rx_drx_idx].rxd_info2.DDONE_bit == 1)
6598 + tx_usage = NUM_RX_DESC;
6608 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX0;
6609 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
6610 + txring = ei_local->tx_ring0;
6612 +#if defined(CONFIG_RAETH_QOS)
6614 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX1;
6615 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
6616 + txring = ei_local->tx_ring1;
6619 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX2;
6620 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
6621 + txring = ei_local->tx_ring2;
6624 + tx_ctx_idx = *(unsigned long*)TX_CTX_IDX3;
6625 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
6626 + txring = ei_local->tx_ring3;
6630 + printk("get_tx_idx failed %d %d\n", mode, i);
6634 + tx_usage = (tx_ctx_idx - tx_dtx_idx + NUM_TX_DESC) % NUM_TX_DESC;
6635 + if ( tx_ctx_idx == tx_dtx_idx ) {
6636 + if ( txring[tx_ctx_idx].txd_info2.DDONE_bit == 1)
6639 + tx_usage = NUM_TX_DESC;
6645 +#if defined(CONFIG_RAETH_QOS)
6646 +void dump_qos(struct seq_file *s)
6651 + seq_printf(s, "\n-----Raeth QOS -----\n\n");
6653 + for ( i = 0; i < 4; i++) {
6654 + usage = get_ring_usage(1,i);
6655 + seq_printf(s, "Tx Ring%d Usage : %d/%d\n", i, usage, NUM_TX_DESC);
6658 + usage = get_ring_usage(2,0);
6659 + seq_printf(s, "RX Usage : %d/%d\n\n", usage, NUM_RX_DESC);
6660 +#if defined (CONFIG_RALINK_MT7620)
6661 + seq_printf(s, "PSE_FQFC_CFG(0x%08x) : 0x%08x\n", PSE_FQFC_CFG, sysRegRead(PSE_FQFC_CFG));
6662 + seq_printf(s, "PSE_IQ_CFG(0x%08x) : 0x%08x\n", PSE_IQ_CFG, sysRegRead(PSE_IQ_CFG));
6663 + seq_printf(s, "PSE_QUE_STA(0x%08x) : 0x%08x\n", PSE_QUE_STA, sysRegRead(PSE_QUE_STA));
6664 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6667 + seq_printf(s, "GDMA1_FC_CFG(0x%08x) : 0x%08x\n", GDMA1_FC_CFG, sysRegRead(GDMA1_FC_CFG));
6668 + seq_printf(s, "GDMA2_FC_CFG(0x%08x) : 0x%08x\n", GDMA2_FC_CFG, sysRegRead(GDMA2_FC_CFG));
6669 + seq_printf(s, "PDMA_FC_CFG(0x%08x) : 0x%08x\n", PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
6670 + seq_printf(s, "PSE_FQ_CFG(0x%08x) : 0x%08x\n", PSE_FQ_CFG, sysRegRead(PSE_FQ_CFG));
6672 + seq_printf(s, "\n\nTX_CTX_IDX0 : 0x%08x\n", sysRegRead(TX_CTX_IDX0));
6673 + seq_printf(s, "TX_DTX_IDX0 : 0x%08x\n", sysRegRead(TX_DTX_IDX0));
6674 + seq_printf(s, "TX_CTX_IDX1 : 0x%08x\n", sysRegRead(TX_CTX_IDX1));
6675 + seq_printf(s, "TX_DTX_IDX1 : 0x%08x\n", sysRegRead(TX_DTX_IDX1));
6676 + seq_printf(s, "TX_CTX_IDX2 : 0x%08x\n", sysRegRead(TX_CTX_IDX2));
6677 + seq_printf(s, "TX_DTX_IDX2 : 0x%08x\n", sysRegRead(TX_DTX_IDX2));
6678 + seq_printf(s, "TX_CTX_IDX3 : 0x%08x\n", sysRegRead(TX_CTX_IDX3));
6679 + seq_printf(s, "TX_DTX_IDX3 : 0x%08x\n", sysRegRead(TX_DTX_IDX3));
6680 + seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", sysRegRead(RX_CALC_IDX0));
6681 + seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", sysRegRead(RX_DRX_IDX0));
6683 + seq_printf(s, "\n------------------------------\n\n");
6687 +void dump_reg(struct seq_file *s)
6689 + int fe_int_enable;
6696 +#if !defined (CONFIG_RAETH_QDMA)
6698 + int tx_base_ptr[4];
6699 + int tx_max_cnt[4];
6700 + int tx_ctx_idx[4];
6701 + int tx_dtx_idx[4];
6705 + fe_int_enable = sysRegRead(FE_INT_ENABLE);
6706 + rx_usage = get_ring_usage(2,0);
6708 + dly_int_cfg = sysRegRead(DLY_INT_CFG);
6710 +#if !defined (CONFIG_RAETH_QDMA)
6711 + tx_usage = get_ring_usage(1,0);
6713 + tx_base_ptr[0] = sysRegRead(TX_BASE_PTR0);
6714 + tx_max_cnt[0] = sysRegRead(TX_MAX_CNT0);
6715 + tx_ctx_idx[0] = sysRegRead(TX_CTX_IDX0);
6716 + tx_dtx_idx[0] = sysRegRead(TX_DTX_IDX0);
6718 + tx_base_ptr[1] = sysRegRead(TX_BASE_PTR1);
6719 + tx_max_cnt[1] = sysRegRead(TX_MAX_CNT1);
6720 + tx_ctx_idx[1] = sysRegRead(TX_CTX_IDX1);
6721 + tx_dtx_idx[1] = sysRegRead(TX_DTX_IDX1);
6723 + tx_base_ptr[2] = sysRegRead(TX_BASE_PTR2);
6724 + tx_max_cnt[2] = sysRegRead(TX_MAX_CNT2);
6725 + tx_ctx_idx[2] = sysRegRead(TX_CTX_IDX2);
6726 + tx_dtx_idx[2] = sysRegRead(TX_DTX_IDX2);
6728 + tx_base_ptr[3] = sysRegRead(TX_BASE_PTR3);
6729 + tx_max_cnt[3] = sysRegRead(TX_MAX_CNT3);
6730 + tx_ctx_idx[3] = sysRegRead(TX_CTX_IDX3);
6731 + tx_dtx_idx[3] = sysRegRead(TX_DTX_IDX3);
6734 + rx_base_ptr0 = sysRegRead(RX_BASE_PTR0);
6735 + rx_max_cnt0 = sysRegRead(RX_MAX_CNT0);
6736 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
6737 + rx_drx_idx0 = sysRegRead(RX_DRX_IDX0);
6739 + seq_printf(s, "\n\nFE_INT_ENABLE : 0x%08x\n", fe_int_enable);
6740 +#if !defined (CONFIG_RAETH_QDMA)
6741 + seq_printf(s, "TxRing PktCnt: %d/%d\n", tx_usage, NUM_TX_DESC);
6743 + seq_printf(s, "RxRing PktCnt: %d/%d\n\n", rx_usage, NUM_RX_DESC);
6744 + seq_printf(s, "DLY_INT_CFG : 0x%08x\n", dly_int_cfg);
6746 +#if !defined (CONFIG_RAETH_QDMA)
6747 + for(i=0;i<4;i++) {
6748 + seq_printf(s, "TX_BASE_PTR%d : 0x%08x\n", i, tx_base_ptr[i]);
6749 + seq_printf(s, "TX_MAX_CNT%d : 0x%08x\n", i, tx_max_cnt[i]);
6750 + seq_printf(s, "TX_CTX_IDX%d : 0x%08x\n", i, tx_ctx_idx[i]);
6751 + seq_printf(s, "TX_DTX_IDX%d : 0x%08x\n", i, tx_dtx_idx[i]);
6755 + seq_printf(s, "RX_BASE_PTR0 : 0x%08x\n", rx_base_ptr0);
6756 + seq_printf(s, "RX_MAX_CNT0 : 0x%08x\n", rx_max_cnt0);
6757 + seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", rx_calc_idx0);
6758 + seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", rx_drx_idx0);
6760 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
6761 + seq_printf(s, "The current PHY address selected by ethtool is %d\n", get_current_phy_address());
6764 +#if defined (CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
6765 + seq_printf(s, "GDMA_RX_FCCNT1(0x%08x) : 0x%08x\n\n", GDMA_RX_FCCNT1, sysRegRead(GDMA_RX_FCCNT1));
6770 +void dump_cp0(void)
6772 + printk("CP0 Register dump --\n");
6773 + printk("CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
6774 + printk("CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
6775 + printk("CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
6776 + printk("CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
6777 + printk("CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
6778 + printk("CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
6779 + printk("CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
6780 + printk("CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
6781 + printk("CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
6782 + printk("CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
6783 + printk("CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
6784 + printk("CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
6785 + printk("CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
6786 + printk("CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
6787 + printk("CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
6788 + printk("CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
6789 + printk("CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
6790 + printk("CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
6791 + printk("CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
6792 + printk("CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
6793 + printk("CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
6794 + printk("CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
6795 + printk("CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
6796 + printk("CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
6797 + printk("CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
6798 + printk("CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
6799 + printk("CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
6800 + printk("CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
6801 + printk("CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
6802 + printk("CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
6803 + printk("CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
6804 + printk("CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
6805 + printk("CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
6809 +struct proc_dir_entry *procRegDir;
6810 +static struct proc_dir_entry *procGmac, *procSysCP0, *procTxRing, *procRxRing, *procSkbFree;
6811 +#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
6812 +static struct proc_dir_entry *procGmac2;
6814 +#if defined(CONFIG_USER_SNMPD)
6815 +static struct proc_dir_entry *procRaSnmp;
6817 +#if defined(CONFIG_RAETH_TSO)
6818 +static struct proc_dir_entry *procNumOfTxd, *procTsoLen;
6821 +#if defined(CONFIG_RAETH_LRO)
6822 +static struct proc_dir_entry *procLroStats;
6824 +#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
6825 +static struct proc_dir_entry *procRxRing1, *procRxRing2, *procRxRing3;
6826 +static struct proc_dir_entry *procHwLroStats, *procHwLroAutoTlb;
6827 +const static HWLRO_DBG_FUNC hw_lro_dbg_func[] =
6829 + [0] = hwlro_agg_cnt_ctrl,
6830 + [1] = hwlro_agg_time_ctrl,
6831 + [2] = hwlro_age_time_ctrl,
6832 + [3] = hwlro_pkt_int_alpha_ctrl,
6833 + [4] = hwlro_threshold_ctrl,
6834 + [5] = hwlro_fix_setting_switch_ctrl,
6836 +#endif /* CONFIG_RAETH_HW_LRO */
6837 +#if defined (TASKLET_WORKQUEUE_SW)
6838 +static struct proc_dir_entry *procSCHE;
6841 +#if defined(CONFIG_RAETH_PDMA_DVT)
6842 +static struct proc_dir_entry *procPdmaDvt;
6844 +const static PDMA_DBG_FUNC pdma_dvt_dbg_func[] =
6846 + [0] = pdma_dvt_show_ctrl,
6847 + [1] = pdma_dvt_test_rx_ctrl,
6848 + [2] = pdma_dvt_test_tx_ctrl,
6849 + [3] = pdma_dvt_test_debug_ctrl,
6850 + [4] = pdma_dvt_test_lro_ctrl,
6852 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
6854 +int RegReadMain(struct seq_file *seq, void *v)
6860 +static void *seq_SkbFree_start(struct seq_file *seq, loff_t *pos)
6862 + if (*pos < NUM_TX_DESC)
6867 +static void *seq_SkbFree_next(struct seq_file *seq, void *v, loff_t *pos)
6870 + if (*pos >= NUM_TX_DESC)
6875 +static void seq_SkbFree_stop(struct seq_file *seq, void *v)
6877 + /* Nothing to do */
6880 +static int seq_SkbFree_show(struct seq_file *seq, void *v)
6882 + int i = *(loff_t *) v;
6883 + END_DEVICE *ei_local = netdev_priv(dev_raether);
6885 + seq_printf(seq, "%d: %08x\n",i, *(int *)&ei_local->skb_free[i]);
6890 +static const struct seq_operations seq_skb_free_ops = {
6891 + .start = seq_SkbFree_start,
6892 + .next = seq_SkbFree_next,
6893 + .stop = seq_SkbFree_stop,
6894 + .show = seq_SkbFree_show
6897 +static int skb_free_open(struct inode *inode, struct file *file)
6899 + return seq_open(file, &seq_skb_free_ops);
6902 +static const struct file_operations skb_free_fops = {
6903 + .owner = THIS_MODULE,
6904 + .open = skb_free_open,
6906 + .llseek = seq_lseek,
6907 + .release = seq_release
6910 +#if defined (CONFIG_RAETH_QDMA)
6911 +int QDMARead(struct seq_file *seq, void *v)
6913 + unsigned int temp,i;
6914 + unsigned int sw_fq, hw_fq;
6915 + unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
6916 + unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head, queue_tail;
6917 + struct net_device *dev = dev_raether;
6918 + END_DEVICE *ei_local = netdev_priv(dev);
6920 + seq_printf(seq, "==== General Information ====\n");
6921 + temp = sysRegRead(QDMA_FQ_CNT);
6922 + sw_fq = (temp&0xFFFF0000)>>16;
6923 + hw_fq = (temp&0x0000FFFF);
6924 + seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq, NUM_TX_DESC, hw_fq,NUM_QDMA_PAGE);
6925 + seq_printf(seq, "SW TXD virtual start address: 0x%08x\n", ei_local->txd_pool);
6926 + seq_printf(seq, "HW TXD virtual start address: 0x%08x\n\n", free_head);
6928 + seq_printf(seq, "==== Scheduler Information ====\n");
6929 + temp = sysRegRead(QDMA_TX_SCH);
6930 + max_en = (temp&0x00000800)>>11;
6931 + max_rate = (temp&0x000007F0)>>4;
6932 + for(i=0;i<(temp&0x0000000F);i++)
6934 + seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n", max_en, max_rate);
6935 + max_en = (temp&0x08000000)>>27;
6936 + max_rate = (temp&0x07F00000)>>20;
6937 + for(i=0;i<(temp&0x000F0000);i++)
6939 + seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n", max_en, max_rate);
6941 + seq_printf(seq, "==== Physical Queue Information ====\n");
6942 + for (queue = 0; queue < 16; queue++){
6943 + temp = sysRegRead(QTX_CFG_0 + 0x10 * queue);
6944 + tx_des_cnt = (temp & 0xffff0000) >> 16;
6945 + hw_resv = (temp & 0xff00) >> 8;
6946 + sw_resv = (temp & 0xff);
6947 + temp = sysRegRead(QTX_CFG_0 +(0x10 * queue) + 0x4);
6948 + sch = (temp >> 31) + 1 ;
6949 + min_en = (temp & 0x8000000) >> 27;
6950 + min_rate = (temp & 0x7f00000) >> 20;
6951 + for (i = 0; i< (temp & 0xf0000) >> 16; i++)
6953 + max_en = (temp & 0x800) >> 11;
6954 + max_rate = (temp & 0x7f0) >> 4;
6955 + for (i = 0; i< (temp & 0xf); i++)
6957 + weight = (temp & 0xf000) >> 12;
6958 + queue_head = sysRegRead(QTX_HEAD_0 + 0x10 * queue);
6959 + queue_tail = sysRegRead(QTX_TAIL_0 + 0x10 * queue);
6961 + seq_printf(seq, "Queue#%d Information:\n", queue);
6962 + seq_printf(seq, "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n", tx_des_cnt, queue_head, queue_tail);
6963 + seq_printf(seq, "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n", hw_resv, sw_resv, sch, weight);
6964 + seq_printf(seq, "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n", min_en, min_rate, max_en, max_rate);
6966 +#if defined (CONFIG_ARCH_MT7623) && defined(CONFIG_HW_SFQ)
6967 + seq_printf(seq, "==== Virtual Queue Information ====\n");
6968 + seq_printf(seq, "VQTX_TB_BASE_0:0x%08x;VQTX_TB_BASE_1:0x%08x;VQTX_TB_BASE_2:0x%08x;VQTX_TB_BASE_3:0x%08x\n", \
6969 + sfq0, sfq1, sfq2, sfq3);
6970 + temp = sysRegRead(VQTX_NUM);
6971 + seq_printf(seq, "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n", \
6972 + temp&0xF, (temp&0xF0)>>4, (temp&0xF00)>>8, (temp&0xF000)>>12);
6976 + seq_printf(seq, "==== Flow Control Information ====\n");
6977 + temp = sysRegRead(QDMA_FC_THRES);
6978 + seq_printf(seq, "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n", \
6979 + (temp&0x1000000)>>24, (temp&0x200000)>>25, (temp&0x30000000)>>28);
6980 + seq_printf(seq, "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n", \
6981 + (temp&0x10000)>>16, (temp&0x2000)>>17, (temp&0x300000)>>20);
6982 +#if defined (CONFIG_ARCH_MT7623)
6983 + seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n", \
6984 + (temp&0xC0000000)>>30, (temp&0x08000000)>>27);
6985 + seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n", \
6986 + (temp&0xC00000)>>22, (temp&0x080000)>>19);
6989 + seq_printf(seq, "\n==== FSM Information\n");
6990 + temp = sysRegRead(QDMA_DMA);
6991 +#if defined (CONFIG_ARCH_MT7623)
6992 + seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp&0x0F000000)>>24);
6994 + seq_printf(seq, "FQ_FSM:0x%01x\n", (temp&0x000F0000)>>16);
6995 + seq_printf(seq, "TX_FSM:0x%01x\n", (temp&0x00000F00)>>12);
6996 + seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp&0x0000000f));
6998 + seq_printf(seq, "==== M2Q Information ====\n");
6999 + for (i = 0; i < 64; i+=8){
7000 + seq_printf(seq, " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
7001 + i, M2Q_table[i], i+1, M2Q_table[i+1], i+2, M2Q_table[i+2], i+3, M2Q_table[i+3],
7002 + i+4, M2Q_table[i+4], i+5, M2Q_table[i+5], i+6, M2Q_table[i+6], i+7, M2Q_table[i+7]);
7009 +static int qdma_open(struct inode *inode, struct file *file)
7011 + return single_open(file, QDMARead, NULL);
7014 +static const struct file_operations qdma_fops = {
7015 + .owner = THIS_MODULE,
7016 + .open = qdma_open,
7018 + .llseek = seq_lseek,
7019 + .release = single_release
7023 +int TxRingRead(struct seq_file *seq, void *v)
7025 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7026 + struct PDMA_txdesc *tx_ring;
7029 + tx_ring = kmalloc(sizeof(struct PDMA_txdesc) * NUM_TX_DESC, GFP_KERNEL);
7030 + if(tx_ring==NULL){
7031 + seq_printf(seq, " allocate temp tx_ring fail.\n");
7035 + for (i=0; i < NUM_TX_DESC; i++) {
7036 + tx_ring[i] = ei_local->tx_ring0[i];
7039 + for (i=0; i < NUM_TX_DESC; i++) {
7040 +#ifdef CONFIG_32B_DESC
7041 + seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&tx_ring[i].txd_info1,
7042 + *(int *)&tx_ring[i].txd_info2, *(int *)&tx_ring[i].txd_info3,
7043 + *(int *)&tx_ring[i].txd_info4, *(int *)&tx_ring[i].txd_info5,
7044 + *(int *)&tx_ring[i].txd_info6, *(int *)&tx_ring[i].txd_info7,
7045 + *(int *)&tx_ring[i].txd_info8);
7047 + seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&tx_ring[i].txd_info1, *(int *)&tx_ring[i].txd_info2,
7048 + *(int *)&tx_ring[i].txd_info3, *(int *)&tx_ring[i].txd_info4);
7056 +static int tx_ring_open(struct inode *inode, struct file *file)
7058 +#if !defined (CONFIG_RAETH_QDMA)
7059 + return single_open(file, TxRingRead, NULL);
7061 + return single_open(file, QDMARead, NULL);
7065 +static const struct file_operations tx_ring_fops = {
7066 + .owner = THIS_MODULE,
7067 + .open = tx_ring_open,
7069 + .llseek = seq_lseek,
7070 + .release = single_release
7073 +int RxRingRead(struct seq_file *seq, void *v)
7075 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7076 + struct PDMA_rxdesc *rx_ring;
7079 + rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_RX_DESC, GFP_KERNEL);
7080 + if(rx_ring==NULL){
7081 + seq_printf(seq, " allocate temp rx_ring fail.\n");
7085 + for (i=0; i < NUM_RX_DESC; i++) {
7086 + memcpy(&rx_ring[i], &ei_local->rx_ring0[i], sizeof(struct PDMA_rxdesc));
7089 + for (i=0; i < NUM_RX_DESC; i++) {
7090 +#ifdef CONFIG_32B_DESC
7091 + seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1,
7092 + *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
7093 + *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
7094 + *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
7095 + *(int *)&rx_ring[i].rxd_info8);
7097 + seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2,
7098 + *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
7106 +static int rx_ring_open(struct inode *inode, struct file *file)
7108 + return single_open(file, RxRingRead, NULL);
7111 +static const struct file_operations rx_ring_fops = {
7112 + .owner = THIS_MODULE,
7113 + .open = rx_ring_open,
7115 + .llseek = seq_lseek,
7116 + .release = single_release
7119 +#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
7120 +int RxLRORingRead(struct seq_file *seq, void *v, struct PDMA_rxdesc *rx_ring_p)
7122 + struct PDMA_rxdesc *rx_ring;
7125 + rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC, GFP_KERNEL);
7126 + if(rx_ring==NULL){
7127 + seq_printf(seq, " allocate temp rx_ring fail.\n");
7131 + for (i=0; i < NUM_LRO_RX_DESC; i++) {
7132 + memcpy(&rx_ring[i], &rx_ring_p[i], sizeof(struct PDMA_rxdesc));
7135 + for (i=0; i < NUM_LRO_RX_DESC; i++) {
7136 +#ifdef CONFIG_32B_DESC
7137 + seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1,
7138 + *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
7139 + *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
7140 + *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
7141 + *(int *)&rx_ring[i].rxd_info8);
7143 + seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2,
7144 + *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
7152 +int RxRing1Read(struct seq_file *seq, void *v)
7154 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7155 + RxLRORingRead(seq, v, ei_local->rx_ring1);
7160 +int RxRing2Read(struct seq_file *seq, void *v)
7162 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7163 + RxLRORingRead(seq, v, ei_local->rx_ring2);
7168 +int RxRing3Read(struct seq_file *seq, void *v)
7170 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7171 + RxLRORingRead(seq, v, ei_local->rx_ring3);
7176 +static int rx_ring1_open(struct inode *inode, struct file *file)
7178 + return single_open(file, RxRing1Read, NULL);
7181 +static int rx_ring2_open(struct inode *inode, struct file *file)
7183 + return single_open(file, RxRing2Read, NULL);
7186 +static int rx_ring3_open(struct inode *inode, struct file *file)
7188 + return single_open(file, RxRing3Read, NULL);
7191 +static const struct file_operations rx_ring1_fops = {
7192 + .owner = THIS_MODULE,
7193 + .open = rx_ring1_open,
7195 + .llseek = seq_lseek,
7196 + .release = single_release
7199 +static const struct file_operations rx_ring2_fops = {
7200 + .owner = THIS_MODULE,
7201 + .open = rx_ring2_open,
7203 + .llseek = seq_lseek,
7204 + .release = single_release
7207 +static const struct file_operations rx_ring3_fops = {
7208 + .owner = THIS_MODULE,
7209 + .open = rx_ring3_open,
7211 + .llseek = seq_lseek,
7212 + .release = single_release
7214 +#endif /* CONFIG_RAETH_HW_LRO */
7216 +#if defined(CONFIG_RAETH_TSO)
7218 +int NumOfTxdUpdate(int num_of_txd)
7221 + txd_cnt[num_of_txd]++;
7226 +static void *seq_TsoTxdNum_start(struct seq_file *seq, loff_t *pos)
7228 + seq_printf(seq, "TXD | Count\n");
7229 + if (*pos < (MAX_SKB_FRAGS/2 + 1))
7234 +static void *seq_TsoTxdNum_next(struct seq_file *seq, void *v, loff_t *pos)
7237 + if (*pos >= (MAX_SKB_FRAGS/2 + 1))
7242 +static void seq_TsoTxdNum_stop(struct seq_file *seq, void *v)
7244 + /* Nothing to do */
7247 +static int seq_TsoTxdNum_show(struct seq_file *seq, void *v)
7249 + int i = *(loff_t *) v;
7250 + seq_printf(seq, "%d: %d\n",i , txd_cnt[i]);
7255 +ssize_t NumOfTxdWrite(struct file *file, const char __user *buffer,
7256 + size_t count, loff_t *data)
7258 + memset(txd_cnt, 0, sizeof(txd_cnt));
7259 + printk("clear txd cnt table\n");
7264 +int TsoLenUpdate(int tso_len)
7267 + if(tso_len > 70000) {
7269 + }else if(tso_len > 65000) {
7271 + }else if(tso_len > 60000) {
7273 + }else if(tso_len > 55000) {
7275 + }else if(tso_len > 50000) {
7277 + }else if(tso_len > 45000) {
7279 + }else if(tso_len > 40000) {
7281 + }else if(tso_len > 35000) {
7283 + }else if(tso_len > 30000) {
7285 + }else if(tso_len > 25000) {
7287 + }else if(tso_len > 20000) {
7289 + }else if(tso_len > 15000) {
7291 + }else if(tso_len > 10000) {
7293 + }else if(tso_len > 5000) {
7302 +ssize_t TsoLenWrite(struct file *file, const char __user *buffer,
7303 + size_t count, loff_t *data)
7305 + memset(tso_cnt, 0, sizeof(tso_cnt));
7306 + printk("clear tso cnt table\n");
7311 +static void *seq_TsoLen_start(struct seq_file *seq, loff_t *pos)
7313 + seq_printf(seq, " Length | Count\n");
7319 +static void *seq_TsoLen_next(struct seq_file *seq, void *v, loff_t *pos)
7327 +static void seq_TsoLen_stop(struct seq_file *seq, void *v)
7329 + /* Nothing to do */
7332 +static int seq_TsoLen_show(struct seq_file *seq, void *v)
7334 + int i = *(loff_t *) v;
7336 + seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, tso_cnt[i]);
7341 +static const struct seq_operations seq_tso_txd_num_ops = {
7342 + .start = seq_TsoTxdNum_start,
7343 + .next = seq_TsoTxdNum_next,
7344 + .stop = seq_TsoTxdNum_stop,
7345 + .show = seq_TsoTxdNum_show
7348 +static int tso_txd_num_open(struct inode *inode, struct file *file)
7350 + return seq_open(file, &seq_tso_txd_num_ops);
7353 +static struct file_operations tso_txd_num_fops = {
7354 + .owner = THIS_MODULE,
7355 + .open = tso_txd_num_open,
7357 + .llseek = seq_lseek,
7358 + .write = NumOfTxdWrite,
7359 + .release = seq_release
7362 +static const struct seq_operations seq_tso_len_ops = {
7363 + .start = seq_TsoLen_start,
7364 + .next = seq_TsoLen_next,
7365 + .stop = seq_TsoLen_stop,
7366 + .show = seq_TsoLen_show
7369 +static int tso_len_open(struct inode *inode, struct file *file)
7371 + return seq_open(file, &seq_tso_len_ops);
7374 +static struct file_operations tso_len_fops = {
7375 + .owner = THIS_MODULE,
7376 + .open = tso_len_open,
7378 + .llseek = seq_lseek,
7379 + .write = TsoLenWrite,
7380 + .release = seq_release
7384 +#if defined(CONFIG_RAETH_LRO)
7385 +static int LroLenUpdate(struct net_lro_desc *lro_desc)
7389 + if(lro_desc->ip_tot_len > 65000) {
7391 + }else if(lro_desc->ip_tot_len > 60000) {
7393 + }else if(lro_desc->ip_tot_len > 55000) {
7395 + }else if(lro_desc->ip_tot_len > 50000) {
7397 + }else if(lro_desc->ip_tot_len > 45000) {
7399 + }else if(lro_desc->ip_tot_len > 40000) {
7401 + }else if(lro_desc->ip_tot_len > 35000) {
7403 + }else if(lro_desc->ip_tot_len > 30000) {
7405 + }else if(lro_desc->ip_tot_len > 25000) {
7407 + }else if(lro_desc->ip_tot_len > 20000) {
7409 + }else if(lro_desc->ip_tot_len > 15000) {
7411 + }else if(lro_desc->ip_tot_len > 10000) {
7413 + }else if(lro_desc->ip_tot_len > 5000) {
7421 +int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed)
7423 + struct net_lro_desc *tmp;
7427 + if (all_flushed) {
7428 + for (i=0; i< MAX_DESC; i++) {
7429 + tmp = & lro_mgr->lro_arr[i];
7430 + if (tmp->pkt_aggr_cnt !=0) {
7431 + for(j=0; j<=MAX_AGGR; j++) {
7432 + if(tmp->pkt_aggr_cnt == j) {
7433 + lro_flush_cnt[j]++;
7436 + len_idx = LroLenUpdate(tmp);
7437 + lro_len_cnt1[len_idx]++;
7440 + aggregated[i] = 0;
7443 + if (lro_flushed != lro_mgr->stats.flushed) {
7444 + if (lro_aggregated != lro_mgr->stats.aggregated) {
7445 + for (i=0; i<MAX_DESC; i++) {
7446 + tmp = &lro_mgr->lro_arr[i];
7447 + if ((aggregated[i]!= tmp->pkt_aggr_cnt)
7448 + && (tmp->pkt_aggr_cnt == 0)) {
7450 + for (j=0; j<=MAX_AGGR; j++) {
7451 + if (aggregated[i] == j) {
7452 + lro_stats_cnt[j] ++;
7455 + aggregated[i] = 0;
7456 + //len_idx = LroLenUpdate(tmp);
7457 + //lro_len_cnt2[len_idx]++;
7462 + for (i=0; i<MAX_DESC; i++) {
7463 + tmp = &lro_mgr->lro_arr[i];
7464 + if ((aggregated[i] != 0) && (tmp->pkt_aggr_cnt==0)) {
7465 + for (j=0; j<=MAX_AGGR; j++) {
7466 + if (aggregated[i] == j) {
7467 + lro_stats_cnt[j] ++;
7470 + aggregated[i] = 0;
7471 + //len_idx = LroLenUpdate(tmp);
7472 + //lro_len_cnt2[len_idx]++;
7479 + if (lro_aggregated != lro_mgr->stats.aggregated) {
7480 + for (i=0; i<MAX_DESC; i++) {
7481 + tmp = &lro_mgr->lro_arr[i];
7482 + if (tmp->active) {
7483 + if (aggregated[i] != tmp->pkt_aggr_cnt)
7484 + aggregated[i] = tmp->pkt_aggr_cnt;
7486 + aggregated[i] = 0;
7493 + lro_aggregated = lro_mgr->stats.aggregated;
7494 + lro_flushed = lro_mgr->stats.flushed;
7495 + lro_nodesc = lro_mgr->stats.no_desc;
7502 +ssize_t LroStatsWrite(struct file *file, const char __user *buffer,
7503 + size_t count, loff_t *data)
7505 + memset(lro_stats_cnt, 0, sizeof(lro_stats_cnt));
7506 + memset(lro_flush_cnt, 0, sizeof(lro_flush_cnt));
7507 + memset(lro_len_cnt1, 0, sizeof(lro_len_cnt1));
7508 + //memset(lro_len_cnt2, 0, sizeof(lro_len_cnt2));
7509 + memset(aggregated, 0, sizeof(aggregated));
7510 + lro_aggregated = 0;
7516 + printk("clear lro cnt table\n");
7521 +int LroStatsRead(struct seq_file *seq, void *v)
7528 + seq_printf(seq, "LRO statistic dump:\n");
7529 + seq_printf(seq, "Cnt: Kernel | Driver\n");
7530 + for(i=0; i<=MAX_AGGR; i++) {
7531 + tot_cnt = tot_cnt + lro_stats_cnt[i] + lro_flush_cnt[i];
7532 + seq_printf(seq, " %d : %d %d\n", i, lro_stats_cnt[i], lro_flush_cnt[i]);
7533 + tot_aggr = tot_aggr + i * (lro_stats_cnt[i] + lro_flush_cnt[i]);
7535 + ave_aggr = lro_aggregated/lro_flushed;
7536 + seq_printf(seq, "Total aggregated pkt: %d\n", lro_aggregated);
7537 + seq_printf(seq, "Flushed pkt: %d %d\n", lro_flushed, force_flush);
7538 + seq_printf(seq, "Average flush cnt: %d\n", ave_aggr);
7539 + seq_printf(seq, "No descriptor pkt: %d\n\n\n", lro_nodesc);
7541 + seq_printf(seq, "Driver flush pkt len:\n");
7542 + seq_printf(seq, " Length | Count\n");
7543 + for(i=0; i<15; i++) {
7544 + seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, lro_len_cnt1[i]);
7546 + seq_printf(seq, "Kernel flush: %d; Driver flush: %d\n", tot_called2, tot_called1);
7550 +static int lro_stats_open(struct inode *inode, struct file *file)
7552 + return single_open(file, LroStatsRead, NULL);
7555 +static struct file_operations lro_stats_fops = {
7556 + .owner = THIS_MODULE,
7557 + .open = lro_stats_open,
7559 + .llseek = seq_lseek,
7560 + .write = LroStatsWrite,
7561 + .release = single_release
7565 +int getnext(const char *src, int separator, char *dest)
7570 + if ( (src == NULL) || (dest == NULL) ) {
7574 + c = strchr(src, separator);
7576 + strcpy(dest, src);
7580 + strncpy(dest, src, len);
7585 +int str_to_ip(unsigned int *ip, const char *str)
7588 + const char *ptr = str;
7590 + unsigned char c[4];
7593 + for (i = 0; i < 3; ++i) {
7594 + if ((len = getnext(ptr, '.', buf)) == -1) {
7595 + return 1; /* parse error */
7597 + c[i] = simple_strtoul(buf, NULL, 10);
7600 + c[3] = simple_strtoul(ptr, NULL, 0);
7601 + *ip = (c[0]<<24) + (c[1]<<16) + (c[2]<<8) + c[3];
7605 +#if defined(CONFIG_RAETH_HW_LRO)
7606 +static int HwLroLenUpdate(unsigned int agg_size)
7610 + if(agg_size > 65000) {
7612 + }else if(agg_size > 60000) {
7614 + }else if(agg_size > 55000) {
7616 + }else if(agg_size > 50000) {
7618 + }else if(agg_size > 45000) {
7620 + }else if(agg_size > 40000) {
7622 + }else if(agg_size > 35000) {
7624 + }else if(agg_size > 30000) {
7626 + }else if(agg_size > 25000) {
7628 + }else if(agg_size > 20000) {
7630 + }else if(agg_size > 15000) {
7632 + }else if(agg_size > 10000) {
7634 + }else if(agg_size > 5000) {
7643 +int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size)
7645 + if( (ring_num > 0) && (ring_num < 4) )
7647 + hw_lro_agg_size_cnt[ring_num-1][HwLroLenUpdate(agg_size)]++;
7648 + hw_lro_agg_num_cnt[ring_num-1][agg_cnt]++;
7649 + hw_lro_tot_flush_cnt[ring_num-1]++;
7650 + hw_lro_tot_agg_cnt[ring_num-1] += agg_cnt;
7656 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7657 +int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason)
7659 + if( (ring_num > 0) && (ring_num < 4) )
7662 + if ( (flush_reason & 0x7) == HW_LRO_AGG_FLUSH )
7663 + hw_lro_agg_flush_cnt[ring_num-1]++;
7664 + else if ( (flush_reason & 0x7) == HW_LRO_AGE_FLUSH )
7665 + hw_lro_age_flush_cnt[ring_num-1]++;
7666 + else if ( (flush_reason & 0x7) == HW_LRO_NOT_IN_SEQ_FLUSH )
7667 + hw_lro_seq_flush_cnt[ring_num-1]++;
7668 + else if ( (flush_reason & 0x7) == HW_LRO_TIMESTAMP_FLUSH )
7669 + hw_lro_timestamp_flush_cnt[ring_num-1]++;
7670 + else if ( (flush_reason & 0x7) == HW_LRO_NON_RULE_FLUSH )
7671 + hw_lro_norule_flush_cnt[ring_num-1]++;
7673 + if ( flush_reason & BIT(4) )
7674 + hw_lro_agg_flush_cnt[ring_num-1]++;
7675 + else if ( flush_reason & BIT(3) )
7676 + hw_lro_age_flush_cnt[ring_num-1]++;
7677 + else if ( flush_reason & BIT(2) )
7678 + hw_lro_seq_flush_cnt[ring_num-1]++;
7679 + else if ( flush_reason & BIT(1) )
7680 + hw_lro_timestamp_flush_cnt[ring_num-1]++;
7681 + else if ( flush_reason & BIT(0) )
7682 + hw_lro_norule_flush_cnt[ring_num-1]++;
7688 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7690 +ssize_t HwLroStatsWrite(struct file *file, const char __user *buffer,
7691 + size_t count, loff_t *data)
7693 + memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
7694 + memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
7695 + memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
7696 + memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
7697 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7698 + memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
7699 + memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
7700 + memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
7701 + memset(hw_lro_timestamp_flush_cnt, 0, sizeof(hw_lro_timestamp_flush_cnt));
7702 + memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
7703 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7705 + printk("clear hw lro cnt table\n");
7710 +int HwLroStatsRead(struct seq_file *seq, void *v)
7714 + seq_printf(seq, "HW LRO statistic dump:\n");
7716 + /* Agg number count */
7717 + seq_printf(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
7718 + for(i=0; i<=MAX_HW_LRO_AGGR; i++) {
7719 + seq_printf(seq, " %d : %d %d %d %d\n",
7720 + i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
7721 + hw_lro_agg_num_cnt[0][i]+hw_lro_agg_num_cnt[1][i]+hw_lro_agg_num_cnt[2][i]);
7724 + /* Total agg count */
7725 + seq_printf(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
7726 + seq_printf(seq, " %d %d %d %d\n",
7727 + hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1], hw_lro_tot_agg_cnt[2],
7728 + hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2]);
7730 + /* Total flush count */
7731 + seq_printf(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
7732 + seq_printf(seq, " %d %d %d %d\n",
7733 + hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1], hw_lro_tot_flush_cnt[2],
7734 + hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]);
7736 + /* Avg agg count */
7737 + seq_printf(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
7738 + seq_printf(seq, " %d %d %d %d\n",
7739 + (hw_lro_tot_flush_cnt[0]) ? hw_lro_tot_agg_cnt[0]/hw_lro_tot_flush_cnt[0] : 0,
7740 + (hw_lro_tot_flush_cnt[1]) ? hw_lro_tot_agg_cnt[1]/hw_lro_tot_flush_cnt[1] : 0,
7741 + (hw_lro_tot_flush_cnt[2]) ? hw_lro_tot_agg_cnt[2]/hw_lro_tot_flush_cnt[2] : 0,
7742 + (hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]) ? \
7743 + ((hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2])/(hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2])) : 0
7746 + /* Statistics of aggregation size counts */
7747 + seq_printf(seq, "HW LRO flush pkt len:\n");
7748 + seq_printf(seq, " Length | RING1 | RING2 | RING3 | Total\n");
7749 + for(i=0; i<15; i++) {
7750 + seq_printf(seq, "%d~%d: %d %d %d %d\n", i*5000, (i+1)*5000,
7751 + hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
7752 + hw_lro_agg_size_cnt[0][i]+hw_lro_agg_size_cnt[1][i]+hw_lro_agg_size_cnt[2][i]);
7754 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7755 + seq_printf(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
7756 + seq_printf(seq, "AGG timeout: %d %d %d %d\n",
7757 + hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1], hw_lro_agg_flush_cnt[2],
7758 + (hw_lro_agg_flush_cnt[0]+hw_lro_agg_flush_cnt[1]+hw_lro_agg_flush_cnt[2])
7760 + seq_printf(seq, "AGE timeout: %d %d %d %d\n",
7761 + hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1], hw_lro_age_flush_cnt[2],
7762 + (hw_lro_age_flush_cnt[0]+hw_lro_age_flush_cnt[1]+hw_lro_age_flush_cnt[2])
7764 + seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
7765 + hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1], hw_lro_seq_flush_cnt[2],
7766 + (hw_lro_seq_flush_cnt[0]+hw_lro_seq_flush_cnt[1]+hw_lro_seq_flush_cnt[2])
7768 + seq_printf(seq, "Timestamp: %d %d %d %d\n",
7769 + hw_lro_timestamp_flush_cnt[0], hw_lro_timestamp_flush_cnt[1], hw_lro_timestamp_flush_cnt[2],
7770 + (hw_lro_timestamp_flush_cnt[0]+hw_lro_timestamp_flush_cnt[1]+hw_lro_timestamp_flush_cnt[2])
7772 + seq_printf(seq, "No LRO rule: %d %d %d %d\n",
7773 + hw_lro_norule_flush_cnt[0], hw_lro_norule_flush_cnt[1], hw_lro_norule_flush_cnt[2],
7774 + (hw_lro_norule_flush_cnt[0]+hw_lro_norule_flush_cnt[1]+hw_lro_norule_flush_cnt[2])
7776 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7781 +static int hw_lro_stats_open(struct inode *inode, struct file *file)
7783 + return single_open(file, HwLroStatsRead, NULL);
7786 +static struct file_operations hw_lro_stats_fops = {
7787 + .owner = THIS_MODULE,
7788 + .open = hw_lro_stats_open,
7790 + .llseek = seq_lseek,
7791 + .write = HwLroStatsWrite,
7792 + .release = single_release
7795 +int hwlro_agg_cnt_ctrl(int par1, int par2)
7797 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, par2);
7798 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, par2);
7799 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, par2);
7803 +int hwlro_agg_time_ctrl(int par1, int par2)
7805 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, par2);
7806 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, par2);
7807 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, par2);
7811 +int hwlro_age_time_ctrl(int par1, int par2)
7813 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, par2);
7814 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, par2);
7815 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, par2);
7819 +int hwlro_pkt_int_alpha_ctrl(int par1, int par2)
7821 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7823 + ei_local->hw_lro_alpha = par2;
7824 + printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_alpha = %d\n", ei_local->hw_lro_alpha);
7829 +int hwlro_threshold_ctrl(int par1, int par2)
7831 + /* bandwidth threshold setting */
7832 + SET_PDMA_LRO_BW_THRESHOLD(par2);
7836 +int hwlro_fix_setting_switch_ctrl(int par1, int par2)
7838 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
7839 + END_DEVICE *ei_local = netdev_priv(dev_raether);
7841 + ei_local->hw_lro_fix_setting = par2;
7842 + printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_fix_setting = %d\n", ei_local->hw_lro_fix_setting);
7843 +#endif /* CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG */
7848 +ssize_t HwLroAutoTlbWrite(struct file *file, const char __user *buffer,
7849 + size_t count, loff_t *data)
7855 + char *pToken = NULL;
7856 + char *pDelimiter = " \t";
7858 + printk("[HwLroAutoTlbWrite]write parameter len = %d\n\r", (int)len);
7859 + if(len >= sizeof(buf)){
7860 + printk("input handling fail!\n");
7861 + len = sizeof(buf) - 1;
7865 + if(copy_from_user(buf, buffer, len)){
7869 + printk("[HwLroAutoTlbWrite]write parameter data = %s\n\r", buf);
7872 + pToken = strsep(&pBuf, pDelimiter);
7873 + x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
7875 + pToken = strsep(&pBuf, "\t\n ");
7876 + if(pToken != NULL){
7877 + y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
7878 + printk("y = 0x%08x \n\r", y);
7881 + if ( (sizeof(hw_lro_dbg_func)/sizeof(hw_lro_dbg_func[0]) > x) && NULL != hw_lro_dbg_func[x])
7883 + (*hw_lro_dbg_func[x])(x, y);
7889 +void HwLroAutoTlbDump(struct seq_file *seq, unsigned int index)
7892 + struct PDMA_LRO_AUTO_TLB_INFO pdma_lro_auto_tlb;
7893 + unsigned int tlb_info[9];
7894 + unsigned int dw_len, cnt, priority;
7895 + unsigned int entry;
7898 + index = index - 1;
7899 + entry = (index * 9) + 1;
7901 + /* read valid entries of the auto-learn table */
7902 + sysRegWrite( PDMA_FE_ALT_CF8, entry );
7904 + //seq_printf(seq, "\nEntry = %d\n", entry);
7905 + for(i=0; i<9; i++){
7906 + tlb_info[i] = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
7907 + //seq_printf(seq, "tlb_info[%d] = 0x%x\n", i, tlb_info[i]);
7909 + memcpy(&pdma_lro_auto_tlb, tlb_info, sizeof(struct PDMA_LRO_AUTO_TLB_INFO));
7911 + dw_len = pdma_lro_auto_tlb.auto_tlb_info7.DW_LEN;
7912 + cnt = pdma_lro_auto_tlb.auto_tlb_info6.CNT;
7914 + if ( sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_ALT_SCORE_MODE ) /* packet count */
7916 + else /* byte count */
7917 + priority = dw_len;
7919 + /* dump valid entries of the auto-learn table */
7921 + seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
7923 + seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
7924 + if( pdma_lro_auto_tlb.auto_tlb_info8.IPV4 ){
7925 + seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv4)\n",
7926 + pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
7927 + pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
7928 + pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
7929 + pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
7932 + seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
7933 + pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
7934 + pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
7935 + pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
7936 + pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
7938 + seq_printf(seq, "DIP_ID = %d\n", pdma_lro_auto_tlb.auto_tlb_info8.DIP_ID);
7939 + seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
7940 + pdma_lro_auto_tlb.auto_tlb_info0.STP,
7941 + pdma_lro_auto_tlb.auto_tlb_info0.DTP);
7942 + seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d \n",
7943 + pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0,
7944 + (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 12),
7945 + (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 24),
7946 + pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1);
7947 + seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
7948 + seq_printf(seq, "PRIORITY = %d\n", priority);
7951 +int HwLroAutoTlbRead(struct seq_file *seq, void *v)
7954 + unsigned int regVal;
7955 + unsigned int regOp1, regOp2, regOp3, regOp4;
7956 + unsigned int agg_cnt, agg_time, age_time;
7958 + /* Read valid entries of the auto-learn table */
7959 + sysRegWrite(PDMA_FE_ALT_CF8, 0);
7960 + regVal = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
7962 + seq_printf(seq, "HW LRO Auto-learn Table: (PDMA_LRO_ALT_CFC_RSEQ_DBG=0x%x)\n", regVal);
7964 + for(i = 7; i >= 0; i--)
7966 + if( regVal & (1 << i) )
7967 + HwLroAutoTlbDump(seq, i);
7970 + /* Read the agg_time/age_time/agg_cnt of LRO rings */
7971 + seq_printf(seq, "\nHW LRO Ring Settings\n");
7972 + for(i = 1; i <= 3; i++)
7974 + regOp1 = sysRegRead( LRO_RX_RING0_CTRL_DW1 + (i * 0x40) );
7975 + regOp2 = sysRegRead( LRO_RX_RING0_CTRL_DW2 + (i * 0x40) );
7976 + regOp3 = sysRegRead( LRO_RX_RING0_CTRL_DW3 + (i * 0x40) );
7977 + regOp4 = sysRegRead( ADMA_LRO_CTRL_DW2 );
7978 + agg_cnt = ((regOp3 & 0x03) << PDMA_LRO_AGG_CNT_H_OFFSET) | ((regOp2 >> PDMA_LRO_RING_AGG_CNT1_OFFSET) & 0x3f);
7979 + agg_time = (regOp2 >> PDMA_LRO_RING_AGG_OFFSET) & 0xffff;
7980 + age_time = ((regOp2 & 0x03f) << PDMA_LRO_AGE_H_OFFSET) | ((regOp1 >> PDMA_LRO_RING_AGE1_OFFSET) & 0x3ff);
7981 + seq_printf(seq, "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
7982 + i, agg_cnt, agg_time, age_time, regOp4);
7988 +static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
7990 + return single_open(file, HwLroAutoTlbRead, NULL);
7993 +static struct file_operations hw_lro_auto_tlb_fops = {
7994 + .owner = THIS_MODULE,
7995 + .open = hw_lro_auto_tlb_open,
7997 + .llseek = seq_lseek,
7998 + .write = HwLroAutoTlbWrite,
7999 + .release = single_release
8001 +#endif /* CONFIG_RAETH_HW_LRO */
8003 +#if defined (CONFIG_MIPS)
8004 +int CP0RegRead(struct seq_file *seq, void *v)
8006 + seq_printf(seq, "CP0 Register dump --\n");
8007 + seq_printf(seq, "CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
8008 + seq_printf(seq, "CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
8009 + seq_printf(seq, "CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
8010 + seq_printf(seq, "CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
8011 + seq_printf(seq, "CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
8012 + seq_printf(seq, "CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
8013 + seq_printf(seq, "CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
8014 + seq_printf(seq, "CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
8015 + seq_printf(seq, "CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
8016 + seq_printf(seq, "CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
8017 + seq_printf(seq, "CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
8018 + seq_printf(seq, "CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
8019 + seq_printf(seq, "CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
8020 + seq_printf(seq, "CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
8021 + seq_printf(seq, "CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
8022 + seq_printf(seq, "CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
8023 + seq_printf(seq, "CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
8024 + seq_printf(seq, "CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
8025 + seq_printf(seq, "CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
8026 + seq_printf(seq, "CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
8027 + seq_printf(seq, "CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
8028 + seq_printf(seq, "CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
8029 + seq_printf(seq, "CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
8030 + seq_printf(seq, "CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
8031 + seq_printf(seq, "CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
8032 + seq_printf(seq, "CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
8033 + seq_printf(seq, "CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
8034 + seq_printf(seq, "CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
8035 + seq_printf(seq, "CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
8036 + seq_printf(seq, "CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
8037 + seq_printf(seq, "CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
8038 + seq_printf(seq, "CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
8039 + seq_printf(seq, "CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
8044 +static int cp0_reg_open(struct inode *inode, struct file *file)
8046 + return single_open(file, CP0RegRead, NULL);
8049 +static const struct file_operations cp0_reg_fops = {
8050 + .owner = THIS_MODULE,
8051 + .open = cp0_reg_open,
8053 + .llseek = seq_lseek,
8054 + .release = single_release
8058 +#if defined(CONFIG_RAETH_QOS)
8059 +static struct proc_dir_entry *procRaQOS, *procRaFeIntr, *procRaEswIntr;
8060 +extern uint32_t num_of_rxdone_intr;
8061 +extern uint32_t num_of_esw_intr;
8063 +int RaQOSRegRead(struct seq_file *seq, void *v)
8069 +static int raeth_qos_open(struct inode *inode, struct file *file)
8071 + return single_open(file, RaQOSRegRead, NULL);
8074 +static const struct file_operations raeth_qos_fops = {
8075 + .owner = THIS_MODULE,
8076 + .open = raeth_qos_open,
8078 + .llseek = seq_lseek,
8079 + .release = single_release
8083 +static struct proc_dir_entry *procEswCnt;
8085 +int EswCntRead(struct seq_file *seq, void *v)
8087 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
8088 + unsigned int pkt_cnt = 0;
8091 + seq_printf(seq, "\n <<CPU>> \n");
8092 + seq_printf(seq, " | \n");
8093 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
8094 + seq_printf(seq, "+-----------------------------------------------+\n");
8095 + seq_printf(seq, "| <<PDMA>> |\n");
8096 + seq_printf(seq, "+-----------------------------------------------+\n");
8098 + seq_printf(seq, "+-----------------------------------------------+\n");
8099 + seq_printf(seq, "| <<PSE>> |\n");
8100 + seq_printf(seq, "+-----------------------------------------------+\n");
8101 + seq_printf(seq, " | \n");
8102 + seq_printf(seq, "+-----------------------------------------------+\n");
8103 + seq_printf(seq, "| <<GDMA>> |\n");
8104 +#if defined (CONFIG_RALINK_MT7620)
8105 + seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1304));
8106 + seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1324));
8107 + seq_printf(seq, "| |\n");
8108 + seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1308));
8109 + seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x130c));
8110 + seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1328));
8111 + seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x132c));
8112 + seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1330));
8113 + seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1334));
8114 + seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1338));
8115 + seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x133c));
8117 + seq_printf(seq, "| |\n");
8118 + seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1344));
8119 + seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1364));
8120 + seq_printf(seq, "| |\n");
8121 + seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1348));
8122 + seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x134c));
8123 + seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1368));
8124 + seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x136c));
8125 + seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1370));
8126 + seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1374));
8127 + seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1378));
8128 + seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x137c));
8129 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8130 + seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2400));
8131 + seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2408));
8132 + seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2410));
8133 + seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2414));
8134 + seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2418));
8135 + seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x241C));
8136 + seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2420));
8137 + seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2424));
8138 + seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2428));
8139 + seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x242C));
8140 + seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2430));
8141 + seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2438));
8142 + seq_printf(seq, "| |\n");
8143 + seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2440));
8144 + seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2448));
8145 + seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2450));
8146 + seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2454));
8147 + seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2458));
8148 + seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x245C));
8149 + seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2460));
8150 + seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2464));
8151 + seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2468));
8152 + seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x246C));
8153 + seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2470));
8154 + seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2478));
8156 + seq_printf(seq, "| GDMA_TX_GPCNT1 : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x704));
8157 + seq_printf(seq, "| GDMA_RX_GPCNT1 : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x724));
8158 + seq_printf(seq, "| |\n");
8159 + seq_printf(seq, "| GDMA_TX_SKIPCNT1: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x708));
8160 + seq_printf(seq, "| GDMA_TX_COLCNT1 : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x70c));
8161 + seq_printf(seq, "| GDMA_RX_OERCNT1 : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x728));
8162 + seq_printf(seq, "| GDMA_RX_FERCNT1 : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x72c));
8163 + seq_printf(seq, "| GDMA_RX_SERCNT1 : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x730));
8164 + seq_printf(seq, "| GDMA_RX_LERCNT1 : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x734));
8165 + seq_printf(seq, "| GDMA_RX_CERCNT1 : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x738));
8166 + seq_printf(seq, "| GDMA_RX_FCCNT1 : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x73c));
8169 + seq_printf(seq, "+-----------------------------------------------+\n");
8172 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8173 + defined (CONFIG_RALINK_MT7620)
8175 + seq_printf(seq, " ^ \n");
8176 + seq_printf(seq, " | Port6 Rx:%010u Good Pkt \n", ((p6_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4620)&0xFFFF)));
8177 + seq_printf(seq, " | Port6 Rx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4620)>>16);
8178 + seq_printf(seq, " | Port6 Tx:%010u Good Pkt \n", ((p6_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4610)&0xFFFF)));
8179 + seq_printf(seq, " | Port6 Tx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4610)>>16);
8180 +#if defined (CONFIG_RALINK_MT7620)
8182 + seq_printf(seq, " | Port7 Rx:%010u Good Pkt \n", ((p7_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4720)&0xFFFF)));
8183 + seq_printf(seq, " | Port7 Rx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4720)>>16);
8184 + seq_printf(seq, " | Port7 Tx:%010u Good Pkt \n", ((p7_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4710)&0xFFFF)));
8185 + seq_printf(seq, " | Port7 Tx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4710)>>16);
8187 + seq_printf(seq, "+---------------------v-------------------------+\n");
8188 + seq_printf(seq, "| P6 |\n");
8189 + seq_printf(seq, "| <<10/100/1000 Embedded Switch>> |\n");
8190 + seq_printf(seq, "| P0 P1 P2 P3 P4 P5 |\n");
8191 + seq_printf(seq, "+-----------------------------------------------+\n");
8192 + seq_printf(seq, " | | | | | | \n");
8193 +#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8194 + /* no built-in switch */
8196 + seq_printf(seq, " ^ \n");
8197 + seq_printf(seq, " | Port6 Rx:%08u Good Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)&0xFFFF);
8198 + seq_printf(seq, " | Port6 Tx:%08u Good Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)>>16);
8199 + seq_printf(seq, "+---------------------v-------------------------+\n");
8200 + seq_printf(seq, "| P6 |\n");
8201 + seq_printf(seq, "| <<10/100 Embedded Switch>> |\n");
8202 + seq_printf(seq, "| P0 P1 P2 P3 P4 P5 |\n");
8203 + seq_printf(seq, "+-----------------------------------------------+\n");
8204 + seq_printf(seq, " | | | | | | \n");
8207 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8208 + defined (CONFIG_RALINK_MT7620)
8210 + seq_printf(seq, "Port0 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p0_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4020)&0xFFFF)), ((p0_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4010)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4020)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4010)>>16);
8212 + seq_printf(seq, "Port1 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p1_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4120)&0xFFFF)), ((p1_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4110)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4120)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4110)>>16);
8214 + seq_printf(seq, "Port2 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p2_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4220)&0xFFFF)), ((p2_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4210)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4220)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4210)>>16);
8216 + seq_printf(seq, "Port3 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p3_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4320)&0xFFFF)), ((p3_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4310)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4320)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4310)>>16);
8218 + seq_printf(seq, "Port4 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p4_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4420)&0xFFFF)), ((p4_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4410)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4420)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4410)>>16);
8220 + seq_printf(seq, "Port5 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p5_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4520)&0xFFFF)), ((p5_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4510)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4520)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4510)>>16);
8222 + seq_printf(seq, "Port0 KBytes RX=%010u Tx=%010u \n", ((p0_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4028) >> 10)), ((p0_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4018) >> 10)));
8224 + seq_printf(seq, "Port1 KBytes RX=%010u Tx=%010u \n", ((p1_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4128) >> 10)), ((p1_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4118) >> 10)));
8226 + seq_printf(seq, "Port2 KBytes RX=%010u Tx=%010u \n", ((p2_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4228) >> 10)), ((p2_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4218) >> 10)));
8228 + seq_printf(seq, "Port3 KBytes RX=%010u Tx=%010u \n", ((p3_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4328) >> 10)), ((p3_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4318) >> 10)));
8230 + seq_printf(seq, "Port4 KBytes RX=%010u Tx=%010u \n", ((p4_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4428) >> 10)), ((p4_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4418) >> 10)));
8232 + seq_printf(seq, "Port5 KBytes RX=%010u Tx=%010u \n", ((p5_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4528) >> 10)), ((p5_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4518) >> 10)));
8234 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
8235 +#define DUMP_EACH_PORT(base) \
8236 + for(i=0; i < 7;i++) { \
8237 + mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
8238 + seq_printf(seq, "%8u ", pkt_cnt); \
8240 + seq_printf(seq, "\n");
8241 + seq_printf(seq, "========================================[MT7530] READ CLEAR========================\n");
8243 + seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
8244 + seq_printf(seq, "Tx Drop Packet :"); DUMP_EACH_PORT(0x4000);
8245 + //seq_printf(seq, "Tx CRC Error :"); DUMP_EACH_PORT(0x4004);
8246 + seq_printf(seq, "Tx Unicast Packet :"); DUMP_EACH_PORT(0x4008);
8247 + seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
8248 + seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
8249 + //seq_printf(seq, "Tx Collision Event :"); DUMP_EACH_PORT(0x4014);
8250 + seq_printf(seq, "Tx Pause Packet :"); DUMP_EACH_PORT(0x402C);
8251 + seq_printf(seq, "Rx Drop Packet :"); DUMP_EACH_PORT(0x4060);
8252 + seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
8253 + seq_printf(seq, "Rx Unicast Packet :"); DUMP_EACH_PORT(0x4068);
8254 + seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
8255 + seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
8256 + seq_printf(seq, "Rx Alignment Error :"); DUMP_EACH_PORT(0x4074);
8257 + seq_printf(seq, "Rx CRC Error :"); DUMP_EACH_PORT(0x4078);
8258 + seq_printf(seq, "Rx Undersize Error :"); DUMP_EACH_PORT(0x407C);
8259 + //seq_printf(seq, "Rx Fragment Error :"); DUMP_EACH_PORT(0x4080);
8260 + //seq_printf(seq, "Rx Oversize Error :"); DUMP_EACH_PORT(0x4084);
8261 + //seq_printf(seq, "Rx Jabber Error :"); DUMP_EACH_PORT(0x4088);
8262 + seq_printf(seq, "Rx Pause Packet :"); DUMP_EACH_PORT(0x408C);
8263 + mii_mgr_write(31, 0x4fe0, 0xf0);
8264 + mii_mgr_write(31, 0x4fe0, 0x800000f0);
8268 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
8269 + seq_printf(seq, "Port0 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x150)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x150)>>16);
8271 + seq_printf(seq, "Port1 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x154)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x154)>>16);
8273 + seq_printf(seq, "Port2 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x158)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x158)>>16);
8275 + seq_printf(seq, "Port3 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x15C)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x15c)>>16);
8277 + seq_printf(seq, "Port4 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x160)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x160)>>16);
8279 + seq_printf(seq, "Port5 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x164)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x164)>>16);
8280 +#elif defined (CONFIG_RALINK_RT3883)
8281 + /* no built-in switch */
8282 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8284 +#define DUMP_EACH_PORT(base) \
8285 + for(i=0; i < 7;i++) { \
8286 + mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
8287 + seq_printf(seq, "%8u ", pkt_cnt); \
8289 + seq_printf(seq, "\n");
8291 +#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
8292 +	if(sysRegRead(0xbe00000c) & (1<<16))//MCM
8295 + seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
8296 + seq_printf(seq, "Tx Drop Packet :"); DUMP_EACH_PORT(0x4000);
8297 + seq_printf(seq, "Tx CRC Error :"); DUMP_EACH_PORT(0x4004);
8298 + seq_printf(seq, "Tx Unicast Packet :"); DUMP_EACH_PORT(0x4008);
8299 + seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
8300 + seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
8301 + seq_printf(seq, "Tx Collision Event :"); DUMP_EACH_PORT(0x4014);
8302 + seq_printf(seq, "Tx Pause Packet :"); DUMP_EACH_PORT(0x402C);
8303 + seq_printf(seq, "Rx Drop Packet :"); DUMP_EACH_PORT(0x4060);
8304 + seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
8305 + seq_printf(seq, "Rx Unicast Packet :"); DUMP_EACH_PORT(0x4068);
8306 + seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
8307 + seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
8308 + seq_printf(seq, "Rx Alignment Error :"); DUMP_EACH_PORT(0x4074);
8309 + seq_printf(seq, "Rx CRC Error :"); DUMP_EACH_PORT(0x4078);
8310 + seq_printf(seq, "Rx Undersize Error :"); DUMP_EACH_PORT(0x407C);
8311 + seq_printf(seq, "Rx Fragment Error :"); DUMP_EACH_PORT(0x4080);
8312 + seq_printf(seq, "Rx Oversize Error :"); DUMP_EACH_PORT(0x4084);
8313 + seq_printf(seq, "Rx Jabber Error :"); DUMP_EACH_PORT(0x4088);
8314 + seq_printf(seq, "Rx Pause Packet :"); DUMP_EACH_PORT(0x408C);
8315 + mii_mgr_write(31, 0x4fe0, 0xf0);
8316 + mii_mgr_write(31, 0x4fe0, 0x800000f0);
8318 +#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
8320 + seq_printf(seq, "no built-in switch\n");
8324 +#else /* RT305x, RT3352 */
8325 + seq_printf(seq, "Port0: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16);
8326 + seq_printf(seq, "Port1: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16);
8327 + seq_printf(seq, "Port2: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16);
8328 + seq_printf(seq, "Port3: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16);
8329 + seq_printf(seq, "Port4: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16);
8330 + seq_printf(seq, "Port5: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16);
8332 + seq_printf(seq, "\n");
8337 +static int switch_count_open(struct inode *inode, struct file *file)
8339 + return single_open(file, EswCntRead, NULL);
8342 +static const struct file_operations switch_count_fops = {
8343 + .owner = THIS_MODULE,
8344 + .open = switch_count_open,
8346 + .llseek = seq_lseek,
8347 + .release = single_release
8350 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
8352 + * proc write procedure
8354 +static ssize_t change_phyid(struct file *file, const char __user *buffer,
8355 + size_t count, loff_t *data)
8358 + struct net_device *cur_dev_p;
8359 + END_DEVICE *ei_local;
8361 + unsigned int phy_id;
8365 + memset(buf, 0, 32);
8366 + if (copy_from_user(buf, buffer, count))
8369 + /* determine interface name */
8370 + strcpy(if_name, DEV_NAME); /* "eth2" by default */
8371 + if(isalpha(buf[0]))
8372 + sscanf(buf, "%s %d", if_name, &phy_id);
8374 + phy_id = simple_strtol(buf, 0, 10);
8375 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8376 + cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
8378 + cur_dev_p = dev_get_by_name(DEV_NAME);
8380 + if (cur_dev_p == NULL)
8383 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8384 + ei_local = netdev_priv(cur_dev_p);
8386 + ei_local = cur_dev_p->priv;
8388 + ei_local->mii_info.phy_id = (unsigned char)phy_id;
8392 +#if defined(CONFIG_PSEUDO_SUPPORT)
8393 +static ssize_t change_gmac2_phyid(struct file *file, const char __user *buffer,
8394 + size_t count, loff_t *data)
8397 + struct net_device *cur_dev_p;
8398 + PSEUDO_ADAPTER *pPseudoAd;
8400 + unsigned int phy_id;
8404 + memset(buf, 0, 32);
8405 + if (copy_from_user(buf, buffer, count))
8407 + /* determine interface name */
8408 + strcpy(if_name, DEV2_NAME); /* "eth3" by default */
8409 + if(isalpha(buf[0]))
8410 + sscanf(buf, "%s %d", if_name, &phy_id);
8412 + phy_id = simple_strtol(buf, 0, 10);
8414 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8415 + cur_dev_p = dev_get_by_name(&init_net, DEV2_NAME);
8417 +	cur_dev_p = dev_get_by_name(DEV2_NAME);
8419 + if (cur_dev_p == NULL)
8421 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8422 + pPseudoAd = netdev_priv(cur_dev_p);
8424 + pPseudoAd = cur_dev_p->priv;
8426 + pPseudoAd->mii_info.phy_id = (unsigned char)phy_id;
8430 +static struct file_operations gmac2_fops = {
8431 + .owner = THIS_MODULE,
8432 + .write = change_gmac2_phyid
8437 +static int gmac_open(struct inode *inode, struct file *file)
8439 + return single_open(file, RegReadMain, NULL);
8442 +static struct file_operations gmac_fops = {
8443 + .owner = THIS_MODULE,
8444 + .open = gmac_open,
8446 + .llseek = seq_lseek,
8447 +#if defined (CONFIG_ETHTOOL)
8448 + .write = change_phyid,
8450 + .release = single_release
8453 +#if defined (TASKLET_WORKQUEUE_SW)
8454 +extern int init_schedule;
8455 +extern int working_schedule;
8456 +static int ScheduleRead(struct seq_file *seq, void *v)
8458 + if (init_schedule == 1)
8459 + seq_printf(seq, "Initialize Raeth with workqueque<%d>\n", init_schedule);
8461 + seq_printf(seq, "Initialize Raeth with tasklet<%d>\n", init_schedule);
8462 + if (working_schedule == 1)
8463 + seq_printf(seq, "Raeth is running at workqueque<%d>\n", working_schedule);
8465 + seq_printf(seq, "Raeth is running at tasklet<%d>\n", working_schedule);
8470 +static ssize_t ScheduleWrite(struct file *file, const char __user *buffer,
8471 + size_t count, loff_t *data)
8476 + if (copy_from_user(buf, buffer, count))
8478 + old = init_schedule;
8479 + init_schedule = simple_strtol(buf, 0, 10);
8480 + printk("Change Raeth initial schedule from <%d> to <%d>\n! Not running schedule at present !\n",
8481 + old, init_schedule);
8486 +static int schedule_switch_open(struct inode *inode, struct file *file)
8488 + return single_open(file, ScheduleRead, NULL);
8491 +static const struct file_operations schedule_sw_fops = {
8492 + .owner = THIS_MODULE,
8493 + .open = schedule_switch_open,
8495 + .write = ScheduleWrite,
8496 + .llseek = seq_lseek,
8497 + .release = single_release
8501 +#if defined(CONFIG_RAETH_PDMA_DVT)
8502 +static int PdmaDvtRead(struct seq_file *seq, void *v)
8504 + seq_printf(seq, "g_pdma_dvt_show_config = 0x%x\n", pdma_dvt_get_show_config());
8505 + seq_printf(seq, "g_pdma_dvt_rx_test_config = 0x%x\n", pdma_dvt_get_rx_test_config());
8506 + seq_printf(seq, "g_pdma_dvt_tx_test_config = 0x%x\n", pdma_dvt_get_tx_test_config());
8511 +static int PdmaDvtOpen(struct inode *inode, struct file *file)
8513 + return single_open(file, PdmaDvtRead, NULL);
8516 +static ssize_t PdmaDvtWrite(struct file *file, const char __user *buffer,
8517 + size_t count, loff_t *data)
8523 + char *pToken = NULL;
8524 + char *pDelimiter = " \t";
8526 + printk("write parameter len = %d\n\r", (int)len);
8527 + if(len >= sizeof(buf)){
8528 + printk("input handling fail!\n");
8529 + len = sizeof(buf) - 1;
8533 + if(copy_from_user(buf, buffer, len)){
8537 + printk("write parameter data = %s\n\r", buf);
8540 + pToken = strsep(&pBuf, pDelimiter);
8541 + x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
8543 + pToken = strsep(&pBuf, "\t\n ");
8544 + if(pToken != NULL){
8545 + y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
8546 + printk("y = 0x%08x \n\r", y);
8549 + if ( (sizeof(pdma_dvt_dbg_func)/sizeof(pdma_dvt_dbg_func[0]) > x) && NULL != pdma_dvt_dbg_func[x])
8551 + (*pdma_dvt_dbg_func[x])(x, y);
8555 + printk("no handler defined for command id(0x%08x)\n\r", x);
8558 + printk("x(0x%08x), y(0x%08x)\n", x, y);
8563 +static const struct file_operations pdma_dev_sw_fops = {
8564 + .owner = THIS_MODULE,
8565 + .open = PdmaDvtOpen,
8567 + .write = PdmaDvtWrite
8569 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
8571 +int debug_proc_init(void)
8573 + if (procRegDir == NULL)
8574 + procRegDir = proc_mkdir(PROCREG_DIR, NULL);
8576 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8577 + if ((procGmac = create_proc_entry(PROCREG_GMAC, 0, procRegDir)))
8578 + procGmac->proc_fops = &gmac_fops;
8581 + if (!(procGmac = proc_create(PROCREG_GMAC, 0, procRegDir, &gmac_fops)))
8583 + printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC);
8584 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
8585 +#if defined(CONFIG_PSEUDO_SUPPORT)
8586 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8587 + if ((procGmac2 = create_proc_entry(PROCREG_GMAC2, 0, procRegDir)))
8588 + procGmac2->proc_fops = &gmac2_fops;
8591 + if (!(procGmac2 = proc_create(PROCREG_GMAC2, 0, procRegDir, &gmac2_fops)))
8593 + printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC2);
8597 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8598 + if ((procSkbFree = create_proc_entry(PROCREG_SKBFREE, 0, procRegDir)))
8599 + procSkbFree->proc_fops = &skb_free_fops;
8602 + if (!(procSkbFree = proc_create(PROCREG_SKBFREE, 0, procRegDir, &skb_free_fops)))
8604 + printk("!! FAIL to create %s PROC !!\n", PROCREG_SKBFREE);
8606 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8607 + if ((procTxRing = create_proc_entry(PROCREG_TXRING, 0, procRegDir)))
8608 + procTxRing->proc_fops = &tx_ring_fops;
8611 + if (!(procTxRing = proc_create(PROCREG_TXRING, 0, procRegDir, &tx_ring_fops)))
8613 + printk("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
8615 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8616 + if ((procRxRing = create_proc_entry(PROCREG_RXRING, 0, procRegDir)))
8617 + procRxRing->proc_fops = &rx_ring_fops;
8620 + if (!(procRxRing = proc_create(PROCREG_RXRING, 0, procRegDir, &rx_ring_fops)))
8622 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
8624 +#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
8625 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8626 + if ((procRxRing1 = create_proc_entry(PROCREG_RXRING1, 0, procRegDir)))
8627 + procRxRing1->proc_fops = &rx_ring1_fops;
8630 + if (!(procRxRing1 = proc_create(PROCREG_RXRING1, 0, procRegDir, &rx_ring1_fops)))
8632 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
8634 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8635 + if ((procRxRing2 = create_proc_entry(PROCREG_RXRING2, 0, procRegDir)))
8636 + procRxRing2->proc_fops = &rx_ring2_fops;
8639 + if (!(procRxRing2 = proc_create(PROCREG_RXRING2, 0, procRegDir, &rx_ring2_fops)))
8641 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
8643 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8644 + if ((procRxRing3 = create_proc_entry(PROCREG_RXRING3, 0, procRegDir)))
8645 + procRxRing3->proc_fops = &rx_ring3_fops;
8648 + if (!(procRxRing3 = proc_create(PROCREG_RXRING3, 0, procRegDir, &rx_ring3_fops)))
8650 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
8651 +#endif /* CONFIG_RAETH_HW_LRO */
8653 +#if defined (CONFIG_MIPS)
8654 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8655 + if ((procSysCP0 = create_proc_entry(PROCREG_CP0, 0, procRegDir)))
8656 + procSysCP0->proc_fops = &cp0_reg_fops;
8659 + if (!(procSysCP0 = proc_create(PROCREG_CP0, 0, procRegDir, &cp0_reg_fops)))
8661 + printk("!! FAIL to create %s PROC !!\n", PROCREG_CP0);
8664 +#if defined(CONFIG_RAETH_TSO)
8665 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8666 + if ((procNumOfTxd = create_proc_entry(PROCREG_NUM_OF_TXD, 0, procRegDir)))
8667 + procNumOfTxd->proc_fops = &tso_txd_num_fops;
8670 + if (!(procNumOfTxd = proc_create(PROCREG_NUM_OF_TXD, 0, procRegDir, &tso_txd_num_fops)))
8672 + printk("!! FAIL to create %s PROC !!\n", PROCREG_NUM_OF_TXD);
8674 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8675 + if ((procTsoLen = create_proc_entry(PROCREG_TSO_LEN, 0, procRegDir)))
8676 + procTsoLen->proc_fops = &tso_len_fops;
8679 + if (!(procTsoLen = proc_create(PROCREG_TSO_LEN, 0, procRegDir, &tso_len_fops)))
8681 + printk("!! FAIL to create %s PROC !!\n", PROCREG_TSO_LEN);
8684 +#if defined(CONFIG_RAETH_LRO)
8685 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8686 + if ((procLroStats = create_proc_entry(PROCREG_LRO_STATS, 0, procRegDir)))
8687 + procLroStats->proc_fops = &lro_stats_fops;
8690 + if (!(procLroStats = proc_create(PROCREG_LRO_STATS, 0, procRegDir, &lro_stats_fops)))
8692 + printk("!! FAIL to create %s PROC !!\n", PROCREG_LRO_STATS);
8695 +#if defined(CONFIG_RAETH_HW_LRO)
8696 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8697 + if ((procHwLroStats = create_proc_entry(PROCREG_HW_LRO_STATS, 0, procRegDir)))
8698 + procHwLroStats->proc_fops = &hw_lro_stats_fops;
8701 + if (!(procHwLroStats = proc_create(PROCREG_HW_LRO_STATS, 0, procRegDir, &hw_lro_stats_fops)))
8703 + printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
8704 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8705 + if ((procHwLroAutoTlb = create_proc_entry(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir)))
8706 + procHwLroAutoTlb->proc_fops = &hw_lro_auto_tlb_fops;
8709 + if (!(procHwLroAutoTlb = proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir, &hw_lro_auto_tlb_fops)))
8711 + printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_AUTO_TLB);
8712 +#endif /* CONFIG_RAETH_HW_LRO */
8714 +#if defined(CONFIG_RAETH_QOS)
8715 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8716 + if ((procRaQOS = create_proc_entry(PROCREG_RAQOS, 0, procRegDir)))
8717 + procRaQOS->proc_fops = &raeth_qos_fops;
8720 + if (!(procRaQOS = proc_create(PROCREG_RAQOS, 0, procRegDir, &raeth_qos_fops)))
8722 + printk("!! FAIL to create %s PROC !!\n", PROCREG_RAQOS);
8725 +#if defined(CONFIG_USER_SNMPD)
8726 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8727 + if ((procRaSnmp = create_proc_entry(PROCREG_SNMP, S_IRUGO, procRegDir)))
8728 + procRaSnmp->proc_fops = &ra_snmp_seq_fops;
8731 + if (!(procRaSnmp = proc_create(PROCREG_SNMP, S_IRUGO, procRegDir, &ra_snmp_seq_fops)))
8733 + printk("!! FAIL to create %s PROC !!\n", PROCREG_SNMP);
8736 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8737 + if ((procEswCnt = create_proc_entry(PROCREG_ESW_CNT, 0, procRegDir)))
8738 + procEswCnt->proc_fops = &switch_count_fops;
8741 + if (!(procEswCnt = proc_create(PROCREG_ESW_CNT, 0, procRegDir, &switch_count_fops)))
8743 + printk("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
8745 +#if defined (TASKLET_WORKQUEUE_SW)
8746 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8747 + if ((procSCHE = create_proc_entry(PROCREG_SCHE, 0, procRegDir)))
8748 + procSCHE->proc_fops = &schedule_sw_fops;
8751 + if (!(procSCHE = proc_create(PROCREG_SCHE, 0, procRegDir, &schedule_sw_fops)))
8753 + printk("!! FAIL to create %s PROC !!\n", PROCREG_SCHE);
8756 +#if defined(CONFIG_RAETH_PDMA_DVT)
8757 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8758 + if ((procPdmaDvt = create_proc_entry(PROCREG_PDMA_DVT, 0, procRegDir)))
8759 + procPdmaDvt->proc_fops = &pdma_dev_sw_fops;
8762 + if (!(procPdmaDvt = proc_create(PROCREG_PDMA_DVT, 0, procRegDir, &pdma_dev_sw_fops )))
8764 + printk("!! FAIL to create %s PROC !!\n", PROCREG_PDMA_DVT);
8765 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
8767 + printk(KERN_ALERT "PROC INIT OK!\n");
8771 +void debug_proc_exit(void)
8775 + remove_proc_entry(PROCREG_CP0, procRegDir);
8778 + remove_proc_entry(PROCREG_GMAC, procRegDir);
8779 +#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
8781 + remove_proc_entry(PROCREG_GMAC, procRegDir);
8784 + remove_proc_entry(PROCREG_SKBFREE, procRegDir);
8787 + remove_proc_entry(PROCREG_TXRING, procRegDir);
8790 + remove_proc_entry(PROCREG_RXRING, procRegDir);
8792 +#if defined(CONFIG_RAETH_TSO)
8794 + remove_proc_entry(PROCREG_NUM_OF_TXD, procRegDir);
8797 + remove_proc_entry(PROCREG_TSO_LEN, procRegDir);
8800 +#if defined(CONFIG_RAETH_LRO)
8802 + remove_proc_entry(PROCREG_LRO_STATS, procRegDir);
8805 +#if defined(CONFIG_RAETH_QOS)
8807 + remove_proc_entry(PROCREG_RAQOS, procRegDir);
8809 + remove_proc_entry(PROCREG_RXDONE_INTR, procRegDir);
8810 + if (procRaEswIntr)
8811 + remove_proc_entry(PROCREG_ESW_INTR, procRegDir);
8814 +#if defined(CONFIG_USER_SNMPD)
8816 + remove_proc_entry(PROCREG_SNMP, procRegDir);
8820 + remove_proc_entry(PROCREG_ESW_CNT, procRegDir);
8823 + //remove_proc_entry(PROCREG_DIR, 0);
8825 + printk(KERN_ALERT "proc exit\n");
8827 +EXPORT_SYMBOL(procRegDir);
8829 +++ b/drivers/net/ethernet/raeth/ra_mac.h
8834 +void ra2880stop(END_DEVICE *ei_local);
8835 +void ra2880MacAddressSet(unsigned char p[6]);
8836 +void ra2880Mac2AddressSet(unsigned char p[6]);
8837 +void ethtool_init(struct net_device *dev);
8839 +void ra2880EnableInterrupt(void);
8841 +void dump_qos(void);
8842 +void dump_reg(struct seq_file *s);
8843 +void dump_cp0(void);
8845 +int debug_proc_init(void);
8846 +void debug_proc_exit(void);
8848 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8849 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
8850 +void enable_auto_negotiate(int unused);
8852 +void enable_auto_negotiate(int ge);
8855 +void rt2880_gmac_hard_reset(void);
8857 +int TsoLenUpdate(int tso_len);
8858 +int NumOfTxdUpdate(int num_of_txd);
8860 +#ifdef CONFIG_RAETH_LRO
8861 +int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed);
8863 +#ifdef CONFIG_RAETH_HW_LRO
8864 +int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size);
8865 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
8866 +#define HW_LRO_AGG_FLUSH (1)
8867 +#define HW_LRO_AGE_FLUSH (2)
8868 +#define HW_LRO_NOT_IN_SEQ_FLUSH (3)
8869 +#define HW_LRO_TIMESTAMP_FLUSH (4)
8870 +#define HW_LRO_NON_RULE_FLUSH (5)
8871 +int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason);
8872 +#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
8873 +typedef int (*HWLRO_DBG_FUNC)(int par1, int par2);
8874 +int hwlro_agg_cnt_ctrl(int par1, int par2);
8875 +int hwlro_agg_time_ctrl(int par1, int par2);
8876 +int hwlro_age_time_ctrl(int par1, int par2);
8877 +int hwlro_pkt_int_alpha_ctrl(int par1, int par2);
8878 +int hwlro_threshold_ctrl(int par1, int par2);
8879 +int hwlro_fix_setting_switch_ctrl(int par1, int par2);
8880 +#endif /* CONFIG_RAETH_HW_LRO */
8881 +int getnext(const char *src, int separator, char *dest);
8882 +int str_to_ip(unsigned int *ip, const char *str);
8884 +#if defined(CONFIG_RAETH_PDMA_DVT)
8885 +typedef int (*PDMA_DBG_FUNC)(int par1, int par2);
8886 +#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
8889 +++ b/drivers/net/ethernet/raeth/ra_netlink.c
8891 +// for netlink header
8892 +#include <asm/types.h>
8893 +#include <net/sock.h>
8894 +#include <linux/socket.h>
8895 +#include <linux/netlink.h>
8896 +#include <linux/skbuff.h>
8897 +#include <linux/net.h>
8898 +#include <linux/version.h>
8900 +#include "csr_netlink.h"
8901 +#include "ra2882ethreg.h"
8902 +#include "ra_netlink.h"
8904 +static struct sock *csr_msg_socket = NULL; // synchronize socket for netlink use
8905 +unsigned int flags;
8907 +void rt2880_csr_receiver(struct sock *sk, int len)
8909 + struct sk_buff *skb;
8911 + struct nlmsghdr *nlh;
8912 + unsigned int reg_value = 0;
8914 + RAETH_PRINT("csr netlink receiver!\n");
8915 + skb = skb_recv_datagram(sk, 0, 1, &err);
8917 + RAETH_PRINT("error no : %d\n", err);
8919 + if (skb == NULL) {
8920 + printk("rt2880_csr_receiver(): No data received, error!\n");
8924 + nlh = (struct nlmsghdr*)skb->data;
8926 + csrmsg = NLMSG_DATA(nlh);
8928 + if (csrmsg->enable == CSR_READ ) {
8929 + reg_value = sysRegRead(csrmsg->address);
8931 + printk("raeth -- 0x%08x: 0x%08x\n", csrmsg->address, reg_value);
8933 + } else if ( csrmsg->enable == CSR_WRITE ) {
8934 + sysRegWrite(csrmsg->address, csrmsg->default_value);
8935 + reg_value = sysRegRead(csrmsg->address);
8936 + } else if ( csrmsg->enable == CSR_TEST ) {
8937 + reg_value = sysRegRead(csrmsg->address);
8938 + printk("0x%08x: 0x%08x\n", (unsigned int)csrmsg->address, reg_value);
8941 + printk("drv: Command format error!\n");
8943 + csrmsg->default_value = reg_value;
8945 + RAETH_PRINT("drv: rt2880_csr_msgsend() - msg to send!\n");
8947 + err = rt2880_csr_msgsend(csrmsg);
8950 + printk("drv: msg send error!\n");
8952 + skb_free_datagram(sk, skb);
8955 +int rt2880_csr_msgsend(CSR_MSG* csrmsg)
8957 + struct sk_buff *skb;
8958 + struct nlmsghdr *nlh = NULL;
8960 + struct sock *send_syncnl = csr_msg_socket;
8963 + if (send_syncnl == NULL) {
8964 + printk("drv: netlink_kernel_create() failed!\n");
8968 + size = NLMSG_SPACE(sizeof(CSR_MSG));
8969 + skb = alloc_skb(size, GFP_ATOMIC);
8973 + printk("rt2880_csr_msgsend() : error! msg structure not available\n");
8977 + nlh = NLMSG_PUT(skb, 0, 0, RALINK_CSR_GROUP, size - sizeof(struct nlmsghdr));
8981 + printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
8985 + csr_reg = NLMSG_DATA(nlh);
8988 + printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
8992 + csr_reg->address = csrmsg->address;
8993 + csr_reg->default_value = csrmsg->default_value;
8994 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
8995 + NETLINK_CB(skb).dst_group = RALINK_CSR_GROUP;
8997 + NETLINK_CB(skb).dst_groups = RALINK_CSR_GROUP;
8999 + netlink_broadcast(send_syncnl, skb, 0, RALINK_CSR_GROUP, GFP_ATOMIC);
9006 +int csr_netlink_init()
9009 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9010 + csr_msg_socket = netlink_kernel_create(NETLINK_CSR, RALINK_CSR_GROUP, rt2880_csr_receiver, THIS_MODULE);
9012 + csr_msg_socket = netlink_kernel_create(NETLINK_CSR, rt2880_csr_receiver);
9015 + if ( csr_msg_socket == NULL )
9016 + printk("unable to create netlink socket!\n");
9018 + printk("Netlink init ok!\n");
9022 +void csr_netlink_end()
9024 + if (csr_msg_socket != NULL){
9025 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9026 + sock_release(csr_msg_socket->sk_socket);
9028 + sock_release(csr_msg_socket->socket);
9030 + printk("Netlink end...\n");
9034 +++ b/drivers/net/ethernet/raeth/ra_netlink.h
9039 +#include "csr_netlink.h"
9040 +int rt2880_csr_msgsend(CSR_MSG* csrmsg);
9041 +void rt2880_csr_receiver(struct sock *sk, int len);
9042 +int csr_netlink_init(void);
9043 +void csr_netlink_end(void);
9047 +++ b/drivers/net/ethernet/raeth/ra_qos.c
9049 +#include <asm/io.h>
9050 +#include <linux/pci.h>
9051 +#include <linux/netdevice.h>
9052 +#include <linux/etherdevice.h>
9053 +#include <linux/net.h>
9054 +#include <linux/in.h>
9055 +#include "ra_qos.h"
9056 +#include "raether.h"
9057 +#include "ra2882ethreg.h"
9059 +#include <asm/types.h>
9060 +#include <net/sock.h>
9061 +#include <linux/socket.h>
9062 +#include <linux/skbuff.h>
9063 +#include <linux/net.h>
9064 +#include <linux/if_vlan.h>
9065 +#include <linux/ip.h>
9068 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9069 +#include "../../../net/nat/hw_nat/ra_nat.h"
9072 +#define CONTI_TX_SEND_MAX_SIZE 1440
9075 + * set tx queue # to descriptor
9077 +void rt3052_tx_queue_init(unsigned long data)
9079 + /* define qos p */
9083 +void rt3052_pse_port0_fc_clear(unsigned long data)
9085 + /* clear FE_INT_STATUS.PSE_P0_FC */
9089 +inline int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx)
9091 + switch (ring_no) {
9093 + *idx = *(unsigned long*)TX_CTX_IDX0;
9096 + *idx = *(unsigned long*)TX_CTX_IDX1;
9099 + *idx = *(unsigned long*)TX_CTX_IDX2;
9102 + *idx = *(unsigned long*)TX_CTX_IDX3;
9105 + printk("set_tx_ctx_idex error\n");
9111 +inline int set_tx_ctx_idx(unsigned int ring_no, unsigned int idx)
9113 + switch (ring_no ) {
9115 + *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32)idx);
9118 + *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32)idx);
9121 + *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32)idx);
9124 + *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32)idx);
9127 + printk("set_tx_ctx_idex error\n");
9134 +void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc)
9136 + switch (ring_no) {
9138 + *tx_desc = ei_local->tx_ring0;
9139 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
9142 + *tx_desc = ei_local->tx_ring1;
9143 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
9146 + *tx_desc = ei_local->tx_ring2;
9147 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
9150 + *tx_desc = ei_local->tx_ring3;
9151 + *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
9154 + printk("ring_no input error... %d\n", ring_no);
9158 +int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned pn)
9160 + END_DEVICE* ei_local = netdev_priv(dev);
9161 + struct PDMA_txdesc* tx_desc;
9162 + unsigned int tx_cpu_owner_idx, tx_dtx_idx;
9164 + unsigned int length=skb->len;
9166 + unsigned long flags;
9168 + //printk("fe_qos_packet_send: ring_no=%d qn=%d pn=%d\n", ring_no, qn, pn);
9170 + switch ( ring_no ) {
9172 + tx_desc = ei_local->tx_ring0;
9173 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX0;
9174 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
9177 + tx_desc = ei_local->tx_ring1;
9178 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX1;
9179 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
9182 + tx_desc = ei_local->tx_ring2;
9183 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX2;
9184 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
9187 + tx_desc = ei_local->tx_ring3;
9188 + tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX3;
9189 + tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
9192 + printk("ring_no input error... %d\n", ring_no);
9196 + //printk("tx_cpu_owner_idx=%d tx_dtx_idx=%d\n", tx_cpu_owner_idx, tx_dtx_idx);
9198 + if(tx_desc == NULL) {
9199 + printk("%s : txdesc is NULL\n", dev->name);
9203 + tx_desc[tx_cpu_owner_idx].txd_info1.SDP0 = virt_to_phys(skb->data);
9204 + tx_desc[tx_cpu_owner_idx].txd_info2.SDL0 = length;
9205 + tx_desc[tx_cpu_owner_idx].txd_info2.DDONE_bit = 0;
9206 + tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
9207 + tx_desc[tx_cpu_owner_idx].txd_info4.QN = qn;
9209 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
9210 + ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.TCO = 1;
9211 + ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.UCO = 1;
9212 + ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.ICO = 1;
9215 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9216 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
9217 + tx_desc[tx_cpu_owner_idx].txd_info4.PN = 6; /* PPE */
9219 + tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
9224 + spin_lock_irqsave(&ei_local->page_lock, flags);
9225 + ei_local->skb_free[ring_no][tx_cpu_owner_idx] = skb;
9226 + tx_cpu_owner_idx = (tx_cpu_owner_idx +1) % NUM_TX_DESC;
9227 + ret = set_tx_ctx_idx(ring_no, tx_cpu_owner_idx);
9228 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
9230 + ei_local->stat.tx_packets++;
9231 + ei_local->stat.tx_bytes += length;
9233 +#ifdef CONFIG_RAETH_NAPI
9234 + switch ( ring_no ) {
9236 + if ( ei_local->tx0_full == 1) {
9237 + ei_local->tx0_full = 0;
9238 + netif_wake_queue(dev);
9242 + if ( ei_local->tx1_full == 1) {
9243 + ei_local->tx1_full = 0;
9244 + netif_wake_queue(dev);
9248 + if ( ei_local->tx2_full == 1) {
9249 + ei_local->tx2_full = 0;
9250 + netif_wake_queue(dev);
9254 + if ( ei_local->tx3_full == 1) {
9255 + ei_local->tx3_full = 0;
9256 + netif_wake_queue(dev);
9260 + printk("ring_no input error %d\n", ring_no);
9266 +int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn)
9268 + END_DEVICE* ei_local = netdev_priv(dev);
9269 + struct PDMA_txdesc *tx_desc;
9270 + unsigned int tx_cpu_owner_idx = 0;
9272 + unsigned int phy_tx_ring;
9275 + if ( ring_no > 3 ){
9276 + printk("%s : ring_no - %d, please under 4...\n", dev->name, ring_no);
9281 + printk("%s : pn - %d, please under 2...\n", dev->name, pn);
9285 + tx_desc = pci_alloc_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), &phy_tx_ring);
9286 + ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx;
9288 + switch (ring_no) {
9290 + ei_local->tx_ring0 = tx_desc;
9291 + ei_local->phy_tx_ring0 = phy_tx_ring;
9294 + ei_local->phy_tx_ring1 = phy_tx_ring;
9295 + ei_local->tx_ring1 = tx_desc;
9298 + ei_local->phy_tx_ring2 = phy_tx_ring;
9299 + ei_local->tx_ring2 = tx_desc;
9302 + ei_local->phy_tx_ring3 = phy_tx_ring;
9303 + ei_local->tx_ring3 = tx_desc;
9306 + printk("ring_no input error! %d\n", ring_no);
9307 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), tx_desc, phy_tx_ring);
9311 + if ( tx_desc == NULL)
9313 + printk("tx desc allocation failed!\n");
9317 + for( i = 0; i < NUM_TX_DESC; i++) {
9318 + memset( &tx_desc[i], 0, sizeof(struct PDMA_txdesc));
9319 + tx_desc[i].txd_info2.LS0_bit = 1;
9320 + tx_desc[i].txd_info2.DDONE_bit = 1;
9321 + tx_desc[i].txd_info4.PN = pn;
9322 + tx_desc[i].txd_info4.QN = qn;
9325 + switch ( ring_no ) {
9327 + *(unsigned long*)TX_BASE_PTR0 = phys_to_bus((u32) phy_tx_ring);
9328 + *(unsigned long*)TX_MAX_CNT0 = cpu_to_le32((u32)NUM_TX_DESC);
9329 + *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32) tx_cpu_owner_idx);
9330 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
9333 + *(unsigned long*)TX_BASE_PTR1 = phys_to_bus((u32) phy_tx_ring);
9334 + *(unsigned long*)TX_MAX_CNT1 = cpu_to_le32((u32)NUM_TX_DESC);
9335 + *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32) tx_cpu_owner_idx);
9336 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX1);
9339 + *(unsigned long*)TX_BASE_PTR2 = phys_to_bus((u32) phy_tx_ring);
9340 + *(unsigned long*)TX_MAX_CNT2 = cpu_to_le32((u32)NUM_TX_DESC);
9341 + *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32) tx_cpu_owner_idx);
9342 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX2);
9345 + *(unsigned long*)TX_BASE_PTR3 = phys_to_bus((u32) phy_tx_ring);
9346 + *(unsigned long*)TX_MAX_CNT3 = cpu_to_le32((u32)NUM_TX_DESC);
9347 + *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32) tx_cpu_owner_idx);
9348 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX3);
9351 + printk("tx descriptor init failed %d\n", ring_no);
9358 + DSCP | AC | WMM_AC (Access Category)
9359 + ------+----+--------
9370 + DSCP |(bit5~bit7)| WMM
9371 + -------+-----------+-------
9381 + Notes: BE should be mapped to AC1, but mapped to AC0 in linux kernel.
9385 +int pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no)
9387 +#if defined(CONFIG_RALINK_RT2880)
9388 + /* RT2880 -- Assume using 1 Ring (Ring0), Queue 0, and Port 0 */
9393 + unsigned int ac=0;
9394 + unsigned int bridge_traffic=0, lan_traffic=0;
9395 + struct iphdr *iph=NULL;
9396 + struct vlan_ethhdr *veth=NULL;
9397 + unsigned int vlan_id=0;
9398 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9399 + static char DscpToAcMap[8]={1,0,0,1,2,2,3,3};
9400 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9401 + static char VlanPriToAcMap[8]={1,0,0,1,2,2,3,3};
9404 + /* Bridge:: {BG,BE,VI,VO} */
9405 + /* GateWay:: WAN: {BG,BE,VI,VO}, LAN: {BG,BE,VI,VO} */
9406 +#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
9409 + * 1.1) GMAC1 ONLY:
9410 + * VO/VI->Ring3, BG/BE->Ring2
9411 + * 1.2) GMAC1+GMAC2:
9412 + * GMAC1:: VO/VI->Ring3, BG/BE->Ring2
9413 + * GMAC2:: VO/VI->Ring1, BG/BE->Ring0
9415 + * 2.1) GMAC1 ONLY:
9416 + * GMAC1:: LAN:VI/VO->Ring2, BE/BK->Ring2
9417 + * WAN:VI/VO->Ring3, BE/BK->Ring3
9418 + * 2.2)GMAC1+GMAC2:
9419 + * GMAC1:: LAN:VI/VO/BE/BK->Ring2, WAN:VI/VO/BE/BK->Ring3
9420 + * GMAC2:: VI/VO->Ring1, BE/BK->Ring0
9422 + static unsigned char AcToRing_BridgeMap[4] = {2, 2, 3, 3};
9423 + static unsigned char AcToRing_GE1Map[2][4] = {{3, 3, 3, 3},{2, 2, 2, 2}};
9424 + static unsigned char AcToRing_GE2Map[4] = {0, 0, 1, 1};
9425 +#elif defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT2883) || \
9426 + defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || \
9427 + defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9428 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
9429 + defined (CONFIG_RALINK_MT7628) || \
9430 + (defined (CONFIG_RALINK_RT3883) && !defined(CONFIG_RAETH_GMAC2))
9432 + * 1) Bridge: VO->Ring3, VI->Ring2, BG->Ring1, BE->Ring0
9434 + * 2.1) GMAC1:: LAN:VI/VO->Ring1, BE/BK->Ring0
9435 + * WAN:VI/VO->Ring3, BE/BK->Ring2
9437 + static unsigned char AcToRing_BridgeMap[4] = {0, 1, 2, 3};
9438 + static unsigned char AcToRing_GE1Map[2][4] = {{2, 2, 3, 3},{0, 0, 1, 1}};
9439 +#endif // CONFIG_RALINK_RT2883
9442 + * Set queue no - QN field in TX Descriptor
9443 + * always use queue 3 for the packet from CPU to GMAC
9447 + /* Get access category */
9448 + veth = (struct vlan_ethhdr *)(skb->data);
9449 + if(veth->h_vlan_proto == htons(ETH_P_8021Q)) { // VLAN traffic
9450 + iph= (struct iphdr *)(skb->data + VLAN_ETH_HLEN);
9452 + vlan_id = ntohs(veth->h_vlan_TCI & VLAN_VID_MASK);
9453 + if(vlan_id==1) { //LAN
9459 + if (veth->h_vlan_encapsulated_proto == htons(ETH_P_IP)) { //IPv4
9460 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9461 + ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
9462 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9463 + ac = VlanPriToAcMap[skb->priority];
9465 + }else { //Ipv6, ARP ...etc
9468 + }else { // non-VLAN traffic
9469 + if (veth->h_vlan_proto == htons(ETH_P_IP)) { //IPv4
9470 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9471 + iph= (struct iphdr *)(skb->data + ETH_HLEN);
9472 + ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
9473 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9474 + ac= VlanPriToAcMap[skb->priority];
9476 + }else { // IPv6, ARP ...etc
9484 + /* Set Tx Ring no */
9485 + if(gmac_no==1) { //GMAC1
9486 + if(bridge_traffic) { //Bridge Mode
9487 + *ring_no = AcToRing_BridgeMap[ac];
9488 + }else { //GateWay Mode
9489 + *ring_no = AcToRing_GE1Map[lan_traffic][ac];
9492 +#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
9493 + *ring_no = AcToRing_GE2Map[ac];
9498 + /* Set Port No - PN field in Tx Descriptor*/
9499 +#if defined(CONFIG_RAETH_GMAC2)
9500 + *port_no = gmac_no;
9502 + if(bridge_traffic) {
9505 + if(lan_traffic==1) { //LAN use VP1
9507 + }else { //WAN use VP2
9511 +#endif // CONFIG_RAETH_GMAC2 //
9521 + * Routine Description :
9522 + * Hi/Li Rings and Queues definition for QoS Purpose
9524 + * Related registers: (Detail information refer to pp106 of RT3052_DS_20080226.doc)
9525 + * Priority High/Low Definition - PDMA_FC_CFG, GDMA1_FC_CFG, GDMA2_FC_CFG
9526 + * Bit 28 - Allows high priority Q to share low priority Q's reserved pages
9527 + * Bit 27:24 - Px high priority definition bitmap
9528 + * Weight Configuration - GDMA1_SCH_CFG, GDMA2_SCH_CFG, PDMA_SCH_CFG -> default 3210
9534 +#define PSE_P1_LQ_FULL (1<<2)
9535 +#define PSE_P1_HQ_FULL (1<<3)
9536 +#define PSE_P2_LQ_FULL (1<<4)
9537 +#define PSE_P2_HQ_FULL (1<<5)
9539 +#define HIGH_QUEUE(queue) (1<<(queue))
9540 +#define LOW_QUEUE(queue) (0<<(queue))
9541 +#define PAGES_SHARING (1<<28)
9542 +#define RSEV_PAGE_COUNT_HQ 0x10 /* Reserved page count for high priority Q */
9543 +#define RSEV_PAGE_COUNT_LQ 0x10 /* Reserved page count for low priority Q */
9544 +#define VIQ_FC_ASRT 0x10 /* Virtual input Q FC assertion threshold */
9546 +#define QUEUE_WEIGHT_1 0
9547 +#define QUEUE_WEIGHT_2 1
9548 +#define QUEUE_WEIGHT_4 2
9549 +#define QUEUE_WEIGHT_8 3
9550 +#define QUEUE_WEIGHT_16 4
9552 +#define WRR_SCH 0 /*WRR */
9553 +#define STRICT_PRI_SCH 1 /* Strict Priority */
9554 +#define MIX_SCH 2 /* Mixed : Q3>WRR(Q2,Q1,Q0) */
9557 + * Ring3 Ring2 Ring1 Ring0
9560 + * --------------------------------
9561 + * | WRR Scheduler |
9562 + * --------------------------------
9564 + * ---------------------------------------
9566 + * ---------------------------------------
9567 + * |Q3||Q2||Q1||Q0| |Q3||Q2||Q1||Q0|
9568 + * | || || || | | || || || |
9569 + * ------------------- -------------------
9570 + * | GDMA2 | | GDMA1 |
9571 + * ------------------- -------------------
9573 + * ------------------------------------
9575 + * ------------------------------------
9579 +void set_scheduler_weight(void)
9581 +#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
9583 + * STEP1: Queue scheduling configuration
9585 + *(unsigned long *)GDMA1_SCH_CFG = (WRR_SCH << 24) |
9586 + (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
9587 + (QUEUE_WEIGHT_8 << 8) | /* queue 2 weight */
9588 + (QUEUE_WEIGHT_4 << 4) | /* queue 1 weight */
9589 + (QUEUE_WEIGHT_2 << 0); /* queue 0 weight */
9591 + *(unsigned long *)GDMA2_SCH_CFG = (WRR_SCH << 24) |
9592 + (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
9593 + (QUEUE_WEIGHT_8 << 8) | /* queue 2 weight */
9594 + (QUEUE_WEIGHT_4 << 4) | /* queue 1 weight */
9595 + (QUEUE_WEIGHT_2 << 0); /* queue 0 weight */
9599 + * STEP2: Ring scheduling configuration
9601 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9602 + defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
9603 + /* MIN_RATE_RATIO0=0, MAX_RATE_ULMT0=1, Weight0=1 */
9604 + *(unsigned long *)SCH_Q01_CFG = (0 << 10) | (1<<14) | (0 << 12);
9605 + /* MIN_RATE_RATIO1=0, MAX_RATE_ULMT1=1, Weight1=4 */
9606 + *(unsigned long *)SCH_Q01_CFG |= (0 << 26) | (1<<30) | (2 << 28);
9608 + /* MIN_RATE_RATIO2=0, MAX_RATE_ULMT2=1, Weight0=1 */
9609 + *(unsigned long *)SCH_Q23_CFG = (0 << 10) | (1<<14) | (0 << 12);
9610 + /* MIN_RATE_RATIO3=0, MAX_RATE_ULMT3=1, Weight1=4 */
9611 + *(unsigned long *)SCH_Q23_CFG |= (0 << 26) | (1<<30) | (2 << 28);
9613 + *(unsigned long *)PDMA_SCH_CFG = (WRR_SCH << 24) |
9614 + (QUEUE_WEIGHT_16 << 12) | /* ring 3 weight */
9615 + (QUEUE_WEIGHT_4 << 8) | /* ring 2 weight */
9616 + (QUEUE_WEIGHT_16 << 4) | /* ring 1 weight */
9617 + (QUEUE_WEIGHT_4 << 0); /* ring 0 weight */
9622 + * Routine Description :
9623 + * Bucket size and related information from ASIC Designer,
9624 + * please check Max Lee to update these values
9626 + * Related Registers
9627 + * FE_GLO_CFG - initialize clock rate for rate limiting
9628 + * PDMA_FC_CFG - Pause mechanism for Rings (Ref to pp116 in datasheet)
9634 + * Bit 29:24 - Q3 flow control pause condition
9635 + * Bit 21:16 - Q2 flow control pause condition
9636 + * Bit 13:8 - Q1 flow control pause condition
9637 + * Bit 5:0 - Q0 flow control pause condition
9640 + * Bit[5] - Pause Qx when PSE p2 HQ full
9641 + * Bit[4] - Pause Qx when PSE p2 LQ full
9642 + * Bit[3] - Pause Qx when PSE p1 HQ full
9643 + * Bit[2] - Pause Qx when PSE p1 LQ full
9644 + * Bit[1] - Pause Qx when PSE p0 HQ full
9645 + * Bit[0] - Pause Qx when PSE p0 LQ full
9647 +void set_schedule_pause_condition(void)
9649 +#if defined (CONFIG_RALINK_MT7620)
9651 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
9652 + *(unsigned long *)SDM_TRING = (0xC << 28) | (0x3 << 24) | (0xC << 4) | 0x3;
9655 + * STEP1: Set queue priority is high or low
9657 + * Set queue 3 as high queue in GMAC1/GMAC2
9659 + *(unsigned long *)GDMA1_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) |
9660 + LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
9661 + (RSEV_PAGE_COUNT_HQ << 16) |
9662 + (RSEV_PAGE_COUNT_LQ <<8) |
9663 + VIQ_FC_ASRT | PAGES_SHARING;
9665 + *(unsigned long *)GDMA2_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) |
9666 + LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
9667 + (RSEV_PAGE_COUNT_HQ << 16) |
9668 + (RSEV_PAGE_COUNT_LQ <<8) |
9669 + VIQ_FC_ASRT | PAGES_SHARING;
9672 + * STEP2: Set flow control pause condition
9674 + * CPU always use queue 3, and queue3 is high queue.
9675 + * If P2(GMAC2) high queue is full, pause ring3/ring2
9676 + * If P1(GMAC1) high queue is full, pause ring1/ring0
9678 + *(unsigned long *)PDMA_FC_CFG = ( PSE_P2_HQ_FULL << 24 ) | /* queue 3 */
9679 + ( PSE_P2_HQ_FULL << 16 ) | /* queue 2 */
9680 + ( PSE_P1_HQ_FULL << 8 ) | /* queue 1 */
9681 + ( PSE_P1_HQ_FULL << 0 ); /* queue 0 */
9687 +void set_output_shaper(void)
9689 +#define GDMA1_TOKEN_RATE 16 /* unit=64bits/ms */
9690 +#define GDMA2_TOKEN_RATE 16 /* unit=64bits/ms */
9693 + *(unsigned long *)GDMA1_SHPR_CFG = (1 << 24) | /* output shaper enable */
9694 + (128 << 16) | /* bucket size (unit=1KB) */
9695 + (GDMA1_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
9699 + *(unsigned long *)GDMA2_SHPR_CFG = (1 << 24) | /* output shaper enable */
9700 + (128 << 16) | /* bucket size (unit=1KB) */
9701 + (GDMA2_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
9705 +++ b/drivers/net/ethernet/raeth/ra_qos.h
9710 +#include "ra2882ethreg.h"
9715 +void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc);
9716 +int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx);
9717 +int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn);
9718 +int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned int pn);
9720 +int pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no);
9721 +void set_schedule_pause_condition(void);
9722 +void set_scheduler_weight(void);
9723 +void set_output_shaper(void);
9726 +++ b/drivers/net/ethernet/raeth/ra_rfrw.c
9728 +#include <linux/module.h>
9729 +#include <linux/version.h>
9730 +#include <linux/kernel.h>
9731 +#include <linux/sched.h>
9732 +#include <linux/types.h>
9733 +#include <linux/fcntl.h>
9734 +#include <linux/interrupt.h>
9735 +#include <linux/ptrace.h>
9736 +#include <linux/ioport.h>
9737 +#include <linux/in.h>
9738 +#include <linux/slab.h>
9739 +#include <linux/string.h>
9740 +#include <linux/signal.h>
9741 +#include <linux/irq.h>
9742 +#include <linux/netdevice.h>
9743 +#include <linux/etherdevice.h>
9744 +#include <linux/skbuff.h>
9746 +#include "ra2882ethreg.h"
9747 +#include "raether.h"
9748 +#include "ra_mac.h"
9750 +#define RF_CSR_CFG 0xb0180500
9751 +#define RF_CSR_KICK (1<<17)
9752 +int rw_rf_reg(int write, int reg, int *data)
9754 + unsigned long rfcsr, i = 0;
9757 + rfcsr = sysRegRead(RF_CSR_CFG);
9758 + if (! (rfcsr & (u32)RF_CSR_KICK) )
9760 + if (++i > 10000) {
9761 + printk("Warning: Abort rw rf register: too busy\n");
9766 + rfcsr = (u32)(RF_CSR_KICK | ((reg&0x3f) << 8) | (*data & 0xff));
9770 + sysRegRead(RF_CSR_CFG) = cpu_to_le32(rfcsr);
9774 + rfcsr = sysRegRead(RF_CSR_CFG);
9775 + if (! (rfcsr & (u32)RF_CSR_KICK) )
9777 + if (++i > 10000) {
9778 + printk("Warning: still busy\n");
9783 + rfcsr = sysRegRead(RF_CSR_CFG);
9785 + if (((rfcsr&0x1f00) >> 8) != (reg & 0x1f)) {
9786 + printk("Error: rw register failed\n");
9789 + *data = (int)(rfcsr & 0xff);
9795 +++ b/drivers/net/ethernet/raeth/ra_rfrw.h
9800 +int rw_rf_reg(int write, int reg, int *data);
9804 +++ b/drivers/net/ethernet/raeth/raether.c
9806 +#include <linux/module.h>
9807 +#include <linux/version.h>
9808 +#include <linux/kernel.h>
9809 +#include <linux/types.h>
9810 +#include <linux/pci.h>
9811 +#include <linux/init.h>
9812 +#include <linux/skbuff.h>
9813 +#include <linux/if_vlan.h>
9814 +#include <linux/if_ether.h>
9815 +#include <linux/fs.h>
9816 +#include <asm/uaccess.h>
9817 +#include <asm/rt2880/surfboardint.h>
9818 +#include <linux/platform_device.h>
9819 +#if defined (CONFIG_RAETH_TSO)
9820 +#include <linux/tcp.h>
9821 +#include <net/ipv6.h>
9822 +#include <linux/ip.h>
9823 +#include <net/ip.h>
9824 +#include <net/tcp.h>
9825 +#include <linux/in.h>
9826 +#include <linux/ppp_defs.h>
9827 +#include <linux/if_pppox.h>
9829 +#if defined (CONFIG_RAETH_LRO)
9830 +#include <linux/inet_lro.h>
9832 +#include <linux/delay.h>
9833 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
9834 +#include <linux/sched.h>
9837 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
9838 +#include <asm/rt2880/rt_mmap.h>
9840 +#include <linux/libata-compat.h>
9843 +#include "ra2882ethreg.h"
9844 +#include "raether.h"
9845 +#include "ra_mac.h"
9846 +#include "ra_ioctl.h"
9847 +#include "ra_rfrw.h"
9848 +#ifdef CONFIG_RAETH_NETLINK
9849 +#include "ra_netlink.h"
9851 +#if defined (CONFIG_RAETH_QOS)
9852 +#include "ra_qos.h"
9855 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9856 +#include "../../../net/nat/hw_nat/ra_nat.h"
9858 +#if defined(CONFIG_RAETH_PDMA_DVT)
9859 +#include "dvt/raether_pdma_dvt.h"
9860 +#endif /* CONFIG_RAETH_PDMA_DVT */
9862 +static int fe_irq = 0;
9864 +#if defined (TASKLET_WORKQUEUE_SW)
9866 +int working_schedule;
9869 +#ifdef CONFIG_RAETH_NAPI
9870 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
9871 +static int raeth_clean(struct napi_struct *napi, int budget);
9873 +static int raeth_clean(struct net_device *dev, int *budget);
9876 +static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do);
9878 +static int rt2880_eth_recv(struct net_device* dev);
9881 +#if !defined(CONFIG_RA_NAT_NONE)
9884 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
9885 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
9888 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
9891 +#include <asm/mipsregs.h>
9892 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
9893 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
9894 +#endif /* CONFIG_RA_CLASSIFIER */
9896 +#if defined (CONFIG_RALINK_RT3052_MP2)
9897 +int32_t mcast_rx(struct sk_buff * skb);
9898 +int32_t mcast_tx(struct sk_buff * skb);
9901 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf)
9907 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
9908 +void setup_internal_gsw(void);
9909 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
9910 +void apll_xtal_enable(void);
9911 +#define REGBIT(x, n) (x << n)
9915 +#if defined (CONFIG_MT7623_FPGA)
9916 +void setup_fpga_gsw(void);
9919 +/* gmac driver feature set config */
9920 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
9923 +#if defined (CONFIG_ARCH_MT7623)
9926 +#define DELAY_INT 1
9930 +//#define CONFIG_UNH_TEST
9931 +/* end of config */
9933 +#if defined (CONFIG_RAETH_JUMBOFRAME)
9934 +#define MAX_RX_LENGTH 4096
9936 +#define MAX_RX_LENGTH 1536
9939 +struct net_device *dev_raether;
9941 +static int rx_dma_owner_idx;
9942 +static int rx_dma_owner_idx0;
9943 +#if defined (CONFIG_RAETH_HW_LRO)
9944 +static int rx_dma_owner_lro1;
9945 +static int rx_dma_owner_lro2;
9946 +static int rx_dma_owner_lro3;
9947 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
9948 +static int rx_dma_owner_idx1;
9949 +#if defined(CONFIG_ARCH_MT7623)
9950 +static int rx_dma_owner_idx2;
9951 +static int rx_dma_owner_idx3;
9952 +#endif /* CONFIG_ARCH_MT7623 */
9953 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
9957 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
9960 +static int pending_recv;
9961 +static struct PDMA_rxdesc *rx_ring;
9962 +unsigned long tx_ring_full=0;
9964 +#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9965 + defined(CONFIG_RALINK_MT7620)
9966 +unsigned short p0_rx_good_cnt = 0;
9967 +unsigned short p1_rx_good_cnt = 0;
9968 +unsigned short p2_rx_good_cnt = 0;
9969 +unsigned short p3_rx_good_cnt = 0;
9970 +unsigned short p4_rx_good_cnt = 0;
9971 +unsigned short p5_rx_good_cnt = 0;
9972 +unsigned short p6_rx_good_cnt = 0;
9973 +unsigned short p0_tx_good_cnt = 0;
9974 +unsigned short p1_tx_good_cnt = 0;
9975 +unsigned short p2_tx_good_cnt = 0;
9976 +unsigned short p3_tx_good_cnt = 0;
9977 +unsigned short p4_tx_good_cnt = 0;
9978 +unsigned short p5_tx_good_cnt = 0;
9979 +unsigned short p6_tx_good_cnt = 0;
9981 +unsigned short p0_rx_byte_cnt = 0;
9982 +unsigned short p1_rx_byte_cnt = 0;
9983 +unsigned short p2_rx_byte_cnt = 0;
9984 +unsigned short p3_rx_byte_cnt = 0;
9985 +unsigned short p4_rx_byte_cnt = 0;
9986 +unsigned short p5_rx_byte_cnt = 0;
9987 +unsigned short p6_rx_byte_cnt = 0;
9988 +unsigned short p0_tx_byte_cnt = 0;
9989 +unsigned short p1_tx_byte_cnt = 0;
9990 +unsigned short p2_tx_byte_cnt = 0;
9991 +unsigned short p3_tx_byte_cnt = 0;
9992 +unsigned short p4_tx_byte_cnt = 0;
9993 +unsigned short p5_tx_byte_cnt = 0;
9994 +unsigned short p6_tx_byte_cnt = 0;
9996 +#if defined(CONFIG_RALINK_MT7620)
9997 +unsigned short p7_rx_good_cnt = 0;
9998 +unsigned short p7_tx_good_cnt = 0;
10000 +unsigned short p7_rx_byte_cnt = 0;
10001 +unsigned short p7_tx_byte_cnt = 0;
10008 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
10009 +#include "ra_ethtool.h"
10010 +extern struct ethtool_ops ra_ethtool_ops;
10011 +#ifdef CONFIG_PSEUDO_SUPPORT
10012 +extern struct ethtool_ops ra_virt_ethtool_ops;
10013 +#endif // CONFIG_PSEUDO_SUPPORT //
10014 +#endif // CONFIG_ETHTOOL //
10016 +#ifdef CONFIG_RALINK_VISTA_BASIC
10017 +int is_switch_175c = 1;
10020 +unsigned int M2Q_table[64] = {0};
10021 +unsigned int lan_wan_separate = 0;
10023 +#if defined(CONFIG_HW_SFQ)
10024 +unsigned int web_sfq_enable = 0;
10025 +EXPORT_SYMBOL(web_sfq_enable);
10028 +EXPORT_SYMBOL(M2Q_table);
10029 +EXPORT_SYMBOL(lan_wan_separate);
10030 +#if defined (CONFIG_RAETH_LRO)
10031 +unsigned int lan_ip;
10032 +struct lro_para_struct lro_para;
10033 +int lro_flush_needed;
10034 +extern char const *nvram_get(int index, char *name);
10037 +#define KSEG1 0xa0000000
10038 +#if defined (CONFIG_MIPS)
10039 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
10040 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
10042 +#define PHYS_TO_VIRT(x) phys_to_virt(x)
10043 +#define VIRT_TO_PHYS(x) virt_to_phys(x)
10046 +extern int fe_dma_init(struct net_device *dev);
10047 +extern int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no);
10048 +extern void ei_xmit_housekeeping(unsigned long unused);
10049 +extern inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no);
10050 +#if defined (CONFIG_RAETH_HW_LRO)
10051 +extern int fe_hw_lro_init(struct net_device *dev);
10052 +#endif /* CONFIG_RAETH_HW_LRO */
10055 +void skb_dump(struct sk_buff* sk) {
10058 + printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
10059 + sk->dev?sk->dev->name:"ip stack",sk->len,sk->truesize,
10060 + skb_headroom(sk),skb_tailroom(sk));
10062 + //for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) {
10063 + for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) {
10064 + if((i % 20) == 0)
10066 + if(i==(unsigned int)sk->data) printk("{");
10067 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
10068 + if(i==(unsigned int)sk->transport_header) printk("#");
10069 + if(i==(unsigned int)sk->network_header) printk("|");
10070 + if(i==(unsigned int)sk->mac_header) printk("*");
10072 + if(i==(unsigned int)sk->h.raw) printk("#");
10073 + if(i==(unsigned int)sk->nh.raw) printk("|");
10074 + if(i==(unsigned int)sk->mac.raw) printk("*");
10076 + printk("%02X-",*((unsigned char*)i));
10077 + if(i==(unsigned int)sk->tail) printk("}");
10085 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10086 +int isICPlusGigaPHY(int ge)
10088 + u32 phy_id0 = 0, phy_id1 = 0;
10090 +#ifdef CONFIG_GE2_RGMII_AN
10092 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10093 + printk("\n Read PhyID 1 is Fail!!\n");
10096 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10097 + printk("\n Read PhyID 1 is Fail!!\n");
10103 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10105 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10106 + printk("\n Read PhyID 0 is Fail!!\n");
10109 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10110 + printk("\n Read PhyID 0 is Fail!!\n");
10116 + if ((phy_id0 == EV_ICPLUS_PHY_ID0) && ((phy_id1 & 0xfff0) == EV_ICPLUS_PHY_ID1))
10122 +int isMarvellGigaPHY(int ge)
10124 + u32 phy_id0 = 0, phy_id1 = 0;
10126 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
10128 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10129 + printk("\n Read PhyID 1 is Fail!!\n");
10132 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10133 + printk("\n Read PhyID 1 is Fail!!\n");
10139 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10141 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10142 + printk("\n Read PhyID 0 is Fail!!\n");
10145 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10146 + printk("\n Read PhyID 0 is Fail!!\n");
10152 + if ((phy_id0 == EV_MARVELL_PHY_ID0) && (phy_id1 == EV_MARVELL_PHY_ID1))
10157 +int isVtssGigaPHY(int ge)
10159 + u32 phy_id0 = 0, phy_id1 = 0;
10161 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
10163 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10164 + printk("\n Read PhyID 1 is Fail!!\n");
10167 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10168 + printk("\n Read PhyID 1 is Fail!!\n");
10174 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10176 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10177 + printk("\n Read PhyID 0 is Fail!!\n");
10180 + if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10181 + printk("\n Read PhyID 0 is Fail!!\n");
10187 + if ((phy_id0 == EV_VTSS_PHY_ID0) && (phy_id1 == EV_VTSS_PHY_ID1))
10194 + * Set the hardware MAC address.
10196 +static int ei_set_mac_addr(struct net_device *dev, void *p)
10198 + struct sockaddr *addr = p;
10200 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10202 + if(netif_running(dev))
10205 + ra2880MacAddressSet(addr->sa_data);
10209 +#ifdef CONFIG_PSEUDO_SUPPORT
10210 +static int ei_set_mac2_addr(struct net_device *dev, void *p)
10212 + struct sockaddr *addr = p;
10214 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10216 + if(netif_running(dev))
10219 + ra2880Mac2AddressSet(addr->sa_data);
10224 +void set_fe_dma_glo_cfg(void)
10226 + int dma_glo_cfg=0;
10227 +#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
10228 + defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
10229 + int fe_glo_cfg=0;
10232 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
10233 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_32DWORDS);
10234 +#elif defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621)
10235 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS);
10236 +#elif defined (CONFIG_ARCH_MT7623)
10237 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS | ADMA_RX_BT_SIZE_32DWORDS);
10239 + dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_4DWORDS);
10242 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10243 + dma_glo_cfg |= (RX_2B_OFFSET);
10246 +#if defined (CONFIG_32B_DESC)
10247 + dma_glo_cfg |= (DESC_32B_EN);
10249 + sysRegWrite(DMA_GLO_CFG, dma_glo_cfg);
10250 +#ifdef CONFIG_RAETH_QDMA
10251 + sysRegWrite(QDMA_GLO_CFG, dma_glo_cfg);
10254 +	/* only the following chipsets need to set it */
10255 +#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
10256 + defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
10257 + //set 1us timer count in unit of clock cycle
10258 + fe_glo_cfg = sysRegRead(FE_GLO_CFG);
10259 + fe_glo_cfg &= ~(0xff << 8); //clear bit8-bit15
10260 + fe_glo_cfg |= (((get_surfboard_sysclk()/1000000)) << 8);
10261 + sysRegWrite(FE_GLO_CFG, fe_glo_cfg);
10265 +int forward_config(struct net_device *dev)
10268 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
10270 + /* RT5350: No GDMA, PSE, CDMA, PPE */
10271 + unsigned int sdmVal;
10272 + sdmVal = sysRegRead(SDM_CON);
10274 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10275 + sdmVal |= 0x7<<16; // UDPCS, TCPCS, IPCS=1
10276 +#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
10278 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10279 + sdmVal |= 0x1<<20; // TCI_81XX
10280 +#endif // CONFIG_RAETH_SPECIAL_TAG //
10282 + sysRegWrite(SDM_CON, sdmVal);
10284 +#else //Non RT5350 chipset
10286 + unsigned int regVal, regCsg;
10288 +#ifdef CONFIG_PSEUDO_SUPPORT
10289 + unsigned int regVal2;
10292 +#ifdef CONFIG_RAETH_HW_VLAN_TX
10293 +#if defined(CONFIG_RALINK_MT7620)
10294 +	/* frame engine will push VLAN tag according to the VIDX field in Tx desc. */
10295 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x430) = 0x00010000;
10296 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x434) = 0x00030002;
10297 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x438) = 0x00050004;
10298 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x43C) = 0x00070006;
10299 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x440) = 0x00090008;
10300 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x444) = 0x000b000a;
10301 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x448) = 0x000d000c;
10302 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x44C) = 0x000f000e;
10305 + * VLAN_IDX 0 = VLAN_ID 0
10307 + * VLAN_IDX 15 = VLAN ID 15
10310 +	/* frame engine will push VLAN tag according to the VIDX field in Tx desc. */
10311 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xa8) = 0x00010000;
10312 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xac) = 0x00030002;
10313 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb0) = 0x00050004;
10314 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb4) = 0x00070006;
10315 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb8) = 0x00090008;
10316 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xbc) = 0x000b000a;
10317 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc0) = 0x000d000c;
10318 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc4) = 0x000f000e;
10322 + regVal = sysRegRead(GDMA1_FWD_CFG);
10323 + regCsg = sysRegRead(CDMA_CSG_CFG);
10325 +#ifdef CONFIG_PSEUDO_SUPPORT
10326 + regVal2 = sysRegRead(GDMA2_FWD_CFG);
10329 + //set unicast/multicast/broadcast frame to cpu
10330 +#if defined (CONFIG_RALINK_MT7620)
10331 + /* GDMA1 frames destination port is port0 CPU*/
10334 + regVal &= ~0xFFFF;
10335 + regVal |= GDMA1_FWD_PORT;
10339 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10340 + regVal |= (1 << 24); //GDM1_TCI_81xx
10344 +#ifdef CONFIG_RAETH_HW_VLAN_TX
10345 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
10346 + dev->features |= NETIF_F_HW_VLAN_TX;
10348 + dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
10351 +#ifdef CONFIG_RAETH_HW_VLAN_RX
10352 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
10353 + dev->features |= NETIF_F_HW_VLAN_RX;
10355 + dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
10359 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10360 + //enable ipv4 header checksum check
10361 + regVal |= GDM1_ICS_EN;
10362 + regCsg |= ICS_GEN_EN;
10364 + //enable tcp checksum check
10365 + regVal |= GDM1_TCS_EN;
10366 + regCsg |= TCS_GEN_EN;
10368 + //enable udp checksum check
10369 + regVal |= GDM1_UCS_EN;
10370 + regCsg |= UCS_GEN_EN;
10372 +#ifdef CONFIG_PSEUDO_SUPPORT
10373 + regVal2 &= ~0xFFFF;
10374 + regVal2 |= GDMA2_FWD_PORT;
10376 + regVal2 |= GDM1_ICS_EN;
10377 + regVal2 |= GDM1_TCS_EN;
10378 + regVal2 |= GDM1_UCS_EN;
10381 +#if defined (CONFIG_RAETH_HW_LRO)
10382 + dev->features |= NETIF_F_HW_CSUM;
10384 + dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
10385 +#endif /* CONFIG_RAETH_HW_LRO */
10386 +//#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
10387 +// dev->vlan_features |= NETIF_F_IP_CSUM;
10390 +#if defined(CONFIG_RALINK_MT7620)
10391 +#if defined (CONFIG_RAETH_TSO)
10392 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
10393 + dev->features |= NETIF_F_SG;
10394 + dev->features |= NETIF_F_TSO;
10396 +#endif // CONFIG_RAETH_TSO //
10398 +#if defined (CONFIG_RAETH_TSOV6)
10399 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
10400 + dev->features |= NETIF_F_TSO6;
10401 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
10403 +#endif // CONFIG_RAETH_TSOV6 //
10405 +#if defined (CONFIG_RAETH_TSO)
10406 + dev->features |= NETIF_F_SG;
10407 + dev->features |= NETIF_F_TSO;
10408 +#endif // CONFIG_RAETH_TSO //
10410 +#if defined (CONFIG_RAETH_TSOV6)
10411 + dev->features |= NETIF_F_TSO6;
10412 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
10413 +#endif // CONFIG_RAETH_TSOV6 //
10414 +#endif // CONFIG_RALINK_MT7620 //
10415 +#else // Checksum offload disabled
10417 + //disable ipv4 header checksum check
10418 + regVal &= ~GDM1_ICS_EN;
10419 + regCsg &= ~ICS_GEN_EN;
10421 + //disable tcp checksum check
10422 + regVal &= ~GDM1_TCS_EN;
10423 + regCsg &= ~TCS_GEN_EN;
10425 + //disable udp checksum check
10426 + regVal &= ~GDM1_UCS_EN;
10427 + regCsg &= ~UCS_GEN_EN;
10429 +#ifdef CONFIG_PSEUDO_SUPPORT
10430 + regVal2 &= ~GDM1_ICS_EN;
10431 + regVal2 &= ~GDM1_TCS_EN;
10432 + regVal2 &= ~GDM1_UCS_EN;
10435 + dev->features &= ~NETIF_F_IP_CSUM; /* disable checksum TCP/UDP over IPv4 */
10436 +#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
10438 +#ifdef CONFIG_RAETH_JUMBOFRAME
10439 + regVal |= GDM1_JMB_EN;
10440 +#ifdef CONFIG_PSEUDO_SUPPORT
10441 + regVal2 |= GDM1_JMB_EN;
10445 + sysRegWrite(GDMA1_FWD_CFG, regVal);
10446 + sysRegWrite(CDMA_CSG_CFG, regCsg);
10447 +#ifdef CONFIG_PSEUDO_SUPPORT
10448 + sysRegWrite(GDMA2_FWD_CFG, regVal2);
10451 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
10452 + dev->vlan_features = dev->features;
10456 + * PSE_FQ_CFG register definition -
10458 + * Define max free queue page count in PSE. (31:24)
10459 + * RT2883/RT3883 - 0xff908000 (255 pages)
10460 + * RT3052 - 0x80504000 (128 pages)
10461 + * RT2880 - 0x80504000 (128 pages)
10463 +	 * Each page contains 128 bytes.
10465 + * 23:16 - free queue flow control release threshold
10466 + * 15:8 - free queue flow control assertion threshold
10467 + * 7:0 - free queue empty threshold
10469 + * The register affects QOS correctness in frame engine!
10472 +#if defined(CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
10473 + sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_RT2883_PSE_FQ_CFG));
10474 +#elif defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
10475 + defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
10476 + defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
10477 + defined (CONFIG_RALINK_MT7628) || defined(CONFIG_ARCH_MT7623)
10478 + /*use default value*/
10480 + sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_PSE_FQFC_CFG));
10484 + *FE_RST_GLO register definition -
10486 + *Reset PSE after re-programming PSE_FQ_CFG.
10489 + sysRegWrite(FE_RST_GL, regVal);
10490 + sysRegWrite(FE_RST_GL, 0); // update for RSTCTL issue
10492 + regCsg = sysRegRead(CDMA_CSG_CFG);
10493 + printk("CDMA_CSG_CFG = %0X\n",regCsg);
10494 + regVal = sysRegRead(GDMA1_FWD_CFG);
10495 + printk("GDMA1_FWD_CFG = %0X\n",regVal);
10497 +#ifdef CONFIG_PSEUDO_SUPPORT
10498 + regVal = sysRegRead(GDMA2_FWD_CFG);
10499 + printk("GDMA2_FWD_CFG = %0X\n",regVal);
10505 +#ifdef CONFIG_RAETH_LRO
10507 +rt_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
10508 + u64 *hdr_flags, void *priv)
10510 + struct iphdr *iph = NULL;
10511 + int vhdr_len = 0;
10514 + * Make sure that this packet is Ethernet II, is not VLAN
10515 + * tagged, is IPv4, has a valid IP header, and is TCP.
10517 + if (skb->protocol == 0x0081) {
10518 + vhdr_len = VLAN_HLEN;
10521 + iph = (struct iphdr *)(skb->data + vhdr_len);
10522 + if (iph->daddr != lro_para.lan_ip1) {
10526 + if(iph->protocol != IPPROTO_TCP) {
10530 + *tcph = skb->data + (iph->ihl << 2) + vhdr_len;
10531 + *hdr_flags = LRO_IPV4 | LRO_TCP;
10533 + lro_flush_needed = 1;
10537 +#endif // CONFIG_RAETH_LRO //
10539 +#ifdef CONFIG_RAETH_NAPI
10540 +static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do)
10542 +static int rt2880_eth_recv(struct net_device* dev)
10545 + struct sk_buff *skb, *rx_skb;
10546 + unsigned int length = 0;
10547 + unsigned long RxProcessed;
10549 + int bReschedule = 0;
10550 + END_DEVICE* ei_local = netdev_priv(dev);
10551 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) || defined (CONFIG_RAETH_HW_LRO)
10552 + int rx_ring_no=0;
10555 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10556 + struct vlan_ethhdr *veth=NULL;
10559 +#ifdef CONFIG_PSEUDO_SUPPORT
10560 + PSEUDO_ADAPTER *pAd;
10564 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10565 + rx_dma_owner_idx0 = (rx_calc_idx0 + 1) % NUM_RX_DESC;
10567 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
10570 +#if defined (CONFIG_32B_DESC)
10571 + dma_cache_sync(NULL, &ei_local->rx_ring0[rx_dma_owner_idx0], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
10573 +#if defined (CONFIG_RAETH_HW_LRO)
10574 + rx_dma_owner_lro1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
10575 + rx_dma_owner_lro2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
10576 + rx_dma_owner_lro3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
10577 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10578 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10579 + rx_dma_owner_idx1 = (rx_calc_idx1 + 1) % NUM_RX_DESC;
10581 + rx_dma_owner_idx1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_RX_DESC;
10582 +#endif /* CONFIG_RAETH_RW_PDMAPTR_FROM_VAR */
10583 +#if defined(CONFIG_ARCH_MT7623)
10584 + rx_dma_owner_idx2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_RX_DESC;
10585 + rx_dma_owner_idx3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_RX_DESC;
10587 +#if defined (CONFIG_32B_DESC)
10588 + dma_cache_sync(NULL, &ei_local->rx_ring1[rx_dma_owner_idx1], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
10594 +#ifdef CONFIG_RAETH_NAPI
10595 + if(*work_done >= work_to_do)
10599 + if (RxProcessed++ > NUM_RX_MAX_PROCESS)
10601 + // need to reschedule rx handle
10608 +#if defined (CONFIG_RAETH_HW_LRO)
10609 + if (ei_local->rx_ring3[rx_dma_owner_lro3].rxd_info2.DDONE_bit == 1) {
10610 + rx_ring = ei_local->rx_ring3;
10611 + rx_dma_owner_idx = rx_dma_owner_lro3;
10612 + // printk("rx_dma_owner_lro3=%x\n",rx_dma_owner_lro3);
10615 + else if (ei_local->rx_ring2[rx_dma_owner_lro2].rxd_info2.DDONE_bit == 1) {
10616 + rx_ring = ei_local->rx_ring2;
10617 + rx_dma_owner_idx = rx_dma_owner_lro2;
10618 + // printk("rx_dma_owner_lro2=%x\n",rx_dma_owner_lro2);
10621 + else if (ei_local->rx_ring1[rx_dma_owner_lro1].rxd_info2.DDONE_bit == 1) {
10622 + rx_ring = ei_local->rx_ring1;
10623 + rx_dma_owner_idx = rx_dma_owner_lro1;
10624 + // printk("rx_dma_owner_lro1=%x\n",rx_dma_owner_lro1);
10627 + else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
10628 + rx_ring = ei_local->rx_ring0;
10629 + rx_dma_owner_idx = rx_dma_owner_idx0;
10630 + // printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
10635 + #if defined (CONFIG_RAETH_HW_LRO_DBG)
10636 + HwLroStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.LRO_AGG_CNT, \
10637 + (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0);
10639 + #if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
10640 + HwLroFlushStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.REV);
10642 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10643 + if (ei_local->rx_ring1[rx_dma_owner_idx1].rxd_info2.DDONE_bit == 1) {
10644 + rx_ring = ei_local->rx_ring1;
10645 + rx_dma_owner_idx = rx_dma_owner_idx1;
10646 + // printk("rx_dma_owner_idx1=%x\n",rx_dma_owner_idx1);
10649 +#if defined(CONFIG_ARCH_MT7623)
10650 + else if (ei_local->rx_ring2[rx_dma_owner_idx2].rxd_info2.DDONE_bit == 1) {
10651 + rx_ring = ei_local->rx_ring2;
10652 + rx_dma_owner_idx = rx_dma_owner_idx2;
10653 + // printk("rx_dma_owner_idx2=%x\n",rx_dma_owner_idx2);
10656 + else if (ei_local->rx_ring3[rx_dma_owner_idx3].rxd_info2.DDONE_bit == 1) {
10657 + rx_ring = ei_local->rx_ring3;
10658 + rx_dma_owner_idx = rx_dma_owner_idx3;
10659 + // printk("rx_dma_owner_idx3=%x\n",rx_dma_owner_idx3);
10662 +#endif /* CONFIG_ARCH_MT7623 */
10663 + else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
10664 + rx_ring = ei_local->rx_ring0;
10665 + rx_dma_owner_idx = rx_dma_owner_idx0;
10666 + // printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
10673 + if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
10674 + rx_ring = ei_local->rx_ring0;
10675 + rx_dma_owner_idx = rx_dma_owner_idx0;
10681 +#if defined (CONFIG_32B_DESC)
10682 + prefetch(&rx_ring[(rx_dma_owner_idx + 1) % NUM_RX_DESC]);
10684 + /* skb processing */
10685 +#if defined (CONFIG_RAETH_HW_LRO)
10686 + length = (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
10688 + length = rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
10689 +#endif /* CONFIG_RAETH_HW_LRO */
10691 +#if defined (CONFIG_ARCH_MT7623)
10692 + dma_unmap_single(NULL, rx_ring[rx_dma_owner_idx].rxd_info1.PDP0, length, DMA_FROM_DEVICE);
10695 +#if defined (CONFIG_RAETH_HW_LRO)
10696 + if(rx_ring_no==3) {
10697 + rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
10698 + rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
10700 + else if(rx_ring_no==2) {
10701 + rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
10702 + rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
10704 + else if(rx_ring_no==1) {
10705 + rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
10706 + rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
10709 + rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10710 + rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10712 + #if defined(CONFIG_RAETH_PDMA_DVT)
10713 + raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
10714 + #endif /* CONFIG_RAETH_PDMA_DVT */
10715 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10716 + if(rx_ring_no==1) {
10717 + rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
10718 + rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
10720 +#if defined(CONFIG_ARCH_MT7623)
10721 + else if(rx_ring_no==2) {
10722 + rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
10723 + rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
10725 + else if(rx_ring_no==3) {
10726 + rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
10727 + rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
10729 +#endif /* CONFIG_ARCH_MT7623 */
10731 + rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10732 + rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10734 + #if defined(CONFIG_RAETH_PDMA_DVT)
10735 + raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
10736 + #endif /* CONFIG_RAETH_PDMA_DVT */
10738 + rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10739 + rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10740 + #if defined(CONFIG_RAETH_PDMA_DVT)
10741 + raeth_pdma_rx_desc_dvt( ei_local, rx_dma_owner_idx0 );
10742 + #endif /* CONFIG_RAETH_PDMA_DVT */
10744 + rx_skb->len = length;
10746 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10747 + rx_skb->data += NET_IP_ALIGN;
10749 + rx_skb->tail = rx_skb->data + length;
10751 +#ifdef CONFIG_PSEUDO_SUPPORT
10752 + if(rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10753 + if(ei_local->PseudoDev!=NULL) {
10754 + rx_skb->dev = ei_local->PseudoDev;
10755 + rx_skb->protocol = eth_type_trans(rx_skb,ei_local->PseudoDev);
10757 + printk("ERROR: PseudoDev is still not initialize but receive packet from GMAC2\n");
10760 + rx_skb->dev = dev;
10761 + rx_skb->protocol = eth_type_trans(rx_skb,dev);
10764 + rx_skb->dev = dev;
10765 + rx_skb->protocol = eth_type_trans(rx_skb,dev);
10768 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10769 +#if defined (CONFIG_PDMA_NEW)
10770 + if(rx_ring[rx_dma_owner_idx].rxd_info4.L4VLD) {
10771 + rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
10773 + rx_skb->ip_summed = CHECKSUM_NONE;
10776 + if(rx_ring[rx_dma_owner_idx].rxd_info4.IPFVLD_bit) {
10777 + rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
10779 + rx_skb->ip_summed = CHECKSUM_NONE;
10783 + rx_skb->ip_summed = CHECKSUM_NONE;
10786 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
10789 + if(ra_classifier_hook_rx!= NULL)
10791 +#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
10792 + ra_classifier_hook_rx(rx_skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
10794 + ra_classifier_hook_rx(rx_skb, read_c0_count());
10797 +#endif /* CONFIG_RA_CLASSIFIER */
10799 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
10800 + if(ra_sw_nat_hook_rx != NULL) {
10801 + FOE_MAGIC_TAG(rx_skb)= FOE_MAGIC_GE;
10802 + *(uint32_t *)(FOE_INFO_START_ADDR(rx_skb)+2) = *(uint32_t *)&rx_ring[rx_dma_owner_idx].rxd_info4;
10803 + FOE_ALG(rx_skb) = 0;
10807 + /* We have to check the free memory size is big enough
10808 + * before pass the packet to cpu*/
10809 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
10810 +#if defined (CONFIG_RAETH_HW_LRO)
10811 + if( rx_ring != ei_local->rx_ring0 )
10812 + skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10814 +#endif /* CONFIG_RAETH_HW_LRO */
10815 + skb = skbmgr_dev_alloc_skb2k();
10817 +#if defined (CONFIG_RAETH_HW_LRO)
10818 + if( rx_ring != ei_local->rx_ring0 )
10819 + skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10821 +#endif /* CONFIG_RAETH_HW_LRO */
10822 + skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10825 + if (unlikely(skb == NULL))
10827 + printk(KERN_ERR "skb not available...\n");
10828 +#ifdef CONFIG_PSEUDO_SUPPORT
10829 + if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10830 + if (ei_local->PseudoDev != NULL) {
10831 + pAd = netdev_priv(ei_local->PseudoDev);
10832 + pAd->stat.rx_dropped++;
10836 + ei_local->stat.rx_dropped++;
10840 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10841 + skb_reserve(skb, NET_IP_ALIGN);
10844 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10845 + // port0: 0x8100 => 0x8100 0001
10846 + // port1: 0x8101 => 0x8100 0002
10847 + // port2: 0x8102 => 0x8100 0003
10848 + // port3: 0x8103 => 0x8100 0004
10849 + // port4: 0x8104 => 0x8100 0005
10850 + // port5: 0x8105 => 0x8100 0006
10851 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
10852 + veth = (struct vlan_ethhdr *)(rx_skb->mac_header);
10854 + veth = (struct vlan_ethhdr *)(rx_skb->mac.raw);
10856 + /*donot check 0x81 due to MT7530 SPEC*/
10857 + //if((veth->h_vlan_proto & 0xFF) == 0x81)
10859 + veth->h_vlan_TCI = htons( (((veth->h_vlan_proto >> 8) & 0xF) + 1) );
10860 + rx_skb->protocol = veth->h_vlan_proto = htons(ETH_P_8021Q);
10864 +/* ra_sw_nat_hook_rx return 1 --> continue
10865 + * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
10867 +#if !defined(CONFIG_RA_NAT_NONE)
10868 + if((ra_sw_nat_hook_rx == NULL) ||
10869 + (ra_sw_nat_hook_rx!= NULL && ra_sw_nat_hook_rx(rx_skb)))
10872 +#if defined (CONFIG_RALINK_RT3052_MP2)
10873 + if(mcast_rx(rx_skb)==0) {
10874 + kfree_skb(rx_skb);
10877 +#if defined (CONFIG_RAETH_LRO)
10878 + if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY) {
10879 + lro_receive_skb(&ei_local->lro_mgr, rx_skb, NULL);
10880 + //LroStatsUpdate(&ei_local->lro_mgr,0);
10883 +#ifdef CONFIG_RAETH_NAPI
10884 + netif_receive_skb(rx_skb);
10886 +#ifdef CONFIG_RAETH_HW_VLAN_RX
10887 + if(ei_local->vlgrp && rx_ring[rx_dma_owner_idx].rxd_info2.TAG) {
10888 + vlan_hwaccel_rx(rx_skb, ei_local->vlgrp, rx_ring[rx_dma_owner_idx].rxd_info3.VID);
10890 + netif_rx(rx_skb);
10893 +#ifdef CONFIG_RAETH_CPU_LOOPBACK
10894 + skb_push(rx_skb,ETH_HLEN);
10895 + ei_start_xmit(rx_skb, dev, 1);
10897 + netif_rx(rx_skb);
10903 +#ifdef CONFIG_PSEUDO_SUPPORT
10904 + if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10905 + if (ei_local->PseudoDev != NULL) {
10906 + pAd = netdev_priv(ei_local->PseudoDev);
10907 + pAd->stat.rx_packets++;
10908 + pAd->stat.rx_bytes += length;
10913 + ei_local->stat.rx_packets++;
10914 + ei_local->stat.rx_bytes += length;
10918 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10919 +#if defined (CONFIG_RAETH_HW_LRO)
10920 + if( rx_ring != ei_local->rx_ring0 ){
10921 + rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
10922 + rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
10925 +#endif /* CONFIG_RAETH_HW_LRO */
10926 + rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = MAX_RX_LENGTH;
10927 + rx_ring[rx_dma_owner_idx].rxd_info2.LS0 = 0;
10929 + rx_ring[rx_dma_owner_idx].rxd_info2.DDONE_bit = 0;
10930 +#if defined (CONFIG_RAETH_HW_LRO)
10931 + if( rx_ring != ei_local->rx_ring0 )
10932 + rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
10934 +#endif /* CONFIG_RAETH_HW_LRO */
10935 + rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
10936 +#ifdef CONFIG_32B_DESC
10937 + dma_cache_sync(NULL, &rx_ring[rx_dma_owner_idx], sizeof(struct PDMA_rxdesc), DMA_TO_DEVICE);
10939 + /* Move point to next RXD which wants to alloc*/
10940 +#if defined (CONFIG_RAETH_HW_LRO)
10941 + if(rx_ring_no==3) {
10942 + sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
10943 + ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
10945 + else if(rx_ring_no==2) {
10946 + sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
10947 + ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
10949 + else if(rx_ring_no==1) {
10950 + sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
10951 + ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
10953 + else if(rx_ring_no==0) {
10954 + sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
10955 + ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
10957 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10958 + if(rx_ring_no==0) {
10959 + sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
10960 + ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
10961 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10962 + rx_calc_idx0 = rx_dma_owner_idx;
10965 +#if defined(CONFIG_ARCH_MT7623)
10966 + else if(rx_ring_no==3) {
10967 + sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
10968 + ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
10970 + else if(rx_ring_no==2) {
10971 + sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
10972 + ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
10974 +#endif /* CONFIG_ARCH_MT7623 */
10976 + sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
10977 + ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
10978 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10979 + rx_calc_idx1 = rx_dma_owner_idx;
10983 + sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
10984 + ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
10985 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10986 + rx_calc_idx0 = rx_dma_owner_idx;
10991 + /* Update to Next packet point that was received.
10993 +#if defined (CONFIG_RAETH_HW_LRO)
10994 + if(rx_ring_no==3)
10995 + rx_dma_owner_lro3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
10996 + else if(rx_ring_no==2)
10997 + rx_dma_owner_lro2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
10998 + else if(rx_ring_no==1)
10999 + rx_dma_owner_lro1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
11000 + else if(rx_ring_no==0)
11001 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11004 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11005 + if(rx_ring_no==0) {
11006 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11007 + rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11009 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11011 +#if defined(CONFIG_ARCH_MT7623)
11012 + }else if(rx_ring_no==3) {
11013 + rx_dma_owner_idx3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_RX_DESC;
11014 + }else if(rx_ring_no==2) {
11015 + rx_dma_owner_idx2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_RX_DESC;
11016 +#endif /* CONFIG_ARCH_MT7623 */
11018 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11019 + rx_dma_owner_idx1 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11021 + rx_dma_owner_idx1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_RX_DESC;
11025 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11026 + rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11028 + rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11033 +#if defined (CONFIG_RAETH_LRO)
11034 + if (lro_flush_needed) {
11035 + //LroStatsUpdate(&ei_local->lro_mgr,1);
11036 + lro_flush_all(&ei_local->lro_mgr);
11037 + lro_flush_needed = 0;
11040 + return bReschedule;
11044 +///////////////////////////////////////////////////////////////////
11046 +///// ra_get_stats - gather packet information for management plane
11048 +///// Pass net_device_stats to the upper layer.
11051 +///// RETURNS: pointer to net_device_stats
11052 +///////////////////////////////////////////////////////////////////
11054 +struct net_device_stats *ra_get_stats(struct net_device *dev)
11056 + END_DEVICE *ei_local = netdev_priv(dev);
11057 + return &ei_local->stat;
11060 +#if defined (CONFIG_RT_3052_ESW)
11061 +void kill_sig_workq(struct work_struct *work)
11065 + struct task_struct *p = NULL;
11067 + //read udhcpc pid from file, and send signal USR2,USR1 to get a new IP
11068 + fp = filp_open("/var/run/udhcpc.pid", O_RDONLY, 0);
11072 + if (fp->f_op && fp->f_op->read) {
11073 + if (fp->f_op->read(fp, pid, 8, &fp->f_pos) > 0) {
11074 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11075 + p = pid_task(find_get_pid(simple_strtoul(pid, NULL, 10)), PIDTYPE_PID);
11077 + p = find_task_by_pid(simple_strtoul(pid, NULL, 10));
11081 + send_sig(SIGUSR2, p, 0);
11082 + send_sig(SIGUSR1, p, 0);
11086 + filp_close(fp, NULL);
11092 +///////////////////////////////////////////////////////////////////
11094 +///// ra2880Recv - process the next incoming packet
11096 +///// Handle one incoming packet. The packet is checked for errors and sent
11097 +///// to the upper layer.
11099 +///// RETURNS: OK on success or ERROR.
11100 +///////////////////////////////////////////////////////////////////
11102 +#ifndef CONFIG_RAETH_NAPI
11103 +#if defined WORKQUEUE_BH || defined (TASKLET_WORKQUEUE_SW)
11104 +void ei_receive_workq(struct work_struct *work)
11106 +void ei_receive(unsigned long unused) // device structure
11107 +#endif // WORKQUEUE_BH //
11109 + struct net_device *dev = dev_raether;
11110 + END_DEVICE *ei_local = netdev_priv(dev);
11111 + unsigned long reg_int_mask=0;
11112 + int bReschedule=0;
11115 + if(tx_ring_full==0){
11116 + bReschedule = rt2880_eth_recv(dev);
11119 +#ifdef WORKQUEUE_BH
11120 + schedule_work(&ei_local->rx_wq);
11122 +#if defined (TASKLET_WORKQUEUE_SW)
11123 + if (working_schedule == 1)
11124 + schedule_work(&ei_local->rx_wq);
11127 + tasklet_hi_schedule(&ei_local->rx_tasklet);
11128 +#endif // WORKQUEUE_BH //
11130 + reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
11131 +#if defined(DELAY_INT)
11132 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
11134 + sysRegWrite(RAETH_FE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11136 +#ifdef CONFIG_RAETH_QDMA
11137 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
11138 +#if defined(DELAY_INT)
11139 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
11141 + sysRegWrite(QFE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1));
11148 +#ifdef WORKQUEUE_BH
11149 + schedule_work(&ei_local->rx_wq);
11151 +#if defined (TASKLET_WORKQUEUE_SW)
11152 + if (working_schedule == 1)
11153 + schedule_work(&ei_local->rx_wq);
11156 + tasklet_schedule(&ei_local->rx_tasklet);
11157 +#endif // WORKQUEUE_BH //
11162 +#if defined (CONFIG_RAETH_HW_LRO)
11163 +void ei_hw_lro_auto_adj(unsigned int index, END_DEVICE* ei_local)
11165 + unsigned int entry;
11166 + unsigned int pkt_cnt;
11167 + unsigned int tick_cnt;
11168 + unsigned int duration_us;
11169 + unsigned int byte_cnt;
11171 + /* read packet count statitics of the auto-learn table */
11172 + entry = index + 68;
11173 + sysRegWrite( PDMA_FE_ALT_CF8, entry );
11174 + pkt_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC) & 0xfff;
11175 + tick_cnt = (sysRegRead(PDMA_FE_ALT_SGL_CFC) >> 16) & 0xffff;
11176 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11177 + printk("[HW LRO] ei_hw_lro_auto_adj(): pkt_cnt[%d]=%d, tick_cnt[%d]=%d\n", index, pkt_cnt, index, tick_cnt);
11178 + printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (ticks/pkt)\n", index, tick_cnt/pkt_cnt);
11181 + /* read byte count statitics of the auto-learn table */
11182 + entry = index + 64;
11183 + sysRegWrite( PDMA_FE_ALT_CF8, entry );
11184 + byte_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC);
11185 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11186 + printk("[HW LRO] ei_hw_lro_auto_adj(): byte_cnt[%d]=%d\n", index, byte_cnt);
11189 + /* calculate the packet interval of the rx flow */
11190 + duration_us = tick_cnt * HW_LRO_TIMER_UNIT;
11191 + ei_local->hw_lro_pkt_interval[index - 1] = (duration_us/pkt_cnt) * ei_local->hw_lro_alpha / 100;
11192 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11193 + printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (20us)\n", index, duration_us/pkt_cnt);
11196 + if ( !ei_local->hw_lro_fix_setting ){
11197 + /* adjust age_time, agg_time for the lro ring */
11198 + if(ei_local->hw_lro_pkt_interval[index - 1] > 0){
11199 + SET_PDMA_RXRING_AGE_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_MAX_AGG_CNT));
11200 + SET_PDMA_RXRING_AGG_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_AGG_DELTA));
11203 + SET_PDMA_RXRING_AGE_TIME(index, HW_LRO_MAX_AGG_CNT);
11204 + SET_PDMA_RXRING_AGG_TIME(index, HW_LRO_AGG_DELTA);
11209 +void ei_hw_lro_workq(struct work_struct *work)
11211 + END_DEVICE *ei_local;
11212 + unsigned int reg_int_val;
11213 + unsigned int reg_int_mask;
11215 + ei_local = container_of(work, struct end_device, hw_lro_wq);
11217 + reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
11218 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11219 + printk("[HW LRO] ei_hw_lro_workq(): RAETH_FE_INT_STATUS=0x%x\n", reg_int_val);
11221 + if((reg_int_val & ALT_RPLC_INT3)){
11222 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11223 + printk("[HW LRO] ALT_RPLC_INT3 occurred!\n");
11225 + sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT3);
11226 + ei_hw_lro_auto_adj(3, ei_local);
11228 + if((reg_int_val & ALT_RPLC_INT2)){
11229 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11230 + printk("[HW LRO] ALT_RPLC_INT2 occurred!\n");
11232 + sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT2);
11233 + ei_hw_lro_auto_adj(2, ei_local);
11235 + if((reg_int_val & ALT_RPLC_INT1)){
11236 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11237 + printk("[HW LRO] ALT_RPLC_INT1 occurred!\n");
11239 + sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT1);
11240 + ei_hw_lro_auto_adj(1, ei_local);
11243 + /* unmask interrupts of rx flow to hw lor rings */
11244 + reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11245 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
11247 +#endif /* CONFIG_RAETH_HW_LRO */
11249 +#ifdef CONFIG_RAETH_NAPI
11251 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11252 +raeth_clean(struct napi_struct *napi, int budget)
11254 +raeth_clean(struct net_device *netdev, int *budget)
11257 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11258 + struct net_device *netdev=dev_raether;
11259 + int work_to_do = budget;
11261 + int work_to_do = min(*budget, netdev->quota);
11263 + END_DEVICE *ei_local =netdev_priv(netdev);
11264 + int work_done = 0;
11265 + unsigned long reg_int_mask=0;
11267 + ei_xmit_housekeeping(0);
11269 + rt2880_eth_recv(netdev, &work_done, work_to_do);
11271 + /* this could control when to re-enable interrupt, 0-> mean never enable interrupt*/
11272 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
11273 + *budget -= work_done;
11274 + netdev->quota -= work_done;
11276 + /* if no Tx and not enough Rx work done, exit the polling mode */
11277 + if(( (work_done < work_to_do)) || !netif_running(netdev)) {
11278 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11279 + napi_complete(&ei_local->napi);
11281 + netif_rx_complete(netdev);
11283 + atomic_dec_and_test(&ei_local->irq_sem);
11285 + sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL); // ack all fe interrupts
11286 + reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
11289 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask |RAETH_FE_INT_DLY_INIT); // init delay interrupt only
11291 + sysRegWrite(RAETH_FE_INT_ENABLE,reg_int_mask | RAETH_FE_INT_SETTING);
11294 +#ifdef CONFIG_RAETH_QDMA
11295 + sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
11296 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
11298 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask |QFE_INT_DLY_INIT); // init delay interrupt only
11300 + sysRegWrite(QFE_INT_ENABLE,reg_int_mask | (RX_DONE_INT0 | RX_DONE_INT1 | RLS_DONE_INT));
11302 +#endif // CONFIG_RAETH_QDMA //
11313 +void gsw_delay_setting(void)
11315 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
11316 + END_DEVICE *ei_local = netdev_priv(dev_raether);
11317 + int reg_int_val = 0;
11318 + int link_speed = 0;
11320 + reg_int_val = sysRegRead(FE_INT_STATUS2);
11321 +#if defined (CONFIG_RALINK_MT7621)
11322 + if( reg_int_val & BIT(25))
11324 + if(sysRegRead(RALINK_ETH_SW_BASE+0x0208) & 0x1) // link up
11326 + link_speed = (sysRegRead(RALINK_ETH_SW_BASE+0x0208)>>2 & 0x3);
11327 + if(link_speed == 1)
11329 + // delay setting for 100M
11330 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
11331 + mii_mgr_write(31, 0x7b00, 8);
11332 + printk("MT7621 GE2 link rate to 100M\n");
11335 + //delay setting for 10/1000M
11336 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
11337 + mii_mgr_write(31, 0x7b00, 0x102);
11338 + printk("MT7621 GE2 link rate to 10M/1G\n");
11340 + schedule_work(&ei_local->kill_sig_wq);
11344 + sysRegWrite(FE_INT_STATUS2, reg_int_val);
11349 + * ei_interrupt - handle controler interrupt
11351 + * This routine is called at interrupt level in response to an interrupt from
11352 + * the controller.
11356 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
11357 +static irqreturn_t ei_interrupt(int irq, void *dev_id)
11359 +static irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
11362 +#if !defined(CONFIG_RAETH_NAPI)
11363 + unsigned long reg_int_val;
11364 + unsigned long reg_int_mask=0;
11365 + unsigned int recv = 0;
11366 + unsigned int transmit __maybe_unused = 0;
11367 + unsigned long flags;
11370 + struct net_device *dev = (struct net_device *) dev_id;
11371 + END_DEVICE *ei_local = netdev_priv(dev);
11375 + unsigned long old,cur,dcycle;
11376 + static int cnt = 0;
11377 + static unsigned long max_dcycle = 0,tcycle = 0;
11378 + old = read_c0_count();
11382 + printk (KERN_ERR "net_interrupt(): irq %x for unknown device.\n", IRQ_ENET0);
11386 +#ifdef CONFIG_RAETH_NAPI
11387 + gsw_delay_setting();
11388 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11389 + if(napi_schedule_prep(&ei_local->napi)) {
11391 + if(netif_rx_schedule_prep(dev)) {
11393 + atomic_inc(&ei_local->irq_sem);
11394 + sysRegWrite(RAETH_FE_INT_ENABLE, 0);
11395 +#ifdef CONFIG_RAETH_QDMA
11396 + sysRegWrite(QFE_INT_ENABLE, 0);
11398 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11399 + __napi_schedule(&ei_local->napi);
11401 + __netif_rx_schedule(dev);
11406 + spin_lock_irqsave(&(ei_local->page_lock), flags);
11407 + reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
11408 +#ifdef CONFIG_RAETH_QDMA
11409 + reg_int_val |= sysRegRead(QFE_INT_STATUS);
11411 +#if defined (DELAY_INT)
11412 + if((reg_int_val & RX_DLY_INT))
11415 + if (reg_int_val & RAETH_TX_DLY_INT)
11418 +#if defined(CONFIG_RAETH_PDMA_DVT)
11419 + raeth_pdma_lro_dly_int_dvt();
11420 +#endif /* CONFIG_RAETH_PDMA_DVT */
11423 + if((reg_int_val & (RX_DONE_INT0 | RX_DONE_INT3 | RX_DONE_INT2 | RX_DONE_INT1)))
11426 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11427 +#if defined(CONFIG_ARCH_MT7623)
11428 + if((reg_int_val & RX_DONE_INT3))
11430 + if((reg_int_val & RX_DONE_INT2))
11432 +#endif /* CONFIG_ARCH_MT7623 */
11433 + if((reg_int_val & RX_DONE_INT1))
11437 + if (reg_int_val & RAETH_TX_DONE_INT0)
11438 + transmit |= RAETH_TX_DONE_INT0;
11439 +#if defined (CONFIG_RAETH_QOS)
11440 + if (reg_int_val & TX_DONE_INT1)
11441 + transmit |= TX_DONE_INT1;
11442 + if (reg_int_val & TX_DONE_INT2)
11443 + transmit |= TX_DONE_INT2;
11444 + if (reg_int_val & TX_DONE_INT3)
11445 + transmit |= TX_DONE_INT3;
11446 +#endif //CONFIG_RAETH_QOS
11448 +#endif //DELAY_INT
11450 +#if defined (DELAY_INT)
11451 + sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_DLY_INIT);
11453 + sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL);
11455 +#ifdef CONFIG_RAETH_QDMA
11456 +#if defined (DELAY_INT)
11457 + sysRegWrite(QFE_INT_STATUS, QFE_INT_DLY_INIT);
11459 + sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
11463 +#if defined (CONFIG_RAETH_HW_LRO)
11464 + if( reg_int_val & (ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1) ){
11465 + /* mask interrupts of rx flow to hw lor rings */
11466 + reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11467 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1));
11468 + schedule_work(&ei_local->hw_lro_wq);
11470 +#endif /* CONFIG_RAETH_HW_LRO */
11472 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
11474 + ei_xmit_housekeeping(0);
11476 + ei_xmit_housekeeping(0);
11479 + if (((recv == 1) || (pending_recv ==1)) && (tx_ring_full==0))
11481 + reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11482 +#if defined (DELAY_INT)
11483 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
11485 + sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11486 +#endif //DELAY_INT
11487 +#ifdef CONFIG_RAETH_QDMA
11488 + reg_int_mask = sysRegRead(QFE_INT_ENABLE);
11489 +#if defined (DELAY_INT)
11490 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
11492 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11493 +#endif //DELAY_INT
11497 +#ifdef WORKQUEUE_BH
11498 + schedule_work(&ei_local->rx_wq);
11500 +#if defined (TASKLET_WORKQUEUE_SW)
11501 + if (working_schedule == 1)
11502 + schedule_work(&ei_local->rx_wq);
11505 + tasklet_hi_schedule(&ei_local->rx_tasklet);
11506 +#endif // WORKQUEUE_BH //
11508 + else if (recv == 1 && tx_ring_full==1)
11512 + else if((recv == 0) && (transmit == 0))
11514 + gsw_delay_setting();
11516 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11519 + return IRQ_HANDLED;
11522 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
11523 + defined (CONFIG_RALINK_MT7620)|| defined (CONFIG_RALINK_MT7621)
11524 +static void esw_link_status_changed(int port_no, void *dev_id)
11526 + unsigned int reg_val;
11527 + struct net_device *dev = (struct net_device *) dev_id;
11528 + END_DEVICE *ei_local = netdev_priv(dev);
11530 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11531 + defined (CONFIG_RALINK_MT7620)
11532 + reg_val = *((volatile u32 *)(RALINK_ETH_SW_BASE+ 0x3008 + (port_no*0x100)));
11533 +#elif defined (CONFIG_RALINK_MT7621)
11534 + mii_mgr_read(31, (0x3008 + (port_no*0x100)), ®_val);
11536 + if(reg_val & 0x1) {
11537 + printk("ESW: Link Status Changed - Port%d Link UP\n", port_no);
11538 +#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
11539 + mii_mgr_write(port_no, 31, 0x52b5);
11540 + mii_mgr_write(port_no, 16, 0xb780);
11541 + mii_mgr_write(port_no, 17, 0x00e0);
11542 + mii_mgr_write(port_no, 16, 0x9780);
11545 +#if defined (CONFIG_WAN_AT_P0)
11547 + schedule_work(&ei_local->kill_sig_wq);
11549 +#elif defined (CONFIG_WAN_AT_P4)
11551 + schedule_work(&ei_local->kill_sig_wq);
11555 + printk("ESW: Link Status Changed - Port%d Link Down\n", port_no);
11556 +#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
11557 + mii_mgr_write(port_no, 31, 0x52b5);
11558 + mii_mgr_write(port_no, 16, 0xb780);
11559 + mii_mgr_write(port_no, 17, 0x0000);
11560 + mii_mgr_write(port_no, 16, 0x9780);
11567 +#if defined (CONFIG_RT_3052_ESW) && ! defined(CONFIG_RALINK_MT7621) && ! defined(CONFIG_ARCH_MT7623)
11568 +static irqreturn_t esw_interrupt(int irq, void *dev_id)
11570 + unsigned long flags;
11571 + unsigned long reg_int_val;
11572 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11573 + defined(CONFIG_RALINK_MT7620)
11574 + unsigned long acl_int_val;
11575 + unsigned long mib_int_val;
11577 + static unsigned long stat;
11578 + unsigned long stat_curr;
11581 + struct net_device *dev = (struct net_device *) dev_id;
11582 + END_DEVICE *ei_local = netdev_priv(dev);
11585 + spin_lock_irqsave(&(ei_local->page_lock), flags);
11586 + reg_int_val = (*((volatile u32 *)(ESW_ISR))); //Interrupt Status Register
11588 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11589 + defined(CONFIG_RALINK_MT7620)
11590 + if (reg_int_val & P5_LINK_CH) {
11591 + esw_link_status_changed(5, dev_id);
11593 + if (reg_int_val & P4_LINK_CH) {
11594 + esw_link_status_changed(4, dev_id);
11596 + if (reg_int_val & P3_LINK_CH) {
11597 + esw_link_status_changed(3, dev_id);
11599 + if (reg_int_val & P2_LINK_CH) {
11600 + esw_link_status_changed(2, dev_id);
11602 + if (reg_int_val & P1_LINK_CH) {
11603 + esw_link_status_changed(1, dev_id);
11605 + if (reg_int_val & P0_LINK_CH) {
11606 + esw_link_status_changed(0, dev_id);
11608 + if (reg_int_val & ACL_INT) {
11609 + acl_int_val = sysRegRead(ESW_AISR);
11610 + sysRegWrite(ESW_AISR, acl_int_val);
11612 + if (reg_int_val & MIB_INT) {
11614 + mib_int_val = sysRegRead(ESW_P0_IntSn);
11616 + sysRegWrite(ESW_P0_IntSn, mib_int_val);
11617 + if(mib_int_val & RX_GOOD_CNT)
11618 + p0_rx_good_cnt ++;
11619 + if(mib_int_val & TX_GOOD_CNT)
11620 + p0_tx_good_cnt ++;
11621 + if(mib_int_val & RX_GOCT_CNT)
11622 + p0_rx_byte_cnt ++;
11623 + if(mib_int_val & TX_GOCT_CNT)
11624 + p0_tx_byte_cnt ++;
11627 + mib_int_val = sysRegRead(ESW_P1_IntSn);
11629 + sysRegWrite(ESW_P1_IntSn, mib_int_val);
11630 + if(mib_int_val & RX_GOOD_CNT)
11631 + p1_rx_good_cnt ++;
11632 + if(mib_int_val & TX_GOOD_CNT)
11633 + p1_tx_good_cnt ++;
11634 + if(mib_int_val & RX_GOCT_CNT)
11635 + p1_rx_byte_cnt ++;
11636 + if(mib_int_val & TX_GOCT_CNT)
11637 + p1_tx_byte_cnt ++;
11640 + mib_int_val = sysRegRead(ESW_P2_IntSn);
11642 + sysRegWrite(ESW_P2_IntSn, mib_int_val);
11643 + if(mib_int_val & RX_GOOD_CNT)
11644 + p2_rx_good_cnt ++;
11645 + if(mib_int_val & TX_GOOD_CNT)
11646 + p2_tx_good_cnt ++;
11647 + if(mib_int_val & RX_GOCT_CNT)
11648 + p2_rx_byte_cnt ++;
11649 + if(mib_int_val & TX_GOCT_CNT)
11650 + p2_tx_byte_cnt ++;
11654 + mib_int_val = sysRegRead(ESW_P3_IntSn);
11656 + sysRegWrite(ESW_P3_IntSn, mib_int_val);
11657 + if(mib_int_val & RX_GOOD_CNT)
11658 + p3_rx_good_cnt ++;
11659 + if(mib_int_val & TX_GOOD_CNT)
11660 + p3_tx_good_cnt ++;
11661 + if(mib_int_val & RX_GOCT_CNT)
11662 + p3_rx_byte_cnt ++;
11663 + if(mib_int_val & TX_GOCT_CNT)
11664 + p3_tx_byte_cnt ++;
11667 + mib_int_val = sysRegRead(ESW_P4_IntSn);
11669 + sysRegWrite(ESW_P4_IntSn, mib_int_val);
11670 + if(mib_int_val & RX_GOOD_CNT)
11671 + p4_rx_good_cnt ++;
11672 + if(mib_int_val & TX_GOOD_CNT)
11673 + p4_tx_good_cnt ++;
11674 + if(mib_int_val & RX_GOCT_CNT)
11675 + p4_rx_byte_cnt ++;
11676 + if(mib_int_val & TX_GOCT_CNT)
11677 + p4_tx_byte_cnt ++;
11680 + mib_int_val = sysRegRead(ESW_P5_IntSn);
11682 + sysRegWrite(ESW_P5_IntSn, mib_int_val);
11683 + if(mib_int_val & RX_GOOD_CNT)
11684 + p5_rx_good_cnt ++;
11685 + if(mib_int_val & TX_GOOD_CNT)
11686 + p5_tx_good_cnt ++;
11687 + if(mib_int_val & RX_GOCT_CNT)
11688 + p5_rx_byte_cnt ++;
11689 + if(mib_int_val & TX_GOCT_CNT)
11690 + p5_tx_byte_cnt ++;
11693 + mib_int_val = sysRegRead(ESW_P6_IntSn);
11695 + sysRegWrite(ESW_P6_IntSn, mib_int_val);
11696 + if(mib_int_val & RX_GOOD_CNT)
11697 + p6_rx_good_cnt ++;
11698 + if(mib_int_val & TX_GOOD_CNT)
11699 + p6_tx_good_cnt ++;
11700 + if(mib_int_val & RX_GOCT_CNT)
11701 + p6_rx_byte_cnt ++;
11702 + if(mib_int_val & TX_GOCT_CNT)
11703 + p6_tx_byte_cnt ++;
11705 +#if defined (CONFIG_RALINK_MT7620)
11706 + mib_int_val = sysRegRead(ESW_P7_IntSn);
11708 + sysRegWrite(ESW_P7_IntSn, mib_int_val);
11709 + if(mib_int_val & RX_GOOD_CNT)
11710 + p7_rx_good_cnt ++;
11711 + if(mib_int_val & TX_GOOD_CNT)
11712 + p7_tx_good_cnt ++;
11713 + if(mib_int_val & RX_GOCT_CNT)
11714 + p7_rx_byte_cnt ++;
11715 + if(mib_int_val & TX_GOCT_CNT)
11716 + p7_tx_byte_cnt ++;
11722 +#else // not RT6855
11723 + if (reg_int_val & PORT_ST_CHG) {
11724 + printk("RT305x_ESW: Link Status Changed\n");
11726 + stat_curr = *((volatile u32 *)(RALINK_ETH_SW_BASE+0x80));
11727 +#ifdef CONFIG_WAN_AT_P0
11728 + //link down --> link up : send signal to user application
11729 + //link up --> link down : ignore
11730 + if ((stat & (1<<25)) || !(stat_curr & (1<<25)))
11732 + if ((stat & (1<<29)) || !(stat_curr & (1<<29)))
11736 + schedule_work(&ei_local->kill_sig_wq);
11738 + stat = stat_curr;
11741 +#endif // defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)//
11743 + sysRegWrite(ESW_ISR, reg_int_val);
11745 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11746 + return IRQ_HANDLED;
11751 +#elif defined (CONFIG_RT_3052_ESW) && defined(CONFIG_RALINK_MT7621)
11753 +static irqreturn_t esw_interrupt(int irq, void *dev_id)
11755 + unsigned long flags;
11756 + unsigned int reg_int_val;
11757 + struct net_device *dev = (struct net_device *) dev_id;
11758 + END_DEVICE *ei_local = netdev_priv(dev);
11760 + spin_lock_irqsave(&(ei_local->page_lock), flags);
11761 + mii_mgr_read(31, 0x700c, ®_int_val);
11763 + if (reg_int_val & P4_LINK_CH) {
11764 + esw_link_status_changed(4, dev_id);
11767 + if (reg_int_val & P3_LINK_CH) {
11768 + esw_link_status_changed(3, dev_id);
11770 + if (reg_int_val & P2_LINK_CH) {
11771 + esw_link_status_changed(2, dev_id);
11773 + if (reg_int_val & P1_LINK_CH) {
11774 + esw_link_status_changed(1, dev_id);
11776 + if (reg_int_val & P0_LINK_CH) {
11777 + esw_link_status_changed(0, dev_id);
11780 + mii_mgr_write(31, 0x700c, 0x1f); //ack switch link change
11782 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11783 + return IRQ_HANDLED;
11789 +static int ei_start_xmit_fake(struct sk_buff* skb, struct net_device *dev)
11791 + return ei_start_xmit(skb, dev, 1);
11795 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
11796 +void dump_phy_reg(int port_no, int from, int to, int is_local)
11801 + if(is_local==0) {
11802 + printk("Global Register\n");
11803 + printk("===============");
11804 + mii_mgr_write(0, 31, 0); //select global register
11805 + for(i=from;i<=to;i++) {
11809 + mii_mgr_read(port_no,i, &temp);
11810 + printk("%02d: %04X ",i, temp);
11813 + mii_mgr_write(0, 31, 0x8000); //select local register
11814 + printk("\n\nLocal Register Port %d\n",port_no);
11815 + printk("===============");
11816 + for(i=from;i<=to;i++) {
11820 + mii_mgr_read(port_no,i, &temp);
11821 + printk("%02d: %04X ",i, temp);
11827 +void dump_phy_reg(int port_no, int from, int to, int is_local, int page_no)
11835 + if(is_local==0) {
11837 + printk("\n\nGlobal Register Page %d\n",page_no);
11838 + printk("===============");
11839 + r31 |= 0 << 15; //global
11840 + r31 |= ((page_no&0x7) << 12); //page no
11841 + mii_mgr_write(port_no, 31, r31); //select global page x
11842 + for(i=16;i<32;i++) {
11846 + mii_mgr_read(port_no,i, &temp);
11847 + printk("%02d: %04X ",i, temp);
11850 + printk("\n\nLocal Register Port %d Page %d\n",port_no, page_no);
11851 + printk("===============");
11852 + r31 |= 1 << 15; //local
11853 + r31 |= ((page_no&0x7) << 12); //page no
11854 + mii_mgr_write(port_no, 31, r31); //select local page x
11855 + for(i=16;i<32;i++) {
11859 + mii_mgr_read(port_no,i, &temp);
11860 + printk("%02d: %04X ",i, temp);
11868 +int ei_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11870 +#if defined(CONFIG_RT_3052_ESW) || defined(CONFIG_RAETH_QDMA)
11873 +#if defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
11874 + defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11875 + defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
11876 + defined (CONFIG_RALINK_MT7628) || defined (CONFIG_ARCH_MT7623)
11877 + esw_rate ratelimit;
11879 +#if defined(CONFIG_RT_3052_ESW)
11880 + unsigned int offset = 0;
11881 + unsigned int value = 0;
11885 + END_DEVICE *ei_local = netdev_priv(dev);
11886 + ra_mii_ioctl_data mii;
11887 + spin_lock_irq(&ei_local->page_lock);
11890 +#if defined(CONFIG_RAETH_QDMA)
11891 +#define _HQOS_REG(x) (*((volatile u32 *)(RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + x)))
11892 + case RAETH_QDMA_REG_READ:
11893 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11894 + if (reg.off > REG_HQOS_MAX) {
11898 + reg.val = _HQOS_REG(reg.off);
11899 + //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11900 + copy_to_user(ifr->ifr_data, ®, sizeof(reg));
11902 + case RAETH_QDMA_REG_WRITE:
11903 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11904 + if (reg.off > REG_HQOS_MAX) {
11908 + _HQOS_REG(reg.off) = reg.val;
11909 + //printk("write reg off:%x val:%x\n", reg.off, reg.val);
11912 + case RAETH_QDMA_READ_CPU_CLK:
11913 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11914 + reg.val = get_surfboard_sysclk();
11915 + //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11916 + copy_to_user(ifr->ifr_data, ®, sizeof(reg));
11919 + case RAETH_QDMA_QUEUE_MAPPING:
11920 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11921 + if((reg.off&0x100) == 0x100){
11922 + lan_wan_separate = 1;
11925 + lan_wan_separate = 0;
11927 + M2Q_table[reg.off] = reg.val;
11929 +#if defined(CONFIG_HW_SFQ)
11930 + case RAETH_QDMA_SFQ_WEB_ENABLE:
11931 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11932 + if((reg.val) == 0x1){
11933 + web_sfq_enable = 1;
11936 + web_sfq_enable = 0;
11943 + case RAETH_MII_READ:
11944 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11945 + mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
11946 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
11947 + copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
11950 + case RAETH_MII_WRITE:
11951 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11952 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
11953 + mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
11955 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
11956 + case RAETH_MII_READ_CL45:
11957 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11958 + //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
11959 + mii_mgr_read_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, &mii.val_out);
11960 + copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
11962 + case RAETH_MII_WRITE_CL45:
11963 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11964 + //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
11965 + mii_mgr_write_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, mii.val_in);
11969 +#if defined(CONFIG_RT_3052_ESW)
11970 +#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
11971 + case RAETH_ESW_REG_READ:
11972 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11973 + if (reg.off > REG_ESW_MAX) {
11977 + reg.val = _ESW_REG(reg.off);
11978 + //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11979 + copy_to_user(ifr->ifr_data, ®, sizeof(reg));
11981 + case RAETH_ESW_REG_WRITE:
11982 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11983 + if (reg.off > REG_ESW_MAX) {
11987 + _ESW_REG(reg.off) = reg.val;
11988 + //printk("write reg off:%x val:%x\n", reg.off, reg.val);
11990 + case RAETH_ESW_PHY_DUMP:
11991 + copy_from_user(®, ifr->ifr_data, sizeof(reg));
11992 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
11993 + if (reg.val ==32 ) {//dump all phy register
11994 + /* Global Register 0~31
11995 + * Local Register 0~31
11997 + dump_phy_reg(0, 0, 31, 0); //dump global register
11998 + for(offset=0;offset<5;offset++) {
11999 + dump_phy_reg(offset, 0, 31, 1); //dump local register
12002 + dump_phy_reg(reg.val, 0, 31, 0); //dump global register
12003 + dump_phy_reg(reg.val, 0, 31, 1); //dump local register
12006 + /* SPEC defined Register 0~15
12007 + * Global Register 16~31 for each page
12008 + * Local Register 16~31 for each page
12010 + printk("SPEC defined Register");
12011 + if (reg.val ==32 ) {//dump all phy register
12013 + for(i=0; i<5; i++){
12014 + printk("\n[Port %d]===============",i);
12015 + for(offset=0;offset<16;offset++) {
12016 + if(offset%8==0) {
12019 + mii_mgr_read(i,offset, &value);
12020 + printk("%02d: %04X ",offset, value);
12025 + printk("\n[Port %d]===============",reg.val);
12026 + for(offset=0;offset<16;offset++) {
12027 + if(offset%8==0) {
12030 + mii_mgr_read(reg.val,offset, &value);
12031 + printk("%02d: %04X ",offset, value);
12035 +#if defined (CONFIG_RALINK_MT7628)
12036 + for(offset=0;offset<7;offset++) { //global register page 0~6
12038 + for(offset=0;offset<5;offset++) { //global register page 0~4
12040 + if(reg.val == 32) //dump all phy register
12041 + dump_phy_reg(0, 16, 31, 0, offset);
12043 + dump_phy_reg(reg.val, 16, 31, 0, offset);
12046 + if (reg.val == 32) {//dump all phy register
12047 +#if !defined (CONFIG_RAETH_HAS_PORT4)
12048 + for(offset=0;offset<5;offset++) { //local register port 0-port4
12050 + for(offset=0;offset<4;offset++) { //local register port 0-port3
12052 + dump_phy_reg(offset, 16, 31, 1, 0); //dump local page 0
12053 + dump_phy_reg(offset, 16, 31, 1, 1); //dump local page 1
12054 + dump_phy_reg(offset, 16, 31, 1, 2); //dump local page 2
12055 + dump_phy_reg(offset, 16, 31, 1, 3); //dump local page 3
12058 + dump_phy_reg(reg.val, 16, 31, 1, 0); //dump local page 0
12059 + dump_phy_reg(reg.val, 16, 31, 1, 1); //dump local page 1
12060 + dump_phy_reg(reg.val, 16, 31, 1, 2); //dump local page 2
12061 + dump_phy_reg(reg.val, 16, 31, 1, 3); //dump local page 3
12066 +#if defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
12067 +#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12068 + case RAETH_ESW_INGRESS_RATE:
12069 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12070 + offset = 0x11c + (4 * (ratelimit.port / 2));
12071 + value = _ESW_REG(offset);
12073 + if((ratelimit.port % 2) == 0)
12075 + value &= 0xffff0000;
12076 + if(ratelimit.on_off == 1)
12078 + value |= (ratelimit.on_off << 14);
12079 + value |= (0x07 << 10);
12080 + value |= ratelimit.bw;
12083 + else if((ratelimit.port % 2) == 1)
12085 + value &= 0x0000ffff;
12086 + if(ratelimit.on_off == 1)
12088 + value |= (ratelimit.on_off << 30);
12089 + value |= (0x07 << 26);
12090 + value |= (ratelimit.bw << 16);
12093 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12095 + _ESW_REG(offset) = value;
12098 + case RAETH_ESW_EGRESS_RATE:
12099 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12100 + offset = 0x140 + (4 * (ratelimit.port / 2));
12101 + value = _ESW_REG(offset);
12103 + if((ratelimit.port % 2) == 0)
12105 + value &= 0xffff0000;
12106 + if(ratelimit.on_off == 1)
12108 + value |= (ratelimit.on_off << 12);
12109 + value |= (0x03 << 10);
12110 + value |= ratelimit.bw;
12113 + else if((ratelimit.port % 2) == 1)
12115 + value &= 0x0000ffff;
12116 + if(ratelimit.on_off == 1)
12118 + value |= (ratelimit.on_off << 28);
12119 + value |= (0x03 << 26);
12120 + value |= (ratelimit.bw << 16);
12123 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12124 + _ESW_REG(offset) = value;
12126 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
12127 + defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12128 +#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12129 + case RAETH_ESW_INGRESS_RATE:
12130 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12131 +#if defined(CONFIG_RALINK_RT6855A) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12132 + offset = 0x1800 + (0x100 * ratelimit.port);
12134 + offset = 0x1080 + (0x100 * ratelimit.port);
12136 + value = _ESW_REG(offset);
12138 + value &= 0xffff0000;
12139 + if(ratelimit.on_off == 1)
12141 + value |= (ratelimit.on_off << 15);
12142 + if (ratelimit.bw < 100)
12144 + value |= (0x0 << 8);
12145 + value |= ratelimit.bw;
12146 + }else if(ratelimit.bw < 1000)
12148 + value |= (0x1 << 8);
12149 + value |= ratelimit.bw/10;
12150 + }else if(ratelimit.bw < 10000)
12152 + value |= (0x2 << 8);
12153 + value |= ratelimit.bw/100;
12154 + }else if(ratelimit.bw < 100000)
12156 + value |= (0x3 << 8);
12157 + value |= ratelimit.bw/1000;
12160 + value |= (0x4 << 8);
12161 + value |= ratelimit.bw/10000;
12164 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12165 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12166 + mii_mgr_write(0x1f, offset, value);
12168 + _ESW_REG(offset) = value;
12172 + case RAETH_ESW_EGRESS_RATE:
12173 + copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12174 + offset = 0x1040 + (0x100 * ratelimit.port);
12175 + value = _ESW_REG(offset);
12177 + value &= 0xffff0000;
12178 + if(ratelimit.on_off == 1)
12180 + value |= (ratelimit.on_off << 15);
12181 + if (ratelimit.bw < 100)
12183 + value |= (0x0 << 8);
12184 + value |= ratelimit.bw;
12185 + }else if(ratelimit.bw < 1000)
12187 + value |= (0x1 << 8);
12188 + value |= ratelimit.bw/10;
12189 + }else if(ratelimit.bw < 10000)
12191 + value |= (0x2 << 8);
12192 + value |= ratelimit.bw/100;
12193 + }else if(ratelimit.bw < 100000)
12195 + value |= (0x3 << 8);
12196 + value |= ratelimit.bw/1000;
12199 + value |= (0x4 << 8);
12200 + value |= ratelimit.bw/10000;
12203 + printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12204 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12205 + mii_mgr_write(0x1f, offset, value);
12207 + _ESW_REG(offset) = value;
12211 +#endif // CONFIG_RT_3052_ESW
12213 + ret = -EOPNOTSUPP;
12218 + spin_unlock_irq(&ei_local->page_lock);
12223 + * Set new MTU size
12224 + * Change the mtu of Raeth Ethernet Device
12226 +static int ei_change_mtu(struct net_device *dev, int new_mtu)
12228 + END_DEVICE *ei_local = netdev_priv(dev); // get priv ei_local pointer from net_dev structure
12230 + if ( ei_local == NULL ) {
12231 + printk(KERN_EMERG "%s: ei_change_mtu passed a non-existent private pointer from net_dev!\n", dev->name);
12236 + if ( (new_mtu > 4096) || (new_mtu < 64)) {
12240 +#ifndef CONFIG_RAETH_JUMBOFRAME
12241 + if ( new_mtu > 1500 ) {
12246 + dev->mtu = new_mtu;
12251 +#ifdef CONFIG_RAETH_HW_VLAN_RX
12252 +static void ei_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
12254 + END_DEVICE *ei_local = netdev_priv(dev);
12256 + ei_local->vlgrp = grp;
12258 + /* enable HW VLAN RX */
12259 + sysRegWrite(CDMP_EG_CTRL, 1);
12264 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12265 +static const struct net_device_ops ei_netdev_ops = {
12266 + .ndo_init = rather_probe,
12267 + .ndo_open = ei_open,
12268 + .ndo_stop = ei_close,
12269 + .ndo_start_xmit = ei_start_xmit_fake,
12270 + .ndo_get_stats = ra_get_stats,
12271 + .ndo_set_mac_address = eth_mac_addr,
12272 + .ndo_change_mtu = ei_change_mtu,
12273 + .ndo_do_ioctl = ei_ioctl,
12274 + .ndo_validate_addr = eth_validate_addr,
12275 +#ifdef CONFIG_RAETH_HW_VLAN_RX
12276 + .ndo_vlan_rx_register = ei_vlan_rx_register,
12278 +#ifdef CONFIG_NET_POLL_CONTROLLER
12279 + .ndo_poll_controller = raeth_clean,
12281 +// .ndo_tx_timeout = ei_tx_timeout,
12285 +void ra2880_setup_dev_fptable(struct net_device *dev)
12287 + RAETH_PRINT(__FUNCTION__ "is called!\n");
12289 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12290 + dev->netdev_ops = &ei_netdev_ops;
12292 + dev->open = ei_open;
12293 + dev->stop = ei_close;
12294 + dev->hard_start_xmit = ei_start_xmit_fake;
12295 + dev->get_stats = ra_get_stats;
12296 + dev->set_mac_address = ei_set_mac_addr;
12297 + dev->change_mtu = ei_change_mtu;
12299 + dev->do_ioctl = ei_ioctl;
12300 +// dev->tx_timeout = ei_tx_timeout;
12302 +#ifdef CONFIG_RAETH_NAPI
12303 + dev->poll = &raeth_clean;
12304 +#if defined (CONFIG_RAETH_ROUTER)
12305 + dev->weight = 32;
12306 +#elif defined (CONFIG_RT_3052_ESW)
12307 + dev->weight = 32;
12309 + dev->weight = 128;
12313 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
12314 + dev->ethtool_ops = &ra_ethtool_ops;
12316 +#define TX_TIMEOUT (5*HZ)
12317 + dev->watchdog_timeo = TX_TIMEOUT;
12321 +/* reset frame engine */
12322 +void fe_reset(void)
12324 +#if defined (CONFIG_RALINK_RT6855A)
12329 + //val = *(volatile u32 *)(0x1b000000);
12330 + //printk("0x1b000000 is 0x%x\n", val);
12331 + //val = sysRegRead(0xFB110100);
12333 + //sysRegWrite(0xFB110100, val);
12337 + val = sysRegRead(RSTCTRL);
12339 +// RT5350 needs to reset ESW and FE at the same time to avoid PDMA panic //
12340 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
12341 + val = val | RALINK_FE_RST | RALINK_ESW_RST ;
12343 + val = val | RALINK_FE_RST;
12345 + sysRegWrite(RSTCTRL, val);
12346 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7628)
12347 + val = val & ~(RALINK_FE_RST | RALINK_ESW_RST);
12349 + val = val & ~(RALINK_FE_RST);
12352 + sysRegWrite(RSTCTRL, val);
12357 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
12358 +void trgmii_set_7621(void)
12363 + val = sysRegRead(RSTCTRL);
12364 +// MT7621 need to reset GMAC and FE first //
12365 + val = val | RALINK_FE_RST | RALINK_ETH_RST ;
12366 + sysRegWrite(RSTCTRL, val);
12368 +//set TRGMII clock//
12369 + val_0 = sysRegRead(CLK_CFG_0);
12370 + val_0 &= 0xffffff9f;
12371 + val_0 |= (0x1 << 5);
12372 + sysRegWrite(CLK_CFG_0, val_0);
12374 + val_0 = sysRegRead(CLK_CFG_0);
12375 + printk("set CLK_CFG_0 = 0x%x!!!!!!!!!!!!!!!!!!1\n",val_0);
12376 + val = val & ~(RALINK_FE_RST | RALINK_ETH_RST);
12377 + sysRegWrite(RSTCTRL, val);
12380 +void trgmii_set_7530(void)
12385 + mii_mgr_write(31, 103, 0x0020);
12389 + mii_mgr_write(0, 0x16, 0);
12390 + mii_mgr_write(1, 0x16, 0);
12391 + mii_mgr_write(2, 0x16, 0);
12392 + mii_mgr_write(3, 0x16, 0);
12393 + mii_mgr_write(4, 0x16, 0);
12396 + //PLL reset for E2
12397 + mii_mgr_write(31, 104, 0x0608);
12398 + mii_mgr_write(31, 104, 0x2608);
12400 + mii_mgr_write(31, 0x7808, 0x0);
12402 + mii_mgr_write(31, 0x7804, 0x01017e8f);
12404 + mii_mgr_write(31, 0x7808, 0x1);
12411 + mii_mgr_write(0, 13, 0x1f);
12412 + mii_mgr_write(0, 14, 0x404);
12413 + mii_mgr_write(0, 13, 0x401f);
12414 +	mii_mgr_read(31, 0x7800, &regValue);
12415 + regValue = (regValue >> 9) & 0x3;
12416 + if(regValue == 0x3) { //25Mhz Xtal
12417 + mii_mgr_write(0, 14, 0x0A00);//25Mhz XTAL for 150Mhz CLK
12418 + } else if(regValue == 0x2) { //40Mhz
12419 + mii_mgr_write(0, 14, 0x0780);//40Mhz XTAL for 150Mhz CLK
12421 + //mii_mgr_write(0, 14, 0x0C00);//ori
12424 + mii_mgr_write(0, 13, 0x1f);
12425 + mii_mgr_write(0, 14, 0x409);
12426 + mii_mgr_write(0, 13, 0x401f);
12427 + mii_mgr_write(0, 14, 0x57);
12430 + mii_mgr_write(0, 13, 0x1f);
12431 + mii_mgr_write(0, 14, 0x40a);
12432 + mii_mgr_write(0, 13, 0x401f);
12433 + mii_mgr_write(0, 14, 0x57);
12436 + mii_mgr_write(0, 13, 0x1f);
12437 + mii_mgr_write(0, 14, 0x403);
12438 + mii_mgr_write(0, 13, 0x401f);
12439 + mii_mgr_write(0, 14, 0x1800);
12443 + mii_mgr_write(0, 13, 0x1f);
12444 + mii_mgr_write(0, 14, 0x403);
12445 + mii_mgr_write(0, 13, 0x401f);
12446 + mii_mgr_write(0, 14, 0x1c00);
12449 + mii_mgr_write(0, 13, 0x1f);
12450 + mii_mgr_write(0, 14, 0x401);
12451 + mii_mgr_write(0, 13, 0x401f);
12452 + mii_mgr_write(0, 14, 0xc020);
12455 + mii_mgr_write(0, 13, 0x1f);
12456 + mii_mgr_write(0, 14, 0x406);
12457 + mii_mgr_write(0, 13, 0x401f);
12458 + mii_mgr_write(0, 14, 0xa030);
12462 + mii_mgr_write(0, 13, 0x1f);
12463 + mii_mgr_write(0, 14, 0x410);
12464 + mii_mgr_write(0, 13, 0x401f);
12465 + mii_mgr_write(0, 14, 0x0003);
12468 + mii_mgr_write(31, 0x3600, 0x5e33b);
12471 + mii_mgr_write(31, 0x7830, 0x1);
12477 +void ei_reset_task(struct work_struct *work)
12479 + struct net_device *dev = dev_raether;
12487 +void ei_tx_timeout(struct net_device *dev)
12489 + END_DEVICE *ei_local = netdev_priv(dev);
12491 + schedule_work(&ei_local->reset_task);
12494 +void setup_statistics(END_DEVICE* ei_local)
12496 + ei_local->stat.tx_packets = 0;
12497 + ei_local->stat.tx_bytes = 0;
12498 + ei_local->stat.tx_dropped = 0;
12499 + ei_local->stat.tx_errors = 0;
12500 + ei_local->stat.tx_aborted_errors= 0;
12501 + ei_local->stat.tx_carrier_errors= 0;
12502 + ei_local->stat.tx_fifo_errors = 0;
12503 + ei_local->stat.tx_heartbeat_errors = 0;
12504 + ei_local->stat.tx_window_errors = 0;
12506 + ei_local->stat.rx_packets = 0;
12507 + ei_local->stat.rx_bytes = 0;
12508 + ei_local->stat.rx_dropped = 0;
12509 + ei_local->stat.rx_errors = 0;
12510 + ei_local->stat.rx_length_errors = 0;
12511 + ei_local->stat.rx_over_errors = 0;
12512 + ei_local->stat.rx_crc_errors = 0;
12513 + ei_local->stat.rx_frame_errors = 0;
12514 + ei_local->stat.rx_fifo_errors = 0;
12515 + ei_local->stat.rx_missed_errors = 0;
12517 + ei_local->stat.collisions = 0;
12518 +#if defined (CONFIG_RAETH_QOS)
12519 + ei_local->tx3_full = 0;
12520 + ei_local->tx2_full = 0;
12521 + ei_local->tx1_full = 0;
12522 + ei_local->tx0_full = 0;
12524 + ei_local->tx_full = 0;
12526 +#ifdef CONFIG_RAETH_NAPI
12527 + atomic_set(&ei_local->irq_sem, 1);
12533 + * rather_probe - pick up ethernet port at boot time
12534 + * @dev: network device to probe
12536 + * This routine probe the ethernet port at boot time.
12541 +int __init rather_probe(struct net_device *dev)
12544 + END_DEVICE *ei_local = netdev_priv(dev);
12545 + struct sockaddr addr;
12546 + unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
12547 + unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
12551 + //Get mac0 address from flash
12552 +#ifdef RA_MTD_RW_BY_NUM
12553 + i = ra_mtd_read(2, GMAC0_OFFSET, 6, addr.sa_data);
12555 + i = ra_mtd_read_nm("Factory", GMAC0_OFFSET, 6, addr.sa_data);
12557 + //If reading mtd failed or mac0 is empty, generate a mac address
12558 + if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) ||
12559 + (memcmp(addr.sa_data, zero2, 6) == 0)) {
12560 + unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
12561 + // net_srandom(jiffies);
12562 + memcpy(addr.sa_data, mac_addr01234, 5);
12563 + // addr.sa_data[5] = net_random()&0xFF;
12566 +#ifdef CONFIG_RAETH_NAPI
12567 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12568 + netif_napi_add(dev, &ei_local->napi, raeth_clean, 128);
12571 + ei_set_mac_addr(dev, &addr);
12572 + spin_lock_init(&ei_local->page_lock);
12573 + ether_setup(dev);
12575 +#ifdef CONFIG_RAETH_LRO
12576 + ei_local->lro_mgr.dev = dev;
12577 + memset(&ei_local->lro_mgr.stats, 0, sizeof(ei_local->lro_mgr.stats));
12578 + ei_local->lro_mgr.features = LRO_F_NAPI;
12579 + ei_local->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
12580 + ei_local->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
12581 + ei_local->lro_mgr.max_desc = ARRAY_SIZE(ei_local->lro_arr);
12582 + ei_local->lro_mgr.max_aggr = 64;
12583 + ei_local->lro_mgr.frag_align_pad = 0;
12584 + ei_local->lro_mgr.lro_arr = ei_local->lro_arr;
12585 + ei_local->lro_mgr.get_skb_header = rt_get_skb_header;
12588 + setup_statistics(ei_local);
12593 +#ifdef CONFIG_PSEUDO_SUPPORT
12594 +int VirtualIF_ioctl(struct net_device * net_dev,
12595 + struct ifreq * ifr, int cmd)
12597 + ra_mii_ioctl_data mii;
12600 + case RAETH_MII_READ:
12601 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12602 + mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
12603 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
12604 + copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
12607 + case RAETH_MII_WRITE:
12608 + copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12609 + //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
12610 + mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
12613 + return -EOPNOTSUPP;
12619 +struct net_device_stats *VirtualIF_get_stats(struct net_device *dev)
12621 + PSEUDO_ADAPTER *pAd = netdev_priv(dev);
12622 + return &pAd->stat;
12625 +int VirtualIF_open(struct net_device * dev)
12627 + PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12629 + printk("%s: ===> VirtualIF_open\n", dev->name);
12631 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
12632 + *((volatile u32 *)(FE_INT_ENABLE2)) |= (1<<25); //enable GE2 link change intr for MT7530 delay setting
12635 + netif_start_queue(pPesueoAd->PseudoDev);
12640 +int VirtualIF_close(struct net_device * dev)
12642 + PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12644 + printk("%s: ===> VirtualIF_close\n", dev->name);
12646 + netif_stop_queue(pPesueoAd->PseudoDev);
12651 +int VirtualIFSendPackets(struct sk_buff * pSkb,
12652 + struct net_device * dev)
12654 + PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12655 + END_DEVICE *ei_local __maybe_unused;
12658 + //printk("VirtualIFSendPackets --->\n");
12660 + ei_local = netdev_priv(dev);
12661 + if (!(pPesueoAd->RaethDev->flags & IFF_UP)) {
12662 + dev_kfree_skb_any(pSkb);
12665 + //pSkb->cb[40]=0x5a;
12666 + pSkb->dev = pPesueoAd->RaethDev;
12667 + ei_start_xmit(pSkb, pPesueoAd->RaethDev, 2);
12671 +void virtif_setup_statistics(PSEUDO_ADAPTER* pAd)
12673 + pAd->stat.tx_packets = 0;
12674 + pAd->stat.tx_bytes = 0;
12675 + pAd->stat.tx_dropped = 0;
12676 + pAd->stat.tx_errors = 0;
12677 + pAd->stat.tx_aborted_errors= 0;
12678 + pAd->stat.tx_carrier_errors= 0;
12679 + pAd->stat.tx_fifo_errors = 0;
12680 + pAd->stat.tx_heartbeat_errors = 0;
12681 + pAd->stat.tx_window_errors = 0;
12683 + pAd->stat.rx_packets = 0;
12684 + pAd->stat.rx_bytes = 0;
12685 + pAd->stat.rx_dropped = 0;
12686 + pAd->stat.rx_errors = 0;
12687 + pAd->stat.rx_length_errors = 0;
12688 + pAd->stat.rx_over_errors = 0;
12689 + pAd->stat.rx_crc_errors = 0;
12690 + pAd->stat.rx_frame_errors = 0;
12691 + pAd->stat.rx_fifo_errors = 0;
12692 + pAd->stat.rx_missed_errors = 0;
12694 + pAd->stat.collisions = 0;
12697 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12698 +static const struct net_device_ops VirtualIF_netdev_ops = {
12699 + .ndo_open = VirtualIF_open,
12700 + .ndo_stop = VirtualIF_close,
12701 + .ndo_start_xmit = VirtualIFSendPackets,
12702 + .ndo_get_stats = VirtualIF_get_stats,
12703 + .ndo_set_mac_address = ei_set_mac2_addr,
12704 + .ndo_change_mtu = ei_change_mtu,
12705 + .ndo_do_ioctl = VirtualIF_ioctl,
12706 + .ndo_validate_addr = eth_validate_addr,
12709 +// Register pseudo interface
12710 +void RAETH_Init_PSEUDO(pEND_DEVICE pAd, struct net_device *net_dev)
12713 + struct net_device *dev;
12714 + PSEUDO_ADAPTER *pPseudoAd;
12716 + struct sockaddr addr;
12717 + unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
12718 + unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
12720 + for (index = 0; index < MAX_PSEUDO_ENTRY; index++) {
12722 + dev = alloc_etherdev(sizeof(PSEUDO_ADAPTER));
12725 + printk(" alloc_etherdev for PSEUDO_ADAPTER failed.\n");
12728 + strcpy(dev->name, DEV2_NAME);
12730 + //Get mac2 address from flash
12731 +#ifdef RA_MTD_RW_BY_NUM
12732 + i = ra_mtd_read(2, GMAC2_OFFSET, 6, addr.sa_data);
12734 + i = ra_mtd_read_nm("Factory", GMAC2_OFFSET, 6, addr.sa_data);
12737 + //If reading mtd failed or mac0 is empty, generate a mac address
12738 + if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) ||
12739 + (memcmp(addr.sa_data, zero2, 6) == 0)) {
12740 + unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
12741 + // net_srandom(jiffies);
12742 + memcpy(addr.sa_data, mac_addr01234, 5);
12743 + // addr.sa_data[5] = net_random()&0xFF;
12746 + ei_set_mac2_addr(dev, &addr);
12747 + ether_setup(dev);
12748 + pPseudoAd = netdev_priv(dev);
12750 + pPseudoAd->PseudoDev = dev;
12751 + pPseudoAd->RaethDev = net_dev;
12752 + virtif_setup_statistics(pPseudoAd);
12753 + pAd->PseudoDev = dev;
12755 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12756 + dev->netdev_ops = &VirtualIF_netdev_ops;
12758 + dev->hard_start_xmit = VirtualIFSendPackets;
12759 + dev->stop = VirtualIF_close;
12760 + dev->open = VirtualIF_open;
12761 + dev->do_ioctl = VirtualIF_ioctl;
12762 + dev->set_mac_address = ei_set_mac2_addr;
12763 + dev->get_stats = VirtualIF_get_stats;
12764 + dev->change_mtu = ei_change_mtu;
12768 +#if defined (CONFIG_RAETH_HW_LRO)
12769 + dev->features |= NETIF_F_HW_CSUM;
12771 + dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
12772 +#endif /* CONFIG_RAETH_HW_LRO */
12774 +#if defined(CONFIG_RALINK_MT7620)
12775 +#if defined (CONFIG_RAETH_TSO)
12776 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
12777 + dev->features |= NETIF_F_SG;
12778 + dev->features |= NETIF_F_TSO;
12780 +#endif // CONFIG_RAETH_TSO //
12782 +#if defined (CONFIG_RAETH_TSOV6)
12783 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
12784 + dev->features |= NETIF_F_TSO6;
12785 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
12789 +#if defined (CONFIG_RAETH_TSO)
12790 + dev->features |= NETIF_F_SG;
12791 + dev->features |= NETIF_F_TSO;
12792 +#endif // CONFIG_RAETH_TSO //
12794 +#if defined (CONFIG_RAETH_TSOV6)
12795 + dev->features |= NETIF_F_TSO6;
12796 + dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
12798 +#endif // CONFIG_RALINK_MT7620 //
12800 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
12801 + dev->vlan_features = dev->features;
12805 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
12806 + dev->ethtool_ops = &ra_virt_ethtool_ops;
12807 + // init mii structure
12808 + pPseudoAd->mii_info.dev = dev;
12809 + pPseudoAd->mii_info.mdio_read = mdio_virt_read;
12810 + pPseudoAd->mii_info.mdio_write = mdio_virt_write;
12811 + pPseudoAd->mii_info.phy_id_mask = 0x1f;
12812 + pPseudoAd->mii_info.reg_num_mask = 0x1f;
12813 + pPseudoAd->mii_info.phy_id = 0x1e;
12814 + pPseudoAd->mii_info.supports_gmii = mii_check_gmii_support(&pPseudoAd->mii_info);
12817 + // Register this device
12818 + register_netdevice(dev);
12824 + * ei_open - Open/Initialize the ethernet port.
12825 + * @dev: network device to initialize
12827 + * This routine goes all-out, setting everything
12828 + * up anew at each open, even though many of these registers should only need to be set once at boot.
12830 +int ei_open(struct net_device *dev)
12833 +#if !defined (CONFIG_MT7623_FPGA)
12834 + unsigned long flags;
12836 + END_DEVICE *ei_local;
12838 +#ifdef CONFIG_RAETH_LRO
12839 + const char *lan_ip_tmp;
12840 +#ifdef CONFIG_DUAL_IMAGE
12841 +#define RT2860_NVRAM 1
12843 +#define RT2860_NVRAM 0
12845 +#endif // CONFIG_RAETH_LRO //
12847 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
12848 + if (!try_module_get(THIS_MODULE))
12850 + printk("%s: Cannot reserve module\n", __FUNCTION__);
12854 + MOD_INC_USE_COUNT;
12857 + printk("Raeth %s (",RAETH_VERSION);
12858 +#if defined (CONFIG_RAETH_NAPI)
12859 + printk("NAPI\n");
12860 +#elif defined (CONFIG_RA_NETWORK_TASKLET_BH)
12861 + printk("Tasklet");
12862 +#elif defined (CONFIG_RA_NETWORK_WORKQUEUE_BH)
12863 + printk("Workqueue");
12866 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
12867 + printk(",SkbRecycle");
12872 + ei_local = netdev_priv(dev); // get device pointer from System
12873 + // unsigned int flags;
12875 + if (ei_local == NULL)
12877 + printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
12881 + /* receiving packet buffer allocation - NUM_RX_DESC x MAX_RX_LENGTH */
12882 + for ( i = 0; i < NUM_RX_DESC; i++)
12884 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
12885 + ei_local->netrx0_skbuf[i] = skbmgr_dev_alloc_skb2k();
12887 + ei_local->netrx0_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12889 + if (ei_local->netrx0_skbuf[i] == NULL ) {
12890 + printk("rx skbuff buffer allocation failed!");
12892 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12893 + skb_reserve(ei_local->netrx0_skbuf[i], NET_IP_ALIGN);
12898 +#if defined (CONFIG_RAETH_HW_LRO)
12899 + ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12900 + if (ei_local->netrx3_skbuf[i] == NULL ) {
12901 + printk("rx3 skbuff buffer allocation failed!");
12903 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12904 + skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
12907 + ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12908 + if (ei_local->netrx2_skbuf[i] == NULL ) {
12909 + printk("rx2 skbuff buffer allocation failed!");
12911 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12912 + skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
12915 + ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12916 + if (ei_local->netrx1_skbuf[i] == NULL ) {
12917 + printk("rx1 skbuff buffer allocation failed!");
12919 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12920 + skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
12923 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
12924 +#if defined(CONFIG_ARCH_MT7623)
12925 + ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12926 + if (ei_local->netrx3_skbuf[i] == NULL ) {
12927 + printk("rx3 skbuff buffer allocation failed!");
12929 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12930 + skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
12933 + ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12934 + if (ei_local->netrx2_skbuf[i] == NULL ) {
12935 + printk("rx2 skbuff buffer allocation failed!");
12937 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12938 + skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
12941 +#endif /* CONFIG_ARCH_MT7623 */
12942 + ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12943 + if (ei_local->netrx1_skbuf[i] == NULL ) {
12944 + printk("rx1 skbuff buffer allocation failed!");
12946 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12947 + skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
12952 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
12953 + trgmii_set_7621(); //reset FE/GMAC in this function
12956 + fe_dma_init(dev);
12958 +#if defined (CONFIG_RAETH_HW_LRO)
12959 + fe_hw_lro_init(dev);
12960 +#endif /* CONFIG_RAETH_HW_LRO */
12962 + fe_sw_init(); //initialize fe and switch register
12963 +#if defined (CONFIG_MIPS)
12964 + err = request_irq( dev->irq, ei_interrupt, IRQF_DISABLED, dev->name, dev); // try to fix irq in open
12966 + err = request_irq(dev->irq, ei_interrupt, /*IRQF_TRIGGER_LOW*/ 0, dev->name, dev); // try to fix irq in open
12971 + if ( dev->dev_addr != NULL) {
12972 + ra2880MacAddressSet((void *)(dev->dev_addr));
12974 + printk("dev->dev_addr is empty !\n");
12976 +/*TODO: MT7623 MCM INT */
12977 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
12978 + err = request_irq(SURFBOARDINT_ESW, esw_interrupt, IRQF_DISABLED, "Ralink_ESW", dev);
12981 + INIT_WORK(&ei_local->kill_sig_wq, kill_sig_workq);
12982 +#if defined (CONFIG_RALINK_MT7621)
12983 + mii_mgr_write(31, 0x7008, 0x1f); //enable switch link change intr
12986 + *((volatile u32 *)(RALINK_INTCL_BASE + 0x34)) = (1<<17);
12987 + *((volatile u32 *)(ESW_IMR)) &= ~(ESW_INT_ALL);
12989 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
12990 + defined (CONFIG_RALINK_MT7620)
12991 + *((volatile u32 *)(ESW_P0_IntMn)) &= ~(MSK_CNT_INT_ALL);
12992 + *((volatile u32 *)(ESW_P1_IntMn)) &= ~(MSK_CNT_INT_ALL);
12993 + *((volatile u32 *)(ESW_P2_IntMn)) &= ~(MSK_CNT_INT_ALL);
12994 + *((volatile u32 *)(ESW_P3_IntMn)) &= ~(MSK_CNT_INT_ALL);
12995 + *((volatile u32 *)(ESW_P4_IntMn)) &= ~(MSK_CNT_INT_ALL);
12996 + *((volatile u32 *)(ESW_P5_IntMn)) &= ~(MSK_CNT_INT_ALL);
12997 + *((volatile u32 *)(ESW_P6_IntMn)) &= ~(MSK_CNT_INT_ALL);
12999 +#if defined(CONFIG_RALINK_MT7620)
13000 + *((volatile u32 *)(ESW_P7_IntMn)) &= ~(MSK_CNT_INT_ALL);
13004 +#endif // CONFIG_RT_3052_ESW //
13007 +#if !defined (CONFIG_MT7623_FPGA)
13008 + spin_lock_irqsave(&(ei_local->page_lock), flags);
13013 + sysRegWrite(RAETH_DLY_INT_CFG, DELAY_INT_INIT);
13014 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT);
13015 + #if defined (CONFIG_RAETH_HW_LRO)
13016 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
13017 + #endif /* CONFIG_RAETH_HW_LRO */
13019 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL);
13020 + #if defined (CONFIG_RAETH_HW_LRO)
13021 + sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
13022 + #endif /* CONFIG_RAETH_HW_LRO */
13025 +#ifdef CONFIG_RAETH_QDMA
13027 + sysRegWrite(QDMA_DELAY_INT, DELAY_INT_INIT);
13028 + sysRegWrite(QFE_INT_ENABLE, QFE_INT_DLY_INIT);
13030 + sysRegWrite(QFE_INT_ENABLE, QFE_INT_ALL);
13035 + INIT_WORK(&ei_local->reset_task, ei_reset_task);
13037 +#ifdef WORKQUEUE_BH
13038 +#ifndef CONFIG_RAETH_NAPI
13039 + INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
13040 +#endif // CONFIG_RAETH_NAPI //
13042 +#ifndef CONFIG_RAETH_NAPI
13043 +#if defined (TASKLET_WORKQUEUE_SW)
13044 + working_schedule = init_schedule;
13045 + INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
13046 + tasklet_init(&ei_local->rx_tasklet, ei_receive_workq, 0);
13048 + tasklet_init(&ei_local->rx_tasklet, ei_receive, 0);
13050 +#endif // CONFIG_RAETH_NAPI //
13051 +#endif // WORKQUEUE_BH //
13053 + netif_start_queue(dev);
13055 +#ifdef CONFIG_RAETH_NAPI
13056 + atomic_dec(&ei_local->irq_sem);
13057 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13058 + napi_enable(&ei_local->napi);
13060 + netif_poll_enable(dev);
13064 +#if !defined (CONFIG_MT7623_FPGA)
13065 + spin_unlock_irqrestore(&(ei_local->page_lock), flags);
13068 +#ifdef CONFIG_PSEUDO_SUPPORT
13069 + if(ei_local->PseudoDev == NULL) {
13070 + RAETH_Init_PSEUDO(ei_local, dev);
13073 + if(ei_local->PseudoDev == NULL)
13074 + printk("Open PseudoDev failed.\n");
13076 + VirtualIF_open(ei_local->PseudoDev);
13080 +#ifdef CONFIG_RAETH_LRO
13081 + lan_ip_tmp = nvram_get(RT2860_NVRAM, "lan_ipaddr");
13082 + str_to_ip(&lan_ip, lan_ip_tmp);
13083 + lro_para.lan_ip1 = lan_ip = htonl(lan_ip);
13084 +#endif // CONFIG_RAETH_LRO //
13086 +#if defined (CONFIG_RAETH_HW_LRO)
13087 + INIT_WORK(&ei_local->hw_lro_wq, ei_hw_lro_workq);
13088 +#endif /* CONFIG_RAETH_HW_LRO */
13090 + forward_config(dev);
13095 + * ei_close - shut down network device
13096 + * @dev: network device to clear
13098 + * This routine shut down network device.
13102 +int ei_close(struct net_device *dev)
13105 + END_DEVICE *ei_local = netdev_priv(dev); // device pointer
13107 + netif_stop_queue(dev);
13108 + ra2880stop(ei_local);
13110 + free_irq(dev->irq, dev);
13112 +/*TODO: MT7623 MCM INT */
13113 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
13114 + free_irq(SURFBOARDINT_ESW, dev);
13116 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13117 + cancel_work_sync(&ei_local->reset_task);
13120 +#ifdef CONFIG_PSEUDO_SUPPORT
13121 + VirtualIF_close(ei_local->PseudoDev);
13125 +#ifdef WORKQUEUE_BH
13126 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13127 + cancel_work_sync(&ei_local->rx_wq);
13130 +#if defined (TASKLET_WORKQUEUE_SW)
13131 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13132 + cancel_work_sync(&ei_local->rx_wq);
13135 + tasklet_kill(&ei_local->tx_tasklet);
13136 + tasklet_kill(&ei_local->rx_tasklet);
13137 +#endif // WORKQUEUE_BH //
13139 +#ifdef CONFIG_RAETH_NAPI
13140 + atomic_inc(&ei_local->irq_sem);
13141 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13142 + napi_disable(&ei_local->napi);
13144 + netif_poll_disable(dev);
13149 +#if defined (CONFIG_RAETH_HW_LRO)
13150 + cancel_work_sync(&ei_local->hw_lro_wq);
13151 +#endif /* CONFIG_RAETH_HW_LRO */
13153 + for ( i = 0; i < NUM_RX_DESC; i++)
13155 + if (ei_local->netrx0_skbuf[i] != NULL) {
13156 + dev_kfree_skb(ei_local->netrx0_skbuf[i]);
13157 + ei_local->netrx0_skbuf[i] = NULL;
13159 +#if defined (CONFIG_RAETH_HW_LRO)
13160 + if (ei_local->netrx3_skbuf[i] != NULL) {
13161 + dev_kfree_skb(ei_local->netrx3_skbuf[i]);
13162 + ei_local->netrx3_skbuf[i] = NULL;
13164 + if (ei_local->netrx2_skbuf[i] != NULL) {
13165 + dev_kfree_skb(ei_local->netrx2_skbuf[i]);
13166 + ei_local->netrx2_skbuf[i] = NULL;
13168 + if (ei_local->netrx1_skbuf[i] != NULL) {
13169 + dev_kfree_skb(ei_local->netrx1_skbuf[i]);
13170 + ei_local->netrx1_skbuf[i] = NULL;
13172 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13173 +#if defined(CONFIG_ARCH_MT7623)
13174 + if (ei_local->netrx3_skbuf[i] != NULL) {
13175 + dev_kfree_skb(ei_local->netrx3_skbuf[i]);
13176 + ei_local->netrx3_skbuf[i] = NULL;
13178 + if (ei_local->netrx2_skbuf[i] != NULL) {
13179 + dev_kfree_skb(ei_local->netrx2_skbuf[i]);
13180 + ei_local->netrx2_skbuf[i] = NULL;
13182 +#endif /* CONFIG_ARCH_MT7623 */
13183 + if (ei_local->netrx1_skbuf[i] != NULL) {
13184 + dev_kfree_skb(ei_local->netrx1_skbuf[i]);
13185 + ei_local->netrx1_skbuf[i] = NULL;
13190 + for ( i = 0; i < NUM_TX_DESC; i++)
13192 + if((ei_local->skb_free[i]!=(struct sk_buff *)0xFFFFFFFF) && (ei_local->skb_free[i]!= 0))
13194 + dev_kfree_skb_any(ei_local->skb_free[i]);
13199 +#ifdef CONFIG_RAETH_QDMA
13200 + if (ei_local->txd_pool != NULL) {
13201 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct QDMA_txdesc), ei_local->txd_pool, ei_local->phy_txd_pool);
13203 + if (ei_local->free_head != NULL){
13204 + pci_free_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), ei_local->free_head, ei_local->phy_free_head);
13206 + if (ei_local->free_page_head != NULL){
13207 + pci_free_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, ei_local->free_page_head, ei_local->phy_free_page_head);
13210 + if (ei_local->tx_ring0 != NULL) {
13211 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring0, ei_local->phy_tx_ring0);
13215 +#if defined (CONFIG_RAETH_QOS)
13216 + if (ei_local->tx_ring1 != NULL) {
13217 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring1, ei_local->phy_tx_ring1);
13220 +#if !defined (CONFIG_RALINK_RT2880)
13221 + if (ei_local->tx_ring2 != NULL) {
13222 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring2, ei_local->phy_tx_ring2);
13225 + if (ei_local->tx_ring3 != NULL) {
13226 + pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring3, ei_local->phy_tx_ring3);
13231 +#ifdef CONFIG_32B_DESC
13232 + kfree(ei_local->rx_ring0);
13234 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring0, ei_local->phy_rx_ring0);
13236 +#if defined CONFIG_RAETH_QDMA && !defined(CONFIG_RAETH_QDMATX_QDMARX)
13237 +#ifdef CONFIG_32B_DESC
13238 + kfree(ei_local->qrx_ring);
13240 + pci_free_consistent(NULL, NUM_QRX_DESC*sizeof(struct PDMA_rxdesc), ei_local->qrx_ring, ei_local->phy_qrx_ring);
13243 +#if defined (CONFIG_RAETH_HW_LRO)
13244 + pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
13245 + pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
13246 + pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
13247 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13248 +#ifdef CONFIG_32B_DESC
13249 + kfree(ei_local->rx_ring1);
13251 +#if defined(CONFIG_ARCH_MT7623)
13252 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
13253 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
13254 +#endif /* CONFIG_ARCH_MT7623 */
13255 + pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
13259 + printk("Free TX/RX Ring Memory!\n");
13263 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
13264 + module_put(THIS_MODULE);
13266 + MOD_DEC_USE_COUNT;
13271 +#if defined (CONFIG_RT6855A_FPGA)
13272 +void rt6855A_eth_gpio_reset(void)
13274 + u8 ether_gpio = 12;
13276 + /* Load the ethernet gpio value to reset Ethernet PHY */
13277 + *(unsigned long *)(RALINK_PIO_BASE + 0x00) |= 1<<(ether_gpio<<1);
13278 + *(unsigned long *)(RALINK_PIO_BASE + 0x14) |= 1<<(ether_gpio);
13279 + *(unsigned long *)(RALINK_PIO_BASE + 0x04) &= ~(1<<ether_gpio);
13283 + *(unsigned long *)(RALINK_PIO_BASE + 0x04) |= (1<<ether_gpio);
13285 + /* must wait for 0.6 seconds after reset*/
13290 +#if defined(CONFIG_RALINK_RT6855A)
13291 +void rt6855A_gsw_init(void)
13296 +#if defined (CONFIG_RT6855A_FPGA)
13297 + /*keep dump switch mode */
13298 + rt6855A_eth_gpio_reset();
13300 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e353;//(P0,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
13301 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e353;//(P1,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
13302 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
13303 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
13304 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x8000;//link down
13305 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x8000;//link down
13306 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x8000;//link down
13307 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
13308 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
13310 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
13312 + /* In order to use 10M/Full on FPGA board. We configure phy capable to
13313 + * 10M Full/Half duplex, so we can use auto-negotiation on PC side */
13314 + for(i=6;i<8;i++){
13315 + mii_mgr_write(i, 4, 0x07e1); //Capable of 10M&100M Full/Half Duplex, flow control on/off
13316 + //mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
13317 + mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
13318 + mii_mgr_read(i, 9, &phy_val);
13319 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
13320 + mii_mgr_write(i, 9, phy_val);
13322 +#elif defined (CONFIG_RT6855A_ASIC)
13323 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
13324 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
13326 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1ec) = 0x0fffffff;//Set PSE should pause 4 tx ring as default
13327 + *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1f0) = 0x0fffffff;//switch IOT more stable
13329 + *(unsigned long *)(CKGCR) &= ~(0x3 << 4); //keep rx/tx port clock ticking, disable internal clock-gating to avoid switch stuck
13332 + *Reg 31: Page Control
13333 + * Bit 15 => PortPageSel, 1=local, 0=global
13334 + * Bit 14:12 => PageSel, local:0~3, global:0~4
13336 + *Reg16~30:Local/Global registers
13339 + /*correct PHY setting J8.0*/
13340 + mii_mgr_read(0, 31, &rev);
13343 + mii_mgr_write(1, 31, 0x4000); //global, page 4
13345 + mii_mgr_write(1, 16, 0xd4cc);
13346 + mii_mgr_write(1, 17, 0x7444);
13347 + mii_mgr_write(1, 19, 0x0112);
13348 + mii_mgr_write(1, 21, 0x7160);
13349 + mii_mgr_write(1, 22, 0x10cf);
13350 + mii_mgr_write(1, 26, 0x0777);
13353 + mii_mgr_write(1, 25, 0x0102);
13354 + mii_mgr_write(1, 29, 0x8641);
13357 + mii_mgr_write(1, 25, 0x0212);
13358 + mii_mgr_write(1, 29, 0x4640);
13361 + mii_mgr_write(1, 31, 0x2000); //global, page 2
13362 + mii_mgr_write(1, 21, 0x0655);
13363 + mii_mgr_write(1, 22, 0x0fd3);
13364 + mii_mgr_write(1, 23, 0x003d);
13365 + mii_mgr_write(1, 24, 0x096e);
13366 + mii_mgr_write(1, 25, 0x0fed);
13367 + mii_mgr_write(1, 26, 0x0fc4);
13369 + mii_mgr_write(1, 31, 0x1000); //global, page 1
13370 + mii_mgr_write(1, 17, 0xe7f8);
13373 + mii_mgr_write(1, 31, 0xa000); //local, page 2
13375 + mii_mgr_write(0, 16, 0x0e0e);
13376 + mii_mgr_write(1, 16, 0x0c0c);
13377 + mii_mgr_write(2, 16, 0x0f0f);
13378 + mii_mgr_write(3, 16, 0x1010);
13379 + mii_mgr_write(4, 16, 0x0909);
13381 + mii_mgr_write(0, 17, 0x0000);
13382 + mii_mgr_write(1, 17, 0x0000);
13383 + mii_mgr_write(2, 17, 0x0000);
13384 + mii_mgr_write(3, 17, 0x0000);
13385 + mii_mgr_write(4, 17, 0x0000);
13388 +#if defined (CONFIG_RT6855A_ASIC)
13390 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
13391 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
13392 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c;//disable port0-port4 internal phy, set phy base address to 12
13393 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
13394 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
13396 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
13397 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
13398 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
13399 + //rt6855/6 need to modify TX/RX phase
13400 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0xc;//TX/RX CLOCK Phase select
13402 + enable_auto_negotiate(1);
13404 + if (isICPlusGigaPHY(1)) {
13405 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
13406 + phy_val |= 1<<10; //enable pause ability
13407 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
13409 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
13410 + phy_val |= 1<<9; //restart AN
13411 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
13414 + if (isMarvellGigaPHY(1)) {
13415 + printk("Reset MARVELL phy1\n");
13416 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
13417 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
13418 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
13420 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
13421 + phy_val |= 1<<15; //PHY Software Reset
13422 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
13424 + if (isVtssGigaPHY(1)) {
13425 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
13426 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
13427 + printk("Vitesse phy skew: %x --> ", phy_val);
13428 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
13429 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
13430 + printk("%x\n", phy_val);
13431 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
13432 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
13434 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
13435 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
13436 +#else // Port 5 Disabled //
13437 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
13446 +#if defined (CONFIG_MT7623_FPGA)
13447 +void setup_fpga_gsw(void)
13452 + /* reduce RGMII2 PAD driving strength */
13453 + *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13455 + //RGMII1=Normal mode
13456 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13458 + //GMAC1= RGMII mode
13459 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13461 + //enable MDIO to control MT7530
13462 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13463 + regValue &= ~(0x3 << 12);
13464 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13466 + for(i=0;i<=4;i++)
13469 + mii_mgr_read(i, 0x0 ,®Value);
13470 + regValue |= (0x1<<11);
13471 + mii_mgr_write(i, 0x0, regValue);
13473 + mii_mgr_write(31, 0x7000, 0x3); //reset switch
13475 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
13476 + mii_mgr_write(31, 0x3600, 0x5e337);
13478 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
13479 + mii_mgr_write(31, 0x3500, 0x8000);
13481 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13482 + //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
13483 + mii_mgr_read(31, 0x7804 ,®Value);
13484 + regValue &= ~(1<<8); //Enable Port 6
13485 + regValue |= (1<<6); //Disable Port 5
13486 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13488 +#if defined (CONFIG_RAETH_GMAC2)
13489 + //RGMII2=Normal mode
13490 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13492 + //GMAC2= RGMII mode
13493 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13495 + mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
13496 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
13498 + enable_auto_negotiate(0);//set polling address
13499 + /* set MT7530 Port 5 to PHY 0/4 mode */
13500 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
13501 + regValue &= ~((1<<13)|(1<<6));
13502 + regValue |= ((1<<7)|(1<<16)|(1<<20));
13503 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
13504 + regValue &= ~((1<<13)|(1<<6)|((1<<20)));
13505 + regValue |= ((1<<7)|(1<<16));
13508 + //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
13510 + regValue |= (1<<16);//change HW-TRAP
13511 + printk("change HW-TRAP to 0x%x\n",regValue);
13512 + mii_mgr_write(31, 0x7804 ,regValue);
13514 + mii_mgr_write(0, 14, 0x1); /*RGMII*/
13515 +/* set MT7530 central align */
13516 + mii_mgr_read(31, 0x7830, ®Value);
13518 + regValue |= 1<<1;
13519 + mii_mgr_write(31, 0x7830, regValue);
13521 + mii_mgr_read(31, 0x7a40, ®Value);
13522 + regValue &= ~(1<<30);
13523 + mii_mgr_write(31, 0x7a40, regValue);
13525 + regValue = 0x855;
13526 + mii_mgr_write(31, 0x7a78, regValue);
13529 + mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M
13530 + mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M
13532 + for(i=0;i<=4;i++) {
13533 + mii_mgr_read(i, 4, ®Value);
13534 + regValue |= (3<<7); //turn on 100Base-T Advertisement
13535 + //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
13536 + mii_mgr_write(i, 4, regValue);
13538 + mii_mgr_read(i, 9, ®Value);
13539 + //regValue |= (3<<8); //turn on 1000Base-T Advertisement
13540 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
13541 + mii_mgr_write(i, 9, regValue);
13544 + mii_mgr_read(i, 0, ®Value);
13545 + regValue |= (1 << 9);
13546 + mii_mgr_write(i, 0, regValue);
13550 + mii_mgr_write(31, 0x7a54, 0x44); //lower driving
13551 + mii_mgr_write(31, 0x7a5c, 0x44); //lower driving
13552 + mii_mgr_write(31, 0x7a64, 0x44); //lower driving
13553 + mii_mgr_write(31, 0x7a6c, 0x44); //lower driving
13554 + mii_mgr_write(31, 0x7a74, 0x44); //lower driving
13555 + mii_mgr_write(31, 0x7a7c, 0x44); //lower driving
13557 + for(i=0;i<=4;i++)
13560 + mii_mgr_read(i, 0x0 ,®Value);
13561 + regValue &= ~(0x1<<11);
13562 + mii_mgr_write(i, 0x0, regValue);
13568 +#if defined (CONFIG_RALINK_MT7621)
13571 +void setup_external_gsw(void)
13575 + /* reduce RGMII2 PAD driving strength */
13576 + *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13578 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13579 + regValue &= ~(0x3 << 12);
13580 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13582 + //RGMII1=Normal mode
13583 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13584 + //GMAC1= RGMII mode
13585 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13587 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x00008000);//(GE1, Link down)
13589 + //RGMII2=Normal mode
13590 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13591 + //GMAC2= RGMII mode
13592 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13594 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000M/FD, FC ON)
13607 +void IsSwitchVlanTableBusy(void)
13610 + unsigned int value = 0;
13612 + for (j = 0; j < 20; j++) {
13613 + mii_mgr_read(31, 0x90, &value);
13614 + if ((value & 0x80000000) == 0 ){ //table busy
13620 + printk("set vlan timeout value=0x%x.\n", value);
13623 +void LANWANPartition(void)
13626 +#ifdef CONFIG_WAN_AT_P0
13627 + printk("set LAN/WAN WLLLL\n");
13628 + //WLLLL, wan at P0
13629 + //LAN/WAN ports as security mode
13630 + mii_mgr_write(31, 0x2004, 0xff0003);//port0
13631 + mii_mgr_write(31, 0x2104, 0xff0003);//port1
13632 + mii_mgr_write(31, 0x2204, 0xff0003);//port2
13633 + mii_mgr_write(31, 0x2304, 0xff0003);//port3
13634 + mii_mgr_write(31, 0x2404, 0xff0003);//port4
13637 + mii_mgr_write(31, 0x2014, 0x10002);//port0
13638 + mii_mgr_write(31, 0x2114, 0x10001);//port1
13639 + mii_mgr_write(31, 0x2214, 0x10001);//port2
13640 + mii_mgr_write(31, 0x2314, 0x10001);//port3
13641 + mii_mgr_write(31, 0x2414, 0x10001);//port4
13644 + IsSwitchVlanTableBusy();
13645 + mii_mgr_write(31, 0x94, 0x407e0001);//VAWD1
13646 + mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
13647 + IsSwitchVlanTableBusy();
13649 + mii_mgr_write(31, 0x94, 0x40610001);//VAWD1
13650 + mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
13651 + IsSwitchVlanTableBusy();
13653 +#ifdef CONFIG_WAN_AT_P4
13654 + printk("set LAN/WAN LLLLW\n");
13655 + //LLLLW, wan at P4
13656 + //LAN/WAN ports as security mode
13657 + mii_mgr_write(31, 0x2004, 0xff0003);//port0
13658 + mii_mgr_write(31, 0x2104, 0xff0003);//port1
13659 + mii_mgr_write(31, 0x2204, 0xff0003);//port2
13660 + mii_mgr_write(31, 0x2304, 0xff0003);//port3
13661 + mii_mgr_write(31, 0x2404, 0xff0003);//port4
13664 + mii_mgr_write(31, 0x2014, 0x10001);//port0
13665 + mii_mgr_write(31, 0x2114, 0x10001);//port1
13666 + mii_mgr_write(31, 0x2214, 0x10001);//port2
13667 + mii_mgr_write(31, 0x2314, 0x10001);//port3
13668 + mii_mgr_write(31, 0x2414, 0x10002);//port4
13671 + IsSwitchVlanTableBusy();
13672 + mii_mgr_write(31, 0x94, 0x404f0001);//VAWD1
13673 + mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
13674 + IsSwitchVlanTableBusy();
13675 + mii_mgr_write(31, 0x94, 0x40500001);//VAWD1
13676 + mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
13677 + IsSwitchVlanTableBusy();
13681 +#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
13682 +void mt7621_eee_patch(void)
13689 + mii_mgr_write(i, 13, 0x07);
13690 + mii_mgr_write(i, 14, 0x3c);
13691 + mii_mgr_write(i, 13, 0x4007);
13692 + mii_mgr_write(i, 14, 0x6);
13694 + /* Forced Slave mode */
13695 + mii_mgr_write(i, 31, 0x0);
13696 + mii_mgr_write(i, 9, 0x1600);
13697 + /* Increase SlvDPSready time */
13698 + mii_mgr_write(i, 31, 0x52b5);
13699 + mii_mgr_write(i, 16, 0xafae);
13700 + mii_mgr_write(i, 18, 0x2f);
13701 + mii_mgr_write(i, 16, 0x8fae);
13702 + /* Incease post_update_timer */
13703 + mii_mgr_write(i, 31, 0x3);
13704 + mii_mgr_write(i, 17, 0x4b);
13705 + /* Adjust 100_mse_threshold */
13706 + mii_mgr_write(i, 13, 0x1e);
13707 + mii_mgr_write(i, 14, 0x123);
13708 + mii_mgr_write(i, 13, 0x401e);
13709 + mii_mgr_write(i, 14, 0xffff);
13711 + mii_mgr_write(i, 13, 0x1e);
13712 + mii_mgr_write(i, 14, 0xa6);
13713 + mii_mgr_write(i, 13, 0x401e);
13714 + mii_mgr_write(i, 14, 0x300);
13722 +#if defined (CONFIG_RALINK_MT7621)
13723 +void setup_internal_gsw(void)
13728 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13729 + /*Hardware reset Switch*/
13730 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
13732 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
13735 + /* reduce RGMII2 PAD driving strength */
13736 + *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13738 + //RGMII1=Normal mode
13739 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13741 + //GMAC1= RGMII mode
13742 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13744 + //enable MDIO to control MT7530
13745 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13746 + regValue &= ~(0x3 << 12);
13747 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13749 + for(i=0;i<=4;i++)
13752 + mii_mgr_read(i, 0x0 ,®Value);
13753 + regValue |= (0x1<<11);
13754 + mii_mgr_write(i, 0x0, regValue);
13756 + mii_mgr_write(31, 0x7000, 0x3); //reset switch
13760 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13761 + trgmii_set_7530(); //reset FE, config MDIO again
13763 + //enable MDIO to control MT7530
13764 + regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13765 + regValue &= ~(0x3 << 12);
13766 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13768 + // switch to APLL if TRGMII and DDR2
13769 + if ((sysRegRead(0xBE000010)>>4)&0x1)
13771 + apll_xtal_enable();
13775 +#if defined (CONFIG_MT7621_ASIC)
13776 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
13777 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
13778 + mii_mgr_write(31, 0x3600, 0x5e30b);
13780 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
13781 + mii_mgr_write(31, 0x3600, 0x5e33b);
13783 +#elif defined (CONFIG_MT7621_FPGA)
13784 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
13785 + mii_mgr_write(31, 0x3600, 0x5e337);
13788 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
13791 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13792 + //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
13793 + mii_mgr_read(31, 0x7804 ,®Value);
13794 + regValue &= ~(1<<8); //Enable Port 6
13795 + regValue |= (1<<6); //Disable Port 5
13796 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13798 +#if defined (CONFIG_RAETH_GMAC2)
13799 + //RGMII2=Normal mode
13800 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13802 + //GMAC2= RGMII mode
13803 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13804 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
13805 + mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
13806 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
13808 + enable_auto_negotiate(0);//set polling address
13810 +#if defined (CONFIG_RAETH_8023AZ_EEE)
13811 + mii_mgr_write(31, 0x3500, 0x5e33b); //MT7530 P5 Force 1000, we can ignore this setting??????
13812 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000)
13817 + /* set MT7530 Port 5 to PHY 0/4 mode */
13818 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
13819 + regValue &= ~((1<<13)|(1<<6));
13820 + regValue |= ((1<<7)|(1<<16)|(1<<20));
13821 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
13822 + regValue &= ~((1<<13)|(1<<6)|(1<<20));
13823 + regValue |= ((1<<7)|(1<<16));
13826 +#if defined (CONFIG_RAETH_8023AZ_EEE)
13827 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13829 + //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
13831 + regValue |= (1<<16);//change HW-TRAP
13832 + //printk("change HW-TRAP to 0x%x\n",regValue);
13833 + mii_mgr_write(31, 0x7804 ,regValue);
13835 + mii_mgr_read(31, 0x7800, ®Value);
13836 + regValue = (regValue >> 9) & 0x3;
13837 + if(regValue == 0x3) { //25Mhz Xtal
13839 + } else if(regValue == 0x2) { //40Mhz
13841 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock
13842 + mii_mgr_write(0, 14, 0x410);
13843 + mii_mgr_write(0, 13, 0x401f);
13844 + mii_mgr_write(0, 14, 0x0);
13846 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL
13847 + mii_mgr_write(0, 14, 0x40d);
13848 + mii_mgr_write(0, 13, 0x401f);
13849 + mii_mgr_write(0, 14, 0x2020);
13851 + mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz
13852 + mii_mgr_write(0, 14, 0x40e);
13853 + mii_mgr_write(0, 13, 0x401f);
13854 + mii_mgr_write(0, 14, 0x119);
13856 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL
13857 + mii_mgr_write(0, 14, 0x40d);
13858 + mii_mgr_write(0, 13, 0x401f);
13859 + mii_mgr_write(0, 14, 0x2820);
13861 + udelay(20); //suggest by CD
13863 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock
13864 + mii_mgr_write(0, 14, 0x410);
13865 + mii_mgr_write(0, 13, 0x401f);
13866 + }else { //20Mhz Xtal
13871 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13872 + mii_mgr_write(0, 14, 0x3); /*TRGMII*/
13874 + mii_mgr_write(0, 14, 0x1); /*RGMII*/
13875 +/* set MT7530 central align */
13876 + mii_mgr_read(31, 0x7830, ®Value);
13878 + regValue |= 1<<1;
13879 + mii_mgr_write(31, 0x7830, regValue);
13881 + mii_mgr_read(31, 0x7a40, ®Value);
13882 + regValue &= ~(1<<30);
13883 + mii_mgr_write(31, 0x7a40, regValue);
13885 + regValue = 0x855;
13886 + mii_mgr_write(31, 0x7a78, regValue);
13889 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
13890 + mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M
13891 + mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M
13894 + for(i=0;i<=4;i++) {
13895 + mii_mgr_read(i, 4, ®Value);
13896 + regValue |= (3<<7); //turn on 100Base-T Advertisement
13897 + //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
13898 + mii_mgr_write(i, 4, regValue);
13900 + mii_mgr_read(i, 9, ®Value);
13901 + regValue |= (3<<8); //turn on 1000Base-T Advertisement
13902 + //regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
13903 + mii_mgr_write(i, 9, regValue);
13906 + mii_mgr_read(i, 0, ®Value);
13907 + regValue |= (1 << 9);
13908 + mii_mgr_write(i, 0, regValue);
13913 + mii_mgr_write(31, 0x7a54, 0x44); //lower driving
13914 + mii_mgr_write(31, 0x7a5c, 0x44); //lower driving
13915 + mii_mgr_write(31, 0x7a64, 0x44); //lower driving
13916 + mii_mgr_write(31, 0x7a6c, 0x44); //lower driving
13917 + mii_mgr_write(31, 0x7a74, 0x44); //lower driving
13918 + mii_mgr_write(31, 0x7a7c, 0x44); //lower driving
13921 + LANWANPartition();
13923 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
13925 + for(i=0;i<=4;i++)
13927 + mii_mgr_write(i, 13, 0x7);
13928 + mii_mgr_write(i, 14, 0x3C);
13929 + mii_mgr_write(i, 13, 0x4007);
13930 + mii_mgr_write(i, 14, 0x0);
13933 + //Disable EEE 10Base-Te:
13934 + for(i=0;i<=4;i++)
13936 + mii_mgr_write(i, 13, 0x1f);
13937 + mii_mgr_write(i, 14, 0x027b);
13938 + mii_mgr_write(i, 13, 0x401f);
13939 + mii_mgr_write(i, 14, 0x1177);
13943 + for(i=0;i<=4;i++)
13946 + mii_mgr_read(i, 0x0 ,®Value);
13947 + regValue &= ~(0x1<<11);
13948 + mii_mgr_write(i, 0x0, regValue);
13951 + mii_mgr_read(31, 0x7808 ,®Value);
13952 + regValue |= (3<<16); //Enable INTR
13953 + mii_mgr_write(31, 0x7808 ,regValue);
13954 +#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
13955 + mt7621_eee_patch();
13960 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
13961 +void apll_xtal_enable(void)
13963 + unsigned long data = 0;
13964 + unsigned long regValue = 0;
13966 + /* Firstly, reset all required register to default value */
13967 + sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008000);
13968 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, 0x01401d61);
13969 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, 0x38233d0e);
13970 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, 0x80120004);
13971 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
13973 + /* toggle RG_XPTL_CHG */
13974 + sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008800);
13975 + sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008c00);
13977 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0014);
13978 + data &= ~(0x0000ffc0);
13980 + regValue = *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x10);
13981 + regValue = (regValue >> 6) & 0x7;
13982 + if(regValue < 6) { //20/40Mhz Xtal
13983 + data |= REGBIT(0x1d, 8);
13985 + data |= REGBIT(0x17, 8);
13988 + if(regValue < 6) { //20/40Mhz Xtal
13989 + data |= REGBIT(0x1, 6);
13992 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, data);
13994 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0018);
13995 + data &= ~(0xf0773f00);
13996 + data |= REGBIT(0x3, 28);
13997 + data |= REGBIT(0x2, 20);
13998 + if(regValue < 6) { //20/40Mhz Xtal
13999 + data |= REGBIT(0x3, 16);
14001 + data |= REGBIT(0x2, 16);
14003 + data |= REGBIT(0x3, 12);
14005 + if(regValue < 6) { //20/40Mhz Xtal
14006 + data |= REGBIT(0xd, 8);
14008 + data |= REGBIT(0x7, 8);
14010 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, data);
14012 + if(regValue < 6) { //20/40Mhz Xtal
14013 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
14015 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1697cc39);
14017 + //*Common setting - Set PLLGP_CTRL_4 *//
14018 + ///* 1. Bit 31 */
14019 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14020 + data &= ~(REGBIT(0x1, 31));
14021 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14024 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14025 + data |= REGBIT(0x1, 0);
14026 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14029 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14030 + data |= REGBIT(0x1, 3);
14031 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14034 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14035 + data |= REGBIT(0x1, 8);
14036 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14039 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14040 + data |= REGBIT(0x1, 6);
14041 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14044 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14045 + data |= REGBIT(0x1, 5);
14046 + data |= REGBIT(0x1, 7);
14047 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14050 + data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14051 + data &= ~REGBIT(0x1, 17);
14052 + sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14054 + /* 8. TRGMII TX CLK SEL APLL */
14055 + data = sysRegRead(0xbe00002c);
14056 + data &= 0xffffff9f;
14058 + sysRegWrite(0xbe00002c, data);
14064 +#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
14065 +void rt_gsw_init(void)
14067 +#if defined (CONFIG_P4_MAC_TO_PHY_MODE) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
14070 +#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
14072 +#elif defined (CONFIG_MT7620_ASIC)
14075 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14076 + unsigned int regValue = 0;
14078 +#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
14079 + /*keep dump switch mode */
14080 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14081 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14082 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x5e333;//(P2, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14083 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x5e333;//(P3, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14084 +#if defined (CONFIG_RAETH_HAS_PORT4)
14085 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14087 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e333;//(P4, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14089 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14091 + /* In order to use 10M/Full on FPGA board. We configure phy capable to
14092 + * 10M Full/Half duplex, so we can use auto-negotiation on PC side */
14093 +#if defined (CONFIG_RAETH_HAS_PORT4)
14094 + for(i=0;i<4;i++){
14096 + for(i=0;i<5;i++){
14098 + mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
14099 + mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
14104 +#if defined (CONFIG_PDMA_NEW)
14105 + *(unsigned long *)(SYSCFG1) |= (0x1 << 8); //PCIE_RC_MODE=1
14109 +#if defined (CONFIG_MT7620_ASIC) && !defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14110 + is_BGA = (sysRegRead(RALINK_SYSCTL_BASE + 0xc) >> 16) & 0x1;
14112 + * Reg 31: Page Control
14113 + * Bit 15 => PortPageSel, 1=local, 0=global
14114 + * Bit 14:12 => PageSel, local:0~3, global:0~4
14116 + * Reg16~30:Local/Global registers
14119 + /*correct PHY setting L3.0 BGA*/
14120 + mii_mgr_write(1, 31, 0x4000); //global, page 4
14122 + mii_mgr_write(1, 17, 0x7444);
14124 + mii_mgr_write(1, 19, 0x0114);
14126 + mii_mgr_write(1, 19, 0x0117);
14129 + mii_mgr_write(1, 22, 0x10cf);
14130 + mii_mgr_write(1, 25, 0x6212);
14131 + mii_mgr_write(1, 26, 0x0777);
14132 + mii_mgr_write(1, 29, 0x4000);
14133 + mii_mgr_write(1, 28, 0xc077);
14134 + mii_mgr_write(1, 24, 0x0000);
14136 + mii_mgr_write(1, 31, 0x3000); //global, page 3
14137 + mii_mgr_write(1, 17, 0x4838);
14139 + mii_mgr_write(1, 31, 0x2000); //global, page 2
14141 + mii_mgr_write(1, 21, 0x0515);
14142 + mii_mgr_write(1, 22, 0x0053);
14143 + mii_mgr_write(1, 23, 0x00bf);
14144 + mii_mgr_write(1, 24, 0x0aaf);
14145 + mii_mgr_write(1, 25, 0x0fad);
14146 + mii_mgr_write(1, 26, 0x0fc1);
14148 + mii_mgr_write(1, 21, 0x0517);
14149 + mii_mgr_write(1, 22, 0x0fd2);
14150 + mii_mgr_write(1, 23, 0x00bf);
14151 + mii_mgr_write(1, 24, 0x0aab);
14152 + mii_mgr_write(1, 25, 0x00ae);
14153 + mii_mgr_write(1, 26, 0x0fff);
14155 + mii_mgr_write(1, 31, 0x1000); //global, page 1
14156 + mii_mgr_write(1, 17, 0xe7f8);
14158 + mii_mgr_write(1, 31, 0x8000); //local, page 0
14159 + mii_mgr_write(0, 30, 0xa000);
14160 + mii_mgr_write(1, 30, 0xa000);
14161 + mii_mgr_write(2, 30, 0xa000);
14162 + mii_mgr_write(3, 30, 0xa000);
14163 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14164 + mii_mgr_write(4, 30, 0xa000);
14167 + mii_mgr_write(0, 4, 0x05e1);
14168 + mii_mgr_write(1, 4, 0x05e1);
14169 + mii_mgr_write(2, 4, 0x05e1);
14170 + mii_mgr_write(3, 4, 0x05e1);
14171 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14172 + mii_mgr_write(4, 4, 0x05e1);
14175 + mii_mgr_write(1, 31, 0xa000); //local, page 2
14176 + mii_mgr_write(0, 16, 0x1111);
14177 + mii_mgr_write(1, 16, 0x1010);
14178 + mii_mgr_write(2, 16, 0x1515);
14179 + mii_mgr_write(3, 16, 0x0f0f);
14180 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14181 + mii_mgr_write(4, 16, 0x1313);
14184 +#if !defined (CONFIG_RAETH_8023AZ_EEE)
14185 + mii_mgr_write(1, 31, 0xb000); //local, page 3
14186 + mii_mgr_write(0, 17, 0x0);
14187 + mii_mgr_write(1, 17, 0x0);
14188 + mii_mgr_write(2, 17, 0x0);
14189 + mii_mgr_write(3, 17, 0x0);
14190 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14191 + mii_mgr_write(4, 17, 0x0);
14198 + // for ethernet extended mode
14199 + mii_mgr_write(1, 31, 0x3000);
14200 + mii_mgr_write(1, 19, 0x122);
14201 + mii_mgr_write(1, 20, 0x0044);
14202 + mii_mgr_write(1, 23, 0xa80c);
14203 + mii_mgr_write(1, 24, 0x129d);
14204 + mii_mgr_write(1, 31, 9000);
14205 + mii_mgr_write(0, 18, 0x140c);
14206 + mii_mgr_write(1, 18, 0x140c);
14207 + mii_mgr_write(2, 18, 0x140c);
14208 + mii_mgr_write(3, 18, 0x140c);
14209 + mii_mgr_write(0, 0, 0x3300);
14210 + mii_mgr_write(1, 0, 0x3300);
14211 + mii_mgr_write(2, 0, 0x3300);
14212 + mii_mgr_write(3, 0, 0x3300);
14213 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14214 + mii_mgr_write(4, 18, 0x140c);
14215 + mii_mgr_write(4, 0, 0x3300);
14221 +#if defined(CONFIG_RALINK_MT7620)
14222 + if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
14223 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x701c) = 0x800000c; //enlarge FE2SW_IPG
14225 +#endif // CONFIG_RAETH_7620 //
14229 +#if defined (CONFIG_MT7620_FPGA)|| defined (CONFIG_MT7620_ASIC)
14230 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
14231 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0x7f7f7fe0;//Set Port6 CPU Port
14233 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14234 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
14235 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c; //disable port 0 ~ 4 internal phy, set phy base address to 12
14236 + /*MT7620 need mac learning for PPE*/
14237 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
14238 + //*(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
14239 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14240 + //rxclk_skew, txclk_skew = 0
14241 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
14242 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14244 + *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14246 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;//(P4, AN)
14247 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14248 + //rxclk_skew, txclk_skew = 0
14249 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14252 + /* set MT7530 Port 0 to PHY mode */
14253 +	mii_mgr_read(31, 0x7804 ,&regValue);
14254 +#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
14255 + regValue &= ~((1<<13)|(1<<6)|(1<<5)|(1<<15));
14256 + regValue |= ((1<<7)|(1<<16)|(1<<20)|(1<<24));
14257 + //mii_mgr_write(31, 0x7804 ,0x115c8f);
14258 +#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14259 + regValue &= ~((1<<13)|(1<<6)|(1<<20)|(1<<5)|(1<<15));
14260 + regValue |= ((1<<7)|(1<<16)|(1<<24));
14262 + regValue &= ~(1<<8); //Enable Port 6
14263 + mii_mgr_write(31, 0x7804 ,regValue); //bit 24 standalone switch
14265 +/* set MT7530 central align */
14266 +	mii_mgr_read(31, 0x7830, &regValue);
14268 + regValue |= 1<<1;
14269 + mii_mgr_write(31, 0x7830, regValue);
14271 +	mii_mgr_read(31, 0x7a40, &regValue);
14272 + regValue &= ~(1<<30);
14273 + mii_mgr_write(31, 0x7a40, regValue);
14275 + regValue = 0x855;
14276 + mii_mgr_write(31, 0x7a78, regValue);
14278 + /*AN should be set after MT7530 HWSTRAP*/
14279 +#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
14280 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000100;//(P0, AN polling)
14281 +#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14282 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000504;//(P4, AN polling)
14286 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
14287 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14288 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14289 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
14290 + *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
14292 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
14293 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14294 + *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14295 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
14297 + enable_auto_negotiate(1);
14299 + if (isICPlusGigaPHY(1)) {
14300 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
14301 + phy_val |= 1<<10; //enable pause ability
14302 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
14304 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14305 + phy_val |= 1<<9; //restart AN
14306 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14307 + }else if (isMarvellGigaPHY(1)) {
14308 +#if defined (CONFIG_MT7620_FPGA)
14309 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
14310 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
14311 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
14313 + printk("Reset MARVELL phy1\n");
14314 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
14315 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14316 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
14318 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14319 + phy_val |= 1<<15; //PHY Software Reset
14320 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14321 + }else if (isVtssGigaPHY(1)) {
14322 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
14323 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
14324 + printk("Vitesse phy skew: %x --> ", phy_val);
14325 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14326 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14327 + printk("%x\n", phy_val);
14328 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
14329 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
14333 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
14334 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14335 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14336 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
14337 + *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
14339 +#else // Port 5 Disabled //
14340 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
14341 + *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode
14345 +#if defined (CONFIG_P4_RGMII_TO_MAC_MODE)
14346 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e33b;//(P4, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
14347 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14348 + //rxclk_skew, txclk_skew = 0
14349 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14351 +#elif defined (CONFIG_P4_MII_TO_MAC_MODE)
14352 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14353 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=Mii Mode
14354 + *(unsigned long *)(SYSCFG1) |= (0x1 << 14);
14356 +#elif defined (CONFIG_P4_MAC_TO_PHY_MODE)
14357 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14358 + *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14359 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14361 + enable_auto_negotiate(1);
14363 + if (isICPlusGigaPHY(2)) {
14364 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, &phy_val);
14365 + phy_val |= 1<<10; //enable pause ability
14366 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, phy_val);
14368 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
14369 + phy_val |= 1<<9; //restart AN
14370 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
14371 + }else if (isMarvellGigaPHY(2)) {
14372 +#if defined (CONFIG_MT7620_FPGA)
14373 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &phy_val);
14374 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
14375 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, phy_val);
14377 + printk("Reset MARVELL phy2\n");
14378 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &phy_val);
14379 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14380 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, phy_val);
14382 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
14383 + phy_val |= 1<<15; //PHY Software Reset
14384 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
14385 + }else if (isVtssGigaPHY(2)) {
14386 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0001); //extended page
14387 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &phy_val);
14388 + printk("Vitesse phy skew: %x --> ", phy_val);
14389 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14390 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14391 + printk("%x\n", phy_val);
14392 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, phy_val);
14393 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0000); //main registers
14396 +#elif defined (CONFIG_P4_RMII_TO_MAC_MODE)
14397 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14398 + *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14399 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE1_MODE=RvMii Mode
14400 + *(unsigned long *)(SYSCFG1) |= (0x2 << 14);
14401 +#elif defined (CONFIG_GE_RGMII_MT7530_P0_AN) || defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14402 +#else // Port 4 Disabled //
14403 + *(unsigned long *)(SYSCFG1) |= (0x3 << 14); //GE2_MODE=RJ45 Mode
14404 + *(unsigned long *)(0xb0000060) |= (1 << 10); //set RGMII2 to GPIO mode
14410 +#if defined (CONFIG_RALINK_MT7628)
14412 +void mt7628_ephy_init(void)
14416 + mii_mgr_write(0, 31, 0x2000); //change G2 page
14417 + mii_mgr_write(0, 26, 0x0000);
14419 + for(i=0; i<5; i++){
14420 + mii_mgr_write(i, 31, 0x8000); //change L0 page
14421 + mii_mgr_write(i, 0, 0x3100);
14423 +#if defined (CONFIG_RAETH_8023AZ_EEE)
14424 + mii_mgr_read(i, 26, &phy_val);// EEE setting
14425 + phy_val |= (1 << 5);
14426 + mii_mgr_write(i, 26, phy_val);
14429 + mii_mgr_write(i, 13, 0x7);
14430 + mii_mgr_write(i, 14, 0x3C);
14431 + mii_mgr_write(i, 13, 0x4007);
14432 + mii_mgr_write(i, 14, 0x0);
14434 + mii_mgr_write(i, 30, 0xa000);
14435 + mii_mgr_write(i, 31, 0xa000); // change L2 page
14436 + mii_mgr_write(i, 16, 0x0606);
14437 + mii_mgr_write(i, 23, 0x0f0e);
14438 + mii_mgr_write(i, 24, 0x1610);
14439 + mii_mgr_write(i, 30, 0x1f15);
14440 + mii_mgr_write(i, 28, 0x6111);
14442 + mii_mgr_read(i, 4, &phy_val);
14443 + phy_val |= (1 << 10);
14444 + mii_mgr_write(i, 4, phy_val);
14447 + //100Base AOI setting
14448 + mii_mgr_write(0, 31, 0x5000); //change G5 page
14449 + mii_mgr_write(0, 19, 0x004a);
14450 + mii_mgr_write(0, 20, 0x015a);
14451 + mii_mgr_write(0, 21, 0x00ee);
14452 + mii_mgr_write(0, 22, 0x0033);
14453 + mii_mgr_write(0, 23, 0x020a);
14454 + mii_mgr_write(0, 24, 0x0000);
14455 + mii_mgr_write(0, 25, 0x024a);
14456 + mii_mgr_write(0, 26, 0x035a);
14457 + mii_mgr_write(0, 27, 0x02ee);
14458 + mii_mgr_write(0, 28, 0x0233);
14459 + mii_mgr_write(0, 29, 0x000a);
14460 + mii_mgr_write(0, 30, 0x0000);
14461 + /* Fix EPHY idle state abnormal behavior */
14462 + mii_mgr_write(0, 31, 0x4000); //change G4 page
14463 + mii_mgr_write(0, 29, 0x000d);
14464 + mii_mgr_write(0, 30, 0x0500);
14471 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
14472 +void rt305x_esw_init(void)
14475 + u32 phy_val=0, val=0;
14476 +#if defined (CONFIG_RT3052_ASIC)
14480 +#if defined (CONFIG_RT5350_ASIC)
14481 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0168) = 0x17;
14485 + * FC_RLS_TH=200, FC_SET_TH=160
14486 + * DROP_RLS=120, DROP_SET_TH=80
14488 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0008) = 0xC8A07850;
14489 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00E4) = 0x00000000;
14490 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0014) = 0x00405555;
14491 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0050) = 0x00002001;
14492 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0090) = 0x00007f7f;
14493 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0098) = 0x00007f3f; //disable VLAN
14494 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00CC) = 0x0002500c;
14495 +#ifndef CONFIG_UNH_TEST
14496 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x0008a301; //hashing algorithm=XOR48, aging interval=300sec
14499 + * bit[30]:1 Backoff Algorithm Option: The latest one to pass UNH test
14500 + * bit[29]:1 Length of Received Frame Check Enable
14501 + * bit[8]:0 Enable collision 16 packet abort and late collision abort
14502 + * bit[7:6]:01 Maximum Packet Length: 1518
14504 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x6008a241;
14506 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x008C) = 0x02404040;
14507 +#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC) || defined (CONFIG_RT5350_ASIC) || defined (CONFIG_MT7628_ASIC)
14508 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x3f502b28; //Change polling Ext PHY Addr=0x1F
14509 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0x00000000;
14510 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x7d000000; //1us cycle number=125 (FE's clock=125Mhz)
14511 +#elif defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA) || defined (CONFIG_RT5350_FPGA) || defined (CONFIG_MT7628_FPGA)
14512 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x00f03ff9; //polling Ext PHY Addr=0x0, force port5 as 100F/D (disable auto-polling)
14513 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0xffdf1f00;
14514 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x0d000000; //1us cycle number=13 (FE's clock=12.5Mhz)
14516 + /* In order to use 10M/Full on FPGA board. We configure phy capable to
14517 + * 10M Full/Half duplex, so we can use auto-negotiation on PC side */
14518 + for(i=0;i<5;i++){
14519 + mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
14520 + mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
14525 + * set port 5 force to 1000M/Full when connecting to switch or iNIC
14527 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
14528 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14529 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14530 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3fff; //force 1000M full duplex
14531 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0xf<<20); //rxclk_skew, txclk_skew = 0
14532 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
14533 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14534 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14535 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff);
14536 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
14538 +#if defined (CONFIG_RALINK_RT3352)
14539 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
14540 + *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
14543 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
14544 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14545 + *(unsigned long *)(0xb0000060) &= ~(1 << 7); //set MDIO to Normal mode
14546 +#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC)
14547 + enable_auto_negotiate(1);
14549 + if (isMarvellGigaPHY(1)) {
14550 +#if defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA)
14551 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
14552 + phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
14553 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
14555 + printk("\n Reset MARVELL phy\n");
14556 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
14557 + phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14558 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
14560 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14561 + phy_val |= 1<<15; //PHY Software Reset
14562 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14564 + if (isVtssGigaPHY(1)) {
14565 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
14566 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
14567 + printk("Vitesse phy skew: %x --> ", phy_val);
14568 + phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14569 + phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14570 + printk("%x\n", phy_val);
14571 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
14572 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
14575 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
14576 + *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14577 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14578 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff);
14579 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
14581 +#if defined (CONFIG_RALINK_RT3352)
14582 + *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
14583 + *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
14585 +#else // Port 5 Disabled //
14587 +#if defined (CONFIG_RALINK_RT3052)
14588 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
14589 + *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
14590 + *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO41-GPIO50)
14591 + *(unsigned long *)(0xb0000674) = 0xFFF; //GPIO41-GPIO50 output mode
14592 + *(unsigned long *)(0xb000067C) = 0x0; //GPIO41-GPIO50 output low
14593 +#elif defined (CONFIG_RALINK_RT3352)
14594 + *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
14595 + *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
14596 + *(unsigned long *)(0xb0000624) = 0xC0000000; //GPIO22-GPIO23 output mode
14597 + *(unsigned long *)(0xb000062C) = 0xC0000000; //GPIO22-GPIO23 output high
14599 + *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO24-GPIO35)
14600 + *(unsigned long *)(0xb000064C) = 0xFFF; //GPIO24-GPIO35 output mode
14601 + *(unsigned long *)(0xb0000654) = 0xFFF; //GPIO24-GPIO35 output high
14602 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
14605 +#endif // CONFIG_P5_RGMII_TO_MAC_MODE //
14608 +#if defined (CONFIG_RT3052_ASIC)
14609 + rw_rf_reg(0, 0, &phy_val);
14610 + phy_val = phy_val >> 4;
14612 + if(phy_val > 0x5) {
14614 + rw_rf_reg(0, 26, &phy_val);
14615 + phy_val2 = (phy_val | (0x3 << 5));
14616 + rw_rf_reg(1, 26, &phy_val2);
14619 + val = sysRegRead(RSTCTRL);
14620 + val = val | RALINK_EPHY_RST;
14621 + sysRegWrite(RSTCTRL, val);
14622 + val = val & ~(RALINK_EPHY_RST);
14623 + sysRegWrite(RSTCTRL, val);
14625 + rw_rf_reg(1, 26, &phy_val);
14627 + //select local register
14628 + mii_mgr_write(0, 31, 0x8000);
14629 + for(i=0;i<5;i++){
14630 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14631 + mii_mgr_write(i, 29, 0x7058); //TX100/TX10 AD/DA current bias
14632 + mii_mgr_write(i, 30, 0x0018); //TX100 slew rate control
14635 + //select global register
14636 + mii_mgr_write(0, 31, 0x0);
14637 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14638 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14639 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14640 +//#define ENABLE_LDPS
14641 +#if defined (ENABLE_LDPS)
14642 + mii_mgr_write(0, 12, 0x7eaa);
14643 + mii_mgr_write(0, 22, 0x252f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14645 + mii_mgr_write(0, 12, 0x0);
14646 + mii_mgr_write(0, 22, 0x052f);
14648 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14649 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14650 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14651 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14652 + mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
14653 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14654 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14655 + mii_mgr_write(0, 31, 0x8000); //select local register
14657 + for(i=0;i<5;i++){
14658 + //LSB=1 enable PHY
14659 + mii_mgr_read(i, 26, &phy_val);
14660 + phy_val |= 0x0001;
14661 + mii_mgr_write(i, 26, phy_val);
14664 + //select local register
14665 + mii_mgr_write(0, 31, 0x8000);
14666 + for(i=0;i<5;i++){
14667 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14668 + mii_mgr_write(i, 29, 0x7058); //TX100/TX10 AD/DA current bias
14669 + mii_mgr_write(i, 30, 0x0018); //TX100 slew rate control
14672 + //select global register
14673 + mii_mgr_write(0, 31, 0x0);
14674 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14675 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14676 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14677 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14678 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14679 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14680 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14681 + mii_mgr_write(0, 22, 0x052f); //tune TP_IDL tail and head waveform
14682 + mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
14683 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14684 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14685 + mii_mgr_write(0, 31, 0x8000); //select local register
14687 + for(i=0;i<5;i++){
14688 + //LSB=1 enable PHY
14689 + mii_mgr_read(i, 26, &phy_val);
14690 + phy_val |= 0x0001;
14691 + mii_mgr_write(i, 26, phy_val);
14694 +#elif defined (CONFIG_RT3352_ASIC)
14697 + val = sysRegRead(RSTCTRL);
14698 + val = val | RALINK_EPHY_RST;
14699 + sysRegWrite(RSTCTRL, val);
14700 + val = val & ~(RALINK_EPHY_RST);
14701 + sysRegWrite(RSTCTRL, val);
14703 + //select local register
14704 + mii_mgr_write(0, 31, 0x8000);
14705 + for(i=0;i<5;i++){
14706 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14707 + mii_mgr_write(i, 29, 0x7016); //TX100/TX10 AD/DA current bias
14708 + mii_mgr_write(i, 30, 0x0038); //TX100 slew rate control
14711 + //select global register
14712 + mii_mgr_write(0, 31, 0x0);
14713 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14714 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14715 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14716 + mii_mgr_write(0, 12, 0x7eaa);
14717 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14718 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14719 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14720 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14721 + mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14722 + mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
14723 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14724 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14725 + mii_mgr_write(0, 31, 0x8000); //select local register
14727 + for(i=0;i<5;i++){
14728 + //LSB=1 enable PHY
14729 + mii_mgr_read(i, 26, &phy_val);
14730 + phy_val |= 0x0001;
14731 + mii_mgr_write(i, 26, phy_val);
14734 +#elif defined (CONFIG_RT5350_ASIC)
14737 + val = sysRegRead(RSTCTRL);
14738 + val = val | RALINK_EPHY_RST;
14739 + sysRegWrite(RSTCTRL, val);
14740 + val = val & ~(RALINK_EPHY_RST);
14741 + sysRegWrite(RSTCTRL, val);
14743 + //select local register
14744 + mii_mgr_write(0, 31, 0x8000);
14745 + for(i=0;i<5;i++){
14746 + mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
14747 + mii_mgr_write(i, 29, 0x7015); //TX100/TX10 AD/DA current bias
14748 + mii_mgr_write(i, 30, 0x0038); //TX100 slew rate control
14751 + //select global register
14752 + mii_mgr_write(0, 31, 0x0);
14753 + mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14754 + mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14755 + mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
14756 + mii_mgr_write(0, 12, 0x7eaa);
14757 + mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
14758 + mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14759 + mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14760 + mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14761 + mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14762 + mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
14763 + mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14764 + mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14765 + mii_mgr_write(0, 31, 0x8000); //select local register
14767 + for(i=0;i<5;i++){
14768 + //LSB=1 enable PHY
14769 + mii_mgr_read(i, 26, &phy_val);
14770 + phy_val |= 0x0001;
14771 + mii_mgr_write(i, 26, phy_val);
14773 +#elif defined (CONFIG_MT7628_ASIC)
14774 +/*INIT MT7628 PHY HERE*/
14775 + val = sysRegRead(RT2880_AGPIOCFG_REG);
14776 +#if defined (CONFIG_ETH_ONE_PORT_ONLY)
14777 + val |= (MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
14778 + val = val & ~(MT7628_P0_EPHY_AIO_EN);
14780 + val = val & ~(MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
14782 + if ((*((volatile u32 *)(RALINK_SYSCTL_BASE + 0x8))) & 0x10000)
14783 + val &= ~0x1f0000;
14784 + sysRegWrite(RT2880_AGPIOCFG_REG, val);
14786 + val = sysRegRead(RSTCTRL);
14787 + val = val | RALINK_EPHY_RST;
14788 + sysRegWrite(RSTCTRL, val);
14789 + val = val & ~(RALINK_EPHY_RST);
14790 + sysRegWrite(RSTCTRL, val);
14793 + val = sysRegRead(RALINK_SYSCTL_BASE + 0x64);
14794 +#if defined (CONFIG_ETH_ONE_PORT_ONLY)
14795 + val &= 0xf003f003;
14796 + val |= 0x05540554;
14797 + sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0 EPHY LED mode
14799 + val &= 0xf003f003;
14800 + sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0~P4 EPHY LED mode
14804 + mt7628_ephy_init();
14810 +#if defined (CONFIG_ARCH_MT7623) /* TODO: just for bring up, should be removed!!! */
14811 +void mt7623_pinmux_set(void)
14813 + unsigned long regValue;
14815 + //printk("[mt7623_pinmux_set]start\n");
14816 + /* Pin277: ESW_RST (1) */
14817 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14818 + regValue &= ~(BITS(6,8));
14819 + regValue |= BIT(6);
14820 + *(volatile u_long *)(0xf0005ad0) = regValue;
14822 + /* Pin262: G2_TXEN (1) */
14823 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14824 + regValue &= ~(BITS(6,8));
14825 + regValue |= BIT(6);
14826 + *(volatile u_long *)(0xf0005aa0) = regValue;
14827 + /* Pin263: G2_TXD3 (1) */
14828 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14829 + regValue &= ~(BITS(9,11));
14830 + regValue |= BIT(9);
14831 + *(volatile u_long *)(0xf0005aa0) = regValue;
14832 + /* Pin264: G2_TXD2 (1) */
14833 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14834 + regValue &= ~(BITS(12,14));
14835 + regValue |= BIT(12);
14836 + *(volatile u_long *)(0xf0005aa0) = regValue;
14837 + /* Pin265: G2_TXD1 (1) */
14838 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14839 + regValue &= ~(BITS(0,2));
14840 + regValue |= BIT(0);
14841 + *(volatile u_long *)(0xf0005ab0) = regValue;
14842 + /* Pin266: G2_TXD0 (1) */
14843 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14844 + regValue &= ~(BITS(3,5));
14845 + regValue |= BIT(3);
14846 + *(volatile u_long *)(0xf0005ab0) = regValue;
14847 + /* Pin267: G2_TXC (1) */
14848 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14849 + regValue &= ~(BITS(6,8));
14850 + regValue |= BIT(6);
14851 + *(volatile u_long *)(0xf0005ab0) = regValue;
14852 + /* Pin268: G2_RXC (1) */
14853 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14854 + regValue &= ~(BITS(9,11));
14855 + regValue |= BIT(9);
14856 + *(volatile u_long *)(0xf0005ab0) = regValue;
14857 + /* Pin269: G2_RXD0 (1) */
14858 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14859 + regValue &= ~(BITS(12,14));
14860 + regValue |= BIT(12);
14861 + *(volatile u_long *)(0xf0005ab0) = regValue;
14862 + /* Pin270: G2_RXD1 (1) */
14863 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14864 + regValue &= ~(BITS(0,2));
14865 + regValue |= BIT(0);
14866 + *(volatile u_long *)(0xf0005ac0) = regValue;
14867 + /* Pin271: G2_RXD2 (1) */
14868 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14869 + regValue &= ~(BITS(3,5));
14870 + regValue |= BIT(3);
14871 + *(volatile u_long *)(0xf0005ac0) = regValue;
14872 + /* Pin272: G2_RXD3 (1) */
14873 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14874 + regValue &= ~(BITS(6,8));
14875 + regValue |= BIT(6);
14876 + *(volatile u_long *)(0xf0005ac0) = regValue;
14877 + /* Pin274: G2_RXDV (1) */
14878 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14879 + regValue &= ~(BITS(12,14));
14880 + regValue |= BIT(12);
14881 + *(volatile u_long *)(0xf0005ac0) = regValue;
14883 + /* Pin275: MDC (1) */
14884 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14885 + regValue &= ~(BITS(0,2));
14886 + regValue |= BIT(0);
14887 + *(volatile u_long *)(0xf0005ad0) = regValue;
14888 + /* Pin276: MDIO (1) */
14889 + regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14890 + regValue &= ~(BITS(3,5));
14891 + regValue |= BIT(3);
14892 + *(volatile u_long *)(0xf0005ad0) = regValue;
14893 + //printk("[mt7623_pinmux_set]end\n");
14896 +void wait_loop(void) {
14901 + for(i = 0; i<32; i = i+1){
14902 + read_data = *(volatile u_long *)(0xFB110610);
14908 +void trgmii_calibration_7623(void) {
14910 + unsigned int tap_a[5]; // minumum delay for all correct
14911 + unsigned int tap_b[5]; // maximum delay for all correct
14912 + unsigned int final_tap[5];
14913 + unsigned int bslip_en;
14914 + unsigned int rxc_step_size;
14915 + unsigned int rxd_step_size;
14916 + unsigned int read_data;
14917 + unsigned int tmp;
14918 + unsigned int rd_wd;
14920 + unsigned int err_cnt[5];
14921 + unsigned int init_toggle_data;
14922 + unsigned int err_flag[5];
14923 + unsigned int err_total_flag;
14924 + unsigned int training_word;
14925 + unsigned int rd_tap;
14927 + u32 TRGMII_7623_base;
14928 + u32 TRGMII_7623_RD_0;
14932 + u32 TRGMII_RXCTL;
14933 + u32 TRGMII_RCK_CTRL;
14934 + u32 TRGMII_7530_base;
14935 + TRGMII_7623_base = 0xFB110300;
14936 + TRGMII_7623_RD_0 = TRGMII_7623_base + 0x10;
14937 + TRGMII_RCK_CTRL = TRGMII_7623_base;
14938 + rxd_step_size =0x1;
14939 + rxc_step_size =0x4;
14940 + init_toggle_data = 0x00000055;
14941 + training_word = 0x000000AC;
14943 + //printk("Calibration begin ........");
14944 + *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff; // RX clock gating in MT7623
14945 + *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000; // Assert RX reset in MT7623
14946 + *(volatile u_long *)(TRGMII_7623_base +0x78) |= 0x00002000; // Set TX OE edge in MT7623
14947 + *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
14948 + *(volatile u_long *)(TRGMII_7623_base ) &= 0x7fffffff; // Release RX reset in MT7623
14949 + //printk("Check Point 1 .....\n");
14950 + for (i = 0 ; i<5 ; i++) {
14951 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) |= 0x80000000; // Set bslip_en = 1
14954 + //printk("Enable Training Mode in MT7530\n");
14955 + mii_mgr_read(0x1F,0x7A40,&read_data);
14956 + read_data |= 0xc0000000;
14957 + mii_mgr_write(0x1F,0x7A40,read_data); //Enable Training Mode in MT7530
14958 + err_total_flag = 0;
14959 + //printk("Adjust RXC delay in MT7623\n");
14961 + while (err_total_flag == 0 && read_data != 0x68) {
14962 + //printk("2nd Enable EDGE CHK in MT7623\n");
14963 + /* Enable EDGE CHK in MT7623*/
14964 + for (i = 0 ; i<5 ; i++) {
14965 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
14966 + tmp |= 0x40000000;
14967 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
14970 + err_total_flag = 1;
14971 + for (i = 0 ; i<5 ; i++) {
14972 + err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f;
14973 + rd_wd = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 16) & 0x000000ff;
14974 + //printk("ERR_CNT = %d, RD_WD =%x\n",err_cnt[i],rd_wd);
14975 + if ( err_cnt[i] !=0 ) {
14978 + else if (rd_wd != 0x55) {
14984 + err_total_flag = err_flag[i] & err_total_flag;
14987 + //printk("2nd Disable EDGE CHK in MT7623\n");
14988 + /* Disable EDGE CHK in MT7623*/
14989 + for (i = 0 ; i<5 ; i++) {
14990 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
14991 + tmp |= 0x40000000;
14992 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
14995 + //printk("2nd Disable EDGE CHK in MT7623\n");
14996 + /* Adjust RXC delay */
14997 + *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000; // Assert RX reset in MT7623
14998 + *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff; // RX clock gating in MT7623
14999 + read_data = *(volatile u_long *)(TRGMII_7623_base);
15000 + if (err_total_flag == 0) {
15001 + tmp = (read_data & 0x0000007f) + rxc_step_size;
15002 + //printk(" RXC delay = %d\n", tmp);
15004 + read_data &= 0xffffff80;
15005 + read_data |= tmp;
15007 + read_data &= 0xffffff80;
15009 + *(volatile u_long *)(TRGMII_7623_base) = read_data;
15011 + read_data &=0x000000ff;
15012 + *(volatile u_long *)(TRGMII_7623_base ) &= 0x7fffffff; // Release RX reset in MT7623
15013 + *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
15014 + for (i = 0 ; i<5 ; i++) {
15015 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) | 0x80000000; // Set bslip_en = ~bit_slip_en
15018 + //printk("Finish RXC Adjustment while loop\n");
15019 + //printk("Read RD_WD MT7623\n");
15020 + /* Read RD_WD MT7623*/
15021 + for (i = 0 ; i<5 ; i++) {
15023 + while (err_flag[i] != 0) {
15024 + /* Enable EDGE CHK in MT7623*/
15025 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15026 + tmp |= 0x40000000;
15027 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15029 + read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15030 + err_cnt[i] = (read_data >> 8) & 0x0000000f; // Read MT7623 Errcnt
15031 + rd_wd = (read_data >> 16) & 0x000000ff;
15032 + if (err_cnt[i] != 0 || rd_wd !=0x55){
15033 + err_flag [i] = 1;
15038 + /* Disable EDGE CHK in MT7623*/
15039 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) &= 0x4fffffff;
15040 + tmp |= 0x40000000;
15041 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15043 + //err_cnt[i] = ((read_data) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15044 + if (err_flag[i] !=0) {
15045 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7623
15046 + read_data = (read_data & 0xffffff80) | rd_tap;
15047 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15048 + tap_a[i] = rd_tap;
15050 + rd_tap = (read_data & 0x0000007f) + 4;
15051 + read_data = (read_data & 0xffffff80) | rd_tap;
15052 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15054 + //err_cnt[i] = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15057 + //printk("%dth bit Tap_a = %d\n", i, tap_a[i]);
15059 + //printk("Last While Loop\n");
15060 + for (i = 0 ; i<5 ; i++) {
15061 + //printk(" Bit%d\n", i);
15063 + while ((err_cnt[i] == 0) && (rd_tap !=128)) {
15064 + read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15065 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7623
15066 + read_data = (read_data & 0xffffff80) | rd_tap;
15067 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15068 + /* Enable EDGE CHK in MT7623*/
15069 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15070 + tmp |= 0x40000000;
15071 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15073 + err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15074 + /* Disable EDGE CHK in MT7623*/
15075 + tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15076 + tmp |= 0x40000000;
15077 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15079 + //err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f; // Read MT7623 Errcnt
15082 + tap_b[i] = rd_tap;// -rxd_step_size; // Record the max delay TAP_B
15083 + //printk("tap_b[%d] is %d \n", i,tap_b[i]);
15084 + final_tap[i] = (tap_a[i]+tap_b[i])/2; // Calculate RXD delay = (TAP_A + TAP_B)/2
15085 + //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
15086 + read_data = (read_data & 0xffffff80) | final_tap[i];
15087 + *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15089 +// /*word alignment*/
15090 +// mii_mgr_read(0x1F,0x7A50,&read_data);
15091 +// read_data &= ~(0xff);
15092 +// read_data |= 0xac;
15093 +// mii_mgr_write(0x1F,0x7A50,read_data);
15094 +// while (i <10) {
15098 +// /* Enable EDGE CHK in MT7623*/
15099 +// for (i=0; i<5; i++) {
15100 +// tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15101 +// tmp |= 0x40000000;
15102 +// *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15104 +// /* Disable EDGE CHK in MT7623*/
15105 +// tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15106 +// tmp |= 0x40000000;
15107 +// *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15109 +// read_data = *(volatile u_long *)(TRGMII_7623_RD_0+i*8);
15110 +// printk(" MT7623 training word = %x\n", read_data);
15114 + mii_mgr_read(0x1F,0x7A40,&read_data);
15115 + //printk(" MT7530 0x7A40 = %x\n", read_data);
15116 + read_data &=0x3fffffff;
15117 + mii_mgr_write(0x1F,0x7A40,read_data);
15121 +void trgmii_calibration_7530(void){
15123 + unsigned int tap_a[5];
15124 + unsigned int tap_b[5];
15125 + unsigned int final_tap[5];
15126 + unsigned int bslip_en;
15127 + unsigned int rxc_step_size;
15128 + unsigned int rxd_step_size;
15129 + unsigned int read_data;
15130 + unsigned int tmp;
15132 + unsigned int err_cnt[5];
15133 + unsigned int rd_wd;
15134 + unsigned int init_toggle_data;
15135 + unsigned int err_flag[5];
15136 + unsigned int err_total_flag;
15137 + unsigned int training_word;
15138 + unsigned int rd_tap;
15140 + u32 TRGMII_7623_base;
15141 + u32 TRGMII_7530_RD_0;
15145 + u32 TRGMII_RXCTL;
15146 + u32 TRGMII_RCK_CTRL;
15147 + u32 TRGMII_7530_base;
15148 + u32 TRGMII_7530_TX_base;
15149 + TRGMII_7623_base = 0xFB110300;
15150 + TRGMII_7530_base = 0x7A00;
15151 + TRGMII_7530_RD_0 = TRGMII_7530_base + 0x10;
15152 + TRGMII_RCK_CTRL = TRGMII_7623_base;
15153 + rxd_step_size = 0x1;
15154 + rxc_step_size = 0x8;
15155 + init_toggle_data = 0x00000055;
15156 + training_word = 0x000000AC;
15158 + TRGMII_7530_TX_base = TRGMII_7530_base + 0x50;
15160 + //printk("Calibration begin ........\n");
15161 + *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
15162 + mii_mgr_read(0x1F, 0x7a10, &read_data);
15163 + //printk("TRGMII_7530_RD_0 is %x\n", read_data);
15165 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15166 + read_data &= 0x3fffffff;
15167 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // RX clock gating in MT7530
15169 + mii_mgr_read(0x1F,TRGMII_7530_base+0x78,&read_data);
15170 + read_data |= 0x00002000;
15171 + mii_mgr_write(0x1F,TRGMII_7530_base+0x78,read_data); // Set TX OE edge in MT7530
15173 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15174 + read_data |= 0x80000000;
15175 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Assert RX reset in MT7530
15178 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15179 + read_data &= 0x7fffffff;
15180 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Release RX reset in MT7530
15182 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15183 + read_data |= 0xC0000000;
15184 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // Disable RX clock gating in MT7530
15186 + //printk("Enable Training Mode in MT7623\n");
15187 + /*Enable Training Mode in MT7623*/
15188 + *(volatile u_long *)(TRGMII_7623_base + 0x40) &= 0xbfffffff;
15189 + *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
15190 + *(volatile u_long *)(TRGMII_7623_base + 0x78) &= 0xfffff0ff;
15191 + *(volatile u_long *)(TRGMII_7623_base + 0x78) |= 0x00000400;
15193 + err_total_flag =0;
15194 + //printk("Adjust RXC delay in MT7530\n");
15196 + while (err_total_flag == 0 && (read_data != 0x68)) {
15197 + //printk("2nd Enable EDGE CHK in MT7530\n");
15198 + /* Enable EDGE CHK in MT7530*/
15199 + for (i = 0 ; i<5 ; i++) {
15200 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15201 + read_data |= 0x40000000;
15202 + read_data &= 0x4fffffff;
15203 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15205 + //printk("2nd Disable EDGE CHK in MT7530\n");
15206 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&err_cnt[i]);
15207 + //printk("***** MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
15208 + //printk("MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
15209 + err_cnt[i] >>= 8;
15210 + err_cnt[i] &= 0x0000ff0f;
15211 + rd_wd = err_cnt[i] >> 8;
15212 + rd_wd &= 0x000000ff;
15213 + err_cnt[i] &= 0x0000000f;
15214 + //mii_mgr_read(0x1F,0x7a10,&read_data);
15215 + if ( err_cnt[i] !=0 ) {
15218 + else if (rd_wd != 0x55) {
15224 + err_total_flag = err_flag[i];
15226 + err_total_flag = err_flag[i] & err_total_flag;
15228 + /* Disable EDGE CHK in MT7530*/
15229 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15230 + read_data |= 0x40000000;
15231 + read_data &= 0x4fffffff;
15232 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15235 + /*Adjust RXC delay*/
15236 + if (err_total_flag ==0) {
15237 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15238 + read_data |= 0x80000000;
15239 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Assert RX reset in MT7530
15241 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15242 + read_data &= 0x3fffffff;
15243 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // RX clock gating in MT7530
15245 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15247 + tmp &= 0x0000007f;
15248 + tmp += rxc_step_size;
15249 + //printk("Current rxc delay = %d\n", tmp);
15250 + read_data &= 0xffffff80;
15251 + read_data |= tmp;
15252 + mii_mgr_write (0x1F,TRGMII_7530_base,read_data);
15253 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15254 + //printk("Current RXC delay = %x\n", read_data);
15256 + mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15257 + read_data &= 0x7fffffff;
15258 + mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Release RX reset in MT7530
15260 + mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15261 + read_data |= 0xc0000000;
15262 + mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // Disable RX clock gating in MT7530
15266 + //printk("RXC delay is %d\n", tmp);
15267 + //printk("Finish RXC Adjustment while loop\n");
15269 + //printk("Read RD_WD MT7530\n");
15270 + /* Read RD_WD MT7530*/
15271 + for (i = 0 ; i<5 ; i++) {
15273 + while (err_flag[i] != 0) {
15274 + /* Enable EDGE CHK in MT7530*/
15275 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15276 + read_data |= 0x40000000;
15277 + read_data &= 0x4fffffff;
15278 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15280 + err_cnt[i] = (read_data >> 8) & 0x0000000f;
15281 + rd_wd = (read_data >> 16) & 0x000000ff;
15282 + //printk("##### %dth bit ERR_CNT = %x RD_WD =%x ######\n", i, err_cnt[i],rd_wd);
15283 + if (err_cnt[i] != 0 || rd_wd !=0x55){
15284 + err_flag [i] = 1;
15289 + if (err_flag[i] !=0 ) {
15290 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7530
15291 + read_data = (read_data & 0xffffff80) | rd_tap;
15292 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15293 + tap_a[i] = rd_tap;
15295 + tap_a[i] = (read_data & 0x0000007f); // Record the min delay TAP_A
15296 + rd_tap = tap_a[i] + 0x4;
15297 + read_data = (read_data & 0xffffff80) | rd_tap ;
15298 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15301 + /* Disable EDGE CHK in MT7530*/
15302 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15303 + read_data |= 0x40000000;
15304 + read_data &= 0x4fffffff;
15305 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15309 + //printk("%dth bit Tap_a = %d\n", i, tap_a[i]);
15311 + //printk("Last While Loop\n");
15312 + for (i = 0 ; i<5 ; i++) {
15314 + while (err_cnt[i] == 0 && (rd_tap!=128)) {
15315 + /* Enable EDGE CHK in MT7530*/
15316 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15317 + read_data |= 0x40000000;
15318 + read_data &= 0x4fffffff;
15319 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15321 + err_cnt[i] = (read_data >> 8) & 0x0000000f;
15322 + //rd_tap = (read_data & 0x0000007f) + 0x4; // Add RXD delay in MT7530
15323 + if (err_cnt[i] == 0 && (rd_tap!=128)) {
15324 + rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7530
15325 + read_data = (read_data & 0xffffff80) | rd_tap;
15326 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15328 + /* Disable EDGE CHK in MT7530*/
15329 + mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15330 + read_data |= 0x40000000;
15331 + read_data &= 0x4fffffff;
15332 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15335 + tap_b[i] = rd_tap;// - rxd_step_size; // Record the max delay TAP_B
15336 + //printk("%dth bit Tap_b = %d, ERR_CNT=%d\n", i, tap_b[i],err_cnt[i]);
15337 + final_tap[i] = (tap_a[i]+tap_b[i])/2; // Calculate RXD delay = (TAP_A + TAP_B)/2
15338 + //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
15340 + read_data = ( read_data & 0xffffff80) | final_tap[i];
15341 + mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15343 + *(volatile u_long *)(TRGMII_7623_base + 0x40) &=0x3fffffff;
15347 +void set_trgmii_325_delay_setting(void)
15350 + *(volatile u_long *)(0xfb110300) = 0x80020050;
15351 + *(volatile u_long *)(0xfb110304) = 0x00980000;
15352 + *(volatile u_long *)(0xfb110300) = 0x40020050;
15353 + *(volatile u_long *)(0xfb110304) = 0xc0980000;
15354 + *(volatile u_long *)(0xfb110310) = 0x00000028;
15355 + *(volatile u_long *)(0xfb110318) = 0x0000002e;
15356 + *(volatile u_long *)(0xfb110320) = 0x0000002d;
15357 + *(volatile u_long *)(0xfb110328) = 0x0000002b;
15358 + *(volatile u_long *)(0xfb110330) = 0x0000002a;
15359 + *(volatile u_long *)(0xfb110340) = 0x00020000;
15361 + mii_mgr_write(31, 0x7a00, 0x10);
15362 + mii_mgr_write(31, 0x7a10, 0x23);
15363 + mii_mgr_write(31, 0x7a18, 0x27);
15364 + mii_mgr_write(31, 0x7a20, 0x24);
15365 + mii_mgr_write(31, 0x7a28, 0x29);
15366 + mii_mgr_write(31, 0x7a30, 0x24);
15371 +void setup_internal_gsw(void)
15377 + mt7623_pinmux_set(); /* TODO: just for bring up, should be removed!!! */
15380 + /* GE1: RGMII mode setting */
15381 + *(volatile u_long *)(0xfb110300) = 0x80020000;
15382 + *(volatile u_long *)(0xfb110304) = 0x00980000;
15383 + *(volatile u_long *)(0xfb110300) = 0x40020000;
15384 + *(volatile u_long *)(0xfb110304) = 0xc0980000;
15385 + *(volatile u_long *)(0xfb110310) = 0x00000041;
15386 + *(volatile u_long *)(0xfb110318) = 0x00000044;
15387 + *(volatile u_long *)(0xfb110320) = 0x00000043;
15388 + *(volatile u_long *)(0xfb110328) = 0x00000042;
15389 + *(volatile u_long *)(0xfb110330) = 0x00000042;
15390 + *(volatile u_long *)(0xfb110340) = 0x00020000;
15391 + *(volatile u_long *)(0xfb110390) &= 0xfffffff8; //RGMII mode
15393 + /* GE1: TRGMII mode setting */
15394 + *(volatile u_long *)(0xfb110390) |= 0x00000002; //TRGMII mode
15397 + /*Todo: Hardware reset Switch*/
15398 + /*Hardware reset Switch*/
15399 +#if defined(CONFIG_ARCH_MT7623)
15400 + regValue = *(volatile u_long *)(0xfb00000c);
15401 + /*MT7530 Reset. Flows for MT7623 and MT7683 are both executed.*/
15402 + /* Should Modify this section if EFUSE is ready*/
15403 + /*For MT7683 reset MT7530*/
15404 + if(!(regValue & (1<<16)))
15406 + *(volatile u_long *)(0xf0005520) &= ~(1<<1);
15408 + *(volatile u_long *)(0xf0005520) |= (1<<1);
15411 + //printk("Assert MT7623 RXC reset\n");
15412 + *(volatile u_long *)(0xfb110300) |= 0x80000000; // Assert MT7623 RXC reset
15413 + /*For MT7623 reset MT7530*/
15414 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
15416 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
15420 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
15421 + for(i=0;i<=4;i++)
15424 + mii_mgr_read(i, 0x0 ,®Value);
15425 + regValue |= (0x1<<11);
15426 + mii_mgr_write(i, 0x0, regValue);
15428 + mii_mgr_write(31, 0x7000, 0x3); //reset switch
15431 +#if defined (CONFIG_MT7621_ASIC) || defined (CONFIG_ARCH_MT7623)
15433 + if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
15434 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
15435 + mii_mgr_write(31, 0x3600, 0x5e30b);
15439 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
15440 + mii_mgr_write(31, 0x3600, 0x5e33b);
15441 + mii_mgr_read(31, 0x3600 ,®Value);
15444 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
15447 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
15448 + //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
15449 + mii_mgr_read(31, 0x7804 ,®Value);
15450 + regValue &= ~(1<<8); //Enable Port 6
15451 + regValue |= (1<<6); //Disable Port 5
15452 + regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
15454 +#if defined (CONFIG_RAETH_GMAC2)
15455 + //RGMII2=Normal mode
15456 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
15458 + //GMAC2= RGMII mode
15459 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
15460 + mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
15461 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
15462 + enable_auto_negotiate(0);//set polling address
15464 + /* set MT7530 Port 5 to PHY 0/4 mode */
15465 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
15466 + regValue &= ~((1<<13)|(1<<6));
15467 + regValue |= ((1<<7)|(1<<16)|(1<<20));
15468 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
15469 + regValue &= ~((1<<13)|(1<<6)|(1<<20));
15470 + regValue |= ((1<<7)|(1<<16));
15472 + /*Set MT7530 phy direct access mode**/
15473 + regValue &= ~(1<<5);
15475 + //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
15477 + regValue |= (1<<16);//change HW-TRAP
15478 + printk("change HW-TRAP to 0x%x\n",regValue);
15479 + mii_mgr_write(31, 0x7804 ,regValue);
15481 + mii_mgr_read(31, 0x7800, ®Value);
15482 + regValue = (regValue >> 9) & 0x3;
15483 + if(regValue == 0x3)//25Mhz Xtal
15485 + else if(regValue == 0x2) //40Mhz
15490 + if(xtal_mode == 1) { //25Mhz Xtal
15492 + } else if(xtal_mode == 2) { //40Mhz
15493 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock
15494 + mii_mgr_write(0, 14, 0x410);
15495 + mii_mgr_write(0, 13, 0x401f);
15496 + mii_mgr_write(0, 14, 0x0);
15498 + mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL
15499 + mii_mgr_write(0, 14, 0x40d);
15500 + mii_mgr_write(0, 13, 0x401f);
15501 + mii_mgr_write(0, 14, 0x2020);
15503 + mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz
15504 + mii_mgr_write(0, 14, 0x40e);
15505 + mii_mgr_write(0, 13, 0x401f);
15506 + mii_mgr_write(0, 14, 0x119);
15508 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL
15509 + mii_mgr_write(0, 14, 0x40d);
15510 + mii_mgr_write(0, 13, 0x401f);
15511 + mii_mgr_write(0, 14, 0x2820);
15513 + udelay(20); //suggest by CD
15515 + mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock
15516 + mii_mgr_write(0, 14, 0x410);
15517 + mii_mgr_write(0, 13, 0x401f);
15522 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
15523 + mii_mgr_write(0, 14, 0x3); /*TRGMII*/
15525 + mii_mgr_write(0, 14, 0x1); /*RGMII*/
15526 +/* set MT7530 central align */
15527 + mii_mgr_read(31, 0x7830, ®Value);
15529 + regValue |= 1<<1;
15530 + mii_mgr_write(31, 0x7830, regValue);
15532 + mii_mgr_read(31, 0x7a40, ®Value);
15533 + regValue &= ~(1<<30);
15534 + mii_mgr_write(31, 0x7a40, regValue);
15536 + regValue = 0x855;
15537 + mii_mgr_write(31, 0x7a78, regValue);
15540 + mii_mgr_write(31, 0x7b00, 0x104); //delay setting for 10/1000M
15541 + mii_mgr_write(31, 0x7b04, 0x10); //delay setting for 10/1000M
15544 + mii_mgr_write(31, 0x7a54, 0x88); //lower GE1 driving
15545 + mii_mgr_write(31, 0x7a5c, 0x88); //lower GE1 driving
15546 + mii_mgr_write(31, 0x7a64, 0x88); //lower GE1 driving
15547 + mii_mgr_write(31, 0x7a6c, 0x88); //lower GE1 driving
15548 + mii_mgr_write(31, 0x7a74, 0x88); //lower GE1 driving
15549 + mii_mgr_write(31, 0x7a7c, 0x88); //lower GE1 driving
15550 + mii_mgr_write(31, 0x7810, 0x11); //lower GE2 driving
15551 + /*Set MT7623/MT7683 TX Driving*/
15552 + *(volatile u_long *)(0xfb110354) = 0x88;
15553 + *(volatile u_long *)(0xfb11035c) = 0x88;
15554 + *(volatile u_long *)(0xfb110364) = 0x88;
15555 + *(volatile u_long *)(0xfb11036c) = 0x88;
15556 + *(volatile u_long *)(0xfb110374) = 0x88;
15557 + *(volatile u_long *)(0xfb11037c) = 0x88;
15558 +#if defined (CONFIG_GE2_RGMII_AN)
15559 + *(volatile u_long *)(0xf0005f00) = 0xe00; //Set GE2 driving and slew rate
15561 + *(volatile u_long *)(0xf0005f00) = 0xa00; //Set GE2 driving and slew rate
15563 + *(volatile u_long *)(0xf00054c0) = 0x5; //set GE2 TDSEL
15564 + *(volatile u_long *)(0xf0005ed0) = 0; //set GE2 TUNE
15566 + /* TRGMII Clock */
15567 +// printk("Set TRGMII mode clock stage 1\n");
15568 + mii_mgr_write(0, 13, 0x1f);
15569 + mii_mgr_write(0, 14, 0x404);
15570 + mii_mgr_write(0, 13, 0x401f);
15571 + if (xtal_mode == 1){ //25MHz
15572 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15573 + mii_mgr_write(0, 14, 0x1d00); // 362.5MHz
15574 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15575 + mii_mgr_write(0, 14, 0x1a00); // 325MHz
15576 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15577 + mii_mgr_write(0, 14, 0x1400); //250MHz
15578 +#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
15579 + mii_mgr_write(0, 14, 0x00a0); //125MHz
15581 + }else if(xtal_mode == 2){//40MHz
15582 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15583 + mii_mgr_write(0, 14, 0x1220); // 362.5MHz
15584 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15585 + mii_mgr_write(0, 14, 0x1040); // 325MHz
15586 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15587 + mii_mgr_write(0, 14, 0x0c80); //250MHz
15588 +#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
15589 + mii_mgr_write(0, 14, 0x0640); //125MHz
15592 +// printk("Set TRGMII mode clock stage 2\n");
15593 + mii_mgr_write(0, 13, 0x1f);
15594 + mii_mgr_write(0, 14, 0x405);
15595 + mii_mgr_write(0, 13, 0x401f);
15596 + mii_mgr_write(0, 14, 0x0);
15598 +// printk("Set TRGMII mode clock stage 3\n");
15599 + mii_mgr_write(0, 13, 0x1f);
15600 + mii_mgr_write(0, 14, 0x409);
15601 + mii_mgr_write(0, 13, 0x401f);
15602 + mii_mgr_write(0, 14, 0x0087);
15604 +// printk("Set TRGMII mode clock stage 4\n");
15605 + mii_mgr_write(0, 13, 0x1f);
15606 + mii_mgr_write(0, 14, 0x40a);
15607 + mii_mgr_write(0, 13, 0x401f);
15608 + mii_mgr_write(0, 14, 0x0087);
15610 +// printk("Set TRGMII mode clock stage 5\n");
15611 + mii_mgr_write(0, 13, 0x1f);
15612 + mii_mgr_write(0, 14, 0x403);
15613 + mii_mgr_write(0, 13, 0x401f);
15614 + mii_mgr_write(0, 14, 0x1800);
15616 +// printk("Set TRGMII mode clock stage 6\n");
15617 + mii_mgr_write(0, 13, 0x1f);
15618 + mii_mgr_write(0, 14, 0x403);
15619 + mii_mgr_write(0, 13, 0x401f);
15620 + mii_mgr_write(0, 14, 0x1c00);
15622 +// printk("Set TRGMII mode clock stage 7\n");
15623 + mii_mgr_write(0, 13, 0x1f);
15624 + mii_mgr_write(0, 14, 0x401);
15625 + mii_mgr_write(0, 13, 0x401f);
15626 + mii_mgr_write(0, 14, 0xc020);
15628 +// printk("Set TRGMII mode clock stage 8\n");
15629 + mii_mgr_write(0, 13, 0x1f);
15630 + mii_mgr_write(0, 14, 0x406);
15631 + mii_mgr_write(0, 13, 0x401f);
15632 + mii_mgr_write(0, 14, 0xa030);
15634 +// printk("Set TRGMII mode clock stage 9\n");
15635 + mii_mgr_write(0, 13, 0x1f);
15636 + mii_mgr_write(0, 14, 0x406);
15637 + mii_mgr_write(0, 13, 0x401f);
15638 + mii_mgr_write(0, 14, 0xa038);
15640 + udelay(120); // for MT7623 bring up test
15642 +// printk("Set TRGMII mode clock stage 10\n");
15643 + mii_mgr_write(0, 13, 0x1f);
15644 + mii_mgr_write(0, 14, 0x410);
15645 + mii_mgr_write(0, 13, 0x401f);
15646 + mii_mgr_write(0, 14, 0x3);
15648 +// printk("Set TRGMII mode clock stage 11\n");
15650 + mii_mgr_read(31, 0x7830 ,®Value);
15651 + regValue &=0xFFFFFFFC;
15652 + regValue |=0x00000001;
15653 + mii_mgr_write(31, 0x7830, regValue);
15655 +// printk("Set TRGMII mode clock stage 12\n");
15656 + mii_mgr_read(31, 0x7a40 ,®Value);
15657 + regValue &= ~(0x1<<30);
15658 + regValue &= ~(0x1<<28);
15659 + mii_mgr_write(31, 0x7a40, regValue);
15661 + //mii_mgr_write(31, 0x7a78, 0x855);
15662 + mii_mgr_write(31, 0x7a78, 0x55);
15663 +// printk(" Adjust MT7530 TXC delay\n");
15664 + udelay(100); // for mt7623 bring up test
15666 +// printk(" Release MT7623 RXC Reset\n");
15667 + *(volatile u_long *)(0xfb110300) &= 0x7fffffff; // Release MT7623 RXC reset
15669 + for(i=0;i<=4;i++)
15671 + mii_mgr_write(i, 13, 0x7);
15672 + mii_mgr_write(i, 14, 0x3C);
15673 + mii_mgr_write(i, 13, 0x4007);
15674 + mii_mgr_write(i, 14, 0x0);
15677 + //Disable EEE 10Base-Te:
15678 + for(i=0;i<=4;i++)
15680 + mii_mgr_write(i, 13, 0x1f);
15681 + mii_mgr_write(i, 14, 0x027b);
15682 + mii_mgr_write(i, 13, 0x401f);
15683 + mii_mgr_write(i, 14, 0x1177);
15686 + for(i=0;i<=4;i++)
15689 + mii_mgr_read(i, 0x0 ,®Value);
15690 + regValue &= ~(0x1<<11);
15691 + mii_mgr_write(i, 0x0, regValue);
15694 + for(i=0;i<=4;i++) {
15695 + mii_mgr_read(i, 4, ®Value);
15696 + regValue |= (3<<7); //turn on 100Base-T Advertisement
15697 + mii_mgr_write(i, 4, regValue);
15699 + mii_mgr_read(i, 9, ®Value);
15700 + regValue |= (3<<8); //turn on 1000Base-T Advertisement
15701 + mii_mgr_write(i, 9, regValue);
15704 + mii_mgr_read(i, 0, ®Value);
15705 + regValue |= (1 << 9);
15706 + mii_mgr_write(i, 0, regValue);
15709 + mii_mgr_read(31, 0x7808 ,®Value);
15710 + regValue |= (3<<16); //Enable INTR
15711 + mii_mgr_write(31, 0x7808 ,regValue);
15714 +void mt7623_ethifsys_init(void)
15716 +#define TRGPLL_CON0 (0xF0209280)
15717 +#define TRGPLL_CON1 (0xF0209284)
15718 +#define TRGPLL_CON2 (0xF0209288)
15719 +#define TRGPLL_PWR_CON0 (0xF020928C)
15720 +#define ETHPLL_CON0 (0xF0209290)
15721 +#define ETHPLL_CON1 (0xF0209294)
15722 +#define ETHPLL_CON2 (0xF0209298)
15723 +#define ETHPLL_PWR_CON0 (0xF020929C)
15724 +#define ETH_PWR_CON (0xF00062A0)
15725 +#define HIF_PWR_CON (0xF00062A4)
15727 + u32 temp, pwr_ack_status;
15728 + /*=========================================================================*/
15729 + /* Enable ETHPLL & TRGPLL*/
15730 + /*=========================================================================*/
15732 + temp = sysRegRead(ETHPLL_PWR_CON0);
15733 + sysRegWrite(ETHPLL_PWR_CON0, temp | 0x1);
15735 + temp = sysRegRead(TRGPLL_PWR_CON0);
15736 + sysRegWrite(TRGPLL_PWR_CON0, temp | 0x1);
15738 + udelay(5); /* wait for xPLL_PWR_ON ready (min delay is 1us)*/
15740 + /* xPLL ISO Disable*/
15741 + temp = sysRegRead(ETHPLL_PWR_CON0);
15742 + sysRegWrite(ETHPLL_PWR_CON0, temp & ~0x2);
15744 + temp = sysRegRead(TRGPLL_PWR_CON0);
15745 + sysRegWrite(TRGPLL_PWR_CON0, temp & ~0x2);
15747 + /* xPLL Frequency Set*/
15748 + temp = sysRegRead(ETHPLL_CON0);
15749 + sysRegWrite(ETHPLL_CON0, temp | 0x1);
15750 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15751 + temp = sysRegRead(TRGPLL_CON0);
15752 + sysRegWrite(TRGPLL_CON0, temp | 0x1);
15753 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15754 + sysRegWrite(TRGPLL_CON1, 0xB2000000);
15755 + temp = sysRegRead(TRGPLL_CON0);
15756 + sysRegWrite(TRGPLL_CON0, temp | 0x1);
15757 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15758 + sysRegWrite(TRGPLL_CON1, 0xCCEC4EC5);
15759 + sysRegWrite(TRGPLL_CON0, 0x121);
15761 + udelay(40); /* wait for PLL stable (min delay is 20us)*/
15764 + /*=========================================================================*/
15765 + /* Power on ETHDMASYS and HIFSYS*/
15766 + /*=========================================================================*/
15767 + /* Power on ETHDMASYS*/
15768 + sysRegWrite(0xF0006000, 0x0b160001);
15769 + pwr_ack_status = (sysRegRead(ETH_PWR_CON) & 0x0000f000) >> 12;
15771 + if(pwr_ack_status == 0x0) {
15772 + printk("ETH already turn on and power on flow will be skipped...\n");
15774 + temp = sysRegRead(ETH_PWR_CON) ;
15775 + sysRegWrite(ETH_PWR_CON, temp | 0x4); /* PWR_ON*/
15776 + temp = sysRegRead(ETH_PWR_CON) ;
15777 + sysRegWrite(ETH_PWR_CON, temp | 0x8); /* PWR_ON_S*/
15779 + udelay(5); /* wait power settle time (min delay is 1us)*/
15781 + temp = sysRegRead(ETH_PWR_CON) ;
15782 + sysRegWrite(ETH_PWR_CON, temp & ~0x10); /* PWR_CLK_DIS*/
15783 + temp = sysRegRead(ETH_PWR_CON) ;
15784 + sysRegWrite(ETH_PWR_CON, temp & ~0x2); /* PWR_ISO*/
15785 + temp = sysRegRead(ETH_PWR_CON) ;
15786 + sysRegWrite(ETH_PWR_CON, temp & ~0x100); /* SRAM_PDN 0*/
15787 + temp = sysRegRead(ETH_PWR_CON) ;
15788 + sysRegWrite(ETH_PWR_CON, temp & ~0x200); /* SRAM_PDN 1*/
15789 + temp = sysRegRead(ETH_PWR_CON) ;
15790 + sysRegWrite(ETH_PWR_CON, temp & ~0x400); /* SRAM_PDN 2*/
15791 + temp = sysRegRead(ETH_PWR_CON) ;
15792 + sysRegWrite(ETH_PWR_CON, temp & ~0x800); /* SRAM_PDN 3*/
15794 + udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
15796 + temp = sysRegRead(ETH_PWR_CON) ;
15797 + sysRegWrite(ETH_PWR_CON, temp | 0x1); /* PWR_RST_B*/
15800 + /* Power on HIFSYS*/
15801 + pwr_ack_status = (sysRegRead(HIF_PWR_CON) & 0x0000f000) >> 12;
15802 + if(pwr_ack_status == 0x0) {
15803 + printk("HIF already turn on and power on flow will be skipped...\n");
15806 + temp = sysRegRead(HIF_PWR_CON) ;
15807 + sysRegWrite(HIF_PWR_CON, temp | 0x4); /* PWR_ON*/
15808 + temp = sysRegRead(HIF_PWR_CON) ;
15809 + sysRegWrite(HIF_PWR_CON, temp | 0x8); /* PWR_ON_S*/
15811 + udelay(5); /* wait power settle time (min delay is 1us)*/
15813 + temp = sysRegRead(HIF_PWR_CON) ;
15814 + sysRegWrite(HIF_PWR_CON, temp & ~0x10); /* PWR_CLK_DIS*/
15815 + temp = sysRegRead(HIF_PWR_CON) ;
15816 + sysRegWrite(HIF_PWR_CON, temp & ~0x2); /* PWR_ISO*/
15817 + temp = sysRegRead(HIF_PWR_CON) ;
15818 + sysRegWrite(HIF_PWR_CON, temp & ~0x100); /* SRAM_PDN 0*/
15819 + temp = sysRegRead(HIF_PWR_CON) ;
15820 + sysRegWrite(HIF_PWR_CON, temp & ~0x200); /* SRAM_PDN 1*/
15821 + temp = sysRegRead(HIF_PWR_CON) ;
15822 + sysRegWrite(HIF_PWR_CON, temp & ~0x400); /* SRAM_PDN 2*/
15823 + temp = sysRegRead(HIF_PWR_CON) ;
15824 + sysRegWrite(HIF_PWR_CON, temp & ~0x800); /* SRAM_PDN 3*/
15826 + udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
15828 + temp = sysRegRead(HIF_PWR_CON) ;
15829 + sysRegWrite(HIF_PWR_CON, temp | 0x1); /* PWR_RST_B*/
15832 + /* Release mt7530 reset */
15833 + temp = le32_to_cpu(*(volatile u_long *)(0xfb000034));
15834 + temp &= ~(BIT(2));
15835 + *(volatile u_long *)(0xfb000034) = temp;
15840 + * ra2882eth_init - Module Init code
15842 + * Called by kernel to register net_device
15846 +static int fe_probe(struct platform_device *pdev)
15849 + struct net_device *dev = alloc_etherdev(sizeof(END_DEVICE));
15851 + fe_irq = platform_get_irq(pdev, 0);
15853 +#ifdef CONFIG_RALINK_VISTA_BASIC
15855 + mii_mgr_read(29, 31, &sw_id);
15856 + is_switch_175c = (sw_id == 0x175c) ? 1:0;
15862 + strcpy(dev->name, DEV_NAME);
15863 + printk("%s:%s[%d]%d\n", __FILE__, __func__, __LINE__, fe_irq);
15864 + dev->irq = fe_irq;
15865 + dev->addr_len = 6;
15866 + dev->base_addr = RALINK_FRAME_ENGINE_BASE;
15868 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
15869 + rather_probe(dev);
15871 + dev->init = rather_probe;
15873 + ra2880_setup_dev_fptable(dev);
15875 + /* net_device structure Init */
15876 + ethtool_init(dev);
15877 + printk("Ralink APSoC Ethernet Driver Initilization. %s %d rx/tx descriptors allocated, mtu = %d!\n", RAETH_VERSION, NUM_RX_DESC, dev->mtu);
15878 +#ifdef CONFIG_RAETH_NAPI
15879 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
15880 + printk("NAPI enable, Tx Ring = %d, Rx Ring = %d\n", NUM_TX_DESC, NUM_RX_DESC);
15882 + printk("NAPI enable, weight = %d, Tx Ring = %d, Rx Ring = %d\n", dev->weight, NUM_TX_DESC, NUM_RX_DESC);
15886 + /* Register net device for the driver */
15887 + if ( register_netdev(dev) != 0) {
15888 + printk(KERN_WARNING " " __FILE__ ": No ethernet port found.\n");
15893 +#ifdef CONFIG_RAETH_NETLINK
15894 + csr_netlink_init();
15896 + ret = debug_proc_init();
15898 + dev_raether = dev;
15899 +#ifdef CONFIG_ARCH_MT7623
15900 + mt7623_ethifsys_init();
15911 +void fe_sw_init(void)
15913 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
15914 + unsigned int regValue = 0;
15917 + // Case1: RT288x/RT3883/MT7621 GE1 + GigaPhy
15918 +#if defined (CONFIG_GE1_RGMII_AN)
15919 + enable_auto_negotiate(1);
15920 + if (isMarvellGigaPHY(1)) {
15921 +#if defined (CONFIG_RT3883_FPGA)
15922 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, ®Value);
15923 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
15924 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
15926 + printk("\n Reset MARVELL phy\n");
15927 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &regValue);
15928 + regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
15929 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, regValue);
15931 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &regValue);
15932 + regValue |= 1<<15; //PHY Software Reset
15933 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
15934 +#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
15935 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &regValue);
15936 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
15937 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
15939 + /*10Mbps, debug*/
15940 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, 0x461);
15942 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &regValue);
15943 + regValue |= 1<<9; //restart AN
15944 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
15948 + if (isVtssGigaPHY(1)) {
15949 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 1);
15950 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &regValue);
15951 + printk("Vitesse phy skew: %x --> ", regValue);
15952 + regValue |= (0x3<<12);
15953 + regValue &= ~(0x3<<14);
15954 + printk("%x\n", regValue);
15955 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, regValue);
15956 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0);
15958 +#if defined (CONFIG_RALINK_MT7621)
15959 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
15961 +#endif // CONFIG_GE1_RGMII_AN //
15963 + // Case2: RT3883/MT7621 GE2 + GigaPhy
15964 +#if defined (CONFIG_GE2_RGMII_AN)
15965 + enable_auto_negotiate(2);
15966 + if (isMarvellGigaPHY(2)) {
15967 +#if defined (CONFIG_RT3883_FPGA)
15968 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &regValue);
15969 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
15970 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
15972 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &regValue);
15973 + regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
15974 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, regValue);
15976 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &regValue);
15977 + regValue |= 1<<15; //PHY Software Reset
15978 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
15979 +#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
15980 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &regValue);
15981 + regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
15982 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
15984 + /*10Mbps, debug*/
15985 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, 0x461);
15988 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &regValue);
15989 + regValue |= 1<<9; //restart AN
15990 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
15994 + if (isVtssGigaPHY(2)) {
15995 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 1);
15996 + mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &regValue);
15997 + printk("Vitesse phy skew: %x --> ", regValue);
15998 + regValue |= (0x3<<12);
15999 + regValue &= ~(0x3<<14);
16000 + printk("%x\n", regValue);
16001 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, regValue);
16002 + mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0);
16004 +#if defined (CONFIG_RALINK_MT7621)
16005 + //RGMII2=Normal mode
16006 + *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
16007 + //GMAC2= RGMII mode
16008 + *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
16010 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
16012 +#endif // CONFIG_GE2_RGMII_AN //
16014 + // Case3: RT305x/RT335x/RT6855/RT6855A/MT7620 + EmbeddedSW
16015 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_RALINK_MT7621) && !defined(CONFIG_ARCH_MT7623)
16016 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
16018 +#elif defined(CONFIG_RALINK_RT6855A)
16019 + rt6855A_gsw_init();
16021 + rt305x_esw_init();
16024 + // Case4: RT288x/RT388x/MT7621 GE1 + Internal GigaSW
16025 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
16026 +#if defined (CONFIG_RALINK_MT7621)
16027 + setup_internal_gsw();
16029 +#elif defined (CONFIG_ARCH_MT7623)
16030 +#if defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
16031 + *(volatile u_long *)(0xfb00002c) |= (1<<11);
16033 + *(volatile u_long *)(0xfb00002c) &= ~(1<<11);
16035 + setup_internal_gsw();
16036 + trgmii_calibration_7623();
16037 + trgmii_calibration_7530();
16038 + //*(volatile u_long *)(0xfb110300) |= (0x1f << 24); //Just only for 312.5/325MHz
16039 + *(volatile u_long *)(0xfb110340) = 0x00020000;
16040 + *(volatile u_long *)(0xfb110304) &= 0x3fffffff; // RX clock gating in MT7623
16041 + *(volatile u_long *)(0xfb110300) |= 0x80000000; // Assert RX reset in MT7623
16042 + *(volatile u_long *)(0xfb110300 ) &= 0x7fffffff; // Release RX reset in MT7623
16043 + *(volatile u_long *)(0xfb110300 +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
16044 +/*GE1@125MHz(RGMII mode) TX delay adjustment*/
16045 +#if defined (CONFIG_GE1_RGMII_FORCE_1000)
16046 + *(volatile u_long *)(0xfb110350) = 0x55;
16047 + *(volatile u_long *)(0xfb110358) = 0x55;
16048 + *(volatile u_long *)(0xfb110360) = 0x55;
16049 + *(volatile u_long *)(0xfb110368) = 0x55;
16050 + *(volatile u_long *)(0xfb110370) = 0x55;
16051 + *(volatile u_long *)(0xfb110378) = 0x855;
16055 +#elif defined (CONFIG_MT7623_FPGA) /* Nelson: remove for bring up, should be added!!! */
16056 + setup_fpga_gsw();
16058 + sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_1000_FD);
16062 + // Case5: RT388x/MT7621 GE2 + GigaSW
16063 +#if defined (CONFIG_GE2_RGMII_FORCE_1000)
16064 +#if defined (CONFIG_RALINK_MT7621)
16065 + setup_external_gsw();
16067 + sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_1000_FD);
16071 + // Case6: RT288x GE1 /RT388x,MT7621 GE1/GE2 + (10/100 Switch or 100PHY)
16072 +#if defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
16074 + //set GMAC to MII or RvMII mode
16075 +#if defined (CONFIG_RALINK_RT3883)
16076 + regValue = sysRegRead(SYSCFG1);
16077 +#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE1_MII_AN)
16078 + regValue &= ~(0x3 << 12);
16079 + regValue |= 0x1 << 12; // GE1 MII Mode
16080 +#elif defined (CONFIG_GE1_RVMII_FORCE_100)
16081 + regValue &= ~(0x3 << 12);
16082 + regValue |= 0x2 << 12; // GE1 RvMII Mode
16085 +#if defined (CONFIG_GE2_MII_FORCE_100) || defined (CONFIG_GE2_MII_AN)
16086 + regValue &= ~(0x3 << 14);
16087 + regValue |= 0x1 << 14; // GE2 MII Mode
16088 +#elif defined (CONFIG_GE2_RVMII_FORCE_100)
16089 + regValue &= ~(0x3 << 14);
16090 + regValue |= 0x2 << 14; // GE2 RvMII Mode
16092 + sysRegWrite(SYSCFG1, regValue);
16093 +#endif // CONFIG_RALINK_RT3883 //
16095 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
16097 +#if defined (CONFIG_GE1_MII_FORCE_100)
16098 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x5e337);//(P0, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
16100 +#if defined (CONFIG_GE2_MII_FORCE_100)
16101 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x5e337);//(P1, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
16103 +#if defined (CONFIG_GE1_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
16104 + enable_auto_negotiate(1);
16105 +#if defined (CONFIG_RALINK_MT7621)
16106 + sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
16109 +#if defined (CONFIG_GE2_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
16110 + enable_auto_negotiate(2);
16111 +#if defined (CONFIG_RALINK_MT7621)
16112 + sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
16117 +#if defined (CONFIG_GE1_MII_FORCE_100)
16118 +#if defined (CONFIG_RALINK_MT7621)
16120 + sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_100_FD);
16123 +#if defined (CONFIG_GE2_MII_FORCE_100)
16124 +#if defined (CONFIG_RALINK_MT7621)
16126 + sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_100_FD);
16129 + //add switch configuration here for other switch chips.
16130 +#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE2_MII_FORCE_100)
16131 + // IC+ 175x: force IC+ switch cpu port is 100/FD
16132 + mii_mgr_write(29, 22, 0x8420);
16136 +#endif // defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY) //
16142 + * ra2882eth_cleanup_module - Module Exit code
16144 + * Cmd 'rmmod' will invoke the routine to exit the module
16148 + void ra2882eth_cleanup_module(void)
16150 + struct net_device *dev = dev_raether;
16151 + END_DEVICE *ei_local;
16153 + ei_local = netdev_priv(dev);
16155 +#ifdef CONFIG_PSEUDO_SUPPORT
16156 + unregister_netdev(ei_local->PseudoDev);
16157 + free_netdev(ei_local->PseudoDev);
16159 + unregister_netdev(dev);
16160 + RAETH_PRINT("Free ei_local and unregister netdev...\n");
16162 + free_netdev(dev);
16163 + debug_proc_exit();
16164 +#ifdef CONFIG_RAETH_NETLINK
16165 + csr_netlink_end();
16169 +EXPORT_SYMBOL(set_fe_dma_glo_cfg);
16170 +//module_init(ra2882eth_init);
16171 +//module_exit(ra2882eth_cleanup_module);
16173 +const struct of_device_id of_fe_match[] = {
16174 + { .compatible = "mediatek,mt7623-net", },
16178 +MODULE_DEVICE_TABLE(of, of_fe_match);
16180 +static struct platform_driver fe_driver = {
16181 + .probe = fe_probe,
16182 +// .remove = ra2882eth_cleanup_module,
16184 + .name = "ralink_soc_eth",
16185 + .owner = THIS_MODULE,
16186 + .of_match_table = of_fe_match,
16190 +static int __init init_rtfe(void)
16193 + ret = platform_driver_register(&fe_driver);
16197 +static void __exit exit_rtfe(void)
16199 + platform_driver_unregister(&fe_driver);
16202 +module_init(init_rtfe);
16203 +module_exit(exit_rtfe);
16206 +MODULE_LICENSE("GPL");
16208 +++ b/drivers/net/ethernet/raeth/raether.h
16210 +#ifndef RA2882ETHEND_H
16211 +#define RA2882ETHEND_H
16213 +#ifdef DSP_VIA_NONCACHEABLE
16214 +#define ESRAM_BASE 0xa0800000 /* 0x0080-0000 ~ 0x00807FFF */
16216 +#define ESRAM_BASE 0x80800000 /* 0x0080-0000 ~ 0x00807FFF */
16219 +#define RX_RING_BASE ((int)(ESRAM_BASE + 0x7000))
16220 +#define TX_RING_BASE ((int)(ESRAM_BASE + 0x7800))
16222 +#if defined(CONFIG_RALINK_RT2880)
16223 +#define NUM_TX_RINGS 1
16225 +#define NUM_TX_RINGS 4
16227 +#ifdef MEMORY_OPTIMIZATION
16228 +#ifdef CONFIG_RAETH_ROUTER
16229 +#define NUM_RX_DESC 32 //128
16230 +#define NUM_TX_DESC 32 //128
16231 +#elif CONFIG_RT_3052_ESW
16232 +#define NUM_RX_DESC 16 //64
16233 +#define NUM_TX_DESC 16 //64
16235 +#define NUM_RX_DESC 32 //128
16236 +#define NUM_TX_DESC 32 //128
16238 +//#define NUM_RX_MAX_PROCESS 32
16239 +#define NUM_RX_MAX_PROCESS 32
16241 +#if defined (CONFIG_RAETH_ROUTER)
16242 +#define NUM_RX_DESC 256
16243 +#define NUM_TX_DESC 256
16244 +#elif defined (CONFIG_RT_3052_ESW)
16245 +#if defined (CONFIG_RALINK_MT7621)
16246 +#define NUM_RX_DESC 512
16247 +#define NUM_QRX_DESC 16
16248 +#define NUM_TX_DESC 512
16250 +#define NUM_RX_DESC 256
16251 +#define NUM_QRX_DESC NUM_RX_DESC
16252 +#define NUM_TX_DESC 256
16255 +#define NUM_RX_DESC 256
16256 +#define NUM_QRX_DESC NUM_RX_DESC
16257 +#define NUM_TX_DESC 256
16259 +#if defined(CONFIG_RALINK_RT3883) || defined(CONFIG_RALINK_MT7620)
16260 +#define NUM_RX_MAX_PROCESS 2
16262 +#define NUM_RX_MAX_PROCESS 16
16265 +#define NUM_LRO_RX_DESC 16
16267 +#if defined (CONFIG_SUPPORT_OPENWRT)
16268 +#define DEV_NAME "eth0"
16269 +#define DEV2_NAME "eth1"
16271 +#define DEV_NAME "eth2"
16272 +#define DEV2_NAME "eth3"
16275 +#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7621)
16276 +#define GMAC0_OFFSET 0xE000
16277 +#define GMAC2_OFFSET 0xE006
16279 +#define GMAC0_OFFSET 0x28
16280 +#define GMAC2_OFFSET 0x22
16283 +#if defined(CONFIG_RALINK_RT6855A)
16284 +#define IRQ_ENET0 22
16285 +#elif defined(CONFIG_ARCH_MT7623)
16286 +#define IRQ_ENET0 232
16288 +#define IRQ_ENET0 3 /* hardware interrupt #3, defined in RT2880 Soc Design Spec Rev 0.03, pp43 */
16291 +#if defined (CONFIG_RAETH_HW_LRO)
16292 +#define HW_LRO_TIMER_UNIT 1
16293 +#define HW_LRO_REFRESH_TIME 50000
16294 +#define HW_LRO_MAX_AGG_CNT 64
16295 +#define HW_LRO_AGG_DELTA 1
16296 +#if defined(CONFIG_RAETH_PDMA_DVT)
16297 +#define MAX_LRO_RX_LENGTH 10240
16299 +#define MAX_LRO_RX_LENGTH (PAGE_SIZE - SKB_DATA_ALIGN(NET_SKB_PAD + sizeof(struct skb_shared_info)))
16301 +#define HW_LRO_AGG_TIME 10 /* 200us */
16302 +#define HW_LRO_AGE_TIME 50
16303 +#define HW_LRO_BW_THRE 3000
16304 +#define HW_LRO_PKT_INT_ALPHA 100
16305 +#endif /* CONFIG_RAETH_HW_LRO */
16306 +#define FE_INT_STATUS_REG (*(volatile unsigned long *)(FE_INT_STATUS))
16307 +#define FE_INT_STATUS_CLEAN(reg) (*(volatile unsigned long *)(FE_INT_STATUS)) = reg
16309 +//#define RAETH_DEBUG
16310 +#ifdef RAETH_DEBUG
16311 +#define RAETH_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
16313 +#define RAETH_PRINT(fmt, args...) { }
16316 +struct net_device_stats *ra_get_stats(struct net_device *dev);
16318 +void ei_tx_timeout(struct net_device *dev);
16319 +int rather_probe(struct net_device *dev);
16320 +int ei_open(struct net_device *dev);
16321 +int ei_close(struct net_device *dev);
16323 +int ra2882eth_init(void);
16324 +void ra2882eth_cleanup_module(void);
16326 +void ei_xmit_housekeeping(unsigned long data);
16328 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
16329 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
16330 +u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
16331 +u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data);
16332 +u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data);
16333 +void fe_sw_init(void);
16337 +++ b/drivers/net/ethernet/raeth/raether_hwlro.c
16339 +#include <linux/module.h>
16340 +#include <linux/version.h>
16341 +#include <linux/kernel.h>
16342 +#include <linux/types.h>
16343 +#include <linux/pci.h>
16344 +#include <linux/init.h>
16345 +#include <linux/skbuff.h>
16346 +#include <linux/if_vlan.h>
16347 +#include <linux/if_ether.h>
16348 +#include <linux/fs.h>
16349 +#include <asm/uaccess.h>
16350 +#include <asm/rt2880/surfboardint.h>
16351 +#include <linux/delay.h>
16352 +#include <linux/sched.h>
16353 +#include <asm/rt2880/rt_mmap.h>
16354 +#include "ra2882ethreg.h"
16355 +#include "raether.h"
16356 +#include "ra_mac.h"
16357 +#include "ra_ioctl.h"
16358 +#include "ra_rfrw.h"
16360 +#if defined(CONFIG_RAETH_HW_LRO_FORCE)
16361 +int set_fe_lro_ring1_cfg(struct net_device *dev)
16365 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring1_cfg()\n");
16367 + /* 1. Set RX ring mode to force port */
16368 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
16370 + /* 2. Configure lro ring */
16371 + /* 2.1 set src/destination TCP ports */
16372 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 1122);
16373 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 3344);
16374 + /* 2.2 set src/destination IPs */
16375 + str_to_ip(&ip, "10.10.10.3");
16376 + sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
16377 + str_to_ip(&ip, "10.10.10.254");
16378 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
16379 + /* 2.3 IPv4 force port mode */
16380 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
16381 + /* 2.4 IPv6 force port mode */
16382 + SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING1, 1);
16384 + /* 3. Set Age timer: 10 msec. */
16385 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
16387 + /* 4. Valid LRO ring */
16388 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
16393 +int set_fe_lro_ring2_cfg(struct net_device *dev)
16397 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring2_cfg()\n");
16399 + /* 1. Set RX ring mode to force port */
16400 + SET_PDMA_RXRING2_MODE(PDMA_RX_FORCE_PORT);
16402 + /* 2. Configure lro ring */
16403 + /* 2.1 set src/destination TCP ports */
16404 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 5566);
16405 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 7788);
16406 + /* 2.2 set src/destination IPs */
16407 + str_to_ip(&ip, "10.10.10.3");
16408 + sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
16409 + str_to_ip(&ip, "10.10.10.254");
16410 + sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
16411 + /* 2.3 IPv4 force port mode */
16412 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
16413 + /* 2.4 IPv6 force port mode */
16414 + SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING2, 1);
16416 + /* 3. Set Age timer: 10 msec. */
16417 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
16419 + /* 4. Valid LRO ring */
16420 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
16425 +int set_fe_lro_ring3_cfg(struct net_device *dev)
16429 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring3_cfg()\n");
16431 + /* 1. Set RX ring mode to force port */
16432 + SET_PDMA_RXRING3_MODE(PDMA_RX_FORCE_PORT);
16434 + /* 2. Configure lro ring */
16435 + /* 2.1 set src/destination TCP ports */
16436 + SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 9900);
16437 + SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 99);
16438 + /* 2.2 set src/destination IPs */
16439 + str_to_ip(&ip, "10.10.10.3");
16440 + sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
16441 + str_to_ip(&ip, "10.10.10.254");
16442 + sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
16443 + /* 2.3 IPv4 force port mode */
16444 + SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
16445 + /* 2.4 IPv6 force port mode */
16446 + SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING3, 1);
16448 + /* 3. Set Age timer: 10 msec. */
16449 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
16451 + /* 4. Valid LRO ring */
16452 + SET_PDMA_RXRING3_VALID(1);
16457 +int set_fe_lro_glo_cfg(struct net_device *dev)
16459 + unsigned int regVal = 0;
16461 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_glo_cfg()\n");
16463 + /* 1 Set max AGG timer: 10 msec. */
16464 + SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
16466 + /* 2. Set max LRO agg count */
16467 + SET_PDMA_LRO_MAX_AGG_CNT(HW_LRO_MAX_AGG_CNT);
16469 + /* PDMA prefetch enable setting */
16470 + SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
16472 + /* 2.1 IPv4 checksum update enable */
16473 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
16475 + /* 3. Polling relinquish */
16476 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
16479 + /* 4. Enable LRO */
16480 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
16481 + regVal |= PDMA_LRO_EN;
16482 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
16487 +int set_fe_lro_auto_cfg(struct net_device *dev)
16489 + unsigned int regVal = 0;
16492 + netdev_printk(KERN_CRIT, dev, "set_fe_lro_auto_cfg()\n");
16494 + /* 1.1 Set my IP_1 */
16495 + str_to_ip(&ip, "10.10.10.254");
16496 + sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
16497 + sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
16498 + sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
16499 + sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
16500 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
16502 + /* 1.2 Set my IP_2 */
16503 + str_to_ip(&ip, "10.10.20.254");
16504 + sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
16505 + sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
16506 + sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
16507 + sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
16508 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
16510 + /* 1.3 Set my IP_3 */
16511 + sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
16512 + sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
16513 + sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
16514 + sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
16515 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
16517 + /* 1.4 Set my IP_4 */
16518 + sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
16519 + sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
16520 + sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
16521 + sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
16522 + SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
16524 + /* 2.1 Set RX ring1~3 to auto-learn modes */
16525 + SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
16526 + SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
16527 + SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
16529 + /* 2.2 Valid LRO ring */
16530 + SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
16531 + SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
16532 + SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
16533 + SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
16535 + /* 2.3 Set AGE timer */
16536 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
16537 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
16538 + SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
16540 + /* 2.4 Set max AGG timer */
16541 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, HW_LRO_AGG_TIME);
16542 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, HW_LRO_AGG_TIME);
16543 + SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, HW_LRO_AGG_TIME);
16545 + /* 2.5 Set max LRO agg count */
16546 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
16547 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
16548 + SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
16550 + /* 3.0 IPv6 LRO enable */
16551 + SET_PDMA_LRO_IPV6_EN(1);
16553 + /* 3.1 IPv4 checksum update enable */
16554 + SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
16556 + /* 3.2 TCP push option check disable */
16557 + //SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(0);
16559 + /* PDMA prefetch enable setting */
16560 + SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
16562 + /* 3.2 switch priority comparison to byte count mode */
16563 +/* SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE); */
16564 + SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_PKT_CNT_MODE);
16566 + /* 3.3 bandwidth threshold setting */
16567 + SET_PDMA_LRO_BW_THRESHOLD(HW_LRO_BW_THRE);
16569 + /* 3.4 auto-learn score delta setting */
16570 + sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
16572 + /* 3.5 Set ALT timer to 20us: (unit: 20us) */
16573 + SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
16574 + /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
16575 + SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
16577 + /* 3.7 the least remaining room of SDL0 in RXD for lro aggregation */
16578 + SET_PDMA_LRO_MIN_RXD_SDL(1522);
16580 + /* 4. Polling relinquish */
16581 + while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
16584 + /* 5. Enable LRO */
16585 + regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
16586 + regVal |= PDMA_LRO_EN;
16587 + sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
16591 +#endif /* CONFIG_RAETH_HW_LRO_FORCE */
16593 +int fe_hw_lro_init(struct net_device *dev)
16596 + END_DEVICE *ei_local = netdev_priv(dev);
16598 + /* Initial RX Ring 3 */
16599 + ei_local->rx_ring3 =
16600 + pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16601 + &ei_local->phy_rx_ring3);
16602 + for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16603 + memset(&ei_local->rx_ring3[i], 0, sizeof(struct PDMA_rxdesc));
16604 + ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
16605 + ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
16606 + ei_local->rx_ring3[i].rxd_info2.PLEN0 =
16607 + SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16608 + ei_local->rx_ring3[i].rxd_info2.PLEN1 =
16609 + SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16610 + ei_local->rx_ring3[i].rxd_info1.PDP0 =
16611 + dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data,
16612 + MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16614 + netdev_printk(KERN_CRIT, dev,
16615 + "\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",
16616 + ei_local->phy_rx_ring3, ei_local->rx_ring3);
16617 + /* Initial RX Ring 2 */
16618 + ei_local->rx_ring2 =
16619 + pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16620 + &ei_local->phy_rx_ring2);
16621 + for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16622 + memset(&ei_local->rx_ring2[i], 0, sizeof(struct PDMA_rxdesc));
16623 + ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
16624 + ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
16625 + ei_local->rx_ring2[i].rxd_info2.PLEN0 =
16626 + SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16627 + ei_local->rx_ring2[i].rxd_info2.PLEN1 =
16628 + SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16629 + ei_local->rx_ring2[i].rxd_info1.PDP0 =
16630 + dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data,
16631 + MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16633 + netdev_printk(KERN_CRIT, dev,
16634 + "\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",
16635 + ei_local->phy_rx_ring2, ei_local->rx_ring2);
16636 + /* Initial RX Ring 1 */
16637 + ei_local->rx_ring1 =
16638 + pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16639 + &ei_local->phy_rx_ring1);
16640 + for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16641 + memset(&ei_local->rx_ring1[i], 0, sizeof(struct PDMA_rxdesc));
16642 + ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
16643 + ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
16644 + ei_local->rx_ring1[i].rxd_info2.PLEN0 =
16645 + SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16646 + ei_local->rx_ring1[i].rxd_info2.PLEN1 =
16647 + SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16648 + ei_local->rx_ring1[i].rxd_info1.PDP0 =
16649 + dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data,
16650 + MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16652 + netdev_printk(KERN_CRIT, dev,
16653 + "\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",
16654 + ei_local->phy_rx_ring1, ei_local->rx_ring1);
16656 + sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
16657 + sysRegWrite(RX_MAX_CNT3, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16658 + sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16659 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
16660 + sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
16661 + sysRegWrite(RX_MAX_CNT2, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16662 + sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16663 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
16664 + sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
16665 + sysRegWrite(RX_MAX_CNT1, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16666 + sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16667 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
16669 +#if defined(CONFIG_RAETH_HW_LRO_FORCE)
16670 + set_fe_lro_ring1_cfg(dev);
16671 + set_fe_lro_ring2_cfg(dev);
16672 + set_fe_lro_ring3_cfg(dev);
16673 + set_fe_lro_glo_cfg(dev);
16675 + set_fe_lro_auto_cfg(dev);
16676 +#endif /* CONFIG_RAETH_HW_LRO_FORCE */
16678 + /* HW LRO parameter settings */
16679 + ei_local->hw_lro_alpha = HW_LRO_PKT_INT_ALPHA;
16680 + ei_local->hw_lro_fix_setting = 1;
16684 +EXPORT_SYMBOL(fe_hw_lro_init);
16687 +++ b/drivers/net/ethernet/raeth/raether_pdma.c
16689 +#include <linux/module.h>
16690 +#include <linux/version.h>
16691 +#include <linux/kernel.h>
16692 +#include <linux/types.h>
16693 +#include <linux/pci.h>
16694 +#include <linux/init.h>
16695 +#include <linux/skbuff.h>
16696 +#include <linux/if_vlan.h>
16697 +#include <linux/if_ether.h>
16698 +#include <linux/fs.h>
16699 +#include <asm/uaccess.h>
16700 +#include <asm/rt2880/surfboardint.h>
16701 +#if defined (CONFIG_RAETH_TSO)
16702 +#include <linux/tcp.h>
16703 +#include <net/ipv6.h>
16704 +#include <linux/ip.h>
16705 +#include <net/ip.h>
16706 +#include <net/tcp.h>
16707 +#include <linux/in.h>
16708 +#include <linux/ppp_defs.h>
16709 +#include <linux/if_pppox.h>
16711 +#if defined (CONFIG_RAETH_LRO)
16712 +#include <linux/inet_lro.h>
16714 +#include <linux/delay.h>
16715 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
16716 +#include <linux/sched.h>
16719 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
16720 +#include <asm/rt2880/rt_mmap.h>
16722 +#include <linux/libata-compat.h>
16725 +#include "ra2882ethreg.h"
16726 +#include "raether.h"
16727 +#include "ra_mac.h"
16728 +#include "ra_ioctl.h"
16729 +#include "ra_rfrw.h"
16730 +#ifdef CONFIG_RAETH_NETLINK
16731 +#include "ra_netlink.h"
16733 +#if defined (CONFIG_RAETH_QOS)
16734 +#include "ra_qos.h"
16737 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
16738 +#include "../../../net/nat/hw_nat/ra_nat.h"
16740 +#if defined(CONFIG_RAETH_PDMA_DVT)
16741 +#include "dvt/raether_pdma_dvt.h"
16742 +#endif /* CONFIG_RAETH_PDMA_DVT */
16744 +#if !defined(CONFIG_RA_NAT_NONE)
16747 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
16748 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
16751 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
16754 +#include <asm/mipsregs.h>
16755 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
16756 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
16757 +#endif /* CONFIG_RA_CLASSIFIER */
16759 +#if defined (CONFIG_RALINK_RT3052_MP2)
16760 +int32_t mcast_rx(struct sk_buff * skb);
16761 +int32_t mcast_tx(struct sk_buff * skb);
16765 +#ifdef RA_MTD_RW_BY_NUM
16766 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
16768 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
16771 +/* gmac driver feature set config */
16772 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
16775 +#if defined (CONFIG_ARCH_MT7623)
16778 +#define DELAY_INT 1
16782 +//#define CONFIG_UNH_TEST
16783 +/* end of config */
16785 +#if defined (CONFIG_RAETH_JUMBOFRAME)
16786 +#define MAX_RX_LENGTH 4096
16788 +#define MAX_RX_LENGTH 1536
16791 +extern struct net_device *dev_raether;
16794 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
16795 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
16796 +extern int rx_calc_idx1;
16799 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
16800 +extern int rx_calc_idx0;
16801 +static unsigned long tx_cpu_owner_idx0=0;
16803 +extern unsigned long tx_ring_full;
16805 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
16806 +#include "ra_ethtool.h"
16807 +extern struct ethtool_ops ra_ethtool_ops;
16808 +#ifdef CONFIG_PSEUDO_SUPPORT
16809 +extern struct ethtool_ops ra_virt_ethtool_ops;
16810 +#endif // CONFIG_PSEUDO_SUPPORT //
16811 +#endif // (CONFIG_ETHTOOL //
16813 +#ifdef CONFIG_RALINK_VISTA_BASIC
16814 +int is_switch_175c = 1;
16817 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
16818 +struct QDMA_txdesc *free_head = NULL;
16821 +//#if defined (CONFIG_RAETH_LRO)
16823 +unsigned int lan_ip;
16824 +struct lro_para_struct lro_para;
16825 +int lro_flush_needed;
16826 +extern char const *nvram_get(int index, char *name);
16829 +#define KSEG1 0xa0000000
16830 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
16831 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
16833 +extern void set_fe_dma_glo_cfg(void);
16836 + * @brief cal txd number for a page
16840 + * @return frag_txd_num
16843 +unsigned int cal_frag_txd_num(unsigned int size)
16845 + unsigned int frag_txd_num = 0;
16849 + if(size > MAX_TXD_LEN){
16851 + size -= MAX_TXD_LEN;
16857 + return frag_txd_num;
16861 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
16862 +bool fq_qdma_init(struct net_device *dev)
16864 + END_DEVICE* ei_local = netdev_priv(dev);
16865 + unsigned int phy_free_head;
16866 + unsigned int phy_free_tail;
16867 + unsigned int *free_page_head = NULL;
16868 + unsigned int phy_free_page_head;
16871 + free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
16872 + if (unlikely(free_head == NULL)){
16873 + printk(KERN_ERR "QDMA FQ decriptor not available...\n");
16876 + memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
16878 + free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
16879 + if (unlikely(free_page_head == NULL)){
16880 + printk(KERN_ERR "QDMA FQ page not available...\n");
16883 + for (i=0; i < NUM_QDMA_PAGE; i++) {
16884 + free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
16885 + if(i < (NUM_QDMA_PAGE-1)){
16886 + free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
16890 + printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
16891 + printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
16892 + printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
16893 + printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
16896 + free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
16899 + phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
16901 + printk("phy_free_head is 0x%x!!!\n", phy_free_head);
16902 + printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
16903 + sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
16904 + sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
16905 + sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
16906 + sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
16908 + ei_local->free_head = free_head;
16909 + ei_local->phy_free_head = phy_free_head;
16910 + ei_local->free_page_head = free_page_head;
16911 + ei_local->phy_free_page_head = phy_free_page_head;
16916 +int fe_dma_init(struct net_device *dev)
16920 + unsigned int regVal;
16921 + END_DEVICE* ei_local = netdev_priv(dev);
16922 +#if defined (CONFIG_RAETH_QOS)
16928 + regVal = sysRegRead(PDMA_GLO_CFG);
16929 + if((regVal & RX_DMA_BUSY))
16931 + printk("\n RX_DMA_BUSY !!! ");
16934 + if((regVal & TX_DMA_BUSY))
16936 + printk("\n TX_DMA_BUSY !!! ");
16942 +#if defined(CONFIG_RAETH_PDMA_DVT)
16943 + pdma_dvt_set_dma_mode();
16944 +#endif /* CONFIG_RAETH_PDMA_DVT */
16946 +#if defined (CONFIG_RAETH_QOS)
16947 + for (i=0;i<NUM_TX_RINGS;i++){
16948 + for (j=0;j<NUM_TX_DESC;j++){
16949 + ei_local->skb_free[i][j]=0;
16951 + ei_local->free_idx[i]=0;
16954 + * RT2880: 2 x TX_Ring, 1 x Rx_Ring
16955 + * RT2883: 4 x TX_Ring, 1 x Rx_Ring
16956 + * RT3883: 4 x TX_Ring, 1 x Rx_Ring
16957 + * RT3052: 4 x TX_Ring, 1 x Rx_Ring
16959 + fe_tx_desc_init(dev, 0, 3, 1);
16960 + if (ei_local->tx_ring0 == NULL) {
16961 + printk("RAETH: tx ring0 allocation failed\n");
16965 + fe_tx_desc_init(dev, 1, 3, 1);
16966 + if (ei_local->tx_ring1 == NULL) {
16967 + printk("RAETH: tx ring1 allocation failed\n");
16971 + printk("\nphy_tx_ring0 = %08x, tx_ring0 = %p, size: %d bytes\n", ei_local->phy_tx_ring0, ei_local->tx_ring0, sizeof(struct PDMA_txdesc));
16973 + printk("\nphy_tx_ring1 = %08x, tx_ring1 = %p, size: %d bytes\n", ei_local->phy_tx_ring1, ei_local->tx_ring1, sizeof(struct PDMA_txdesc));
16975 +#if ! defined (CONFIG_RALINK_RT2880)
16976 + fe_tx_desc_init(dev, 2, 3, 1);
16977 + if (ei_local->tx_ring2 == NULL) {
16978 + printk("RAETH: tx ring2 allocation failed\n");
16982 + fe_tx_desc_init(dev, 3, 3, 1);
16983 + if (ei_local->tx_ring3 == NULL) {
16984 + printk("RAETH: tx ring3 allocation failed\n");
16988 + printk("\nphy_tx_ring2 = %08x, tx_ring2 = %p, size: %d bytes\n", ei_local->phy_tx_ring2, ei_local->tx_ring2, sizeof(struct PDMA_txdesc));
16990 + printk("\nphy_tx_ring3 = %08x, tx_ring3 = %p, size: %d bytes\n", ei_local->phy_tx_ring3, ei_local->tx_ring3, sizeof(struct PDMA_txdesc));
16992 +#endif // CONFIG_RALINK_RT2880 //
16994 + for (i=0;i<NUM_TX_DESC;i++){
16995 + ei_local->skb_free[i]=0;
16997 + ei_local->free_idx =0;
16998 +#if defined (CONFIG_MIPS)
16999 + ei_local->tx_ring0 = pci_alloc_consistent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0);
17001 + ei_local->tx_ring0 = dma_alloc_coherent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0, GFP_KERNEL);
17003 + printk("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n", ei_local->phy_tx_ring0, ei_local->tx_ring0);
17005 + for (i=0; i < NUM_TX_DESC; i++) {
17006 + memset(&ei_local->tx_ring0[i],0,sizeof(struct PDMA_txdesc));
17007 + ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
17008 + ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1;
17011 +#endif // CONFIG_RAETH_QOS
17013 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
17015 + fq_qdma_init(dev);
17019 + regVal = sysRegRead(QDMA_GLO_CFG);
17020 + if((regVal & RX_DMA_BUSY))
17022 + printk("\n RX_DMA_BUSY !!! ");
17025 + if((regVal & TX_DMA_BUSY))
17027 + printk("\n TX_DMA_BUSY !!! ");
17033 + /* Initial RX Ring 0*/
17035 +#ifdef CONFIG_32B_DESC
17036 + ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17037 + ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
17039 + ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
17041 + for (i = 0; i < NUM_QRX_DESC; i++) {
17042 + memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
17043 + ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
17044 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17045 + ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
17046 + ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17048 + ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
17050 + ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17052 + printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
17054 + regVal = sysRegRead(QDMA_GLO_CFG);
17055 + regVal &= 0x000000FF;
17057 + sysRegWrite(QDMA_GLO_CFG, regVal);
17058 + regVal=sysRegRead(QDMA_GLO_CFG);
17060 + /* Tell the adapter where the TX/RX rings are located. */
17062 + sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
17063 + sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
17064 + sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
17065 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17066 + rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
17068 + sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
17070 + ei_local->rx_ring0 = ei_local->qrx_ring;
17072 +#else /* PDMA RX */
17074 + /* Initial RX Ring 0*/
17075 +#ifdef CONFIG_32B_DESC
17076 + ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17077 + ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
17079 +#if defined (CONFIG_MIPS)
17080 + ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
17082 + ei_local->rx_ring0 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0, GFP_KERNEL);
17085 + for (i = 0; i < NUM_RX_DESC; i++) {
17086 + memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
17087 + ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
17088 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17089 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
17090 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17092 + ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
17094 + ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17096 + printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
17098 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17099 + /* Initial RX Ring 1*/
17100 +#ifdef CONFIG_32B_DESC
17101 + ei_local->rx_ring1 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17102 + ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
17104 +#if defined (CONFIG_MIPS)
17105 + ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
17107 + ei_local->rx_ring1 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1, GFP_KERNEL);
17111 + for (i = 0; i < NUM_RX_DESC; i++) {
17112 + memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
17113 + ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
17114 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17115 + ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
17116 + ei_local->rx_ring1[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17118 + ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
17120 + ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17122 + printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
17123 +#if defined(CONFIG_ARCH_MT7623)
17124 + /* Initial RX Ring 2*/
17125 + ei_local->rx_ring2 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring2);
17126 + for (i = 0; i < NUM_RX_DESC; i++) {
17127 + memset(&ei_local->rx_ring2[i],0,sizeof(struct PDMA_rxdesc));
17128 + ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
17129 + ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
17130 + ei_local->rx_ring2[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
17131 + ei_local->rx_ring2[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
17132 + ei_local->rx_ring2[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17134 + printk("\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",ei_local->phy_rx_ring2,ei_local->rx_ring2);
17135 + /* Initial RX Ring 3*/
17136 + ei_local->rx_ring3 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring3);
17137 + for (i = 0; i < NUM_RX_DESC; i++) {
17138 + memset(&ei_local->rx_ring3[i],0,sizeof(struct PDMA_rxdesc));
17139 + ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
17140 + ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
17141 + ei_local->rx_ring3[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
17142 + ei_local->rx_ring3[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
17143 + ei_local->rx_ring3[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17145 + printk("\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",ei_local->phy_rx_ring3,ei_local->rx_ring3);
17146 +#endif /* CONFIG_ARCH_MT7623 */
17151 + regVal = sysRegRead(PDMA_GLO_CFG);
17152 + regVal &= 0x000000FF;
17153 + sysRegWrite(PDMA_GLO_CFG, regVal);
17154 + regVal=sysRegRead(PDMA_GLO_CFG);
17156 + /* Tell the adapter where the TX/RX rings are located. */
17157 +#if !defined (CONFIG_RAETH_QOS)
17158 + sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_tx_ring0));
17159 + sysRegWrite(TX_MAX_CNT0, cpu_to_le32((u32) NUM_TX_DESC));
17160 + sysRegWrite(TX_CTX_IDX0, 0);
17161 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17162 + tx_cpu_owner_idx0 = 0;
17164 + sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
17167 +#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
17168 + sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
17169 + sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
17170 + sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
17171 +#else /* PDMA RX */
17172 + sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
17173 + sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
17174 + sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17177 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17178 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
17180 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
17181 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17182 + sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
17183 + sysRegWrite(RX_MAX_CNT1, cpu_to_le32((u32) NUM_RX_DESC));
17184 + sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17185 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17186 + rx_calc_idx1 = sysRegRead(RX_CALC_IDX1);
17188 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
17189 +#if defined(CONFIG_ARCH_MT7623)
17190 + sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
17191 + sysRegWrite(RX_MAX_CNT2, cpu_to_le32((u32) NUM_RX_DESC));
17192 + sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17193 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
17194 + sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
17195 + sysRegWrite(RX_MAX_CNT3, cpu_to_le32((u32) NUM_RX_DESC));
17196 + sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17197 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
17198 +#endif /* CONFIG_ARCH_MT7623 */
17200 +#if defined (CONFIG_RALINK_RT6855A)
17201 + regVal = sysRegRead(RX_DRX_IDX0);
17202 + regVal = (regVal == 0)? (NUM_RX_DESC - 1) : (regVal - 1);
17203 + sysRegWrite(RX_CALC_IDX0, cpu_to_le32(regVal));
17204 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17205 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
17207 + regVal = sysRegRead(TX_DTX_IDX0);
17208 + sysRegWrite(TX_CTX_IDX0, cpu_to_le32(regVal));
17209 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17210 + tx_cpu_owner_idx0 = regVal;
17212 + ei_local->free_idx = regVal;
17215 +#if defined (CONFIG_RAETH_QOS)
17216 + set_scheduler_weight();
17217 + set_schedule_pause_condition();
17218 + set_output_shaper();
17221 + set_fe_dma_glo_cfg();
17226 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
17228 + unsigned int length=skb->len;
17229 + END_DEVICE* ei_local = netdev_priv(dev);
17230 +#ifndef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17231 + unsigned long tx_cpu_owner_idx0 = sysRegRead(TX_CTX_IDX0);
17233 +#if defined (CONFIG_RAETH_TSO)
17234 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17235 + unsigned long ctx_idx_start_addr = tx_cpu_owner_idx0;
17237 + struct iphdr *iph = NULL;
17238 + struct tcphdr *th = NULL;
17239 + struct skb_frag_struct *frag;
17240 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
17242 + unsigned int len, size, offset, frag_txd_num, skb_txd_num ;
17243 +#endif // CONFIG_RAETH_TSO //
17245 +#if defined (CONFIG_RAETH_TSOV6)
17246 + struct ipv6hdr *ip6h = NULL;
17249 +#ifdef CONFIG_PSEUDO_SUPPORT
17250 + PSEUDO_ADAPTER *pAd;
17253 + while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17255 +#ifdef CONFIG_PSEUDO_SUPPORT
17256 + if (gmac_no == 2) {
17257 + if (ei_local->PseudoDev != NULL) {
17258 + pAd = netdev_priv(ei_local->PseudoDev);
17259 + pAd->stat.tx_errors++;
17263 + ei_local->stat.tx_errors++;
17266 +#if !defined (CONFIG_RAETH_TSO)
17267 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
17268 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = length;
17269 +#if defined (CONFIG_RALINK_MT7620)
17270 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
17271 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17272 + if (gmac_no == 1) {
17273 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
17275 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
17278 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
17279 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
17282 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
17283 + if (skb->ip_summed == CHECKSUM_PARTIAL){
17284 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
17286 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
17290 +#ifdef CONFIG_RAETH_HW_VLAN_TX
17291 + if(vlan_tx_tag_present(skb)) {
17292 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17293 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
17295 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
17298 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17299 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
17301 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
17306 +#if defined(CONFIG_RAETH_PDMA_DVT)
17307 + raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
17308 +#endif /* CONFIG_RAETH_PDMA_DVT */
17310 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17311 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
17312 + if(ra_sw_nat_hook_rx!= NULL){
17313 +#if defined (CONFIG_RALINK_MT7620)
17314 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
17315 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17316 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
17318 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
17320 + FOE_MAGIC_TAG(skb) = 0;
17325 +#if defined(CONFIG_RAETH_PDMA_DVT)
17326 + raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
17327 +#endif /* CONFIG_RAETH_PDMA_DVT */
17329 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
17332 + printk("---------------\n");
17333 + printk("tx_info1=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1);
17334 + printk("tx_info2=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2);
17335 + printk("tx_info3=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3);
17336 + printk("tx_info4=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4);
17340 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
17341 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = (length - skb->data_len);
17342 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = nr_frags ? 0:1;
17343 +#if defined (CONFIG_RALINK_MT7620)
17344 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
17345 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17346 + if (gmac_no == 1) {
17347 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
17349 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
17352 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
17353 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
17355 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO = 0;
17357 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
17358 + if (skb->ip_summed == CHECKSUM_PARTIAL){
17359 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
17361 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
17365 +#ifdef CONFIG_RAETH_HW_VLAN_TX
17366 + if(vlan_tx_tag_present(skb)) {
17367 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17368 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
17370 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
17373 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17374 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
17376 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
17381 +#if defined(CONFIG_RAETH_PDMA_DVT)
17382 + raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
17383 +#endif /* CONFIG_RAETH_PDMA_DVT */
17385 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17386 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
17387 + if(ra_sw_nat_hook_rx!= NULL){
17388 +#if defined (CONFIG_RALINK_MT7620)
17389 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
17390 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17391 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
17393 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
17395 + FOE_MAGIC_TAG(skb) = 0;
17402 + if(nr_frags > 0) {
17404 + for(i=0;i<nr_frags;i++) {
17405 + frag = &skb_shinfo(skb)->frags[i];
17406 + offset = frag->page_offset;
17407 + len = frag->size;
17408 + frag_txd_num = cal_frag_txd_num(len);
17410 + while(frag_txd_num > 0){
17411 + if(len < MAX_TXD_LEN)
17414 + size = MAX_TXD_LEN;
17415 + if(skb_txd_num%2 == 0) {
17416 + tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
17418 + while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17420 +#ifdef config_pseudo_support
17421 + if (gmac_no == 2) {
17422 + if (ei_local->pseudodev != null) {
17423 + pad = netdev_priv(ei_local->pseudodev);
17424 + pad->stat.tx_errors++;
17428 + ei_local->stat.tx_errors++;
17431 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
17432 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
17434 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
17436 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = size;
17438 + if( (i==(nr_frags-1)) && (frag_txd_num == 1))
17439 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 1;
17441 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 0;
17442 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
17444 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
17445 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
17447 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
17450 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1 = size;
17451 + if( (i==(nr_frags-1)) && (frag_txd_num == 1))
17452 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 1;
17454 + ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 0;
17464 +#if defined(CONFIG_RAETH_PDMA_DVT)
17465 + if( (pdma_dvt_get_debug_test_config() & PDMA_TEST_TSO_DEBUG) ){
17466 + printk("skb_shinfo(skb)->gso_segs = %d\n", skb_shinfo(skb)->gso_segs);
17468 +#endif /* CONFIG_RAETH_PDMA_DVT */
17469 + /* fill in MSS info in tcp checksum field */
17470 + if(skb_shinfo(skb)->gso_segs > 1) {
17472 +// TsoLenUpdate(skb->len);
17474 + /* TCP over IPv4 */
17475 + iph = (struct iphdr *)skb_network_header(skb);
17476 +#if defined (CONFIG_RAETH_TSOV6)
17477 + /* TCP over IPv6 */
17478 + ip6h = (struct ipv6hdr *)skb_network_header(skb);
17480 + if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
17481 + th = (struct tcphdr *)skb_transport_header(skb);
17482 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17483 + ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
17485 + ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
17487 + th->check = htons(skb_shinfo(skb)->gso_size);
17488 +#if defined (CONFIG_MIPS)
17489 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
17491 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
17495 +#if defined (CONFIG_RAETH_TSOV6)
17496 + /* TCP over IPv6 */
17497 + else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
17498 + th = (struct tcphdr *)skb_transport_header(skb);
17499 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17500 + ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
17502 + ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
17504 + th->check = htons(skb_shinfo(skb)->gso_size);
17505 +#if defined (CONFIG_MIPS)
17506 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
17508 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
17511 +#endif // CONFIG_RAETH_TSOV6 //
17514 +#if defined(CONFIG_RAETH_PDMA_DVT)
17515 + raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
17516 +#endif /* CONFIG_RAETH_PDMA_DVT */
17518 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17519 + ei_local->tx_ring0[ctx_idx_start_addr].txd_info2.DDONE_bit = 0;
17521 + ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info2.DDONE_bit = 0;
17523 +#endif // CONFIG_RAETH_TSO //
17525 + tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
17526 + while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17528 +// printk(KERN_ERR "%s: TXD=%lu TX DMA is Busy !!\n", dev->name, tx_cpu_owner_idx0);
17529 +#ifdef CONFIG_PSEUDO_SUPPORT
17530 + if (gmac_no == 2) {
17531 + if (ei_local->PseudoDev != NULL) {
17532 + pAd = netdev_priv(ei_local->PseudoDev);
17533 + pAd->stat.tx_errors++;
17537 + ei_local->stat.tx_errors++;
17539 + sysRegWrite(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0));
17541 +#ifdef CONFIG_PSEUDO_SUPPORT
17542 + if (gmac_no == 2) {
17543 + if (ei_local->PseudoDev != NULL) {
17544 + pAd = netdev_priv(ei_local->PseudoDev);
17545 + pAd->stat.tx_packets++;
17546 + pAd->stat.tx_bytes += length;
17551 + ei_local->stat.tx_packets++;
17552 + ei_local->stat.tx_bytes += length;
17554 +#ifdef CONFIG_RAETH_NAPI
17555 + if ( ei_local->tx_full == 1) {
17556 + ei_local->tx_full = 0;
17557 + netif_wake_queue(dev);
17564 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
17566 + END_DEVICE *ei_local = netdev_priv(dev);
17567 + unsigned long flags;
17568 + unsigned long tx_cpu_owner_idx;
17569 + unsigned int tx_cpu_owner_idx_next;
17570 + unsigned int num_of_txd = 0;
17571 +#if defined (CONFIG_RAETH_TSO)
17572 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
17573 + struct skb_frag_struct *frag;
17575 +#if !defined(CONFIG_RAETH_QOS)
17576 + unsigned int tx_cpu_owner_idx_next2;
17578 + int ring_no, queue_no, port_no;
17580 +#ifdef CONFIG_RALINK_VISTA_BASIC
17581 + struct vlan_ethhdr *veth;
17583 +#ifdef CONFIG_PSEUDO_SUPPORT
17584 + PSEUDO_ADAPTER *pAd;
17587 +#if !defined(CONFIG_RA_NAT_NONE)
17588 + if(ra_sw_nat_hook_tx!= NULL)
17590 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17591 + if(FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE)
17594 + //spin_lock_irqsave(&ei_local->page_lock, flags);
17595 + if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
17596 + //spin_unlock_irqrestore(&ei_local->page_lock, flags);
17599 + //spin_unlock_irqrestore(&ei_local->page_lock, flags);
17605 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
17608 + if(ra_classifier_hook_tx!= NULL)
17610 +#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
17611 + ra_classifier_hook_tx(skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
17613 + ra_classifier_hook_tx(skb, read_c0_count());
17616 +#endif /* CONFIG_RA_CLASSIFIER */
17618 +#if defined (CONFIG_RALINK_RT3052_MP2)
17622 +#if !defined (CONFIG_RALINK_RT6855) && !defined (CONFIG_RALINK_RT6855A) && \
17623 + !defined(CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
17625 +#define MIN_PKT_LEN 60
17626 + if (skb->len < MIN_PKT_LEN) {
17627 + if (skb_padto(skb, MIN_PKT_LEN)) {
17628 + printk("raeth: skb_padto failed\n");
17631 + skb_put(skb, MIN_PKT_LEN - skb->len);
17635 + dev->trans_start = jiffies; /* save the timestamp */
17636 + spin_lock_irqsave(&ei_local->page_lock, flags);
17637 +#if defined (CONFIG_MIPS)
17638 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
17640 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
17644 +#ifdef CONFIG_RALINK_VISTA_BASIC
17645 + veth = (struct vlan_ethhdr *)(skb->data);
17646 + if (is_switch_175c && veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
17647 + if ((veth->h_vlan_TCI & __constant_htons(VLAN_VID_MASK)) == 0) {
17648 + veth->h_vlan_TCI |= htons(VLAN_DEV_INFO(dev)->vlan_id);
17653 +#if defined (CONFIG_RAETH_QOS)
17654 + if(pkt_classifier(skb, gmac_no, &ring_no, &queue_no, &port_no)) {
17655 + get_tx_ctx_idx(ring_no, &tx_cpu_owner_idx);
17656 + tx_cpu_owner_idx_next = (tx_cpu_owner_idx + 1) % NUM_TX_DESC;
17657 + if(((ei_local->skb_free[ring_no][tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[ring_no][tx_cpu_owner_idx_next]==0)){
17658 + fe_qos_packet_send(dev, skb, ring_no, queue_no, port_no);
17660 + ei_local->stat.tx_dropped++;
17662 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
17667 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17668 + tx_cpu_owner_idx = tx_cpu_owner_idx0;
17670 + tx_cpu_owner_idx = sysRegRead(TX_CTX_IDX0);
17672 +#if defined (CONFIG_RAETH_TSO)
17673 +// num_of_txd = (nr_frags==0) ? 1 : ((nr_frags>>1) + 1);
17674 +// NumOfTxdUpdate(num_of_txd);
17675 + if(nr_frags != 0){
17676 + for(i=0;i<nr_frags;i++) {
17677 + frag = &skb_shinfo(skb)->frags[i];
17678 + num_of_txd += cal_frag_txd_num(frag->size);
17680 + num_of_txd = (num_of_txd >> 1) + 1;
17687 + tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % NUM_TX_DESC;
17689 + if(((ei_local->skb_free[tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[tx_cpu_owner_idx_next]==0)){
17690 + rt2880_eth_send(dev, skb, gmac_no);
17692 + tx_cpu_owner_idx_next2 = (tx_cpu_owner_idx_next + 1) % NUM_TX_DESC;
17694 + if(ei_local->skb_free[tx_cpu_owner_idx_next2]!=0){
17695 +#if defined (CONFIG_RAETH_SW_FC)
17696 + netif_stop_queue(dev);
17697 +#ifdef CONFIG_PSEUDO_SUPPORT
17698 + netif_stop_queue(ei_local->PseudoDev);
17704 +#ifdef CONFIG_PSEUDO_SUPPORT
17705 + if (gmac_no == 2) {
17706 + if (ei_local->PseudoDev != NULL) {
17707 + pAd = netdev_priv(ei_local->PseudoDev);
17708 + pAd->stat.tx_dropped++;
17712 + ei_local->stat.tx_dropped++;
17713 +#if defined (CONFIG_RAETH_SW_FC)
17714 + printk("tx_ring_full, drop packet\n");
17717 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
17721 +#if defined (CONFIG_RAETH_TSO)
17722 + /* SG: use multiple TXD to send the packet (only have one skb) */
17723 + ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd - 1) % NUM_TX_DESC] = skb;
17724 + while(--num_of_txd) {
17725 + ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd -1) % NUM_TX_DESC] = (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
17728 + ei_local->skb_free[tx_cpu_owner_idx] = skb;
17731 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
17735 +void ei_xmit_housekeeping(unsigned long unused)
17737 + struct net_device *dev = dev_raether;
17738 + END_DEVICE *ei_local = netdev_priv(dev);
17739 + struct PDMA_txdesc *tx_desc;
17740 + unsigned long skb_free_idx;
17741 + unsigned long tx_dtx_idx __maybe_unused;
17742 +#ifndef CONFIG_RAETH_NAPI
17743 + unsigned long reg_int_mask=0;
17746 +#ifdef CONFIG_RAETH_QOS
17748 + for (i=0;i<NUM_TX_RINGS;i++){
17749 + skb_free_idx = ei_local->free_idx[i];
17750 + if((ei_local->skb_free[i][skb_free_idx])==0){
17754 + get_tx_desc_and_dtx_idx(ei_local, i, &tx_dtx_idx, &tx_desc);
17756 + while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[i][skb_free_idx])!=0 ){
17757 + dev_kfree_skb_any((ei_local->skb_free[i][skb_free_idx]));
17759 + ei_local->skb_free[i][skb_free_idx]=0;
17760 + skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
17762 + ei_local->free_idx[i] = skb_free_idx;
17765 + tx_dtx_idx = sysRegRead(TX_DTX_IDX0);
17766 + tx_desc = ei_local->tx_ring0;
17767 + skb_free_idx = ei_local->free_idx;
17768 + if ((ei_local->skb_free[skb_free_idx]) != 0 && tx_desc[skb_free_idx].txd_info2.DDONE_bit==1) {
17769 + while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[skb_free_idx])!=0 ){
17770 +#if defined (CONFIG_RAETH_TSO)
17771 + if(ei_local->skb_free[skb_free_idx]!=(struct sk_buff *)0xFFFFFFFF) {
17772 + dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
17775 + dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
17777 + ei_local->skb_free[skb_free_idx]=0;
17778 + skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
17781 + netif_wake_queue(dev);
17782 +#ifdef CONFIG_PSEUDO_SUPPORT
17783 + netif_wake_queue(ei_local->PseudoDev);
17786 + ei_local->free_idx = skb_free_idx;
17787 + } /* if skb_free != 0 */
17790 +#ifndef CONFIG_RAETH_NAPI
17791 + reg_int_mask=sysRegRead(FE_INT_ENABLE);
17792 +#if defined (DELAY_INT)
17793 + sysRegWrite(FE_INT_ENABLE, reg_int_mask| TX_DLY_INT);
17796 + sysRegWrite(FE_INT_ENABLE, reg_int_mask | TX_DONE_INT0 \
17801 +#endif //CONFIG_RAETH_NAPI//
17806 +EXPORT_SYMBOL(ei_start_xmit);
17807 +EXPORT_SYMBOL(ei_xmit_housekeeping);
17808 +EXPORT_SYMBOL(fe_dma_init);
17809 +EXPORT_SYMBOL(rt2880_eth_send);
17811 +++ b/drivers/net/ethernet/raeth/raether_qdma.c
17813 +#include <linux/module.h>
17814 +#include <linux/version.h>
17815 +#include <linux/kernel.h>
17816 +#include <linux/types.h>
17817 +#include <linux/pci.h>
17818 +#include <linux/init.h>
17819 +#include <linux/skbuff.h>
17820 +#include <linux/if_vlan.h>
17821 +#include <linux/if_ether.h>
17822 +#include <linux/fs.h>
17823 +#include <asm/uaccess.h>
17824 +#include <asm/rt2880/surfboardint.h>
17825 +#if defined (CONFIG_RAETH_TSO)
17826 +#include <linux/tcp.h>
17827 +#include <net/ipv6.h>
17828 +#include <linux/ip.h>
17829 +#include <net/ip.h>
17830 +#include <net/tcp.h>
17831 +#include <linux/in.h>
17832 +#include <linux/ppp_defs.h>
17833 +#include <linux/if_pppox.h>
17835 +#include <linux/delay.h>
17836 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
17837 +#include <linux/sched.h>
17839 +#if defined (CONFIG_HW_SFQ)
17840 +#include <linux/if_vlan.h>
17841 +#include <net/ipv6.h>
17842 +#include <net/ip.h>
17843 +#include <linux/if_pppox.h>
17844 +#include <linux/ppp_defs.h>
17847 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
17848 +#include <asm/rt2880/rt_mmap.h>
17850 +#include <linux/libata-compat.h>
17853 +#include "ra2882ethreg.h"
17854 +#include "raether.h"
17855 +#include "ra_mac.h"
17856 +#include "ra_ioctl.h"
17857 +#include "ra_rfrw.h"
17858 +#ifdef CONFIG_RAETH_NETLINK
17859 +#include "ra_netlink.h"
17861 +#if defined (CONFIG_RAETH_QOS)
17862 +#include "ra_qos.h"
17865 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17866 +#include "../../../net/nat/hw_nat/ra_nat.h"
17870 +#if !defined(CONFIG_RA_NAT_NONE)
17873 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
17874 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
17877 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
17880 +#include <asm/mipsregs.h>
17881 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
17882 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
17883 +#endif /* CONFIG_RA_CLASSIFIER */
17885 +#if defined (CONFIG_RALINK_RT3052_MP2)
17886 +int32_t mcast_rx(struct sk_buff * skb);
17887 +int32_t mcast_tx(struct sk_buff * skb);
17890 +#ifdef RA_MTD_RW_BY_NUM
17891 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
17893 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
17896 +/* gmac driver feature set config */
17897 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
17900 +#if defined (CONFIG_ARCH_MT7623)
17903 +#define DELAY_INT 1
17907 +//#define CONFIG_UNH_TEST
17908 +/* end of config */
17910 +#if defined (CONFIG_RAETH_JUMBOFRAME)
17911 +#define MAX_RX_LENGTH 4096
17913 +#define MAX_RX_LENGTH 1536
17916 +extern struct net_device *dev_raether;
17918 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17919 +static int rx_dma_owner_idx1;
17920 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17921 +static int rx_calc_idx1;
17924 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17925 +static int rx_calc_idx0;
17926 +static unsigned long tx_cpu_owner_idx0=0;
17928 +extern unsigned long tx_ring_full;
17930 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
17931 +#include "ra_ethtool.h"
17932 +extern struct ethtool_ops ra_ethtool_ops;
17933 +#ifdef CONFIG_PSEUDO_SUPPORT
17934 +extern struct ethtool_ops ra_virt_ethtool_ops;
17935 +#endif // CONFIG_PSEUDO_SUPPORT //
17936 +#endif // (CONFIG_ETHTOOL //
17938 +#ifdef CONFIG_RALINK_VISTA_BASIC
17939 +int is_switch_175c = 1;
17942 +//skb->mark to queue mapping table
17943 +extern unsigned int M2Q_table[64];
17944 +struct QDMA_txdesc *free_head = NULL;
17945 +extern unsigned int lan_wan_separate;
17946 +#if defined (CONFIG_HW_SFQ)
17947 +extern unsigned int web_sfq_enable;
17948 +#define HwSfqQUp 3
17949 +#define HwSfqQDl 1
17951 +int dbg =0;//debug used
17952 +#if defined (CONFIG_HW_SFQ)
17953 +struct SFQ_table *sfq0;
17954 +struct SFQ_table *sfq1;
17955 +struct SFQ_table *sfq2;
17956 +struct SFQ_table *sfq3;
17959 +#define KSEG1 0xa0000000
17960 +#if defined (CONFIG_MIPS)
17961 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
17962 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
17964 +#define PHYS_TO_VIRT(x) phys_to_virt(x)
17965 +#define VIRT_TO_PHYS(x) virt_to_phys(x)
17968 +extern void set_fe_dma_glo_cfg(void);
17970 +#if defined (CONFIG_HW_SFQ)
17971 +ParseResult SfqParseResult;
17976 + * @brief: get the TXD index from its address
17978 + * @param: cpu_ptr
17980 + * @return: TXD index
17983 +static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
17985 + struct net_device *dev = dev_raether;
17986 + END_DEVICE *ei_local = netdev_priv(dev);
17988 + //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
17989 + //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
17990 + ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
17992 + return ctx_offset;
17999 + * @brief cal txd number for a page
18003 + * @return frag_txd_num
18006 +unsigned int cal_frag_txd_num(unsigned int size)
18008 + unsigned int frag_txd_num = 0;
18012 + if(size > MAX_TXD_LEN){
18014 + size -= MAX_TXD_LEN;
18020 + return frag_txd_num;
18025 + * @brief get free TXD from TXD queue
18027 + * @param free_txd
18031 +static int get_free_txd(struct QDMA_txdesc **free_txd)
18033 + struct net_device *dev = dev_raether;
18034 + END_DEVICE *ei_local = netdev_priv(dev);
18035 + unsigned int tmp_idx;
18037 + if(ei_local->free_txd_num > 0){
18038 + tmp_idx = ei_local->free_txd_head;
18039 + ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
18040 + ei_local->free_txd_num -= 1;
18041 + //*free_txd = &ei_local->txd_pool[tmp_idx];
18042 + *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
18045 + return NUM_TX_DESC;
18050 + * @brief add free TXD into TXD queue
18052 + * @param free_txd
18056 +int put_free_txd(int free_txd_idx)
18058 + struct net_device *dev = dev_raether;
18059 + END_DEVICE *ei_local = netdev_priv(dev);
18060 + ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
18061 + ei_local->free_txd_tail = free_txd_idx;
18062 + ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
18063 + ei_local->free_txd_num += 1;
18067 +/*define qdma initial alloc*/
+/*
+ * qdma_tx_desc_alloc - allocate the DMA-coherent QDMA TXD pool, build the
+ * index-linked free list, and hook one "null" TXD each onto the transmit
+ * (CTX/DTX) and release (CRX/DRX) hardware pointers.
+ *
+ * NOTE(review): this is a sampled patch fragment — error-path returns,
+ * loop braces and the final return were elided between the hunks below.
+ */
18073 + * @return 0: fail
18076 +bool qdma_tx_desc_alloc(void)
18078 +	struct net_device *dev = dev_raether;
18079 +	END_DEVICE *ei_local = netdev_priv(dev);
18080 +	struct QDMA_txdesc *free_txd = NULL;
18081 +	unsigned int txd_idx;
+	/* one coherent allocation holds all NUM_TX_DESC descriptors */
18085 +	ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
18086 +	printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
18088 +	if (ei_local->txd_pool == NULL) {
18089 +		printk("adapter->txd_pool allocation failed!\n");
18092 +	printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
18093 +	//set all txd_pool_info to 0.
+	/* txd_pool_info[i] = i+1 threads the free list; LS/OWN preset so HW
+	 * treats every descriptor as "last segment, owned by DMA" until used */
18094 +	for ( i = 0; i < NUM_TX_DESC; i++)
18096 +		ei_local->skb_free[i]= 0;
18097 +		ei_local->txd_pool_info[i] = i + 1;
18098 +		ei_local->txd_pool[i].txd_info3.LS_bit = 1;
18099 +		ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
18102 +	ei_local->free_txd_head = 0;
18103 +	ei_local->free_txd_tail = NUM_TX_DESC - 1;
18104 +	ei_local->free_txd_num = NUM_TX_DESC;
18107 +	//get free txd from txd pool
18108 +	txd_idx = get_free_txd(&free_txd);
18109 +	if( txd_idx == NUM_TX_DESC) {
18110 +		printk("get_free_txd fail\n");
18114 +	//add null TXD for transmit
18115 +	//ei_local->tx_dma_ptr = VIRT_TO_PHYS(free_txd);
18116 +	//ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
+	/* free_txd already holds a phy_txd_pool-relative (physical) address */
18117 +	ei_local->tx_dma_ptr = free_txd;
18118 +	ei_local->tx_cpu_ptr = free_txd;
18119 +	sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
18120 +	sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
18122 +	//get free txd from txd pool
18124 +	txd_idx = get_free_txd(&free_txd);
18125 +	if( txd_idx == NUM_TX_DESC) {
18126 +		printk("get_free_txd fail\n");
18129 +	// add null TXD for release
18130 +	//sysRegWrite(QTX_CRX_PTR, VIRT_TO_PHYS(free_txd));
18131 +	//sysRegWrite(QTX_DRX_PTR, VIRT_TO_PHYS(free_txd));
18132 +	sysRegWrite(QTX_CRX_PTR, free_txd);
18133 +	sysRegWrite(QTX_DRX_PTR, free_txd);
18134 +	printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
18136 +	printk(" POOL  HEAD_PTR | DMA_PTR | CPU_PTR \n");
18137 +	printk("----------------+---------+--------\n");
18138 +	printk("  0x%p 0x%08X 0x%08X\n",ei_local->txd_pool, ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
18141 +#if defined (CONFIG_HW_SFQ)
+/*
+ * sfq_init - program the QDMA hardware SFQ (stochastic fair queueing)
+ * engine and allocate the four 256-entry virtual-queue hash tables.
+ *
+ * NOTE(review): sampled patch fragment — loop closing braces and the
+ * return were elided. The sfq0..sfq3 locals below shadow the file-scope
+ * pointers of the same names declared earlier in this file, so the
+ * globals apparently never receive the allocated addresses — looks like
+ * a latent bug; verify against the full SDK source.
+ */
18142 +bool sfq_init(void)
18144 +	unsigned int regVal;
18146 +	unsigned int sfq_phy0;
18147 +	unsigned int sfq_phy1;
18148 +	unsigned int sfq_phy2;
18149 +	unsigned int sfq_phy3;
18150 +	struct SFQ_table *sfq0;
18151 +	struct SFQ_table *sfq1;
18152 +	struct SFQ_table *sfq2;
18153 +	struct SFQ_table *sfq3;
18155 +	regVal = sysRegRead(VQTX_GLO);
18156 +	regVal = regVal | VQTX_MIB_EN |(1<<16) ;
18157 +	sysRegWrite(VQTX_GLO, regVal);// Virtual table extends to 32bytes
18158 +	regVal = sysRegRead(VQTX_GLO);
18159 +	sysRegWrite(VQTX_NUM, (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) | (VQTX_NUM_3));
18160 +	sysRegWrite(VQTX_HASH_CFG, 0xF002710); //10 s change hash algorithm
18161 +	sysRegWrite(VQTX_VLD_CFG, 0x00);
18162 +	sysRegWrite(VQTX_HASH_SD, 0x0D);
+	/* flow-control thresholds; RED thresholds zeroed while SFQ is active */
18163 +	sysRegWrite(QDMA_FC_THRES, 0x9b9b4444);
18164 +	sysRegWrite(QDMA_HRED1, 0);
18165 +	sysRegWrite(QDMA_HRED2, 0);
18166 +	sysRegWrite(QDMA_SRED1, 0);
18167 +	sysRegWrite(QDMA_SRED2, 0);
+	/* 0xdeadbeef marks head/tail pointers of each hash bucket as empty */
18168 +	sfq0 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy0);
18169 +	memset(sfq0, 0x0, 256*sizeof(struct SFQ_table) );
18170 +	for (i=0; i < 256; i++) {
18171 +		sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
18172 +		sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
18175 +	sfq1 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy1);
18177 +	memset(sfq1, 0x0, 256*sizeof(struct SFQ_table) );
18178 +	for (i=0; i < 256; i++) {
18179 +		sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
18180 +		sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
18183 +	sfq2 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy2);
18184 +	memset(sfq2, 0x0, 256*sizeof(struct SFQ_table) );
18185 +	for (i=0; i < 256; i++) {
18186 +		sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
18187 +		sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
18190 +	sfq3 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy3);
18191 +	memset(sfq3, 0x0, 256*sizeof(struct SFQ_table) );
18192 +	for (i=0; i < 256; i++) {
18193 +		sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
18194 +		sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
18198 +	printk("*****sfq_phy0 is 0x%x!!!*******\n", sfq_phy0);
18199 +	printk("*****sfq_phy1 is 0x%x!!!*******\n", sfq_phy1);
18200 +	printk("*****sfq_phy2 is 0x%x!!!*******\n", sfq_phy2);
18201 +	printk("*****sfq_phy3 is 0x%x!!!*******\n", sfq_phy3);
18202 +	printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
18203 +	printk("*****sfq_virt1 is 0x%x!!!*******\n", sfq1);
18204 +	printk("*****sfq_virt2 is 0x%x!!!*******\n", sfq2);
18205 +	printk("*****sfq_virt3 is 0x%x!!!*******\n", sfq3);
18206 +	printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
+	/* hand the four per-queue hash tables to the hardware */
18207 +	sysRegWrite(VQTX_TB_BASE0, (u32)sfq_phy0);
18208 +	sysRegWrite(VQTX_TB_BASE1, (u32)sfq_phy1);
18209 +	sysRegWrite(VQTX_TB_BASE2, (u32)sfq_phy2);
18210 +	sysRegWrite(VQTX_TB_BASE3, (u32)sfq_phy3);
+/*
+ * fq_qdma_init - build the QDMA free-queue: a coherent ring of
+ * NUM_QDMA_PAGE descriptors, each pointing at one QDMA_PAGE_SIZE buffer,
+ * chained via NDP, then registered with the QDMA_FQ_* registers.
+ *
+ * NOTE(review): sampled patch fragment — the error-path returns after the
+ * two allocation failures and the final return were elided below.
+ */
18215 +bool fq_qdma_init(struct net_device *dev)
18217 +	END_DEVICE* ei_local = netdev_priv(dev);
18218 +	//struct QDMA_txdesc *free_head = NULL;
18219 +	unsigned int phy_free_head;
18220 +	unsigned int phy_free_tail;
18221 +	unsigned int *free_page_head = NULL;
18222 +	unsigned int phy_free_page_head;
+	/* descriptor ring for the hardware free queue (file-scope free_head) */
18225 +	free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
18226 +	if (unlikely(free_head == NULL)){
18227 +		printk(KERN_ERR "QDMA FQ decriptor not available...\n");
18230 +	memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
+	/* one contiguous slab of NUM_QDMA_PAGE data pages backing the ring */
18232 +	free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
18233 +	if (unlikely(free_page_head == NULL)){
18234 +		printk(KERN_ERR "QDMA FQ page not available...\n");
+	/* SDP -> i-th page; NDP -> next descriptor (last one left unlinked) */
18237 +	for (i=0; i < NUM_QDMA_PAGE; i++) {
18238 +		free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
18239 +		if(i < (NUM_QDMA_PAGE-1)){
18240 +			free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
18244 +			printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
18245 +			printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
18246 +			printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
18247 +			printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
18250 +		free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
18253 +	phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
18255 +	printk("phy_free_head is 0x%x!!!\n", phy_free_head);
18256 +	printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
+	/* register head/tail, counts, and buffer length with the QDMA engine */
18257 +	sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
18258 +	sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
18259 +	sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
18260 +	sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
+	/* stash virtual/physical addresses for teardown */
18262 +	ei_local->free_head = free_head;
18263 +	ei_local->phy_free_head = phy_free_head;
18264 +	ei_local->free_page_head = free_page_head;
18265 +	ei_local->phy_free_page_head = phy_free_page_head;
+/*
+ * fe_dma_init - top-level frame-engine DMA bring-up: SFQ engine (when
+ * CONFIG_HW_SFQ), QDMA free queue, TX descriptor pool, and the QDMA
+ * and/or PDMA RX rings, then the global DMA configuration.
+ *
+ * NOTE(review): sampled patch fragment — many #else/#endif lines, loop
+ * closes and the return were elided between the hunks below, so the
+ * #ifdef pairing cannot be fully reconstructed from this view.
+ */
18269 +int fe_dma_init(struct net_device *dev)
18273 +	unsigned int regVal;
18274 +	END_DEVICE* ei_local = netdev_priv(dev);
18277 +	#if defined (CONFIG_HW_SFQ)
18280 +	fq_qdma_init(dev);
+	/* spin (elided loop) until QDMA reports RX/TX DMA idle */
18284 +	regVal = sysRegRead(QDMA_GLO_CFG);
18285 +	if((regVal & RX_DMA_BUSY))
18287 +		printk("\n  RX_DMA_BUSY !!! ");
18290 +	if((regVal & TX_DMA_BUSY))
18292 +		printk("\n  TX_DMA_BUSY !!! ");
18297 +	/*tx desc alloc, add a NULL TXD to HW*/
18299 +	qdma_tx_desc_alloc();
18301 +	/* Initial RX Ring 0*/
18303 +#ifdef CONFIG_32B_DESC
18304 +	ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
18305 +	ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
18307 +	ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
+	/* each QDMA RX descriptor points at one preallocated skb data buffer */
18309 +	for (i = 0; i < NUM_QRX_DESC; i++) {
18310 +		memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
18311 +		ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
18312 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
18313 +		ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
18314 +		ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
18316 +		ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
18318 +		ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
18320 +	printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
+	/* keep only the low config byte, then point HW at the QDMA RX ring */
18322 +	regVal = sysRegRead(QDMA_GLO_CFG);
18323 +	regVal &= 0x000000FF;
18325 +	sysRegWrite(QDMA_GLO_CFG, regVal);
18326 +	regVal=sysRegRead(QDMA_GLO_CFG);
18328 +	/* Tell the adapter where the TX/RX rings are located. */
18330 +	sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
18331 +	sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
18332 +	sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
18333 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18334 +	rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
18336 +	sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
18338 +	ei_local->rx_ring0 = ei_local->qrx_ring;
18339 +#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
+	/* when RX runs on PDMA instead of QDMA, build a second ring there */
18340 +	/* Initial PDMA RX Ring 0*/
18341 +#ifdef CONFIG_32B_DESC
18342 +	ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
18343 +	ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
18345 +	ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
18347 +	for (i = 0; i < NUM_RX_DESC; i++) {
18348 +		memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
18349 +		ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
18350 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
18351 +		ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
18352 +		ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
18354 +		ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
18356 +		ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
18358 +	printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
18360 +	regVal = sysRegRead(PDMA_GLO_CFG);
18361 +	regVal &= 0x000000FF;
18362 +	sysRegWrite(PDMA_GLO_CFG, regVal);
18363 +	regVal=sysRegRead(PDMA_GLO_CFG);
18365 +	sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
18366 +	sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
18367 +	sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
18368 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18369 +	rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
18371 +	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
18373 +#if !defined (CONFIG_HW_SFQ)
+	/* without SFQ, fall back to the static RED/flow-control thresholds */
18374 +	/* Enable randon early drop and set drop threshold automatically */
18375 +	sysRegWrite(QDMA_FC_THRES, 0x174444);
18377 +	sysRegWrite(QDMA_HRED2, 0x0);
18378 +	set_fe_dma_glo_cfg();
18379 +#if defined (CONFIG_ARCH_MT7623)
18380 +	printk("Enable QDMA TX NDP coherence check and re-read mechanism\n");
+	/* bit 10 of QDMA_GLO_CFG: NDP coherence check + re-read (MT7623) */
18381 +	regVal=sysRegRead(QDMA_GLO_CFG);
18382 +	regVal = regVal | 0x400;
18383 +	sysRegWrite(QDMA_GLO_CFG, regVal);
18384 +	printk("***********QDMA_GLO_CFG=%x\n", sysRegRead(QDMA_GLO_CFG));
18390 +#if defined (CONFIG_HW_SFQ)
+/* last-parsed L4 source ports; one of the two is zeroed on each parse */
18394 +int udp_source_port=0;
18395 +int tcp_source_port=0;
+/*
+ * SfqParseLayerInfo - dissect an outgoing skb's L2/L3/L4 headers into the
+ * global SfqParseResult, classify the packet type (IPv4 HNAPT / IPv6 5T /
+ * IPv6 3T) and set the file-scope sfq_prot code consumed by the TX path.
+ *
+ * NOTE(review): sampled patch fragment — branch closes, the fragment
+ * handling after the IP_MF|IP_OFFSET tests, and the return were elided.
+ * Results go to globals with no locking visible here — presumably the
+ * caller serializes TX; confirm against the full source.
+ */
18397 +int SfqParseLayerInfo(struct sk_buff * skb)
18400 +	struct vlan_hdr *vh_sfq = NULL;
18401 +	struct ethhdr *eth_sfq = NULL;
18402 +	struct iphdr *iph_sfq = NULL;
18403 +	struct ipv6hdr *ip6h_sfq = NULL;
18404 +	struct tcphdr *th_sfq = NULL;
18405 +	struct udphdr *uh_sfq = NULL;
18406 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18407 +	struct vlan_hdr pseudo_vhdr_sfq;
18410 +	memset(&SfqParseResult, 0, sizeof(SfqParseResult));
+	/* L2: copy MACs and pick up the (possibly VLAN-nested) ethertype */
18412 +	eth_sfq = (struct ethhdr *)skb->data;
18413 +	memcpy(SfqParseResult.dmac, eth_sfq->h_dest, ETH_ALEN);
18414 +	memcpy(SfqParseResult.smac, eth_sfq->h_source, ETH_ALEN);
18415 +	SfqParseResult.eth_type = eth_sfq->h_proto;
18418 +	if (SfqParseResult.eth_type == htons(ETH_P_8021Q)){
18419 +		SfqParseResult.vlan1_gap = VLAN_HLEN;
18420 +		vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18421 +		SfqParseResult.eth_type = vh_sfq->h_vlan_encapsulated_proto;
18423 +		SfqParseResult.vlan1_gap = 0;
18428 +	LAYER2_HEADER(skb) = skb->data;
18429 +	LAYER3_HEADER(skb) = (skb->data + ETH_HLEN + (SfqParseResult.vlan1_gap));
18433 +	/* set layer4 start addr */
18434 +	if ((SfqParseResult.eth_type == htons(ETH_P_IP)) || (SfqParseResult.eth_type == htons(ETH_P_PPP_SES)
18435 +	    && SfqParseResult.ppp_tag == htons(PPP_IP))) {
18436 +		iph_sfq = (struct iphdr *)LAYER3_HEADER(skb);
18438 +		//prepare layer3/layer4 info
18439 +		memcpy(&SfqParseResult.iph, iph_sfq, sizeof(struct iphdr));
18440 +		if (iph_sfq->protocol == IPPROTO_TCP) {
+			/* L4 offset = IHL words * 4 from the start of the IP header */
18442 +			LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + (iph_sfq->ihl * 4));
18443 +			th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
18444 +			memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
18445 +			SfqParseResult.pkt_type = IPV4_HNAPT;
18446 +			//printk("tcp parsing\n");
18447 +			tcp_source_port = ntohs(SfqParseResult.th.source);
18448 +			udp_source_port = 0;
18449 +			#if(0) //for TCP ack, test use
18450 +			if(ntohl(SfqParseResult.iph.saddr) == 0xa0a0a04){ // tcp ack packet
18456 +			sfq_prot = 2;//IPV4_HNAPT
18457 +			proto_id = 1;//TCP
+			/* fragmented IPv4 handling elided in this fragment */
18458 +			if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
18461 +		} else if (iph_sfq->protocol == IPPROTO_UDP) {
18462 +			LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + iph_sfq->ihl * 4);
18463 +			uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
18464 +			memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
18465 +			SfqParseResult.pkt_type = IPV4_HNAPT;
18466 +			udp_source_port = ntohs(SfqParseResult.uh.source);
18467 +			tcp_source_port = 0;
18469 +			sfq_prot = 2;//IPV4_HNAPT
18470 +			proto_id =2;//UDP
18471 +			if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
18477 +	}else if (SfqParseResult.eth_type == htons(ETH_P_IPV6) ||
18478 +		  (SfqParseResult.eth_type == htons(ETH_P_PPP_SES) &&
18479 +		   SfqParseResult.ppp_tag == htons(PPP_IPV6))) {
18480 +		ip6h_sfq = (struct ipv6hdr *)LAYER3_HEADER(skb);
18481 +		memcpy(&SfqParseResult.ip6h, ip6h_sfq, sizeof(struct ipv6hdr));
+		/* IPv6: only the fixed 40-byte header is skipped — extension
+		 * headers are not walked here */
18483 +		if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
18484 +			LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
18485 +			th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
18486 +			memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
18487 +			SfqParseResult.pkt_type = IPV6_5T_ROUTE;
18488 +			sfq_prot = 4;//IPV6_5T
18489 +			#if(0) //for TCP ack, test use
18490 +			if(ntohl(SfqParseResult.ip6h.saddr.s6_addr32[3]) == 8){
18496 +		} else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
18497 +			LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
18498 +			uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
18499 +			memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
18500 +			SfqParseResult.pkt_type = IPV6_5T_ROUTE;
18502 +			sfq_prot = 4;//IPV6_5T
18505 +			sfq_prot = 3;//IPV6_3T
18513 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
18515 + unsigned int length=skb->len;
18516 + END_DEVICE* ei_local = netdev_priv(dev);
18518 + struct QDMA_txdesc *cpu_ptr;
18520 + struct QDMA_txdesc *dma_ptr __maybe_unused;
18521 + struct QDMA_txdesc *free_txd;
18523 +#if defined (CONFIG_RAETH_TSO)
18524 + struct iphdr *iph = NULL;
18525 + struct QDMA_txdesc *init_cpu_ptr;
18526 + struct tcphdr *th = NULL;
18527 + struct skb_frag_struct *frag;
18528 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
18529 + unsigned int len, size, offset, frag_txd_num;
18530 + int init_txd_idx, i;
18531 +#endif // CONFIG_RAETH_TSO //
18533 +#if defined (CONFIG_RAETH_TSOV6)
18534 + struct ipv6hdr *ip6h = NULL;
18537 +#ifdef CONFIG_PSEUDO_SUPPORT
18538 + PSEUDO_ADAPTER *pAd;
18540 + //cpu_ptr = PHYS_TO_VIRT(ei_local->tx_cpu_ptr);
18541 + //dma_ptr = PHYS_TO_VIRT(ei_local->tx_dma_ptr);
18542 + //ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
18543 + cpu_ptr = (ei_local->tx_cpu_ptr);
18544 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
18545 + cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
18546 + dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
18547 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
18548 + ei_local->skb_free[ctx_offset] = skb;
18549 +#if defined (CONFIG_RAETH_TSO)
18550 + init_cpu_ptr = cpu_ptr;
18551 + init_txd_idx = ctx_offset;
18554 +#if !defined (CONFIG_RAETH_TSO)
18556 + //2. prepare data
18557 + //cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data);
18558 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
18559 + cpu_ptr->txd_info3.SDL = skb->len;
18560 +#if defined (CONFIG_HW_SFQ)
18561 + SfqParseLayerInfo(skb);
18562 + cpu_ptr->txd_info4.VQID0 = 1;//1:HW hash 0:CPU
18565 +#if(0)// for tcp ack use, test use
18566 + if (ack_packt==1){
18567 + cpu_ptr->txd_info3.QID = 0x0a;
18568 + //cpu_ptr->txd_info3.VQID = 0;
18570 + cpu_ptr->txd_info3.QID = 0;
18573 + cpu_ptr->txd_info3.PROT = sfq_prot;
18574 + cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
18577 + if (gmac_no == 1) {
18578 + cpu_ptr->txd_info4.FPORT = 1;
18580 + cpu_ptr->txd_info4.FPORT = 2;
18583 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18584 +#ifdef CONFIG_PSEUDO_SUPPORT
18585 + if((lan_wan_separate==1) && (gmac_no==2)){
18586 + cpu_ptr->txd_info3.QID += 8;
18587 +#if defined (CONFIG_HW_SFQ)
18588 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18589 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18593 +#if defined (CONFIG_HW_SFQ)
18594 + if((lan_wan_separate==1) && (gmac_no==1)){
18595 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18596 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18600 +#endif //end CONFIG_PSEUDO_SUPPORT
18603 + printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18604 + printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18607 + iph = (struct iphdr *)skb_network_header(skb);
18608 + if (iph->tos == 0xe0)
18609 + cpu_ptr->txd_info3.QID = 3;
18610 + else if (iph->tos == 0xa0)
18611 + cpu_ptr->txd_info3.QID = 2;
18612 + else if (iph->tos == 0x20)
18613 + cpu_ptr->txd_info3.QID = 1;
18615 + cpu_ptr->txd_info3.QID = 0;
18618 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
18619 + if (skb->ip_summed == CHECKSUM_PARTIAL){
18620 + cpu_ptr->txd_info4.TUI_CO = 7;
18622 + cpu_ptr->txd_info4.TUI_CO = 0;
18626 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18627 + if(vlan_tx_tag_present(skb)) {
18628 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
18630 + cpu_ptr->txd_info4.VLAN_TAG = 0;
18634 +#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used
18636 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18637 + cpu_ptr->txd_info3.QID += 8;
18638 +#if defined (CONFIG_HW_SFQ)
18639 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18640 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18644 +#if defined (CONFIG_HW_SFQ)
18645 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18646 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18647 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18651 +#endif // CONFIG_RAETH_HW_VLAN_TX
18654 +//no hw van, no GE2, web UI used
18655 +#ifndef CONFIG_PSEUDO_SUPPORT
18656 +#ifndef CONFIG_RAETH_HW_VLAN_TX
18657 + if(lan_wan_separate==1){
18658 + struct vlan_hdr *vh = NULL;
18659 + unsigned short vlanid = 0;
18660 + unsigned short vlan_TCI;
18661 + vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18662 + vlan_TCI = vh->h_vlan_TCI;
18663 + vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18664 + if(vlanid == 2)//to wan
18666 + cpu_ptr->txd_info3.QID += 8;
18667 +#if defined (CONFIG_HW_SFQ)
18668 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18669 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18672 + }else if(vlanid == 1){ //to lan
18673 +#if defined (CONFIG_HW_SFQ)
18674 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18675 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18682 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
18683 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
18684 + if(ra_sw_nat_hook_rx!= NULL){
18685 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18686 + FOE_MAGIC_TAG(skb) = 0;
18691 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18692 + cpu_ptr->txd_info4.UDF = 0x2F;
18695 +#if defined (CONFIG_MIPS)
18696 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
18698 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
18700 + cpu_ptr->txd_info3.SWC_bit = 1;
18702 + //3. get NULL TXD and decrease free_tx_num by 1.
18703 + ctx_offset = get_free_txd(&free_txd);
18704 + if(ctx_offset == NUM_TX_DESC) {
18705 + printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
18709 + //4. hook new TXD in the end of queue
18710 + //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18711 + cpu_ptr->txd_info2.NDP = (free_txd);
18714 + //5. move CPU_PTR to new TXD
18715 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18716 + ei_local->tx_cpu_ptr = (free_txd);
18717 + cpu_ptr->txd_info3.OWN_bit = 0;
18718 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
18721 + printk("----------------------------------------------\n");
18722 + printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
18723 + printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
18724 + printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
18725 + printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
18728 +#else //#if !defined (CONFIG_RAETH_TSO)
18729 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
18730 + cpu_ptr->txd_info3.SDL = (length - skb->data_len);
18731 + cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
18732 +#if defined (CONFIG_HW_SFQ)
18733 + SfqParseLayerInfo(skb);
18734 + // printk("tcp_source_port=%d\n", tcp_source_port);
18736 + cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
18737 + if (tcp_source_port==1000) cpu_ptr->txd_info3.VQID = 0;
18738 + else if (tcp_source_port==1100) cpu_ptr->txd_info3.VQID = 1;
18739 + else if (tcp_source_port==1200) cpu_ptr->txd_info3.VQID = 2;
18740 + else cpu_ptr->txd_info3.VQID = 0;
18742 + cpu_ptr->txd_info4.VQID0 = 1;
18743 + cpu_ptr->txd_info3.PROT = sfq_prot;
18744 + cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
18747 + if (gmac_no == 1) {
18748 + cpu_ptr->txd_info4.FPORT = 1;
18750 + cpu_ptr->txd_info4.FPORT = 2;
18753 + cpu_ptr->txd_info4.TSO = 0;
18754 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18755 +#ifdef CONFIG_PSEUDO_SUPPORT //web UI used tso
18756 + if((lan_wan_separate==1) && (gmac_no==2)){
18757 + cpu_ptr->txd_info3.QID += 8;
18758 +#if defined (CONFIG_HW_SFQ)
18759 + if(web_sfq_enable == 1 &&(skb->mark == 2)){
18760 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18764 +#if defined (CONFIG_HW_SFQ)
18765 + if((lan_wan_separate==1) && (gmac_no==1)){
18766 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18767 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18771 +#endif //CONFIG_PSEUDO_SUPPORT
18773 + printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18774 + printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18776 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
18777 + if (skb->ip_summed == CHECKSUM_PARTIAL){
18778 + cpu_ptr->txd_info4.TUI_CO = 7;
18780 + cpu_ptr->txd_info4.TUI_CO = 0;
18784 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18785 + if(vlan_tx_tag_present(skb)) {
18786 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
18788 + cpu_ptr->txd_info4.VLAN_TAG = 0;
18791 +#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used tso
18793 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18794 + //cpu_ptr->txd_info3.QID += 8;
18795 + cpu_ptr->txd_info3.QID += 8;
18796 +#if defined (CONFIG_HW_SFQ)
18797 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18798 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18802 +#if defined (CONFIG_HW_SFQ)
18803 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18804 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18805 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18809 +#endif // CONFIG_RAETH_HW_VLAN_TX
18812 +//no hw van, no GE2, web UI used
18813 +#ifndef CONFIG_PSEUDO_SUPPORT
18814 +#ifndef CONFIG_RAETH_HW_VLAN_TX
18815 + if(lan_wan_separate==1){
18816 + struct vlan_hdr *vh = NULL;
18817 + unsigned short vlanid = 0;
18818 + unsigned short vlan_TCI;
18819 + vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18820 + vlan_TCI = vh->h_vlan_TCI;
18821 + vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18822 + if(vlanid == 2)//eth2.2 to wan
18824 + cpu_ptr->txd_info3.QID += 8;
18825 +#if defined (CONFIG_HW_SFQ)
18826 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18827 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18830 + }else if(!strcmp(netdev, "eth2.1")){ // eth2.1 to lan
18831 +#if defined (CONFIG_HW_SFQ)
18832 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18833 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18841 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
18842 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
18843 + if(ra_sw_nat_hook_rx!= NULL){
18844 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18845 + FOE_MAGIC_TAG(skb) = 0;
18850 + cpu_ptr->txd_info3.SWC_bit = 1;
18852 + ctx_offset = get_free_txd(&free_txd);
18853 + if(ctx_offset == NUM_TX_DESC) {
18854 + printk("get_free_txd fail\n");
18857 + //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18858 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18859 + cpu_ptr->txd_info2.NDP = free_txd;
18860 + ei_local->tx_cpu_ptr = free_txd;
18863 + if(nr_frags > 0) {
18864 + for(i=0;i<nr_frags;i++) {
18865 + // 1. set or get init value for current fragment
18867 + frag = &skb_shinfo(skb)->frags[i];
18868 + len = frag->size;
18869 + frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
18870 + for(frag_txd_num = frag_txd_num;frag_txd_num > 0; frag_txd_num --){
18871 + // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
18872 + if(len < MAX_TXD_LEN)
18875 + size = MAX_TXD_LEN;
18877 + //3. Update TXD info
18878 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
18879 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18880 +#ifdef CONFIG_PSEUDO_SUPPORT //QoS Web UI used , nr_frags
18881 + if((lan_wan_separate==1) && (gmac_no==2)){
18882 + //cpu_ptr->txd_info3.QID += 8;
18883 + cpu_ptr->txd_info3.QID += 8;
18884 +#if defined (CONFIG_HW_SFQ)
18885 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18886 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18890 +#if defined (CONFIG_HW_SFQ)
18891 + if((lan_wan_separate==1) && (gmac_no==1)){
18892 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18893 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18897 +#endif //CONFIG_PSEUDO_SUPPORT
18899 +//QoS web used, nr_frags
18900 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18901 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18902 + cpu_ptr->txd_info3.QID += 8;
18903 +#if defined (CONFIG_HW_SFQ)
18904 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18905 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18909 +#if defined (CONFIG_HW_SFQ)
18910 + if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18911 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18912 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18916 +#endif // CONFIG_RAETH_HW_VLAN_TX
18917 +//no hw vlan, no GE2, web UI used
18918 +#ifndef CONFIG_PSEUDO_SUPPORT
18919 +#ifndef CONFIG_RAETH_HW_VLAN_TX
18920 + if(lan_wan_separate==1){
18921 + struct vlan_hdr *vh = NULL;
18922 + unsigned short vlanid = 0;
18923 + unsigned short vlan_TCI;
18924 + vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18925 + vlan_TCI = vh->h_vlan_TCI;
18926 + vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18927 + if(vlanid == 2)//eth2.2 to wan
18929 + cpu_ptr->txd_info3.QID += 8;
18930 +#if defined (CONFIG_HW_SFQ)
18931 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18932 + cpu_ptr->txd_info3.QID = HwSfqQUp;
18936 + }else if(vlanid == 1){ // eth2.1 to lan
18937 +#if defined (CONFIG_HW_SFQ)
18938 + if(web_sfq_enable==1 &&(skb->mark == 2)){
18939 + cpu_ptr->txd_info3.QID = HwSfqQDl;
18947 + printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18948 + printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18950 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
18951 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
18953 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
18954 +// printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
18956 + cpu_ptr->txd_info3.SDL = size;
18957 + if( (i==(nr_frags-1)) && (frag_txd_num == 1))
18958 + cpu_ptr->txd_info3.LS_bit = 1;
18960 + cpu_ptr->txd_info3.LS_bit = 0;
18961 + cpu_ptr->txd_info3.OWN_bit = 0;
18962 + cpu_ptr->txd_info3.SWC_bit = 1;
18963 + //4. Update skb_free for housekeeping
18964 + ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID
18966 + //5. Get next TXD
18967 + ctx_offset = get_free_txd(&free_txd);
18968 + //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18969 + //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18970 + cpu_ptr->txd_info2.NDP = free_txd;
18971 + ei_local->tx_cpu_ptr = free_txd;
18972 + //6. Update offset and len.
18977 + ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
18980 + if(skb_shinfo(skb)->gso_segs > 1) {
18982 +// TsoLenUpdate(skb->len);
18984 + /* TCP over IPv4 */
18985 + iph = (struct iphdr *)skb_network_header(skb);
18986 +#if defined (CONFIG_RAETH_TSOV6)
18987 + /* TCP over IPv6 */
18988 + ip6h = (struct ipv6hdr *)skb_network_header(skb);
18990 + if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
18991 + th = (struct tcphdr *)skb_transport_header(skb);
18992 +#if defined (CONFIG_HW_SFQ)
18994 + init_cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
18995 + if (tcp_source_port==1000) init_cpu_ptr->txd_info3.VQID = 0;
18996 + else if (tcp_source_port==1100) init_cpu_ptr->txd_info3.VQID = 1;
18997 + else if (tcp_source_port==1200) init_cpu_ptr->txd_info3.VQID = 2;
18998 + else cpu_ptr->txd_info3.VQID = 0;
19000 + init_cpu_ptr->txd_info4.VQID0 = 1;
19001 + init_cpu_ptr->txd_info3.PROT = sfq_prot;
19002 + init_cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
19005 + init_cpu_ptr->txd_info4.TSO = 1;
19007 + th->check = htons(skb_shinfo(skb)->gso_size);
19008 +#if defined (CONFIG_MIPS)
19009 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19011 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19015 +#if defined (CONFIG_RAETH_TSOV6)
19016 + /* TCP over IPv6 */
19017 + //ip6h = (struct ipv6hdr *)skb_network_header(skb);
19018 + else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
19019 + th = (struct tcphdr *)skb_transport_header(skb);
19020 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19021 + init_cpu_ptr->txd_info4.TSO = 1;
19023 + init_cpu_ptr->txd_info4.TSO = 1;
19025 + th->check = htons(skb_shinfo(skb)->gso_size);
19026 +#if defined (CONFIG_MIPS)
19027 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19029 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19036 +// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19038 + init_cpu_ptr->txd_info3.OWN_bit = 0;
19039 +#endif // CONFIG_RAETH_TSO //
19041 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19043 +#ifdef CONFIG_PSEUDO_SUPPORT
19044 + if (gmac_no == 2) {
19045 + if (ei_local->PseudoDev != NULL) {
19046 + pAd = netdev_priv(ei_local->PseudoDev);
19047 + pAd->stat.tx_packets++;
19048 + pAd->stat.tx_bytes += length;
19054 + ei_local->stat.tx_packets++;
19055 + ei_local->stat.tx_bytes += skb->len;
19057 +#ifdef CONFIG_RAETH_NAPI
19058 + if ( ei_local->tx_full == 1) {
19059 + ei_local->tx_full = 0;
19060 + netif_wake_queue(dev);
19067 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
19069 + END_DEVICE *ei_local = netdev_priv(dev);
19070 + unsigned long flags;
19071 + unsigned int num_of_txd = 0;
19072 +#if defined (CONFIG_RAETH_TSO)
19073 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
19074 + struct skb_frag_struct *frag;
19076 +#ifdef CONFIG_PSEUDO_SUPPORT
19077 + PSEUDO_ADAPTER *pAd;
19080 +#if !defined(CONFIG_RA_NAT_NONE)
19081 + if(ra_sw_nat_hook_tx!= NULL)
19083 +// spin_lock_irqsave(&ei_local->page_lock, flags);
19084 + if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
19085 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
19088 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
19096 + dev->trans_start = jiffies; /* save the timestamp */
19097 + spin_lock_irqsave(&ei_local->page_lock, flags);
19098 +#if defined (CONFIG_MIPS)
19099 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19101 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
19105 +//check free_txd_num before calling rt288_eth_send()
19107 +#if defined (CONFIG_RAETH_TSO)
19108 + // num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
19109 + if(nr_frags != 0){
19110 + for(i=0;i<nr_frags;i++) {
19111 + frag = &skb_shinfo(skb)->frags[i];
19112 + num_of_txd += cal_frag_txd_num(frag->size);
19120 +#if defined(CONFIG_RALINK_MT7621)
19121 + if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
19122 + ei_xmit_housekeeping(0);
19127 + if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
19129 + rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
19130 + if (ei_local->free_txd_num < 3)
19132 +#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)
19133 + netif_stop_queue(dev);
19134 +#ifdef CONFIG_PSEUDO_SUPPORT
19135 + netif_stop_queue(ei_local->PseudoDev);
19137 + tx_ring_full = 1;
19141 +#ifdef CONFIG_PSEUDO_SUPPORT
19142 + if (gmac_no == 2)
19144 + if (ei_local->PseudoDev != NULL)
19146 + pAd = netdev_priv(ei_local->PseudoDev);
19147 + pAd->stat.tx_dropped++;
19151 + ei_local->stat.tx_dropped++;
19153 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
19156 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
19160 +void ei_xmit_housekeeping(unsigned long unused)
19162 + struct net_device *dev = dev_raether;
19163 + END_DEVICE *ei_local = netdev_priv(dev);
19164 +#ifndef CONFIG_RAETH_NAPI
19165 + unsigned long reg_int_mask=0;
19167 + struct QDMA_txdesc *dma_ptr = NULL;
19168 + struct QDMA_txdesc *cpu_ptr = NULL;
19169 + struct QDMA_txdesc *tmp_ptr = NULL;
19170 + unsigned int ctx_offset = 0;
19171 + unsigned int dtx_offset = 0;
19173 + cpu_ptr = sysRegRead(QTX_CRX_PTR);
19174 + dma_ptr = sysRegRead(QTX_DRX_PTR);
19175 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
19176 + dtx_offset = GET_TXD_OFFSET(&dma_ptr);
19177 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19178 + dma_ptr = (ei_local->txd_pool + (dtx_offset));
19180 + while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
19181 + //1. keep cpu next TXD
19182 + tmp_ptr = cpu_ptr->txd_info2.NDP;
19184 + put_free_txd(ctx_offset);
19185 + //3. update ctx_offset and free skb memory
19186 + ctx_offset = GET_TXD_OFFSET(&tmp_ptr);
19187 +#if defined (CONFIG_RAETH_TSO)
19188 + if(ei_local->skb_free[ctx_offset]!=(struct sk_buff *)0xFFFFFFFF) {
19189 + dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
19192 + dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
19194 + ei_local->skb_free[ctx_offset] = 0;
19196 + netif_wake_queue(dev);
19197 +#ifdef CONFIG_PSEUDO_SUPPORT
19198 + netif_wake_queue(ei_local->PseudoDev);
19201 + //4. update cpu_ptr
19202 + cpu_ptr = (ei_local->txd_pool + ctx_offset);
19204 + sysRegWrite(QTX_CRX_PTR, (ei_local->phy_txd_pool + (ctx_offset << 4)));
19205 +#ifndef CONFIG_RAETH_NAPI
19206 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
19207 +#if defined (DELAY_INT)
19208 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
19211 + sysRegWrite(QFE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
19213 +#endif //CONFIG_RAETH_NAPI//
19216 +EXPORT_SYMBOL(ei_start_xmit);
19217 +EXPORT_SYMBOL(ei_xmit_housekeeping);
19218 +EXPORT_SYMBOL(fe_dma_init);
19219 +EXPORT_SYMBOL(rt2880_eth_send);
19221 +++ b/drivers/net/ethernet/raeth/raether_qdma_mt7623.c
19223 +#include <linux/module.h>
19224 +#include <linux/version.h>
19225 +#include <linux/kernel.h>
19226 +#include <linux/types.h>
19227 +#include <linux/pci.h>
19228 +#include <linux/init.h>
19229 +#include <linux/skbuff.h>
19230 +#include <linux/if_vlan.h>
19231 +#include <linux/if_ether.h>
19232 +#include <linux/fs.h>
19233 +#include <asm/uaccess.h>
19234 +#include <asm/rt2880/surfboardint.h>
19235 +#if defined (CONFIG_RAETH_TSO)
19236 +#include <linux/tcp.h>
19237 +#include <net/ipv6.h>
19238 +#include <linux/ip.h>
19239 +#include <net/ip.h>
19240 +#include <net/tcp.h>
19241 +#include <linux/in.h>
19242 +#include <linux/ppp_defs.h>
19243 +#include <linux/if_pppox.h>
19245 +#include <linux/delay.h>
19246 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
19247 +#include <linux/sched.h>
19250 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
19251 +#include <asm/rt2880/rt_mmap.h>
19253 +#include <linux/libata-compat.h>
19256 +#include "ra2882ethreg.h"
19257 +#include "raether.h"
19258 +#include "ra_mac.h"
19259 +#include "ra_ioctl.h"
19260 +#include "ra_rfrw.h"
19261 +#ifdef CONFIG_RAETH_NETLINK
19262 +#include "ra_netlink.h"
19264 +#if defined (CONFIG_RAETH_QOS)
19265 +#include "ra_qos.h"
19268 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19269 +#include "../../../net/nat/hw_nat/ra_nat.h"
19273 +#if !defined(CONFIG_RA_NAT_NONE)
19276 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
19277 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
19280 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
19283 +#include <asm/mipsregs.h>
19284 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
19285 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
19286 +#endif /* CONFIG_RA_CLASSIFIER */
19288 +#if defined (CONFIG_RALINK_RT3052_MP2)
19289 +int32_t mcast_rx(struct sk_buff * skb);
19290 +int32_t mcast_tx(struct sk_buff * skb);
19293 +#ifdef RA_MTD_RW_BY_NUM
19294 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
19296 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
19299 +/* gmac driver feature set config */
19300 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
19303 +#define DELAY_INT 1
19306 +//#define CONFIG_UNH_TEST
19307 +/* end of config */
19309 +#if defined (CONFIG_RAETH_JUMBOFRAME)
19310 +#define MAX_RX_LENGTH 4096
19312 +#define MAX_RX_LENGTH 1536
19315 +extern struct net_device *dev_raether;
19317 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19318 +static int rx_dma_owner_idx1;
19319 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19320 +static int rx_calc_idx1;
19323 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19324 +static int rx_calc_idx0;
19325 +static unsigned long tx_cpu_owner_idx0=0;
19327 +static unsigned long tx_ring_full=0;
19329 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
19330 +#include "ra_ethtool.h"
19331 +extern struct ethtool_ops ra_ethtool_ops;
19332 +#ifdef CONFIG_PSEUDO_SUPPORT
19333 +extern struct ethtool_ops ra_virt_ethtool_ops;
19334 +#endif // CONFIG_PSEUDO_SUPPORT //
19335 +#endif // CONFIG_ETHTOOL //
19337 +#ifdef CONFIG_RALINK_VISTA_BASIC
19338 +int is_switch_175c = 1;
19341 +//skb->mark to queue mapping table
19342 +extern unsigned int M2Q_table[64];
19345 +#define KSEG1 0xa0000000
19346 +#if defined (CONFIG_MIPS)
19347 +#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
19348 +#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
19350 +#define PHYS_TO_VIRT(x) phys_to_virt(x)
19351 +#define VIRT_TO_PHYS(x) virt_to_phys(x)
19355 +extern void set_fe_dma_glo_cfg(void);
19360 + * @brief: get the TXD index from its address
19362 + * @param: cpu_ptr
19364 + * @return: TXD index
19367 +static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
19369 + struct net_device *dev = dev_raether;
19370 + END_DEVICE *ei_local = netdev_priv(dev);
19372 + //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
19373 + //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
19375 + ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
19376 + return ctx_offset;
19383 + * @brief cal txd number for a page
19387 + * @return frag_txd_num
19390 +unsigned int cal_frag_txd_num(unsigned int size)
19392 + unsigned int frag_txd_num = 0;
19396 + if(size > MAX_TXD_LEN){
19398 + size -= MAX_TXD_LEN;
19404 + return frag_txd_num;
19409 + * @brief get free TXD from TXD queue
19411 + * @param free_txd
19415 +static int get_free_txd(struct QDMA_txdesc **free_txd)
19417 + struct net_device *dev = dev_raether;
19418 + END_DEVICE *ei_local = netdev_priv(dev);
19419 + unsigned int tmp_idx;
19421 + if(ei_local->free_txd_num > 0){
19422 + tmp_idx = ei_local->free_txd_head;
19423 + ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
19424 + ei_local->free_txd_num -= 1;
19425 + //*free_txd = &ei_local->txd_pool[tmp_idx];
19426 + *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
19429 + return NUM_TX_DESC;
19434 + * @brief add free TXD into TXD queue
19436 + * @param free_txd
19440 +int put_free_txd(int free_txd_idx)
19442 + struct net_device *dev = dev_raether;
19443 + END_DEVICE *ei_local = netdev_priv(dev);
19444 + ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
19445 + ei_local->free_txd_tail = free_txd_idx;
19446 + ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
19447 + ei_local->free_txd_num += 1;
19451 +/*define qdma initial alloc*/
19457 + * @return 0: fail
19460 +bool qdma_tx_desc_alloc(void)
19462 + struct net_device *dev = dev_raether;
19463 + END_DEVICE *ei_local = netdev_priv(dev);
19464 + struct QDMA_txdesc *free_txd = NULL;
19465 + unsigned int txd_idx;
19469 + ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
19470 + printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
19472 + if (ei_local->txd_pool == NULL) {
19473 + printk("adapter->txd_pool allocation failed!\n");
19476 + printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
19477 + //set all txd_pool_info to 0.
19478 + for ( i = 0; i < NUM_TX_DESC; i++)
19480 + ei_local->skb_free[i]= 0;
19481 + ei_local->txd_pool_info[i] = i + 1;
19482 + ei_local->txd_pool[i].txd_info3.LS_bit = 1;
19483 + ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
19486 + ei_local->free_txd_head = 0;
19487 + ei_local->free_txd_tail = NUM_TX_DESC - 1;
19488 + ei_local->free_txd_num = NUM_TX_DESC;
19491 + //get free txd from txd pool
19492 + txd_idx = get_free_txd(&free_txd);
19493 + if( txd_idx == NUM_TX_DESC) {
19494 + printk("get_free_txd fail\n");
19498 + //add null TXD for transmit
19501 + ei_local->tx_dma_ptr = free_txd;
19502 + ei_local->tx_cpu_ptr = free_txd;
19503 + //ei_local->tx_dma_ptr = virt_to_phys(free_txd);
19504 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19505 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19506 + sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
19508 + printk("kurtis: free_txd = 0x%x!!!\n", free_txd);
19509 + printk("kurtis: ei_local->tx_dma_ptr = 0x%x!!!\n", ei_local->tx_dma_ptr);
19511 + //get free txd from txd pool
19513 + txd_idx = get_free_txd(&free_txd);
19514 + if( txd_idx == NUM_TX_DESC) {
19515 + printk("get_free_txd fail\n");
19518 + // add null TXD for release
19519 + //sysRegWrite(QTX_CRX_PTR, virt_to_phys(free_txd));
19520 + //sysRegWrite(QTX_DRX_PTR, virt_to_phys(free_txd));
19521 + sysRegWrite(QTX_CRX_PTR, free_txd);
19522 + sysRegWrite(QTX_DRX_PTR, free_txd);
19524 + printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
19526 + printk(" POOL HEAD_PTR | DMA_PTR | CPU_PTR \n");
19527 + printk("----------------+---------+--------\n");
19529 + printk(" 0x%p 0x%08X 0x%08X\n",ei_local->txd_pool,
19530 + ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
19535 +bool fq_qdma_init(void)
19537 + struct QDMA_txdesc *free_head = NULL;
19538 + unsigned int free_head_phy;
19539 + unsigned int free_tail_phy;
19540 + unsigned int *free_page_head = NULL;
19541 + unsigned int free_page_head_phy;
19544 + free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &free_head_phy);
19545 + if (unlikely(free_head == NULL)){
19546 +		printk(KERN_ERR "QDMA FQ descriptor not available...\n");
19549 + memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
19551 + free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &free_page_head_phy);
19552 + if (unlikely(free_page_head == NULL)){
19553 + printk(KERN_ERR "QDMA FQ pager not available...\n");
19556 + for (i=0; i < NUM_QDMA_PAGE; i++) {
19557 + free_head[i].txd_info1.SDP = (free_page_head_phy + (i * QDMA_PAGE_SIZE));
19558 + if(i < (NUM_QDMA_PAGE-1)){
19559 + free_head[i].txd_info2.NDP = (free_head_phy + ((i+1) * sizeof(struct QDMA_txdesc)));
19563 + printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
19564 + printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
19565 + printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
19566 + printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
19569 + free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
19572 + free_tail_phy = (free_head_phy + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
19574 + printk("free_head_phy is 0x%x!!!\n", free_head_phy);
19575 + printk("free_tail_phy is 0x%x!!!\n", free_tail_phy);
19576 + sysRegWrite(QDMA_FQ_HEAD, (u32)free_head_phy);
19577 + sysRegWrite(QDMA_FQ_TAIL, (u32)free_tail_phy);
19578 + sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
19579 + sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
19583 +int fe_dma_init(struct net_device *dev)
19587 + unsigned int regVal;
19588 + END_DEVICE* ei_local = netdev_priv(dev);
19594 + regVal = sysRegRead(QDMA_GLO_CFG);
19595 + if((regVal & RX_DMA_BUSY))
19597 + printk("\n RX_DMA_BUSY !!! ");
19600 + if((regVal & TX_DMA_BUSY))
19602 + printk("\n TX_DMA_BUSY !!! ");
19607 + /*tx desc alloc, add a NULL TXD to HW*/
19609 + qdma_tx_desc_alloc();
19612 + /* Initial RX Ring 0*/
19613 +#ifdef CONFIG_32B_DESC
19614 + ei_local->rx_ring0 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19615 + ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
19617 + ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
19619 + for (i = 0; i < NUM_QRX_DESC; i++) {
19620 + memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
19621 + ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
19622 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19623 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19624 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19626 + ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
19628 + ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19630 + printk("QDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
19632 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19633 + /* Initial RX Ring 1*/
19634 +#ifdef CONFIG_32B_DESC
19635 + ei_local->rx_ring1 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19636 + ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
19638 + ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
19640 + for (i = 0; i < NUM_QRX_DESC; i++) {
19641 + memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
19642 + ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
19643 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19644 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19645 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19647 + ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
19649 + ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19651 + printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
19654 + regVal = sysRegRead(QDMA_GLO_CFG);
19655 + regVal &= 0x000000FF;
19656 + sysRegWrite(QDMA_GLO_CFG, regVal);
19657 + regVal=sysRegRead(QDMA_GLO_CFG);
19659 + /* Tell the adapter where the TX/RX rings are located. */
19661 + sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_rx_ring0));
19662 + sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
19663 + sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
19664 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19665 + rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
19667 + sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
19668 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19669 + sysRegWrite(QRX_BASE_PTR_1, phys_to_bus((u32) ei_local->phy_rx_ring1));
19670 + sysRegWrite(QRX_MAX_CNT_1, cpu_to_le32((u32) NUM_QRX_DESC));
19671 + sysRegWrite(QRX_CRX_IDX_1, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
19672 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19673 + rx_calc_idx1 = rx_dma_owner_idx1 = sysRegRead(QRX_CRX_IDX_1);
19675 + sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX1);
19678 +#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
19679 + /* Initial PDMA RX Ring 0*/
19680 +#ifdef CONFIG_32B_DESC
19681 + ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19682 + ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
19684 + ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
19686 + for (i = 0; i < NUM_RX_DESC; i++) {
19687 + memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
19688 + ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
19689 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19690 + ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19691 + ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19693 + ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
19695 + ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19697 + printk("PDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
19699 + regVal = sysRegRead(PDMA_GLO_CFG);
19700 + regVal &= 0x000000FF;
19701 + sysRegWrite(PDMA_GLO_CFG, regVal);
19702 + regVal=sysRegRead(PDMA_GLO_CFG);
19704 + sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
19705 + sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
19706 + sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
19707 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19708 + rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
19710 + sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
19713 + /* Enable randon early drop and set drop threshold automatically */
19714 + sysRegWrite(QDMA_FC_THRES, 0x174444);
19715 + sysRegWrite(QDMA_HRED2, 0x0);
19716 + set_fe_dma_glo_cfg();
19721 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
19723 + unsigned int length=skb->len;
19724 + END_DEVICE* ei_local = netdev_priv(dev);
19726 + struct QDMA_txdesc *cpu_ptr;
19728 + struct QDMA_txdesc *dma_ptr __maybe_unused;
19729 + struct QDMA_txdesc *free_txd;
19730 + unsigned int ctx_offset = 0;
19731 + unsigned int dtx_offset = 0;
19732 +#if defined (CONFIG_RAETH_TSO)
19733 + struct iphdr *iph = NULL;
19734 + struct QDMA_txdesc *init_cpu_ptr;
19735 + struct tcphdr *th = NULL;
19736 + struct skb_frag_struct *frag;
19737 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
19738 + unsigned int len, size, offset, frag_txd_num;
19739 + int init_txd_idx, i;
19740 +#endif // CONFIG_RAETH_TSO //
19742 +#if defined (CONFIG_RAETH_TSOV6)
19743 + struct ipv6hdr *ip6h = NULL;
19746 +#ifdef CONFIG_PSEUDO_SUPPORT
19747 + PSEUDO_ADAPTER *pAd;
19749 + cpu_ptr = (ei_local->tx_cpu_ptr);
19750 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
19751 + cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
19752 + dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
19754 + //dma_ptr = (ei_local->tx_dma_ptr);
19757 + /*only modify virtual address*/
19758 + //cpu_ptr = (ei_local->txd_pool) + (ctx_offset * sizeof(struct QDMA_txdesc));
19759 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19761 + //dtx_offset = GET_TXD_OFFSET(&dma_ptr);
19762 + //dma_ptr = (ei_local->txd_pool) + (dtx_offset * sizeof(struct QDMA_txdesc));
19764 + //printk("eth_send ctx_offset = 0x%x!!!\n", ctx_offset);
19765 + //printk("eth_send dtx_offset = 0x%x!!!\n", dtx_offset);
19766 + //printk("eth_send ei_local->txd_pool = 0x%x!!!\n", ei_local->txd_pool);
19767 + //printk("eth_send cpu_ptr = 0x%x!!!\n", cpu_ptr);
19768 + //printk("eth_send ctx_offset = 0x%x!!!\n", ctx_offset);
19769 + //printk("eth_send ei_local->skb_free[ctx_offset] = 0x%x!!!\n", skb);
19772 + ei_local->skb_free[ctx_offset] = skb;
19773 +#if defined (CONFIG_RAETH_TSO)
19774 + init_cpu_ptr = cpu_ptr;
19775 + init_txd_idx = ctx_offset;
19778 +#if !defined (CONFIG_RAETH_TSO)
19780 + //2. prepare data
19781 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
19782 + cpu_ptr->txd_info3.SDL = skb->len;
19784 + if (gmac_no == 1) {
19785 + cpu_ptr->txd_info4.FPORT = 1;
19787 + cpu_ptr->txd_info4.FPORT = 2;
19791 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19793 + iph = (struct iphdr *)skb_network_header(skb);
19794 + if (iph->tos == 0xe0)
19795 + cpu_ptr->txd_info3.QID = 3;
19796 + else if (iph->tos == 0xa0)
19797 + cpu_ptr->txd_info3.QID = 2;
19798 + else if (iph->tos == 0x20)
19799 + cpu_ptr->txd_info3.QID = 1;
19801 + cpu_ptr->txd_info3.QID = 0;
19804 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
19805 + if (skb->ip_summed == CHECKSUM_PARTIAL){
19806 + cpu_ptr->txd_info4.TUI_CO = 7;
19808 + cpu_ptr->txd_info4.TUI_CO = 0;
19812 +#ifdef CONFIG_RAETH_HW_VLAN_TX
19813 + if(vlan_tx_tag_present(skb)) {
19814 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
19816 + cpu_ptr->txd_info4.VLAN_TAG = 0;
19820 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19821 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
19822 + if(ra_sw_nat_hook_rx!= NULL){
19823 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19824 + FOE_MAGIC_TAG(skb) = 0;
19829 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19830 + cpu_ptr->txd_info4.UDF = 0x2F;
19833 +#if defined (CONFIG_MIPS)
19834 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19836 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
19838 + cpu_ptr->txd_info3.SWC_bit = 1;
19840 + //3. get NULL TXD and decrease free_tx_num by 1.
19841 + ctx_offset = get_free_txd(&free_txd);
19842 + if(ctx_offset == NUM_TX_DESC) {
19843 + printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
19847 + //4. hook new TXD in the end of queue
19848 + //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19849 + cpu_ptr->txd_info2.NDP = (free_txd);
19852 + //5. move CPU_PTR to new TXD
19853 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19854 + ei_local->tx_cpu_ptr = (free_txd);
19855 + cpu_ptr->txd_info3.OWN_bit = 0;
19856 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19859 + printk("----------------------------------------------\n");
19860 + printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
19861 + printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
19862 + printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
19863 + printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
19866 +#else //#if !defined (CONFIG_RAETH_TSO)
19867 + cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
19868 + cpu_ptr->txd_info3.SDL = (length - skb->data_len);
19869 + cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
19870 + if (gmac_no == 1) {
19871 + cpu_ptr->txd_info4.FPORT = 1;
19873 + cpu_ptr->txd_info4.FPORT = 2;
19876 + cpu_ptr->txd_info4.TSO = 0;
19877 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19878 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
19879 + if (skb->ip_summed == CHECKSUM_PARTIAL){
19880 + cpu_ptr->txd_info4.TUI_CO = 7;
19882 + cpu_ptr->txd_info4.TUI_CO = 0;
19886 +#ifdef CONFIG_RAETH_HW_VLAN_TX
19887 + if(vlan_tx_tag_present(skb)) {
19888 + cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
19890 + cpu_ptr->txd_info4.VLAN_TAG = 0;
19894 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19895 + if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
19896 + if(ra_sw_nat_hook_rx!= NULL){
19897 + cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19898 + FOE_MAGIC_TAG(skb) = 0;
19903 + cpu_ptr->txd_info3.SWC_bit = 1;
19905 + ctx_offset = get_free_txd(&free_txd);
19906 + if(ctx_offset == NUM_TX_DESC) {
19907 + printk("get_free_txd fail\n");
19910 + //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19911 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19912 + cpu_ptr->txd_info2.NDP = free_txd;
19913 + ei_local->tx_cpu_ptr = free_txd;
19915 + if(nr_frags > 0) {
19916 + for(i=0;i<nr_frags;i++) {
19917 + // 1. set or get init value for current fragment
19919 + frag = &skb_shinfo(skb)->frags[i];
19920 + len = frag->size;
19921 + frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
19922 + for(frag_txd_num = frag_txd_num;frag_txd_num > 0; frag_txd_num --){
19923 + // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
19924 + if(len < MAX_TXD_LEN)
19927 + size = MAX_TXD_LEN;
19929 + //3. Update TXD info
19930 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19931 + cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19932 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
19933 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
19935 + cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
19936 +// printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
19938 + cpu_ptr->txd_info3.SDL = size;
19939 + if( (i==(nr_frags-1)) && (frag_txd_num == 1))
19940 + cpu_ptr->txd_info3.LS_bit = 1;
19942 + cpu_ptr->txd_info3.LS_bit = 0;
19943 + cpu_ptr->txd_info3.OWN_bit = 0;
19944 + cpu_ptr->txd_info3.SWC_bit = 1;
19945 + //4. Update skb_free for housekeeping
19946 + ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID
19948 + //5. Get next TXD
19949 + ctx_offset = get_free_txd(&free_txd);
19950 + //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19951 + //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19952 + cpu_ptr->txd_info2.NDP = free_txd;
19953 + ei_local->tx_cpu_ptr = free_txd;
19954 + //6. Update offset and len.
19959 + ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
19962 + if(skb_shinfo(skb)->gso_segs > 1) {
19964 +// TsoLenUpdate(skb->len);
19966 + /* TCP over IPv4 */
19967 + iph = (struct iphdr *)skb_network_header(skb);
19968 +#if defined (CONFIG_RAETH_TSOV6)
19969 + /* TCP over IPv6 */
19970 + ip6h = (struct ipv6hdr *)skb_network_header(skb);
19972 + if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
19973 + th = (struct tcphdr *)skb_transport_header(skb);
19975 + init_cpu_ptr->txd_info4.TSO = 1;
19977 + th->check = htons(skb_shinfo(skb)->gso_size);
19978 +#if defined (CONFIG_MIPS)
19979 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19981 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19985 +#if defined (CONFIG_RAETH_TSOV6)
19986 + /* TCP over IPv6 */
19987 + //ip6h = (struct ipv6hdr *)skb_network_header(skb);
19988 + else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
19989 + th = (struct tcphdr *)skb_transport_header(skb);
19990 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19991 + init_cpu_ptr->txd_info4.TSO = 1;
19993 + init_cpu_ptr->txd_info4.TSO = 1;
19995 + th->check = htons(skb_shinfo(skb)->gso_size);
19996 +#if defined (CONFIG_MIPS)
19997 + dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19999 + dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
20006 +// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
20008 + init_cpu_ptr->txd_info3.OWN_bit = 0;
20009 +#endif // CONFIG_RAETH_TSO //
20011 + sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
20013 +#ifdef CONFIG_PSEUDO_SUPPORT
20014 + if (gmac_no == 2) {
20015 + if (ei_local->PseudoDev != NULL) {
20016 + pAd = netdev_priv(ei_local->PseudoDev);
20017 + pAd->stat.tx_packets++;
20018 + pAd->stat.tx_bytes += length;
20024 + ei_local->stat.tx_packets++;
20025 + ei_local->stat.tx_bytes += skb->len;
20027 +#ifdef CONFIG_RAETH_NAPI
20028 + if ( ei_local->tx_full == 1) {
20029 + ei_local->tx_full = 0;
20030 + netif_wake_queue(dev);
20037 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
20039 + END_DEVICE *ei_local = netdev_priv(dev);
20040 + unsigned long flags;
20041 + unsigned int num_of_txd = 0;
20042 +#if defined (CONFIG_RAETH_TSO)
20043 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
20044 + struct skb_frag_struct *frag;
20046 +#ifdef CONFIG_PSEUDO_SUPPORT
20047 + PSEUDO_ADAPTER *pAd;
20050 +#if !defined(CONFIG_RA_NAT_NONE)
20051 + if(ra_sw_nat_hook_tx!= NULL)
20053 +// spin_lock_irqsave(&ei_local->page_lock, flags);
20054 + if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
20055 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
20058 +// spin_unlock_irqrestore(&ei_local->page_lock, flags);
20064 +#if defined(CONFIG_RALINK_MT7621) || defined(CONFIG_ARCH_MT7623)
20065 +#define MIN_PKT_LEN 64
20066 + if (skb->len < MIN_PKT_LEN) {
20067 + if (skb_padto(skb, MIN_PKT_LEN)) {
20068 + printk("raeth: skb_padto failed\n");
20071 + skb_put(skb, MIN_PKT_LEN - skb->len);
20076 + dev->trans_start = jiffies; /* save the timestamp */
20077 + spin_lock_irqsave(&ei_local->page_lock, flags);
20078 +#if defined (CONFIG_MIPS)
20079 + dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
20081 + dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
20085 +//check free_txd_num before calling rt2880_eth_send()
20087 +#if defined (CONFIG_RAETH_TSO)
20088 + // num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
20089 + if(nr_frags != 0){
20090 + for(i=0;i<nr_frags;i++) {
20091 + frag = &skb_shinfo(skb)->frags[i];
20092 + num_of_txd += cal_frag_txd_num(frag->size);
20100 +#if defined(CONFIG_RALINK_MT7621)
20101 + if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
20102 + ei_xmit_housekeeping(0);
20106 + ei_xmit_housekeeping(0);
20108 + //if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
20109 + if ((ei_local->free_txd_num > num_of_txd + 5) && (ei_local->free_txd_num != NUM_TX_DESC))
20111 + rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
20112 + if (ei_local->free_txd_num < 3)
20114 +#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)
20115 + netif_stop_queue(dev);
20116 +#ifdef CONFIG_PSEUDO_SUPPORT
20117 + netif_stop_queue(ei_local->PseudoDev);
20119 + tx_ring_full = 1;
20123 +#ifdef CONFIG_PSEUDO_SUPPORT
20124 + if (gmac_no == 2)
20126 + if (ei_local->PseudoDev != NULL)
20128 + pAd = netdev_priv(ei_local->PseudoDev);
20129 + pAd->stat.tx_dropped++;
20133 + ei_local->stat.tx_dropped++;
20135 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
20138 + spin_unlock_irqrestore(&ei_local->page_lock, flags);
20142 +void ei_xmit_housekeeping(unsigned long unused)
20144 + struct net_device *dev = dev_raether;
20145 + END_DEVICE *ei_local = netdev_priv(dev);
20146 +#ifndef CONFIG_RAETH_NAPI
20147 + unsigned long reg_int_mask=0;
20149 + struct QDMA_txdesc *dma_ptr = NULL;
20150 + struct QDMA_txdesc *cpu_ptr = NULL;
20151 + struct QDMA_txdesc *tmp_ptr = NULL;
20152 + unsigned int htx_offset = 0;
20153 + unsigned int ctx_offset = 0;
20154 + unsigned int dtx_offset = 0;
20156 + //dma_ptr = phys_to_virt(sysRegRead(QTX_DRX_PTR));
20157 + //cpu_ptr = phys_to_virt(sysRegRead(QTX_CRX_PTR));
20158 + //printk("kurtis:housekeeping QTX_DRX_PTR = 0x%x!!!\n", sysRegRead(QTX_DRX_PTR));
20159 + //printk("kurtis:housekeeping DMA_PTR = 0x%x!!!\n", dma_ptr);
20161 + cpu_ptr = sysRegRead(QTX_CRX_PTR);
20162 + dma_ptr = sysRegRead(QTX_DRX_PTR);
20164 + //printk("kurtis:housekeeping QTX_CRX_PTR = 0x%x!!!\n", cpu_ptr);
20165 + //printk("kurtis:housekeeping QTX_DRX_PTR = 0x%x!!!\n", dma_ptr);
20166 + ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
20167 + dtx_offset = GET_TXD_OFFSET(&dma_ptr);
20168 + htx_offset = ctx_offset;
20169 + cpu_ptr = (ei_local->txd_pool + (ctx_offset));
20170 + dma_ptr = (ei_local->txd_pool + (dtx_offset));
20173 + //printk("kurtis:housekeeping CPU_PTR = 0x%x!!!\n", cpu_ptr);
20174 + //printk("kurtis:housekeeping DMA_PTR = 0x%x!!!\n", dma_ptr);
20180 + if(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
20181 + while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
20183 + //1. keep cpu next TXD
20184 + //tmp_ptr = phys_to_virt(cpu_ptr->txd_info2.NDP);
20185 + tmp_ptr = cpu_ptr->txd_info2.NDP;
20186 + htx_offset = GET_TXD_OFFSET(&tmp_ptr);
20187 + //printk("kurtis:housekeeping cpu_ptr->txd_info2.NDP = 0x%x!!!\n", cpu_ptr->txd_info2.NDP);
20188 + //printk("kurtis:housekeeping tmp_ptr = 0x%x!!!\n", tmp_ptr);
20189 + //printk("kurtis:housekeeping htx_offset = 0x%x!!!\n", htx_offset);
20190 + //2. free skb memory
20191 +#if defined (CONFIG_RAETH_TSO)
20192 + if(ei_local->skb_free[htx_offset]!=(struct sk_buff *)0xFFFFFFFF) {
20193 + dev_kfree_skb_any(ei_local->skb_free[htx_offset]);
20196 + dev_kfree_skb_any(ei_local->skb_free[htx_offset]);
20200 + //htx_offset = GET_TXD_OFFSET(&cpu_ptr);
20201 + //put_free_txd(htx_offset);
20202 + put_free_txd(ctx_offset);
20206 + netif_wake_queue(dev);
20207 +#ifdef CONFIG_PSEUDO_SUPPORT
20208 + netif_wake_queue(ei_local->PseudoDev);
20212 + //4. update cpu_ptr to next ptr
20213 + //cpu_ptr = tmp_ptr;
20214 + cpu_ptr = (ei_local->txd_pool + htx_offset);
20215 + ctx_offset = htx_offset;
20216 + //cpu_ptr = (cpu_ptr + (htx_offset));
20217 + //printk("kurtis:housekeeping 4. update cpu_ptr = 0x%x!!!\n", cpu_ptr);
20220 + //sysRegWrite(QTX_CRX_PTR, virt_to_phys(cpu_ptr));
20221 + //sysRegWrite(QTX_CRX_PTR, cpu_ptr);
20222 + tmp_ptr = (ei_local->phy_txd_pool + (htx_offset << 4));
20223 + //printk("kurtis:housekeeping 5. update QTX_CRX_PTR = 0x%x!!!\n", tmp_ptr);
20224 + sysRegWrite(QTX_CRX_PTR, tmp_ptr);
20228 +#ifndef CONFIG_RAETH_NAPI
20229 + reg_int_mask=sysRegRead(QFE_INT_ENABLE);
20230 +#if defined (DELAY_INT)
20231 + sysRegWrite(FE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
20234 + sysRegWrite(FE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
20236 +#endif //CONFIG_RAETH_NAPI//
20239 +EXPORT_SYMBOL(ei_start_xmit);
20240 +EXPORT_SYMBOL(ei_xmit_housekeeping);
20241 +EXPORT_SYMBOL(fe_dma_init);
20242 +EXPORT_SYMBOL(rt2880_eth_send);
20244 +++ b/drivers/net/ethernet/raeth/smb_hook.c
20246 +#include <linux/version.h>
20247 +#include <linux/module.h>
20248 +#include <linux/kernel.h>
20249 +#include <linux/types.h>
20250 +#include <linux/skbuff.h>
20253 +int (*smb_nf_local_in_hook)(struct sk_buff *skb) = NULL;
20254 +int (*smb_nf_pre_routing_hook)(struct sk_buff *skb) = NULL;
20255 +int (*smb_nf_local_out_hook)(struct sk_buff *skb) = NULL;
20256 +int (*smb_nf_post_routing_hook)(struct sk_buff *skb) = NULL;
20257 +EXPORT_SYMBOL(smb_nf_local_in_hook);
20258 +EXPORT_SYMBOL(smb_nf_pre_routing_hook);
20259 +EXPORT_SYMBOL(smb_nf_local_out_hook);
20260 +EXPORT_SYMBOL(smb_nf_post_routing_hook);
20264 +++ b/drivers/net/ethernet/raeth/smb_nf.c
20266 +#include <linux/module.h>
20267 +#include <linux/version.h>
20268 +#include <linux/kernel.h>
20269 +#include <linux/types.h>
20271 +#include <linux/inetdevice.h>
20272 +#include <linux/tcp.h>
20273 +#include <linux/ip.h>
20274 +#include <net/tcp.h>
20275 +#include <net/ip.h>
20277 +extern int (*smb_nf_local_in_hook)(struct sk_buff *skb);
20278 +extern int (*smb_nf_pre_routing_hook)(struct sk_buff *skb);
20279 +extern int (*smb_nf_local_out_hook)(struct sk_buff *skb);
20280 +extern int (*smb_nf_post_routing_hook)(struct sk_buff *skb);
20282 +struct net_device *lan_int = NULL;
20283 +struct in_ifaddr *lan_ifa = NULL;
20286 +int mtk_smb_nf_local_in_hook(struct sk_buff *skb)
20288 + struct iphdr *iph = ip_hdr(skb);
20290 + if (skb->protocol == htons(ETH_P_IP)) {
20291 + struct iphdr *iph = ip_hdr(skb);
20293 + if (iph->protocol == IPPROTO_TCP) {
20294 + struct tcphdr *th = tcp_hdr(skb);
20295 + unsigned short sport, dport;
20297 + th = tcp_hdr(skb);
20298 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20300 + if ((iph->daddr == lan_ifa->ifa_local)
20301 + && ((th->dest == 0xbd01) || (th->dest == 0x8900)
20302 + || (th->dest == 0x8a00) || (th->dest == 0x8b00)))
20313 +int mtk_smb_nf_pre_routing_hook(struct sk_buff *skb)
20315 + struct iphdr *iph = ip_hdr(skb);
20317 + if (skb->protocol == htons(ETH_P_IP)) {
20318 + struct iphdr *iph = ip_hdr(skb);
20320 + if (iph->protocol == IPPROTO_TCP) {
20321 + struct tcphdr *th = tcp_hdr(skb);
20322 + unsigned short sport, dport;
20324 + th = tcp_hdr(skb);
20325 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20326 + if ((iph->daddr == lan_ifa->ifa_local)
20327 + && ((th->dest == 0xbd01) || (th->dest == 0x8900)
20328 + || (th->dest == 0x8a00) || (th->dest == 0x8b00)))
20339 +int mtk_smb_nf_local_out_hook(struct sk_buff *skb)
20341 + struct iphdr *iph = ip_hdr(skb);
20343 + if (iph->protocol == IPPROTO_TCP) {
20344 + struct tcphdr *th = tcp_hdr(skb);
20346 + th = tcp_hdr(skb);
20347 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20349 + if ((iph->saddr == lan_ifa->ifa_local)
20350 + && ((th->source == 0xbd01) || (th->source == 0x8900)
20351 + || (th->source == 0x8a00) || (th->source == 0x8b00)))
20360 +int mtk_smb_nf_post_routing_hook(struct sk_buff *skb)
20362 + struct iphdr *iph = ip_hdr(skb);
20364 + if (skb->protocol == htons(ETH_P_IP)) {
20365 + struct iphdr *iph = ip_hdr(skb);
20367 + if (iph->protocol == IPPROTO_TCP) {
20368 + struct tcphdr *th = tcp_hdr(skb);
20370 + th = tcp_hdr(skb);
20371 + th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20373 + if ((iph->saddr == lan_ifa->ifa_local)
20374 + && ((th->source == 0xbd01) || (th->source == 0x8900)
20375 + || (th->source == 0x8a00) || (th->source == 0x8b00)))
20386 +int __init mtk_smb_hook_init(void)
20388 + struct in_device *in_dev;
20389 + struct in_ifaddr **ifap = NULL;
20390 + struct in_ifaddr *ifa = NULL;
20392 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
20393 + lan_int = dev_get_by_name(&init_net, "br0");
20395 + lan_int = dev_get_by_name("br0");
20398 + in_dev = __in_dev_get_rtnl(lan_int);
20403 + for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
20404 + ifap = &ifa->ifa_next) {
20405 + if (!strcmp("br0", ifa->ifa_label))
20408 + break; /* found */
20416 + smb_nf_local_in_hook = mtk_smb_nf_local_in_hook;
20417 + smb_nf_pre_routing_hook = mtk_smb_nf_pre_routing_hook;
20418 + smb_nf_local_out_hook = mtk_smb_nf_local_out_hook;
20419 + smb_nf_post_routing_hook = mtk_smb_nf_post_routing_hook;
20422 + printk("Samba Netfilter Hook Enabled\n");
20427 +void mtk_smb_hook_cleanup(void)
20431 + smb_nf_local_in_hook = NULL;
20432 + smb_nf_pre_routing_hook = NULL;
20433 + smb_nf_local_out_hook = NULL;
20434 + smb_nf_post_routing_hook = NULL;
20439 +module_init(mtk_smb_hook_init);
20440 +module_exit(mtk_smb_hook_cleanup);
20442 +MODULE_LICENSE("GPL");
20444 +++ b/drivers/net/ethernet/raeth/sync_write.h
20446 +#ifndef _MT_SYNC_WRITE_H
20447 +#define _MT_SYNC_WRITE_H
20449 +#if defined(__KERNEL__)
20451 +#include <linux/io.h>
20452 +#include <asm/cacheflush.h>
20453 +//#include <asm/system.h>
20459 +#define mt65xx_reg_sync_writel(v, a) \
20461 + __raw_writel((v), IOMEM((a))); \
20465 +#define mt65xx_reg_sync_writew(v, a) \
20467 + __raw_writew((v), IOMEM((a))); \
20471 +#define mt65xx_reg_sync_writeb(v, a) \
20473 + __raw_writeb((v), IOMEM((a))); \
20477 +#define mt_reg_sync_writel(v, a) \
20479 + __raw_writel((v), IOMEM((a))); \
20483 +#define mt_reg_sync_writew(v, a) \
20485 + __raw_writew((v), IOMEM((a))); \
20489 +#define mt_reg_sync_writeb(v, a) \
20491 + __raw_writeb((v), IOMEM((a))); \
20496 +#else /* __KERNEL__ */
20498 +#include <sys/types.h>
20499 +#include <sys/stat.h>
20500 +#include <fcntl.h>
20501 +#include <unistd.h>
20502 +#include <string.h>
20506 + __asm__ __volatile__ ("dsb" : : : "memory"); \
20509 +#define mt65xx_reg_sync_writel(v, a) \
20511 + *(volatile unsigned int *)(a) = (v); \
20515 +#define mt65xx_reg_sync_writew(v, a) \
20517 + *(volatile unsigned short *)(a) = (v); \
20521 +#define mt65xx_reg_sync_writeb(v, a) \
20523 + *(volatile unsigned char *)(a) = (v); \
20527 +#define mt_reg_sync_writel(v, a) \
20529 + *(volatile unsigned int *)(a) = (v); \
20533 +#define mt_reg_sync_writew(v, a) \
20535 + *(volatile unsigned short *)(a) = (v); \
20539 +#define mt_reg_sync_writeb(v, a) \
20541 + *(volatile unsigned char *)(a) = (v); \
20546 +#endif /* __KERNEL__ */
20548 +#endif /* !_MT_SYNC_WRITE_H */