target/linux/mediatek/patches/0063-arm-mediatek-add-SDK-ethernet.patch
1 From e3aece79d5003b6879298b05551e113117d5cdd8 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sat, 27 Jun 2015 13:13:36 +0200
4 Subject: [PATCH 63/76] arm: mediatek: add SDK ethernet
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
8  drivers/net/ethernet/Kconfig                      |    1 +
9  drivers/net/ethernet/Makefile                     |    1 +
10  drivers/net/ethernet/raeth/Kconfig                |  415 ++
11  drivers/net/ethernet/raeth/Makefile               |   67 +
12  drivers/net/ethernet/raeth/Makefile.release       |   60 +
13  drivers/net/ethernet/raeth/csr_netlink.h          |   27 +
14  drivers/net/ethernet/raeth/dvt/pkt_gen.c          |   88 +
15  drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c |  138 +
16  drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c |  191 +
17  drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c | 1527 +++++
18  drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h |   75 +
19  drivers/net/ethernet/raeth/ethtool_readme.txt     |   44 +
20  drivers/net/ethernet/raeth/mcast.c                |  187 +
21  drivers/net/ethernet/raeth/mii_mgr.c              |  603 ++
22  drivers/net/ethernet/raeth/ra2882ethreg.h         | 1985 +++++++
23  drivers/net/ethernet/raeth/ra_ethtool.c           |  515 ++
24  drivers/net/ethernet/raeth/ra_ethtool.h           |   13 +
25  drivers/net/ethernet/raeth/ra_ioctl.h             |  102 +
26  drivers/net/ethernet/raeth/ra_mac.c               | 2645 +++++++++
27  drivers/net/ethernet/raeth/ra_mac.h               |   57 +
28  drivers/net/ethernet/raeth/ra_netlink.c           |  142 +
29  drivers/net/ethernet/raeth/ra_netlink.h           |   10 +
30  drivers/net/ethernet/raeth/ra_qos.c               |  655 +++
31  drivers/net/ethernet/raeth/ra_qos.h               |   18 +
32  drivers/net/ethernet/raeth/ra_rfrw.c              |   66 +
33  drivers/net/ethernet/raeth/ra_rfrw.h              |    6 +
34  drivers/net/ethernet/raeth/raether.c              | 6401 +++++++++++++++++++++
35  drivers/net/ethernet/raeth/raether.h              |  126 +
36  drivers/net/ethernet/raeth/raether_hwlro.c        |  347 ++
37  drivers/net/ethernet/raeth/raether_pdma.c         | 1121 ++++
38  drivers/net/ethernet/raeth/raether_qdma.c         | 1407 +++++
39  drivers/net/ethernet/raeth/raether_qdma_mt7623.c  | 1020 ++++
40  drivers/net/ethernet/raeth/smb_hook.c             |   17 +
41  drivers/net/ethernet/raeth/smb_nf.c               |  177 +
42  drivers/net/ethernet/raeth/sync_write.h           |  103 +
43  35 files changed, 20357 insertions(+)
44  create mode 100644 drivers/net/ethernet/raeth/Kconfig
45  create mode 100644 drivers/net/ethernet/raeth/Makefile
46  create mode 100644 drivers/net/ethernet/raeth/Makefile.release
47  create mode 100644 drivers/net/ethernet/raeth/csr_netlink.h
48  create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen.c
49  create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
50  create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
51  create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
52  create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
53  create mode 100644 drivers/net/ethernet/raeth/ethtool_readme.txt
54  create mode 100644 drivers/net/ethernet/raeth/mcast.c
55  create mode 100644 drivers/net/ethernet/raeth/mii_mgr.c
56  create mode 100644 drivers/net/ethernet/raeth/ra2882ethreg.h
57  create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.c
58  create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.h
59  create mode 100644 drivers/net/ethernet/raeth/ra_ioctl.h
60  create mode 100644 drivers/net/ethernet/raeth/ra_mac.c
61  create mode 100644 drivers/net/ethernet/raeth/ra_mac.h
62  create mode 100644 drivers/net/ethernet/raeth/ra_netlink.c
63  create mode 100644 drivers/net/ethernet/raeth/ra_netlink.h
64  create mode 100644 drivers/net/ethernet/raeth/ra_qos.c
65  create mode 100644 drivers/net/ethernet/raeth/ra_qos.h
66  create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.c
67  create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.h
68  create mode 100644 drivers/net/ethernet/raeth/raether.c
69  create mode 100644 drivers/net/ethernet/raeth/raether.h
70  create mode 100755 drivers/net/ethernet/raeth/raether_hwlro.c
71  create mode 100755 drivers/net/ethernet/raeth/raether_pdma.c
72  create mode 100644 drivers/net/ethernet/raeth/raether_qdma.c
73  create mode 100644 drivers/net/ethernet/raeth/raether_qdma_mt7623.c
74  create mode 100644 drivers/net/ethernet/raeth/smb_hook.c
75  create mode 100644 drivers/net/ethernet/raeth/smb_nf.c
76  create mode 100644 drivers/net/ethernet/raeth/sync_write.h
77
78 --- a/drivers/net/ethernet/Kconfig
79 +++ b/drivers/net/ethernet/Kconfig
80 @@ -17,6 +17,7 @@ config MDIO
81  config SUNGEM_PHY
82         tristate
83  
84 +source "drivers/net/ethernet/raeth/Kconfig"
85  source "drivers/net/ethernet/3com/Kconfig"
86  source "drivers/net/ethernet/adaptec/Kconfig"
87  source "drivers/net/ethernet/aeroflex/Kconfig"
88 --- a/drivers/net/ethernet/Makefile
89 +++ b/drivers/net/ethernet/Makefile
90 @@ -84,3 +84,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
91  obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
92  obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
93  obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
94 +obj-$(CONFIG_RAETH) += raeth/
95 --- /dev/null
96 +++ b/drivers/net/ethernet/raeth/Kconfig
97 @@ -0,0 +1,415 @@
98 +config ARCH_MT7623
99 +       bool
100 +       default y
101 +
102 +config RA_NAT_NONE
103 +       bool
104 +       default y
105 +
106 +config RAETH
107 +        tristate "Ralink GMAC"
108 +       ---help---
109 +          This driver supports the Ralink gigabit Ethernet family of
110 +          adapters.
111 +
112 +config PDMA_NEW
113 +        bool
114 +       default y if  (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
115 +        depends on RAETH
116 +
117 +config RAETH_SCATTER_GATHER_RX_DMA
118 +        bool
119 +       default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
120 +        depends on RAETH
121 +
122 +
123 +choice
124 +       prompt "Network BottomHalves"   
125 +        depends on RAETH
126 +       default RA_NETWORK_WORKQUEUE_BH
127 +
128 +       config RA_NETWORK_TASKLET_BH
129 +       bool "Tasklet"
130 +
131 +       config RA_NETWORK_WORKQUEUE_BH
132 +       bool "Work Queue"
133 +
134 +       config RAETH_NAPI
135 +        bool "NAPI"
136 +
137 +endchoice
138 +
139 +#config TASKLET_WORKQUEUE_SW
140 +#        bool "Tasklet and Workqueue switch"
141 +#        depends on RA_NETWORK_TASKLET_BH
142 +
143 +config RAETH_SKB_RECYCLE_2K
144 +        bool "SKB Recycling"
145 +        depends on RAETH
146 +
147 +config RAETH_SPECIAL_TAG
148 +        bool "Ralink Special Tag (0x810x)"
149 +        depends on RAETH && RT_3052_ESW
150 +
151 +#config RAETH_JUMBOFRAME
152 +#        bool "Jumbo Frame up to 4K bytes"
153 +#        depends on RAETH && !(RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_MT7628)
154 +
155 +config RAETH_CHECKSUM_OFFLOAD
156 +        bool "TCP/UDP/IP checksum offload"
157 +       default y
158 +        depends on RAETH && !RALINK_RT2880
159 +
160 +#config RAETH_SW_FC
161 +#        bool "When TX ring is full, inform kernel stop transmit and stop RX handler"
162 +#       default n
163 +#        depends on RAETH
164 +
165 +#config RAETH_8023AZ_EEE
166 +#        bool "Enable Embeded Switch EEE"
167 +#       default n
168 +#        depends on RAETH && (RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628)
169 +
170 +
171 +
172 +config 32B_DESC
173 +        bool "32-byte TX/RX descriptors"
174 +       default n
175 +        depends on RAETH && (RALINK_MT7620 || RALINK_MT7621)
176 +        ---help---
177 +          At the moment, you cannot enable 32-byte descriptors and multiple RX rings at the same time.
178 +
179 +config RAETH_LRO
180 +        bool "LRO (Large Receive Offload )"
181 +       select INET_LRO
182 +        depends on RAETH && (RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
183 +
184 +config RAETH_HW_LRO
185 +        bool "HW LRO (Large Receive Offload)"
186 +       default n
187 +        depends on RAETH
188 +
189 +config RAETH_HW_LRO_DBG
190 +        bool "HW LRO Debug"
191 +       default n
192 +       depends on RAETH_HW_LRO
193 +
194 +config RAETH_HW_LRO_AUTO_ADJ_DBG
195 +        bool "HW LRO Auto Adjustment Debug"
196 +       default y
197 +       depends on RAETH_HW_LRO
198 +
199 +config RAETH_HW_LRO_REASON_DBG
200 +        bool "HW LRO Flush Reason Debug"
201 +       default n
202 +       depends on RAETH_HW_LRO
203 +
204 +config RAETH_HW_VLAN_TX
205 +        bool "Transmit VLAN HW (DoubleVLAN is not supported)"
206 +        depends on RAETH && !(RALINK_RT5350 || RALINK_MT7628)
207 +        ---help---
208 +          Please disable HW_VLAN_TX if you need double VLAN.
209 +
210 +config RAETH_HW_VLAN_RX
211 +        bool "Receive VLAN HW (DoubleVLAN is not supported)"
212 +        depends on RAETH && RALINK_MT7621
213 +        ---help---
214 +          Please disable HW_VLAN_RX if you need double VLAN.
215 +
216 +config RAETH_TSO
217 +        bool "TSOV4 (Tcp Segmentaton Offload)"
218 +       depends on (RAETH_HW_VLAN_TX && (RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620))||((RALINK_MT7621 || ARCH_MT7623) &&(RAETH_HW_VLAN_TX || RAETH_GMAC2 ))
219 +
220 +config RAETH_TSOV6
221 +        bool "TSOV6 (TCP Segmentation Offload)"
222 +       depends on RAETH_TSO
223 +
224 +config RAETH_RW_PDMAPTR_FROM_VAR
225 +       bool
226 +       default y if RALINK_RT6855A || RALINK_MT7620
227 +        depends on RAETH
228 +
229 +config MTK_SMB_HOOK
230 +        bool "Samba Speedup Module"
231 +       depends on RAETH
232 +
233 +config SPLICE_NET_SUPPORT
234 +       default y if MTK_SMB_HOOK
235 +       depends on MTK_SMB_HOOK
236 +       bool
237 +
238 +
239 +config RAETH_DVT
240 +        bool "RAETH DVT"
241 +       depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
242 +       
243 +config RAETH_PDMA_DVT
244 +        bool "PDMA DVT"
245 +       depends on RAETH_DVT
246 +
247 +config RAETH_PDMA_LEGACY_MODE
248 +        bool "PDMA legacy mode"
249 +       depends on RAETH_PDMA_DVT
250 +
251 +#config RAETH_QOS
252 +#        bool "QoS Feature"
253 +#        depends on RAETH && !RALINK_RT2880 && !RALINK_MT7620 && !RALINK_MT7621 && !RAETH_TSO
254 +
255 +choice
256 +        prompt "QoS Type"
257 +        depends on RAETH_QOS
258 +        default RAETH_QOS_DSCP_BASED
259 +
260 +config  RAETH_QOS_DSCP_BASED
261 +        bool "DSCP-based"
262 +        depends on RAETH_QOS 
263 +
264 +config  RAETH_QOS_VPRI_BASED
265 +        bool "VPRI-based"
266 +        depends on RAETH_QOS
267 +
268 +endchoice
269 +
270 +config RAETH_QDMA
271 +        bool "Choose QDMA instead of PDMA"
272 +       default n
273 +        depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
274 +
275 +config RAETH_QDMATX_QDMARX
276 +        bool "Choose QDMA RX instead of PDMA RX"
277 +       default n
278 +        depends on RAETH_QDMA && !RALINK_MT7621
279 +
280 +   
281 +
282 +choice
283 +        prompt "GMAC is connected to"
284 +        depends on RAETH
285 +        default GE1_RGMII_FORCE_1000
286 +
287 +config  GE1_MII_FORCE_100
288 +        bool "MII_FORCE_100 (10/100M Switch)"
289 +        depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) 
290 +
291 +config  GE1_MII_AN
292 +        bool "MII_AN (100Phy)"
293 +        depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) 
294 +
295 +config  GE1_RVMII_FORCE_100
296 +        bool "RvMII_FORCE_100 (CPU)"
297 +        depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) 
298 +
299 +config  GE1_RGMII_FORCE_1000
300 +        bool "RGMII_FORCE_1000 (GigaSW, CPU)"
301 +        depends on (RALINK_RT2880 || RALINK_RT3883)
302 +       select RALINK_SPI
303 +
304 +config  GE1_RGMII_FORCE_1000
305 +        bool "RGMII_FORCE_1000 (GigaSW, CPU)"
306 +        depends on (RALINK_MT7621 || ARCH_MT7623)
307 +       select RT_3052_ESW
308 +
309 +config  GE1_TRGMII_FORCE_1200
310 +        bool "TRGMII_FORCE_1200 (GigaSW, CPU)"
311 +        depends on (RALINK_MT7621)
312 +       select RT_3052_ESW
313 +
314 +config  GE1_TRGMII_FORCE_2000
315 +        bool "TRGMII_FORCE_2000 (GigaSW, CPU, for MT7623 and MT7683)"
316 +        depends on (ARCH_MT7623)
317 +        select RT_3052_ESW
318 +
319 +config  GE1_TRGMII_FORCE_2600
320 +        bool "TRGMII_FORCE_2600 (GigaSW, CPU, MT7623 only)"
321 +        depends on (ARCH_MT7623)
322 +        select RT_3052_ESW
323 +
324 +config  GE1_RGMII_AN
325 +        bool "RGMII_AN (GigaPhy)"
326 +        depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623) 
327 +
328 +config  GE1_RGMII_NONE
329 +        bool "NONE (NO CONNECT)"
330 +        depends on (RALINK_MT7621 || ARCH_MT7623)
331 +
332 +endchoice
333 +
334 +config HW_SFQ
335 +        bool "HW_SFQ"
336 +       default n
337 +        depends on RAETH_QDMA && (ARCH_MT7623)
338 +        
339 +        
340 +config  RT_3052_ESW
341 +        bool "Ralink Embedded Switch"
342 +       default y
343 +        depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628 || ARCH_MT7623)
344 +
345 +config LAN_WAN_SUPPORT
346 +        bool "LAN/WAN Partition"
347 +        depends on RAETH && (RAETH_ROUTER || RT_3052_ESW)
348 +
349 +config ETH_MEMORY_OPTIMIZATION
350 +       bool "Ethernet memory optimization"
351 +       depends on RALINK_MT7628
352 +
353 +config ETH_ONE_PORT_ONLY
354 +       bool "One Port Only"
355 +       depends on RALINK_MT7628
356 +
357 +choice
358 +        prompt "Switch Board Layout Type"
359 +        depends on LAN_WAN_SUPPORT || P5_RGMII_TO_MAC_MODE ||  GE1_RGMII_FORCE_1000 || GE1_TRGMII_FORCE_1200 || GE2_RGMII_FORCE_1000
360 +       default WAN_AT_P0
361 +
362 +       config  WAN_AT_P4
363 +               bool "LLLL/W"
364 +               
365 +       config  WAN_AT_P0
366 +               bool "W/LLLL"
367 +endchoice
368 +
369 +config RALINK_VISTA_BASIC
370 +       bool 'Vista Basic Logo for IC+ 175C'
371 +        depends on LAN_WAN_SUPPORT && (RALINK_RT2880 || RALINK_RT3883)
372 +
373 +config ESW_DOUBLE_VLAN_TAG
374 +       bool
375 +       default y if RT_3052_ESW
376 +
377 +config RAETH_HAS_PORT4
378 +        bool "Port 4 Support"
379 +        depends on RAETH && RALINK_MT7620
380 +choice
381 +        prompt "Target Mode"
382 +        depends on RAETH_HAS_PORT4
383 +       default P4_RGMII_TO_MAC_MODE
384 +
385 +       config P4_MAC_TO_PHY_MODE
386 +               bool "Giga_Phy (RGMII)"
387 +       config  GE_RGMII_MT7530_P0_AN
388 +               bool "GE_RGMII_MT7530_P0_AN (MT7530 Internal GigaPhy)"
389 +       config  GE_RGMII_MT7530_P4_AN
390 +               bool "GE_RGMII_MT7530_P4_AN (MT7530 Internal GigaPhy)"
391 +       config P4_RGMII_TO_MAC_MODE
392 +               bool "Giga_SW/iNIC (RGMII)"
393 +       config P4_MII_TO_MAC_MODE
394 +               bool "External_CPU (MII_RvMII)"
395 +       config P4_RMII_TO_MAC_MODE
396 +               bool "External_CPU (RvMII_MII)"
397 +endchoice
398 +
399 +config  MAC_TO_GIGAPHY_MODE_ADDR2
400 +        hex "Port4 Phy Address"
401 +       default 0x4
402 +        depends on P4_MAC_TO_PHY_MODE
403 +
404 +config RAETH_HAS_PORT5
405 +        bool "Port 5 Support"
406 +        depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620)
407 +choice
408 +        prompt "Target Mode"
409 +        depends on RAETH_HAS_PORT5
410 +       default P5_RGMII_TO_MAC_MODE
411 +
412 +       config P5_MAC_TO_PHY_MODE
413 +               bool "Giga_Phy (RGMII)"
414 +       config P5_RGMII_TO_MAC_MODE
415 +               bool "Giga_SW/iNIC (RGMII)"
416 +       config P5_RGMII_TO_MT7530_MODE
417 +               bool "MT7530 Giga_SW (RGMII)"
418 +               depends on RALINK_MT7620
419 +       config P5_MII_TO_MAC_MODE
420 +               bool "External_CPU (MII_RvMII)"
421 +       config P5_RMII_TO_MAC_MODE
422 +               bool "External_CPU (RvMII_MII)"
423 +endchoice
424 +
425 +config  MAC_TO_GIGAPHY_MODE_ADDR
426 +        hex "GE1 Phy Address"
427 +       default 0x1F
428 +        depends on GE1_MII_AN || GE1_RGMII_AN
429 +
430 +config  MAC_TO_GIGAPHY_MODE_ADDR
431 +        hex "Port5 Phy Address"
432 +       default 0x5
433 +        depends on P5_MAC_TO_PHY_MODE
434 +
435 +config RAETH_GMAC2
436 +        bool "GMAC2 Support"
437 +        depends on RAETH && (RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
438 +
439 +choice
440 +        prompt "GMAC2 is connected to"
441 +        depends on RAETH_GMAC2
442 +        default GE2_RGMII_AN
443 +
444 +config  GE2_MII_FORCE_100
445 +        bool "MII_FORCE_100 (10/100M Switch)"
446 +        depends on RAETH_GMAC2
447 +
448 +config  GE2_MII_AN
449 +        bool "MII_AN (100Phy)"
450 +        depends on RAETH_GMAC2
451 +
452 +config  GE2_RVMII_FORCE_100
453 +        bool "RvMII_FORCE_100 (CPU)"
454 +        depends on RAETH_GMAC2
455 +
456 +config  GE2_RGMII_FORCE_1000
457 +        bool "RGMII_FORCE_1000 (GigaSW, CPU)"
458 +        depends on RAETH_GMAC2
459 +       select RALINK_SPI
460 +
461 +config  GE2_RGMII_AN
462 +        bool "RGMII_AN (External GigaPhy)"
463 +        depends on RAETH_GMAC2
464 +
465 +config  GE2_INTERNAL_GPHY
466 +        bool "RGMII_AN (Internal GigaPhy)"
467 +        depends on RAETH_GMAC2
468 +       select LAN_WAN_SUPPORT
469 +
470 +endchoice
471 +
472 +config  GE_RGMII_INTERNAL_P0_AN
473 +       bool
474 +        depends on GE2_INTERNAL_GPHY
475 +       default y if WAN_AT_P0
476 +
477 +config  GE_RGMII_INTERNAL_P4_AN
478 +       bool
479 +        depends on GE2_INTERNAL_GPHY
480 +       default y if WAN_AT_P4
481 +
482 +config  MAC_TO_GIGAPHY_MODE_ADDR2
483 +        hex
484 +       default 0 if GE_RGMII_INTERNAL_P0_AN
485 +       default 4 if GE_RGMII_INTERNAL_P4_AN
486 +        depends on GE_RGMII_INTERNAL_P0_AN || GE_RGMII_INTERNAL_P4_AN
487 +
488 +config  MAC_TO_GIGAPHY_MODE_ADDR2
489 +        hex "GE2 Phy Address"
490 +       default 0x1E
491 +        depends on GE2_MII_AN || GE2_RGMII_AN
492 +
493 +#force 100M
494 +config RAETH_ROUTER
495 +bool
496 +default y if GE1_MII_FORCE_100 || GE2_MII_FORCE_100 || GE1_RVMII_FORCE_100 || GE2_RVMII_FORCE_100
497 +
498 +#force 1000M
499 +config MAC_TO_MAC_MODE
500 +bool
501 +default y if GE1_RGMII_FORCE_1000 || GE2_RGMII_FORCE_1000
502 +depends on (RALINK_RT2880 || RALINK_RT3883) 
503 +
504 +#AN
505 +config GIGAPHY
506 +bool
507 +default y if GE1_RGMII_AN || GE2_RGMII_AN
508 +
509 +#AN
510 +config 100PHY
511 +bool
512 +default y if GE1_MII_AN || GE2_MII_AN
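
A quick illustration of how these options reach the C sources: every Kconfig symbol above is exported by Kbuild as a CONFIG_* preprocessor define, so the driver can branch on the selected GMAC mode at compile time. The sketch below is editorial and hypothetical (the function name and messages are not taken from the patch); it only shows the standard #if defined(CONFIG_...) pattern under that assumption.

#include <linux/kernel.h>

/* Hypothetical helper: report which GE1 connection mode was configured. */
static void raeth_dvt_report_ge1_mode(void)
{
#if defined(CONFIG_GE1_RGMII_FORCE_1000)
	pr_info("raeth: GE1 forced to 1000M RGMII\n");
#elif defined(CONFIG_GE1_TRGMII_FORCE_1200) || \
      defined(CONFIG_GE1_TRGMII_FORCE_2000) || \
      defined(CONFIG_GE1_TRGMII_FORCE_2600)
	pr_info("raeth: GE1 uses TRGMII\n");
#elif defined(CONFIG_GE1_RGMII_AN) || defined(CONFIG_GE1_MII_AN)
	pr_info("raeth: GE1 uses auto-negotiation\n");
#else
	pr_info("raeth: GE1 not connected\n");
#endif
}
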
513 --- /dev/null
514 +++ b/drivers/net/ethernet/raeth/Makefile
515 @@ -0,0 +1,67 @@
516 +obj-$(CONFIG_RAETH) += raeth.o
517 +raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
518 +
519 +ifeq ($(CONFIG_MTK_SMB_HOOK),y)
520 +obj-y += smb_hook.o
521 +obj-m += smb.o
522 +smb-objs := smb_nf.o
523 +endif
524 +
525 +#EXTRA_CFLAGS   += -DCONFIG_RAETH_MULTIPLE_RX_RING
526 +
527 +ifeq ($(CONFIG_RAETH_QOS),y)
528 +raeth-objs += ra_qos.o
529 +endif
530 +
531 +ifeq ($(CONFIG_RAETH_QDMA),y)
532 +raeth-objs += raether_qdma.o
533 +endif
534 +
535 +ifneq ($(CONFIG_RAETH_QDMA),y)
536 +raeth-objs += raether_pdma.o
537 +endif
538 +
539 +raeth-objs += raether.o
540 +
541 +ifeq ($(CONFIG_ETHTOOL),y)
542 +raeth-objs += ra_ethtool.o
543 +endif
544 +
545 +ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
546 +raeth-objs += mcast.o
547 +endif
548 +
549 +ifeq ($(CONFIG_RAETH_NETLINK),y)
550 +raeth-objs += ra_netlink.o
551 +endif
552 +
553 +ifeq ($(CONFIG_RAETH_PDMA_DVT),y)
554 +raeth-objs += dvt/raether_pdma_dvt.o
555 +obj-m += dvt/pkt_gen.o
556 +obj-m += dvt/pkt_gen_udp_frag.o
557 +obj-m += dvt/pkt_gen_tcp_frag.o
558 +endif
559 +
560 +ifeq ($(CONFIG_RAETH_HW_LRO),y)
561 +raeth-objs += raether_hwlro.o
562 +endif
563 +
564 +ifeq ($(CONFIG_RAETH_GMAC2),y)
565 +EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
566 +endif
567 +
568 +ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
569 +EXTRA_CFLAGS   += -DMEMORY_OPTIMIZATION
570 +endif
571 +
572 +ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
573 +EXTRA_CFLAGS   += -DMEMORY_OPTIMIZATION
574 +endif
575 +
576 +ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
577 +EXTRA_CFLAGS   += -DWORKQUEUE_BH
578 +endif
579 +
580 +ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
581 +EXTRA_CFLAGS   += -DTASKLET_WORKQUEUE_SW
582 +endif
583 --- /dev/null
584 +++ b/drivers/net/ethernet/raeth/Makefile.release
585 @@ -0,0 +1,60 @@
586 +obj-$(CONFIG_RAETH) += raeth.o
587 +raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
588 +
589 +ifeq ($(CONFIG_MTK_SMB_HOOK),y)
590 +obj-y += smb_hook.o
591 +obj-m += smb.o
592 +smb-objs := smb_nf.o
593 +endif
594 +
595 +#EXTRA_CFLAGS   += -DCONFIG_RAETH_MULTIPLE_RX_RING
596 +
597 +ifeq ($(CONFIG_RAETH_QOS),y)
598 +raeth-objs += ra_qos.o
599 +endif
600 +
601 +ifeq ($(CONFIG_RAETH_QDMA),y)
602 +raeth-objs += raether_qdma.o
603 +endif
604 +
605 +ifneq ($(CONFIG_RAETH_QDMA),y)
606 +raeth-objs += raether_pdma.o
607 +endif
608 +
609 +raeth-objs += raether.o
610 +
611 +ifeq ($(CONFIG_ETHTOOL),y)
612 +raeth-objs += ra_ethtool.o
613 +endif
614 +
615 +ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
616 +raeth-objs += mcast.o
617 +endif
618 +
619 +ifeq ($(CONFIG_RAETH_NETLINK),y)
620 +raeth-objs += ra_netlink.o
621 +endif
622 +
623 +ifeq ($(CONFIG_RAETH_HW_LRO),y)
624 +raeth-objs += raether_hwlro.o
625 +endif
626 +
627 +ifeq ($(CONFIG_RAETH_GMAC2),y)
628 +EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
629 +endif
630 +
631 +ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
632 +EXTRA_CFLAGS   += -DMEMORY_OPTIMIZATION
633 +endif
634 +
635 +ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
636 +EXTRA_CFLAGS   += -DMEMORY_OPTIMIZATION
637 +endif
638 +
639 +ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
640 +EXTRA_CFLAGS   += -DWORKQUEUE_BH
641 +endif
642 +
643 +ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
644 +EXTRA_CFLAGS   += -DTASKLET_WORKQUEUE_SW
645 +endif
646 --- /dev/null
647 +++ b/drivers/net/ethernet/raeth/csr_netlink.h
648 @@ -0,0 +1,27 @@
649 +#ifndef        CSR_NETLINK_H
650 +#define CSR_NETLINK_H
651 +
652 +#define        CSR_NETLINK     30
653 +#define        CSR_READ        0
654 +#define        CSR_WRITE       1
655 +#define        CSR_TEST        2
656 +
657 +#define RALINK_CSR_GROUP        2882   
658 +
659 +typedef struct rt2880_csr_msg {
660 +       int     enable;
661 +       char    reg_name[32];
662 +       unsigned long address;
663 +       unsigned long default_value;
664 +       unsigned long reserved_bits;    /* 1 : not reserved, 0 : reserved */
665 +       unsigned long write_mask;
666 +       unsigned long write_value;
667 +       int     status;
668 +} CSR_MSG;
669 +
670 +int csr_msg_send(CSR_MSG* msg);
671 +int csr_msg_recv(void);
672 +
673 +// static CSR_MSG      input_csr_msg;
674 +
675 +#endif
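
The header above defines the CSR_MSG exchange used to read or write chip registers over a netlink channel (group RALINK_CSR_GROUP). A minimal usage sketch follows; whether the enable field carries the CSR_READ/CSR_WRITE opcode is an assumption, and the register name and address are placeholders, so treat this as an illustration of the structure layout rather than a documented API.

#include <linux/string.h>
#include "csr_netlink.h"

/* Hypothetical example: request a read of one (placeholder) register. */
static int dvt_read_example_register(void)
{
	CSR_MSG msg;

	memset(&msg, 0, sizeof(msg));
	msg.enable = CSR_READ;          /* assumed to select the operation */
	strncpy(msg.reg_name, "EXAMPLE_REG", sizeof(msg.reg_name) - 1);
	msg.address = 0x1b100000;       /* placeholder CSR address */

	return csr_msg_send(&msg);      /* declared above; 0 assumed to mean success */
}
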
676 --- /dev/null
677 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen.c
678 @@ -0,0 +1,88 @@
679 +//#include <linux/config.h>
680 +#include <linux/version.h>
681 +#include <linux/module.h>
682 +#include <linux/skbuff.h>
683 +#include <linux/kernel.h>
684 +#include <linux/init.h>
685 +#include <linux/netfilter.h>
686 +#include <linux/netdevice.h>
687 +#include <linux/types.h>
688 +#include <asm/uaccess.h>
689 +#include <linux/moduleparam.h>
690 +
691 +char *ifname="eth3";
692 +
693 +static int32_t PktGenInitMod(void)
694 +{
695 +
696 +    struct net_device *dev;
697 +    struct sk_buff *skb;
698 +    int i=0;
699 +
700 +    unsigned char pkt[]={
701 +       //0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // dest bcast mac
702 +       0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest macA
703 +       //0x00, 0x30, 0xdb, 0x02, 0x02, 0x01, // dest macB
704 +       0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
705 +       0x81, 0x00, // vlan tag
706 +       //0x81, 0x10, // vlan tag
707 +       //0x87, 0x39, // do not learn
708 +       //0xc1, 0x03, // vlan tag SA=0, VID=2, LV=1
709 +       0x00, 0x03, // pri=0, vlan=3
710 +       0x08, 0x00, // eth type=ip
711 +       0x45, 0x00, 0x00, 0x30, 0x12, 0x34, 0x40, 0x00, 0xff, 0x06,
712 +       0x40, 0x74, 0x0a, 0x0a, 0x1e, 0x0a, 0x0a, 0x0a, 0x1e, 0x0b,
713 +       0x00, 0x1e, 0x00, 0x28, 0x00, 0x1c, 0x81, 0x06, 0x00, 0x00,
714 +       0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
715 +       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
716 +
717 +    skb = alloc_skb(256, GFP_ATOMIC);
718 +
719 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
720 +    if((dev=dev_get_by_name(&init_net,ifname))){
721 +#else
722 +    if((dev=dev_get_by_name(ifname))){
723 +#endif
724 +
725 +
726 +
727 +       skb->dev=dev;
728 +       skb_put(skb,sizeof(pkt));
729 +       memcpy(skb->data, pkt, sizeof(pkt));
730 +
731 +       printk("send pkt(len=%d) to %s\n", skb->len, skb->dev->name);
732 +
733 +
734 +       for(i=0;i<sizeof(pkt);i++){
735 +           if(i%16==0) {
736 +               printk("\n");
737 +           }
738 +           printk("%02X-",skb->data[i]);
739 +       }
740 +
741 +       dev_queue_xmit(skb);
742 +    }else{
743 +       printk("interface %s not found\n",ifname);
744 +       return 1;
745 +    }
746 +
747 +    return 0;
748 +}
749 +
750 +static void PktGenCleanupMod(void)
751 +{
752 +}
753 +
754 +module_init(PktGenInitMod);
755 +module_exit(PktGenCleanupMod);
756 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
757 +MODULE_PARM (ifname, "s");
758 +#else
759 +module_param (ifname, charp, 0);
760 +#endif
761 +
762 +MODULE_DESCRIPTION("Ralink PktGen Module");
763 +MODULE_AUTHOR("Steven Liu");
764 +MODULE_LICENSE("Proprietary");
765 +MODULE_PARM_DESC (ifname, "interface name");
766 +
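
For clarity on the hard-coded frame above: the two bytes 0x81 0x00 are the 802.1Q TPID and the following 0x00 0x03 are the tag control information (TCI), which decodes to priority 0 and VLAN ID 3, matching the "pri=0, vlan=3" comment. The small user-space program below is an editorial sketch (not part of the patch) showing the decoding.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tci = (0x00 << 8) | 0x03;   /* TCI bytes taken from pkt[] */
	unsigned pri = (tci >> 13) & 0x7;    /* PCP: upper 3 bits          */
	unsigned dei = (tci >> 12) & 0x1;    /* DEI/CFI bit                */
	unsigned vid = tci & 0x0fff;         /* VLAN ID: lower 12 bits     */

	printf("pri=%u dei=%u vid=%u\n", pri, dei, vid);   /* prints pri=0 dei=0 vid=3 */
	return 0;
}
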
767 --- /dev/null
768 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
769 @@ -0,0 +1,138 @@
770 +//#include <linux/config.h>
771 +#include <linux/version.h>
772 +#include <linux/module.h>
773 +#include <linux/skbuff.h>
774 +#include <linux/kernel.h>
775 +#include <linux/init.h>
776 +#include <linux/netfilter.h>
777 +#include <linux/netdevice.h>
778 +#include <linux/types.h>
779 +#include <asm/uaccess.h>
780 +#include <linux/moduleparam.h>
781 +
782 +char *ifname="eth3";
783 +
784 +
785 +static int32_t PktGenInitMod(void)
786 +{
787 +    unsigned char pkt_1[]={
788 +    0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
789 +    0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
790 +    0x08, 0x00, // type: ip
791 +    0x45, 0x00, 0x00, 0x34, // ip: ..., total len (0x034 = 52)
792 +    0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
793 +    0x80, 0x06, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
794 +    0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
795 +    0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
796 +    0x0d, 0xd5,  //tcp src port
797 +    0x13, 0x89,  //tcp dst port
798 +    0x40, 0xf5, 0x15, 0x04,  //tcp sequence number
799 +    0xf6, 0x4f, 0x1e, 0x31,  //tcp ack number
800 +    0x50, 0x10, 0xfc, 0x00,  //tcp flags, win size
801 +    0xf1, 0xfe, 0x00, 0x00,  //tcp checksum (0xf1fe)
802 +    0x01, 0x02, 0x03, 0x04, 0x05,  //payload (12 bytes)
803 +    0x06, 0x07, 0x08, 0x09, 0x0a,
804 +    0x0b, 0x0c
805 +    };
806 +    
807 +    unsigned char pkt_2[]={
808 +    0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
809 +    0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
810 +    0x08, 0x00, // type: ip
811 +    0x45, 0x00, 0x00, 0x20, // ip: ..., total len (0x020 = 32)
812 +    0xa1, 0x78, 0x00, 0x04, // ip: id, frag, frag offset (32)
813 +    0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
814 +    0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
815 +    0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
816 +    0x11, 0x12, 0x13, 0x14, 0x15,  //payload (12 bytes)
817 +    0x16, 0x17, 0x18, 0x19, 0x1a,
818 +    0x1b, 0x1c
819 +    };
820 +
821 +    struct net_device *dev;
822 +    struct sk_buff *skb_1;
823 +    struct sk_buff *skb_2;
824 +    int i=0;
825 +
826 +    skb_1 = alloc_skb(256, GFP_ATOMIC);
827 +    skb_2 = alloc_skb(256, GFP_ATOMIC);
828 +
829 +
830 +#if 1
831 +/* send packet 1 */
832 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
833 +    if((dev=dev_get_by_name(&init_net,ifname))){
834 +#else
835 +    if((dev=dev_get_by_name(ifname))){
836 +#endif
837 +
838 +       skb_1->dev=dev;
839 +       skb_put(skb_1,sizeof(pkt_1));
840 +       memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
841 +
842 +       printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
843 +
844 +
845 +       for(i=0;i<sizeof(pkt_1);i++){
846 +           if(i%16==0) {
847 +               printk("\n");
848 +           }
849 +           printk("%02X-",skb_1->data[i]);
850 +       }
851 +
852 +       dev_queue_xmit(skb_1);
853 +    }else{
854 +       printk("interface %s not found\n",ifname);
855 +       return 1;
856 +    }
857 +#endif
858 +
859 +#if 1
860 +/* send packet 2 */
861 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
862 +        if((dev=dev_get_by_name(&init_net,ifname))){
863 +#else
864 +        if((dev=dev_get_by_name(ifname))){
865 +#endif
866 +
867 +       skb_2->dev=dev;
868 +       skb_put(skb_2,sizeof(pkt_2));
869 +       memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
870 +
871 +       printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
872 +
873 +
874 +       for(i=0;i<sizeof(pkt_2);i++){
875 +           if(i%16==0) {
876 +               printk("\n");
877 +           }
878 +           printk("%02X-",skb_2->data[i]);
879 +       }
880 +
881 +       dev_queue_xmit(skb_2);
882 +    }else{
883 +       printk("interface %s not found\n",ifname);
884 +       return 1;
885 +    }
886 +#endif
887 +
888 +    return 0;
889 +}
890 +
891 +static void PktGenCleanupMod(void)
892 +{
893 +}
894 +
895 +module_init(PktGenInitMod);
896 +module_exit(PktGenCleanupMod);
897 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
898 +MODULE_PARM (ifname, "s");
899 +#else
900 +module_param (ifname, charp, 0);
901 +#endif
902 +
903 +MODULE_DESCRIPTION("Ralink PktGen Module");
904 +MODULE_AUTHOR("Steven Liu");
905 +MODULE_LICENSE("Proprietary");
906 +MODULE_PARM_DESC (ifname, "interface name");
907 +
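
For reference, the flags/fragment-offset words used by the two fragments above are 0x2000 (More Fragments set, offset 0) for pkt_1 and 0x0004 (MF clear, offset 4 x 8 = 32 bytes) for pkt_2, which lines up with the 32 data bytes carried by the first fragment (total length 52 minus the 20-byte IP header). The editorial sketch below (not part of the patch) decodes the field.

#include <stdint.h>
#include <stdio.h>

static void decode_frag_field(uint16_t field)
{
	unsigned df = (field >> 14) & 0x1;             /* Don't Fragment flag    */
	unsigned mf = (field >> 13) & 0x1;             /* More Fragments flag    */
	unsigned offset_bytes = (field & 0x1fff) * 8;  /* offset in 8-byte units */

	printf("DF=%u MF=%u offset=%u bytes\n", df, mf, offset_bytes);
}

int main(void)
{
	decode_frag_field(0x2000);   /* pkt_1: DF=0 MF=1 offset=0  */
	decode_frag_field(0x0004);   /* pkt_2: DF=0 MF=0 offset=32 */
	return 0;
}
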
908 --- /dev/null
909 +++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
910 @@ -0,0 +1,191 @@
911 +//#include <linux/config.h>
912 +#include <linux/version.h>
913 +#include <linux/module.h>
914 +#include <linux/skbuff.h>
915 +#include <linux/kernel.h>
916 +#include <linux/init.h>
917 +#include <linux/netfilter.h>
918 +#include <linux/netdevice.h>
919 +#include <linux/types.h>
920 +#include <asm/uaccess.h>
921 +#include <linux/moduleparam.h>
922 +
923 +char *ifname="eth3";
924 +
925 +
926 +static int32_t PktGenInitMod(void)
927 +{
928 +#if 0
929 +    unsigned char pkt_0[]={
930 +//    0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
931 +    0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
932 +    0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
933 +    0x08, 0x00, // type: ip
934 +    0x45, 0x00, 0x00, 0x26, // ip: ..., total len (0x026 = 38)
935 +//    0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
936 +    0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
937 +    0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
938 +    0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
939 +//    0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
940 +    0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
941 +    0xca, 0x7b,  //udp src port
942 +    0x13, 0x89,  //udp dst port
943 +    0x00, 0x12,  //udp len (0x012 = 18) 
944 +    0x2f, 0x96,  //udp checksum (0x2f96)
945 +    0x01, 0x02, 0x03, 0x04, 0x05,  //payload (10 bytes)
946 +    0x06, 0x07, 0x08, 0x09, 0x0a
947 +    };
948 +#endif
949 +
950 +    unsigned char pkt_1[]={
951 +//    0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
952 +    0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
953 +    0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
954 +    0x08, 0x00, // type: ip
955 +    0x45, 0x00, 0x00, 0x24, // ip: ..., total len (0x024 = 36)
956 +    0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
957 +//    0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
958 +    0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
959 +    0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
960 +//    0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
961 +    0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
962 +    0xca, 0x7b,  //udp src port
963 +    0x13, 0x89,  //udp dst port
964 +    0x00, 0x1a,  //udp len (0x01a = 26) 
965 +    0x2f, 0x96,  //udp checksum (0x2f96)
966 +    0x01, 0x02, 0x03, 0x04, 0x05,  //payload (8 bytes)
967 +    0x06, 0x07, 0x08
968 +    };
969 +    
970 +    unsigned char pkt_2[]={
971 +//    0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
972 +    0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
973 +    0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
974 +    0x08, 0x00, // type: ip
975 +    0x45, 0x00, 0x00, 0x1e, // ip: ..., total len (0x01e = 30)
976 +    0xa1, 0x78, 0x00, 0x02, // ip: id, frag, frag offset (16)
977 +    0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
978 +    0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
979 +//   0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
980 +    0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
981 +    0x11, 0x12, 0x13, 0x14, 0x15,  //payload (10 bytes)
982 +    0x16, 0x17, 0x18, 0x19, 0x1a
983 +    };
984 +
985 +    struct net_device *dev;
986 +//    struct sk_buff *skb_0;
987 +    struct sk_buff *skb_1;
988 +    struct sk_buff *skb_2;
989 +    int i=0;
990 +
991 +//    skb_0 = alloc_skb(256, GFP_ATOMIC);
992 +    skb_1 = alloc_skb(256, GFP_ATOMIC);
993 +    skb_2 = alloc_skb(256, GFP_ATOMIC);
994 +
995 +#if 0
996 +/* send packet 0 */
997 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
998 +    if((dev=dev_get_by_name(&init_net,ifname))){
999 +#else
1000 +    if((dev=dev_get_by_name(ifname))){
1001 +#endif
1002 +
1003 +       skb_0->dev=dev;
1004 +       skb_put(skb_0,sizeof(pkt_0));
1005 +       memcpy(skb_0->data, pkt_0, sizeof(pkt_0));
1006 +
1007 +       printk("send pkt(len=%d) to %s\n", skb_0->len, skb_0->dev->name);
1008 +
1009 +
1010 +       for(i=0;i<sizeof(pkt_0);i++){
1011 +           if(i%16==0) {
1012 +               printk("\n");
1013 +           }
1014 +           printk("%02X-",skb_0->data[i]);
1015 +       }
1016 +
1017 +       dev_queue_xmit(skb_0);
1018 +    }else{
1019 +       printk("interface %s not found\n",ifname);
1020 +       return 1;
1021 +    }
1022 +#endif
1023 +
1024 +#if 1
1025 +/* send packet 1 */
1026 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1027 +    if((dev=dev_get_by_name(&init_net,ifname))){
1028 +#else
1029 +    if((dev=dev_get_by_name(ifname))){
1030 +#endif
1031 +
1032 +       skb_1->dev=dev;
1033 +       skb_put(skb_1,sizeof(pkt_1));
1034 +       memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
1035 +
1036 +       printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
1037 +
1038 +
1039 +       for(i=0;i<sizeof(pkt_1);i++){
1040 +           if(i%16==0) {
1041 +               printk("\n");
1042 +           }
1043 +           printk("%02X-",skb_1->data[i]);
1044 +       }
1045 +
1046 +       dev_queue_xmit(skb_1);
1047 +    }else{
1048 +       printk("interface %s not found\n",ifname);
1049 +       return 1;
1050 +    }
1051 +#endif
1052 +
1053 +#if 1
1054 +/* send packet 2 */
1055 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
1056 +        if((dev=dev_get_by_name(&init_net,ifname))){
1057 +#else
1058 +        if((dev=dev_get_by_name(ifname))){
1059 +#endif
1060 +
1061 +       skb_2->dev=dev;
1062 +       skb_put(skb_2,sizeof(pkt_2));
1063 +       memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
1064 +
1065 +       printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
1066 +
1067 +
1068 +       for(i=0;i<sizeof(pkt_2);i++){
1069 +           if(i%16==0) {
1070 +               printk("\n");
1071 +           }
1072 +           printk("%02X-",skb_2->data[i]);
1073 +       }
1074 +
1075 +       dev_queue_xmit(skb_2);
1076 +    }else{
1077 +       printk("interface %s not found\n",ifname);
1078 +       return 1;
1079 +    }
1080 +#endif
1081 +
1082 +    return 0;
1083 +}
1084 +
1085 +static void PktGenCleanupMod(void)
1086 +{
1087 +}
1088 +
1089 +module_init(PktGenInitMod);
1090 +module_exit(PktGenCleanupMod);
1091 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
1092 +MODULE_PARM (ifname, "s");
1093 +#else
1094 +module_param (ifname, charp, 0);
1095 +#endif
1096 +
1097 +MODULE_DESCRIPTION("Ralink PktGen Module");
1098 +MODULE_AUTHOR("Steven Liu");
1099 +MODULE_LICENSE("Proprietary");
1100 +MODULE_PARM_DESC (ifname, "interface name");
1101 +
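
A quick consistency check on the UDP fragments above (editorial, not part of the patch): fragment 1 carries the 8-byte UDP header plus 8 payload bytes (IP total length 36 minus the 20-byte header), fragment 2 carries 10 payload bytes at offset 0x0002 x 8 = 16 bytes, and the UDP length field 0x001a (26) equals the header plus the 18 payload bytes split across both fragments.

#include <stdio.h>

int main(void)
{
	int frag1_data = 36 - 20;                        /* IP data in fragment 1: 16 */
	int frag2_data = 30 - 20;                        /* IP data in fragment 2: 10 */
	int udp_payload = (frag1_data - 8) + frag2_data; /* strip the UDP header: 18  */

	printf("expected UDP length field: %d\n", 8 + udp_payload);   /* prints 26 */
	return 0;
}
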
1102 --- /dev/null
1103 +++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
1104 @@ -0,0 +1,1527 @@
1105 +#include <linux/module.h>
1106 +#include <linux/version.h>
1107 +#include <linux/kernel.h>
1108 +#include <linux/types.h>
1109 +#include <linux/pci.h>
1110 +#include <linux/init.h>
1111 +#include <linux/skbuff.h>
1112 +#include <linux/if_vlan.h>
1113 +#include <linux/if_ether.h>
1114 +#include <linux/fs.h>
1115 +#include <asm/uaccess.h>
1116 +#include <asm/rt2880/surfboardint.h>
1117 +#if defined(CONFIG_RAETH_TSO)
1118 +#include <linux/tcp.h>
1119 +#include <net/ipv6.h>
1120 +#include <linux/ip.h>
1121 +#include <net/ip.h>
1122 +#include <net/tcp.h>
1123 +#include <linux/in.h>
1124 +#include <linux/ppp_defs.h>
1125 +#include <linux/if_pppox.h>
1126 +#endif
1127 +#if defined(CONFIG_RAETH_LRO)
1128 +#include <linux/inet_lro.h>
1129 +#endif
1130 +#include <linux/delay.h>
1131 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
1132 +#include <linux/sched.h>
1133 +#endif
1134 +
1135 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
1136 +#include <asm/rt2880/rt_mmap.h>
1137 +#else
1138 +#include <linux/libata-compat.h>
1139 +#endif
1140 +
1141 +#include "../ra2882ethreg.h"
1142 +#include "../raether.h"
1143 +#include "../ra_mac.h"
1144 +#include "../ra_ioctl.h"
1145 +#include "../ra_rfrw.h"
1146 +#ifdef CONFIG_RAETH_NETLINK
1147 +#include "../ra_netlink.h"
1148 +#endif
1149 +#if defined(CONFIG_RAETH_QOS)
1150 +#include "../ra_qos.h"
1151 +#endif
1152 +#include "raether_pdma_dvt.h"
1153 +
1154 +/* Global variables */
1155 +static unsigned int g_pdma_dvt_show_config;
1156 +static unsigned int g_pdma_dvt_rx_test_config;
1157 +static unsigned int g_pdma_dvt_tx_test_config;
1158 +static unsigned int g_pdma_dvt_debug_test_config;
1159 +static unsigned int g_pdma_dvt_lro_test_config;
1160 +
1161 +unsigned int g_pdma_dev_lanport = 0;
1162 +unsigned int g_pdma_dev_wanport = 0;
1163 +
1164 +void skb_dump(struct sk_buff *sk)
1165 +{
1166 +       unsigned int i;
1167 +
1168 +       printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
1169 +              sk->dev ? sk->dev->name : "ip stack", sk->len, sk->truesize,
1170 +              skb_headroom(sk), skb_tailroom(sk));
1171 +
1172 +       /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) { */
1173 +       /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) { */
1174 +       for (i = (unsigned int)sk->head; i <= (unsigned int)sk->data + 60; i++) {
1175 +               if ((i % 20) == 0)
1176 +                       printk("\n");
1177 +               if (i == (unsigned int)sk->data)
1178 +                       printk("{");
1179 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 21)
1180 +               if (i == (unsigned int)sk->transport_header)
1181 +                       printk("#");
1182 +               if (i == (unsigned int)sk->network_header)
1183 +                       printk("|");
1184 +               if (i == (unsigned int)sk->mac_header)
1185 +                       printk("*");
1186 +#else
1187 +               if (i == (unsigned int)sk->h.raw)
1188 +                       printk("#");
1189 +               if (i == (unsigned int)sk->nh.raw)
1190 +                       printk("|");
1191 +               if (i == (unsigned int)sk->mac.raw)
1192 +                       printk("*");
1193 +#endif
1194 +               printk("%02X-", *((unsigned char *)i));
1195 +               if (i == (unsigned int)sk->tail)
1196 +                       printk("}");
1197 +       }
1198 +       printk("\n");
1199 +}
1200 +
1201 +#if defined(CONFIG_RAETH_HW_LRO)
1202 +/* PDMA LRO test functions start */
1203 +int pdma_lro_disable_dvt(void)
1204 +{
1205 +       unsigned int regVal = 0;
1206 +
1207 +       printk("pdma_lro_disable_dvt()\n");
1208 +
1209 +       /* 1. Invalidate LRO ring1~3 */
1210 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1211 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1212 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1213 +
1214 +       /* 2. Polling relinquish */
1215 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1216 +       }
1217 +
1218 +       /* 3. Disable LRO */
1219 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1220 +       regVal &= ~(PDMA_LRO_EN);
1221 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1222 +
1223 +#if 0
1224 +       /* 4. Disable non-lro multiple rx */
1225 +       SET_PDMA_NON_LRO_MULTI_EN(0);
1226 +
1227 +       /* 5.1. Set GDM1 to ring0 */
1228 +       SET_GDM_PID1_RXID_SEL(0);
1229 +       /* 5.2. Set GDM2 to ring0 */
1230 +       SET_GDM_PID2_RXID_SEL(0);
1231 +#endif
1232 +
1233 +       return 0;
1234 +}
1235 +
1236 +int pdma_lro_force_aggre_dvt(void)
1237 +{
1238 +       unsigned int regVal = 0;
1239 +       unsigned int ip;
1240 +
1241 +       printk("pdma_lro_force_aggre_dvt()\n");
1242 +
1243 +/* pdma rx ring1 */
1244 +       /* 1. Set RX ring mode to force port */
1245 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
1246 +
1247 +       /* 2. Configure lro ring */
1248 +       /* 2.1 set src/destination TCP ports */
1249 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
1250 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
1251 +       /* 2.2 set src/destination IPs */
1252 +       str_to_ip(&ip, "10.10.10.3");
1253 +       sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
1254 +       str_to_ip(&ip, "10.10.10.100");
1255 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1256 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1257 +
1258 +       /* 2.3 Valid LRO ring */
1259 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1260 +
1261 +       /* 2.4 Set AGE timer */
1262 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
1263 +
1264 +       /* 2.5 Set max AGG timer */
1265 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
1266 +
1267 +       /* 2.6 Set max LRO agg count */
1268 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
1269 +
1270 +       /* 3. IPv4 checksum update enable */
1271 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1272 +
1273 +       /* 4. Polling relinquish */
1274 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1275 +       }
1276 +
1277 +       /* 5. Enable LRO */
1278 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1279 +       regVal |= PDMA_LRO_EN;
1280 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1281 +
1282 +       return 0;
1283 +}
1284 +
1285 +int pdma_lro_auto_aggre_dvt(void)
1286 +{
1287 +       unsigned int regVal = 0;
1288 +       unsigned int ip;
1289 +
1290 +       printk("pdma_lro_auto_aggre_dvt()\n");
1291 +
1292 +       /* 1.1 Set my IP_1 */
1293 +       str_to_ip(&ip, "10.10.10.254");
1294 +       sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1295 +       sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1296 +       sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1297 +       sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1298 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
1299 +
1300 +       /* 1.2 Set my IP_2 */
1301 +       str_to_ip(&ip, "10.10.20.254");
1302 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1303 +       sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
1304 +       sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
1305 +       sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
1306 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1307 +
1308 +       /* 1.3 Set my IP_3 */
1309 +       sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
1310 +       sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
1311 +       sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
1312 +       sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
1313 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1314 +
1315 +       /* 1.4 Set my IP_4 */
1316 +       sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
1317 +       sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
1318 +       sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
1319 +       sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
1320 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1321 +
1322 +       /* 2.1 Set RX ring1~3 to auto-learn modes */
1323 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1324 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1325 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1326 +
1327 +       /* 2.2 Valid LRO ring */
1328 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1329 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1330 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1331 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1332 +
1333 +       /* 2.3 Set AGE timer */
1334 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
1335 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, 0);
1336 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, 0);
1337 +
1338 +       /* 2.4 Set max AGG timer */
1339 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
1340 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, 0);
1341 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, 0);
1342 +
1343 +       /* 2.5 Set max LRO agg count */
1344 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
1345 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
1346 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
1347 +
1348 +       /* 3.0 IPv6 LRO enable */
1349 +       SET_PDMA_LRO_IPV6_EN(1);
1350 +
1351 +       /* 3.1 IPv4 checksum update enable */
1352 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1353 +
1354 +       /* 3.2 switch priority comparison to byte count mode */
1355 +       SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1356 +
1357 +       /* 3.3 bandwidth threshold setting */
1358 +       SET_PDMA_LRO_BW_THRESHOLD(0);
1359 +
1360 +       /* 3.4 auto-learn score delta setting */
1361 +       sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1362 +
1363 +       /* 3.5 Set ALT timer to 20us: (unit: 20us) */
1364 +       SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
1365 +       /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
1366 +       SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
1367 +
1368 +       /* 4. Polling relinquish */
1369 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1370 +       }
1371 +
1372 +       /* 5. Enable LRO */
1373 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1374 +       regVal |= PDMA_LRO_EN;
1375 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1376 +
1377 +       return 0;
1378 +}
1379 +
1380 +int pdma_lro_auto_ipv6_dvt(void)
1381 +{
1382 +       unsigned int regVal = 0;
1383 +
1384 +       printk("pdma_lro_auto_ipv6_dvt()\n");
1385 +
1386 +       /* 1. Set my IP */
1387 +       sysRegWrite(LRO_RX_RING1_DIP_DW3, 0x20010238);
1388 +       sysRegWrite(LRO_RX_RING1_DIP_DW2, 0x08000000);
1389 +       sysRegWrite(LRO_RX_RING1_DIP_DW1, 0x00000000);
1390 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, 0x00000254);
1391 +
1392 +       /* 2.1 Set RX ring1~3 to auto-learn modes */
1393 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1394 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1395 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1396 +
1397 +       /* 2.2 Valid LRO ring */
1398 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1399 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1400 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1401 +
1402 +       /* 2.3 Set AGE timer */
1403 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1404 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1405 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1406 +
1407 +       /* 3.0 IPv6 LRO enable */
1408 +       SET_PDMA_LRO_IPV6_EN(1);
1409 +
1410 +       /* 3.1 IPv4 checksum update enable */
1411 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1412 +
1413 +       /* 3.2 switch priority comparison to byte count mode */
1414 +       SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1415 +
1416 +       /* 3.3 bandwidth threshold setting */
1417 +       SET_PDMA_LRO_BW_THRESHOLD(0);
1418 +
1419 +       /* 3.4 auto-learn score delta setting */
1420 +       sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1421 +
1422 +       /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1423 +       SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1424 +       /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1425 +       SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1426 +
1427 +       /* 3.7 Set max AGG timer: 10 msec. */
1428 +       SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1429 +
1430 +       /* 4. Polling relinquish */
1431 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1432 +       }
1433 +
1434 +       /* 5. Enable LRO */
1435 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1436 +       regVal |= PDMA_LRO_EN;
1437 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1438 +
1439 +       return 0;
1440 +}
1441 +
1442 +int pdma_lro_auto_myIP_dvt(void)
1443 +{
1444 +       unsigned int regVal = 0;
1445 +       unsigned int ip;
1446 +
1447 +       printk("pdma_lro_auto_myIP_dvt()\n");
1448 +
1449 +       /* 1.1 Set my IP_1 */
1450 +       str_to_ip(&ip, "10.10.10.254");
1451 +       sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1452 +       sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1453 +       sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1454 +       sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1455 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
1456 +       /* 1.2 Set my IP_2 */
1457 +       str_to_ip(&ip, "10.10.20.254");
1458 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1459 +       sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
1460 +       sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
1461 +       sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
1462 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1463 +       /* 1.3 Set my IP_3 */
1464 +       sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
1465 +       sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
1466 +       sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
1467 +       sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
1468 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1469 +       /* 1.4 Set my IP_4 */
1470 +       sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
1471 +       sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
1472 +       sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
1473 +       sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
1474 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1475 +
1476 +       /* 2.1 Set RX ring1~3 to auto-learn modes */
1477 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1478 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1479 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1480 +
1481 +       /* 2.2 Valid LRO ring */
1482 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1483 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1484 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1485 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1486 +
1487 +       /* 2.3 Set AGE timer */
1488 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1489 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1490 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1491 +
1492 +       /* 3.0 IPv6 LRO enable */
1493 +       SET_PDMA_LRO_IPV6_EN(1);
1494 +
1495 +       /* 3.1 IPv4 checksum update enable */
1496 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1497 +
1498 +       /* 3.2 switch priority comparison to byte count mode */
1499 +       SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1500 +
1501 +       /* 3.3 bandwidth threshold setting */
1502 +       SET_PDMA_LRO_BW_THRESHOLD(0);
1503 +
1504 +       /* 3.4 auto-learn score delta setting */
1505 +       sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1506 +
1507 +       /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1508 +       SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1509 +       /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1510 +       SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
1511 +
1512 +       /* 3.7 Set max AGG timer: 10 msec. */
1513 +       SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1514 +
1515 +       /* 4. Polling relinquish */
1516 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
1517 +       }
1518 +
1519 +       /* 5. Enable LRO */
1520 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1521 +       regVal |= PDMA_LRO_EN;
1522 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1523 +
1524 +       return 0;
1525 +}
1526 +
1527 +int pdma_lro_dly_int_dvt(int index)
1528 +{
1529 +       unsigned int regVal = 0;
1530 +       unsigned int ip;
1531 +
1532 +       printk("pdma_lro_dly_int_dvt(%d)\n", index);
1533 +
1534 +#if 0
1535 +       /* 1.1 Set my IP_1 */
1536 +       /* str_to_ip( &ip, "10.10.10.254" ); */
1537 +       str_to_ip(&ip, "10.10.10.100");
1538 +       sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
1539 +       sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
1540 +       sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
1541 +       sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
1542 +#else
1543 +       /* 1.1 set src/destination TCP ports */
1544 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
1545 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
1546 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 3423);
1547 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 2301);
1548 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 3423);
1549 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 2301);
1550 +       /* 1.2 set src/destination IPs */
1551 +       str_to_ip(&ip, "10.10.10.3");
1552 +       sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
1553 +       str_to_ip(&ip, "10.10.10.100");
1554 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
1555 +       str_to_ip(&ip, "10.10.10.3");
1556 +       sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
1557 +       str_to_ip(&ip, "10.10.10.100");
1558 +       sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
1559 +       str_to_ip(&ip, "10.10.10.3");
1560 +       sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
1561 +       str_to_ip(&ip, "10.10.10.100");
1562 +       sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
1563 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
1564 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
1565 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
1566 +#endif
1567 +
1568 +       if (index == 0) {
1569 +               /* 1.2 Disable DLY_INT for lro ring */
1570 +               SET_PDMA_LRO_DLY_INT_EN(0);
1571 +       } else {
1572 +               /* 1.2 Enable DLY_INT for lro ring */
1573 +               SET_PDMA_LRO_DLY_INT_EN(1);
1574 +       }
1575 +
1576 +       /* 1.3 LRO ring DLY_INT setting */
1577 +       if (index == 1) {
1578 +               sysRegWrite(LRO_RX1_DLY_INT, DELAY_INT_INIT);
1579 +       } else if (index == 2) {
1580 +               sysRegWrite(LRO_RX2_DLY_INT, DELAY_INT_INIT);
1581 +       } else if (index == 3) {
1582 +               sysRegWrite(LRO_RX3_DLY_INT, DELAY_INT_INIT);
1583 +       }
1584 +#if 0
1585 +       /* 2.1 Set RX rings to auto-learn modes */
1586 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
1587 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
1588 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
1589 +#else
1590 +       /* 2.0 set rx ring mode */
1591 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
1592 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_FORCE_PORT);
1593 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_FORCE_PORT);
1594 +
1595 +       /* 2.1 IPv4 force port mode */
1596 +       SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
1597 +       SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
1598 +       SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
1599 +#endif
1600 +
1601 +       /* 2.2 Valid LRO ring */
1602 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
1603 +       if ((index == 0) || (index == 1)) {
1604 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
1605 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1606 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1607 +       } else if (index == 2) {
1608 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1609 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
1610 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
1611 +       } else {
1612 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
1613 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
1614 +               SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
1615 +       }
1616 +
1617 +       /* 2.3 Set AGE timer */
1618 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
1619 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
1620 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
1621 +
1622 +       /* 3.1 IPv4 checksum update enable */
1623 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
1624 +
1625 +       /* 3.2 switch priority comparison to byte count mode */
1626 +       SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
1627 +
1628 +       /* 3.3 bandwidth threshold setting */
1629 +       SET_PDMA_LRO_BW_THRESHOLD(0);
1630 +
1631 +       /* 3.4 auto-learn score delta setting */
1632 +       sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
1633 +
1634 +       /* 3.5 Set ALT timer to 500us: (unit: 20us) */
1635 +       SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
1636 +       /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
1637 +       SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
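+       /* i.e. ALT unit = 25 * 20us = 500us, refresh period = 2000 * 500us = 1 sec */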
1638 +
1639 +       /* 3.7 Set max AGG timer */
1640 +       SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
1641 +
1642 +       /* 4. Poll until relinquish is done */
1643 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
1644 +               ;
1645 +
1646 +       /* 5. Enable LRO */
1647 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
1648 +       regVal |= PDMA_LRO_EN;
1649 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
1650 +
1651 +       return 0;
1652 +}
1653 +
1654 +int pdma_lro_dly_int0_dvt(void)
1655 +{
1656 +       return pdma_lro_dly_int_dvt(0);
1657 +}
1658 +
1659 +int pdma_lro_dly_int1_dvt(void)
1660 +{
1661 +       return pdma_lro_dly_int_dvt(1);
1662 +}
1663 +
1664 +int pdma_lro_dly_int2_dvt(void)
1665 +{
1666 +       return pdma_lro_dly_int_dvt(2);
1667 +}
1668 +
1669 +int pdma_lro_dly_int3_dvt(void)
1670 +{
1671 +       return pdma_lro_dly_int_dvt(3);
1672 +}
1673 +
1674 +#endif /* CONFIG_RAETH_HW_LRO */
1675 +
1676 +#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1677 +int pdma_gdm_rxid_config(void)
1678 +{
1679 +       unsigned int regVal = 0;
1680 +
1681 +       printk("pdma_gdm_rxid_config()\n");
1682 +
1683 +       /* 1. Set RX ring1~3 to pse modes */
1684 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
1685 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_PSE_MODE);
1686 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_PSE_MODE);
1687 +
1688 +       /* 2. Enable non-lro multiple rx */
1689 +       SET_PDMA_NON_LRO_MULTI_EN(1);
1690 +
1691 +       return 0;
1692 +}
1693 +
1694 +int pdma_non_lro_portid_dvt(void)
1695 +{
1696 +       unsigned int regVal = 0;
1697 +
1698 +       printk("pdma_non_lro_portid_dvt()\n");
1699 +
1700 +       /* 1. Set GDM1 to ring3 */
1701 +       SET_GDM_PID1_RXID_SEL(3);
1702 +#if 0
1703 +       /* 2. Set GDM2 to ring1 */
1704 +       SET_GDM_PID2_RXID_SEL(1);
1705 +#endif
1706 +
1707 +       /* 3. Set priority rule: pid */
1708 +       SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
1709 +
1710 +       /* PDMA multi-rx enable */
1711 +       pdma_gdm_rxid_config();
1712 +
1713 +       return 0;
1714 +}
1715 +
1716 +int pdma_non_lro_stag_dvt(void)
1717 +{
1718 +       unsigned int regVal = 0;
1719 +
1720 +       printk("pdma_non_lro_stag_dvt()\n");
1721 +
1722 +       /* 1. Set STAG4 to ring0 */
1723 +       GDM_STAG_RXID_SEL(4, 0);
1724 +       /* 2. Set STAG3 to ring1 */
1725 +       GDM_STAG_RXID_SEL(3, 1);
1726 +       /* 3. Set STAG2 to ring2 */
1727 +       GDM_STAG_RXID_SEL(2, 2);
1728 +       /* 4. Set STAG1 to ring3 */
1729 +       GDM_STAG_RXID_SEL(1, 3);
1730 +
1731 +       /* 5. Set priority rule: stag/pid */
1732 +       SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
1733 +
1734 +       /* PDMA multi-rx enable */
1735 +       pdma_gdm_rxid_config();
1736 +
1737 +       return 0;
1738 +}
1739 +
1740 +int pdma_non_lro_vlan_dvt(void)
1741 +{
1742 +       unsigned int regVal = 0;
1743 +
1744 +       printk("pdma_non_lro_vlan_dvt()\n");
1745 +
1746 +       /* 1. Set vlan priority=3 to ring1 */
1747 +       SET_GDM_VLAN_PRI_RXID_SEL(3, 1);
1748 +       /* 2. Set vlan priority=2 to ring2 */
1749 +       SET_GDM_VLAN_PRI_RXID_SEL(2, 2);
1750 +       /* 3. Set vlan priority=1 to ring3 */
1751 +       SET_GDM_VLAN_PRI_RXID_SEL(1, 3);
1752 +       /* 4. Set vlan priority=0 to ring3 */
1753 +       SET_GDM_VLAN_PRI_RXID_SEL(0, 3);
1754 +
1755 +       /* 5. Set vlan priority=4 to ring1 */
1756 +       SET_GDM_VLAN_PRI_RXID_SEL(4, 1);
1757 +       /* 6. Set vlan priority=5 to ring2 */
1758 +       SET_GDM_VLAN_PRI_RXID_SEL(5, 2);
1759 +       /* 7. Set vlan priority=6 to ring3 */
1760 +       SET_GDM_VLAN_PRI_RXID_SEL(6, 3);
1761 +       /* 8. Set vlan priority=7 to ring3 */
1762 +       SET_GDM_VLAN_PRI_RXID_SEL(7, 3);
1763 +
1764 +       /* 9. Set priority rule: vlan > pid */
1765 +       SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_PID);
1766 +
1767 +       /* PDMA multi-rx enable */
1768 +       pdma_gdm_rxid_config();
1769 +
1770 +       return 0;
1771 +}
1772 +
1773 +int pdma_non_lro_tcpack_dvt(void)
1774 +{
1775 +       unsigned int regVal = 0;
1776 +
1777 +       printk("pdma_non_lro_tcpack_dvt()\n");
1778 +
1779 +       /* 1. Enable TCP ACK with zero payload check */
1780 +       SET_GDM_TCP_ACK_WZPC(1);
1781 +       /* 2. Set TCP ACK to ring3 */
1782 +       SET_GDM_TCP_ACK_RXID_SEL(3);
1783 +
1784 +       /* 3. Set priority rule: ack > pid */
1785 +       SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_PID);
1786 +
1787 +       /* PDMA multi-rx enable */
1788 +       pdma_gdm_rxid_config();
1789 +
1790 +       return 0;
1791 +}
1792 +
1793 +int pdma_non_lro_pri1_dvt(void)
1794 +{
1795 +       unsigned int regVal = 0;
1796 +
1797 +       printk("pdma_non_lro_pri1_dvt()\n");
1798 +
1799 +       /* 1. Set GDM1 to ring0 */
1800 +       SET_GDM_PID1_RXID_SEL(0);
1801 +
1802 +       /* 2.1 Disable TCP ACK with zero payload check */
1803 +       SET_GDM_TCP_ACK_WZPC(0);
1804 +       /* 2.2 Set TCP ACK to ring1 */
1805 +       SET_GDM_TCP_ACK_RXID_SEL(1);
1806 +
1807 +       /* 3. Set vlan priority=1 to ring2 */
1808 +       SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
1809 +
1810 +       /* 4. Set priority rule: vlan > ack > pid */
1811 +       SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_ACK_PID);
1812 +
1813 +       /* PDMA multi-rx enable */
1814 +       pdma_gdm_rxid_config();
1815 +
1816 +       return 0;
1817 +}
1818 +
1819 +int pdma_non_lro_pri2_dvt(void)
1820 +{
1821 +       unsigned int regVal = 0;
1822 +
1823 +       printk("pdma_non_lro_pri2_dvt()\n");
1824 +
1825 +       /* 1. Set GDM1 to ring0 */
1826 +       SET_GDM_PID1_RXID_SEL(0);
1827 +
1828 +       /* 2.1 Disable TCP ACK with zero payload check */
1829 +       SET_GDM_TCP_ACK_WZPC(0);
1830 +       /* 2.2 Set TCP ACK to ring1 */
1831 +       SET_GDM_TCP_ACK_RXID_SEL(1);
1832 +
1833 +       /* 3. Set vlan priority=1 to ring2 */
1834 +       SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
1835 +
1836 +       /* 4. Set priority rule: ack > vlan > pid */
1837 +       SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_VLAN_PID);
1838 +
1839 +       /* PDMA multi-rx enable */
1840 +       pdma_gdm_rxid_config();
1841 +
1842 +       return 0;
1843 +}
1844 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
1845 +static const PDMA_LRO_DVT_FUNC pdma_dvt_lro_func[] = {
1846 +#if defined(CONFIG_RAETH_HW_LRO)
1847 +       [0] = pdma_lro_disable_dvt,     /* PDMA_TEST_LRO_DISABLE */
1848 +       [1] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_PORT */
1849 +       [2] = pdma_lro_auto_aggre_dvt,  /* PDMA_TEST_LRO_AUTO_LEARN */
1850 +       [3] = pdma_lro_auto_ipv6_dvt,   /* PDMA_TEST_LRO_AUTO_IPV6 */
1851 +       [4] = pdma_lro_auto_myIP_dvt,   /* PDMA_TEST_LRO_AUTO_MYIP */
1852 +       [5] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_AGGREGATE */
1853 +#endif /* CONFIG_RAETH_HW_LRO */
1854 +#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1855 +       [6] = pdma_non_lro_portid_dvt,  /* PDMA_TEST_NON_LRO_PORT_ID */
1856 +       [7] = pdma_non_lro_stag_dvt,    /* PDMA_TEST_NON_LRO_STAG */
1857 +       [8] = pdma_non_lro_vlan_dvt,    /* PDMA_TEST_NON_LRO_VLAN */
1858 +       [9] = pdma_non_lro_tcpack_dvt,  /* PDMA_TEST_NON_LRO_TCP_ACK */
1859 +       [10] = pdma_non_lro_pri1_dvt,   /* PDMA_TEST_NON_LRO_PRI1 */
1860 +       [11] = pdma_non_lro_pri2_dvt,   /* PDMA_TEST_NON_LRO_PRI2 */
1861 +#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
1862 +#if defined(CONFIG_RAETH_HW_LRO)
1863 +       [12] = pdma_lro_dly_int0_dvt,   /* PDMA_TEST_LRO_DLY_INT0 */
1864 +       [13] = pdma_lro_dly_int1_dvt,   /* PDMA_TEST_LRO_DLY_INT1 */
1865 +       [14] = pdma_lro_dly_int2_dvt,   /* PDMA_TEST_LRO_DLY_INT2 */
1866 +       [15] = pdma_lro_dly_int3_dvt,   /* PDMA_TEST_LRO_DLY_INT3 */
1867 +#endif /* CONFIG_RAETH_HW_LRO */
1868 +};
1869 +
1870 +/* PDMA LRO test functions end */
1871 +
1872 +#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
1873 +void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
1874 +                       int rx_dma_owner_idx0)
1875 +{
1876 +       if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
1877 +               if (rx_ring_no == 1) {
1878 +                       printk("------- rt2880_eth_recv (ring1) --------\n");
1879 +                       printk("rx_info1=0x%x\n",
1880 +                              *(unsigned int *)
1881 +                              &ei_local->rx_ring1[rx_dma_owner_idx0].
1882 +                              rxd_info1);
1883 +                       printk("rx_info2=0x%x\n",
1884 +                              *(unsigned int *)
1885 +                              &ei_local->rx_ring1[rx_dma_owner_idx0].
1886 +                              rxd_info2);
1887 +                       printk("rx_info3=0x%x\n",
1888 +                              *(unsigned int *)
1889 +                              &ei_local->rx_ring1[rx_dma_owner_idx0].
1890 +                              rxd_info3);
1891 +                       printk("rx_info4=0x%x\n",
1892 +                              *(unsigned int *)
1893 +                              &ei_local->rx_ring1[rx_dma_owner_idx0].
1894 +                              rxd_info4);
1895 +                       printk("-------------------------------\n");
1896 +               } else if (rx_ring_no == 2) {
1897 +                       printk("------- rt2880_eth_recv (ring2) --------\n");
1898 +                       printk("rx_info1=0x%x\n",
1899 +                              *(unsigned int *)
1900 +                              &ei_local->rx_ring2[rx_dma_owner_idx0].
1901 +                              rxd_info1);
1902 +                       printk("rx_info2=0x%x\n",
1903 +                              *(unsigned int *)
1904 +                              &ei_local->rx_ring2[rx_dma_owner_idx0].
1905 +                              rxd_info2);
1906 +                       printk("rx_info3=0x%x\n",
1907 +                              *(unsigned int *)
1908 +                              &ei_local->rx_ring2[rx_dma_owner_idx0].
1909 +                              rxd_info3);
1910 +                       printk("rx_info4=0x%x\n",
1911 +                              *(unsigned int *)
1912 +                              &ei_local->rx_ring2[rx_dma_owner_idx0].
1913 +                              rxd_info4);
1914 +                       printk("-------------------------------\n");
1915 +               } else if (rx_ring_no == 3) {
1916 +                       printk("------- rt2880_eth_recv (ring3) --------\n");
1917 +                       printk("rx_info1=0x%x\n",
1918 +                              *(unsigned int *)
1919 +                              &ei_local->rx_ring3[rx_dma_owner_idx0].
1920 +                              rxd_info1);
1921 +                       printk("rx_info2=0x%x\n",
1922 +                              *(unsigned int *)
1923 +                              &ei_local->rx_ring3[rx_dma_owner_idx0].
1924 +                              rxd_info2);
1925 +                       printk("rx_info3=0x%x\n",
1926 +                              *(unsigned int *)
1927 +                              &ei_local->rx_ring3[rx_dma_owner_idx0].
1928 +                              rxd_info3);
1929 +                       printk("rx_info4=0x%x\n",
1930 +                              *(unsigned int *)
1931 +                              &ei_local->rx_ring3[rx_dma_owner_idx0].
1932 +                              rxd_info4);
1933 +                       printk("-------------------------------\n");
1934 +               }
1935 +#if 0
1936 +               else {
1937 +                       printk("------- rt2880_eth_recv (ring0) --------\n");
1938 +                       printk("rx_info1=0x%x\n",
1939 +                              *(unsigned int *)
1940 +                              &ei_local->rx_ring0[rx_dma_owner_idx0].
1941 +                              rxd_info1);
1942 +                       printk("rx_info2=0x%x\n",
1943 +                              *(unsigned int *)
1944 +                              &ei_local->rx_ring0[rx_dma_owner_idx0].
1945 +                              rxd_info2);
1946 +                       printk("rx_info3=0x%x\n",
1947 +                              *(unsigned int *)
1948 +                              &ei_local->rx_ring0[rx_dma_owner_idx0].
1949 +                              rxd_info3);
1950 +                       printk("rx_info4=0x%x\n",
1951 +                              *(unsigned int *)
1952 +                              &ei_local->rx_ring0[rx_dma_owner_idx0].
1953 +                              rxd_info4);
1954 +                       printk("-------------------------------\n");
1955 +               }
1956 +#endif
1957 +       }
1958 +       if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
1959 +           (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_FORCE_PORT)) {
1960 +               if (rx_ring_no == 1) {
1961 +                       printk("------- rt2880_eth_recv (ring1) --------\n");
1962 +                       printk("rx_info1.PDP0=0x%x\n",
1963 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1964 +                              rxd_info1.PDP0);
1965 +                       printk("rx_info2.DDONE_bit=0x%x\n",
1966 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1967 +                              rxd_info2.DDONE_bit);
1968 +                       printk("rx_info2.LS0=0x%x\n",
1969 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1970 +                              rxd_info2.LS0);
1971 +                       printk("rx_info2.PLEN0=0x%x\n",
1972 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1973 +                              rxd_info2.PLEN0);
1974 +                       printk("rx_info2.TAG=0x%x\n",
1975 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1976 +                              rxd_info2.TAG);
1977 +#if defined(CONFIG_ARCH_MT7623)
1978 +                       printk("rx_info2.LRO_AGG_CNT=0x%x\n",
1979 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1980 +                              rxd_info2.LRO_AGG_CNT);
1981 +                       printk("rx_info2.REV=0x%x\n",
1982 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1983 +                              rxd_info2.REV);
1984 +#else
1985 +                       printk("rx_info2.LS1=0x%x\n",
1986 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1987 +                              rxd_info2.LS1);
1988 +#endif /* CONFIG_ARCH_MT7623 */
1989 +                       printk("rx_info2.PLEN1=0x%x\n",
1990 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1991 +                              rxd_info2.PLEN1);
1992 +                       printk("rx_info3.TPID=0x%x\n",
1993 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1994 +                              rxd_info3.TPID);
1995 +                       printk("rx_info3.VID=0x%x\n",
1996 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
1997 +                              rxd_info3.VID);
1998 +                       printk("rx_info4.IP6=0x%x\n",
1999 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2000 +                              rxd_info4.IP6);
2001 +                       printk("rx_info4.IP4=0x%x\n",
2002 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2003 +                              rxd_info4.IP4);
2004 +                       printk("rx_info4.IP4F=0x%x\n",
2005 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2006 +                              rxd_info4.IP4F);
2007 +                       printk("rx_info4.TACK=0x%x\n",
2008 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2009 +                              rxd_info4.TACK);
2010 +                       printk("rx_info4.L4VLD=0x%x\n",
2011 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2012 +                              rxd_info4.L4VLD);
2013 +                       printk("rx_info4.L4F=0x%x\n",
2014 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2015 +                              rxd_info4.L4F);
2016 +                       printk("rx_info4.SPORT=0x%x\n",
2017 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2018 +                              rxd_info4.SP);
2019 +                       printk("rx_info4.CRSN=0x%x\n",
2020 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2021 +                              rxd_info4.CRSN);
2022 +                       printk("rx_info4.FOE_Entry=0x%x\n",
2023 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2024 +                              rxd_info4.FOE_Entry);
2025 +                       printk("-------------------------------\n");
2026 +               } else if (rx_ring_no == 2) {
2027 +                       printk("------- rt2880_eth_recv (ring2) --------\n");
2028 +                       printk("rx_info1.PDP0=0x%x\n",
2029 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2030 +                              rxd_info1.PDP0);
2031 +                       printk("rx_info2.DDONE_bit=0x%x\n",
2032 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2033 +                              rxd_info2.DDONE_bit);
2034 +                       printk("rx_info2.LS0=0x%x\n",
2035 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2036 +                              rxd_info2.LS0);
2037 +                       printk("rx_info2.PLEN0=0x%x\n",
2038 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2039 +                              rxd_info2.PLEN0);
2040 +                       printk("rx_info2.TAG=0x%x\n",
2041 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2042 +                              rxd_info2.TAG);
2043 +#if defined(CONFIG_ARCH_MT7623)
2044 +                       printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2045 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2046 +                              rxd_info2.LRO_AGG_CNT);
2047 +                       printk("rx_info2.REV=0x%x\n",
2048 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2049 +                              rxd_info2.REV);
2050 +#else
2051 +                       printk("rx_info2.LS1=0x%x\n",
2052 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2053 +                              rxd_info2.LS1);
2054 +#endif /* CONFIG_ARCH_MT7623 */
2055 +                       printk("rx_info2.PLEN1=0x%x\n",
2056 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2057 +                              rxd_info2.PLEN1);
2058 +                       printk("rx_info3.TPID=0x%x\n",
2059 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2060 +                              rxd_info3.TPID);
2061 +                       printk("rx_info3.VID=0x%x\n",
2062 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2063 +                              rxd_info3.VID);
2064 +                       printk("rx_info4.IP6=0x%x\n",
2065 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2066 +                              rxd_info4.IP6);
2067 +                       printk("rx_info4.IP4=0x%x\n",
2068 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2069 +                              rxd_info4.IP4);
2070 +                       printk("rx_info4.IP4F=0x%x\n",
2071 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2072 +                              rxd_info4.IP4F);
2073 +                       printk("rx_info4.TACK=0x%x\n",
2074 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2075 +                              rxd_info4.TACK);
2076 +                       printk("rx_info4.L4VLD=0x%x\n",
2077 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2078 +                              rxd_info4.L4VLD);
2079 +                       printk("rx_info4.L4F=0x%x\n",
2080 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2081 +                              rxd_info4.L4F);
2082 +                       printk("rx_info4.SPORT=0x%x\n",
2083 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2084 +                              rxd_info4.SP);
2085 +                       printk("rx_info4.CRSN=0x%x\n",
2086 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2087 +                              rxd_info4.CRSN);
2088 +                       printk("rx_info4.FOE_Entry=0x%x\n",
2089 +                              ei_local->rx_ring2[rx_dma_owner_idx0].
2090 +                              rxd_info4.FOE_Entry);
2091 +                       printk("-------------------------------\n");
2092 +               } else if (rx_ring_no == 3) {
2093 +                       printk("------- rt2880_eth_recv (ring3) --------\n");
2094 +                       printk("rx_info1.PDP0=0x%x\n",
2095 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2096 +                              rxd_info1.PDP0);
2097 +                       printk("rx_info2.DDONE_bit=0x%x\n",
2098 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2099 +                              rxd_info2.DDONE_bit);
2100 +                       printk("rx_info2.LS0=0x%x\n",
2101 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2102 +                              rxd_info2.LS0);
2103 +                       printk("rx_info2.PLEN0=0x%x\n",
2104 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2105 +                              rxd_info2.PLEN0);
2106 +                       printk("rx_info2.TAG=0x%x\n",
2107 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2108 +                              rxd_info2.TAG);
2109 +#if defined(CONFIG_ARCH_MT7623)
2110 +                       printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2111 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2112 +                              rxd_info2.LRO_AGG_CNT);
2113 +                       printk("rx_info2.REV=0x%x\n",
2114 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2115 +                              rxd_info2.REV);
2116 +#else
2117 +                       printk("rx_info2.LS1=0x%x\n",
2118 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2119 +                              rxd_info2.LS1);
2120 +#endif /* CONFIG_ARCH_MT7623 */
2121 +                       printk("rx_info2.PLEN1=0x%x\n",
2122 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2123 +                              rxd_info2.PLEN1);
2124 +                       printk("rx_info3.TPID=0x%x\n",
2125 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2126 +                              rxd_info3.TPID);
2127 +                       printk("rx_info3.VID=0x%x\n",
2128 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2129 +                              rxd_info3.VID);
2130 +                       printk("rx_info4.IP6=0x%x\n",
2131 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2132 +                              rxd_info4.IP6);
2133 +                       printk("rx_info4.IP4=0x%x\n",
2134 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2135 +                              rxd_info4.IP4);
2136 +                       printk("rx_info4.IP4F=0x%x\n",
2137 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2138 +                              rxd_info4.IP4F);
2139 +                       printk("rx_info4.TACK=0x%x\n",
2140 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2141 +                              rxd_info4.TACK);
2142 +                       printk("rx_info4.L4VLD=0x%x\n",
2143 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2144 +                              rxd_info4.L4VLD);
2145 +                       printk("rx_info4.L4F=0x%x\n",
2146 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2147 +                              rxd_info4.L4F);
2148 +                       printk("rx_info4.SPORT=0x%x\n",
2149 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2150 +                              rxd_info4.SP);
2151 +                       printk("rx_info4.CRSN=0x%x\n",
2152 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2153 +                              rxd_info4.CRSN);
2154 +                       printk("rx_info4.FOE_Entry=0x%x\n",
2155 +                              ei_local->rx_ring3[rx_dma_owner_idx0].
2156 +                              rxd_info4.FOE_Entry);
2157 +                       printk("-------------------------------\n");
2158 +               }
2159 +#if 0
2160 +               else {
2161 +                       printk("------- rt2880_eth_recv (ring0) --------\n");
2162 +                       printk("rx_info1.PDP0=0x%x\n",
2163 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2164 +                              rxd_info1.PDP0);
2165 +                       printk("rx_info2.DDONE_bit=0x%x\n",
2166 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2167 +                              rxd_info2.DDONE_bit);
2168 +                       printk("rx_info2.LS0=0x%x\n",
2169 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2170 +                              rxd_info2.LS0);
2171 +                       printk("rx_info2.PLEN0=0x%x\n",
2172 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2173 +                              rxd_info2.PLEN0);
2174 +                       printk("rx_info2.TAG=0x%x\n",
2175 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2176 +                              rxd_info2.TAG);
2177 +                       printk("rx_info2.LS1=0x%x\n",
2178 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2179 +                              rxd_info2.LS1);
2180 +                       printk("rx_info2.PLEN1=0x%x\n",
2181 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2182 +                              rxd_info2.PLEN1);
2183 +                       printk("rx_info3.TPID=0x%x\n",
2184 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2185 +                              rxd_info3.TPID);
2186 +                       printk("rx_info3.VID=0x%x\n",
2187 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2188 +                              rxd_info3.VID);
2189 +                       printk("rx_info4.IP6=0x%x\n",
2190 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2191 +                              rxd_info4.IP6);
2192 +                       printk("rx_info4.IP4=0x%x\n",
2193 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2194 +                              rxd_info4.IP4);
2195 +                       printk("rx_info4.IP4F=0x%x\n",
2196 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2197 +                              rxd_info4.IP4F);
2198 +                       printk("rx_info4.TACK=0x%x\n",
2199 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2200 +                              rxd_info4.TACK);
2201 +                       printk("rx_info4.L4VLD=0x%x\n",
2202 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2203 +                              rxd_info4.L4VLD);
2204 +                       printk("rx_info4.L4F=0x%x\n",
2205 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2206 +                              rxd_info4.L4F);
2207 +                       printk("rx_info4.SPORT=0x%x\n",
2208 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2209 +                              rxd_info4.SP);
2210 +                       printk("rx_info4.CRSN=0x%x\n",
2211 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2212 +                              rxd_info4.CRSN);
2213 +                       printk("rx_info4.FOE_Entry=0x%x\n",
2214 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2215 +                              rxd_info4.FOE_Entry);
2216 +                       printk("-------------------------------\n");
2217 +               }
2218 +#endif
2219 +       }
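+       /* force-aggregate test: report the first packet that arrives on ring1,
+        * dump its contents, then clear the test configuration
+        */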
2220 +       if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_FORCE_AGGREGATE) {
2221 +               if (rx_ring_no == 1) {
2222 +                       printk("PASS!!! => RING1: rxd_info1.PDP0=0x%x\n",
2223 +                              ei_local->rx_ring1[rx_dma_owner_idx0].
2224 +                              rxd_info1.PDP0);
2225 +                       skb_dump(ei_local->netrx1_skbuf[rx_dma_owner_idx0]);
2226 +                       pdma_dvt_reset_config();
2227 +               }
2228 +       }
2229 +}
2230 +#endif
2231 +
2232 +int pdma_dvt_show_ctrl(int par1, int par2)
2233 +{
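+       /* par2 == 0 clears all show flags; any other value sets bit 'par2'
+        * (par1 is unused)
+        */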
2234 +       if (par2 == 0)
2235 +               g_pdma_dvt_show_config = 0;
2236 +       else
2237 +               g_pdma_dvt_show_config |= (1 << par2);
2238 +
2239 +       return 0;
2240 +}
2241 +
2242 +int pdma_dvt_test_rx_ctrl(int par1, int par2)
2243 +{
2244 +       if (par2 == 0)
2245 +               g_pdma_dvt_rx_test_config = 0;
2246 +       else
2247 +               g_pdma_dvt_rx_test_config |= (1 << par2);
2248 +
2249 +       return 0;
2250 +}
2251 +
2252 +int pdma_dvt_test_tx_ctrl(int par1, int par2)
2253 +{
2254 +       if (par2 == 0)
2255 +               g_pdma_dvt_tx_test_config = 0;
2256 +       else
2257 +               g_pdma_dvt_tx_test_config |= (1 << par2);
2258 +
2259 +       return 0;
2260 +}
2261 +
2262 +int pdma_dvt_test_debug_ctrl(int par1, int par2)
2263 +{
2264 +       if (par2 == 0)
2265 +               g_pdma_dvt_debug_test_config = 0;
2266 +       else
2267 +               g_pdma_dvt_debug_test_config |= (1 << par2);
2268 +
2269 +       return 0;
2270 +}
2271 +
2272 +int pdma_dvt_test_lro_ctrl(int par1, int par2)
2273 +{
2274 +       g_pdma_dvt_lro_test_config = par2;
2275 +
2276 +#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
2277 +       if (pdma_dvt_lro_func[par2])
2278 +               (*pdma_dvt_lro_func[par2]) ();
2279 +#endif /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
2280 +
2281 +       return 0;
2282 +}
2283 +
2284 +unsigned int pdma_dvt_get_show_config(void)
2285 +{
2286 +       return g_pdma_dvt_show_config;
2287 +}
2288 +
2289 +unsigned int pdma_dvt_get_rx_test_config(void)
2290 +{
2291 +       return g_pdma_dvt_rx_test_config;
2292 +}
2293 +
2294 +unsigned int pdma_dvt_get_tx_test_config(void)
2295 +{
2296 +       return g_pdma_dvt_tx_test_config;
2297 +}
2298 +
2299 +unsigned int pdma_dvt_get_debug_test_config(void)
2300 +{
2301 +       return g_pdma_dvt_debug_test_config;
2302 +}
2303 +
2304 +unsigned int pdma_dvt_get_lro_test_config(void)
2305 +{
2306 +       return g_pdma_dvt_lro_test_config;
2307 +}
2308 +
2309 +void pdma_dvt_reset_config(void)
2310 +{
2311 +       g_pdma_dvt_show_config = 0;
2312 +       g_pdma_dvt_rx_test_config = 0;
2313 +       g_pdma_dvt_tx_test_config = 0;
2314 +       g_pdma_dvt_lro_test_config = 0;
2315 +}
2316 +
2317 +void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0)
2318 +{
2319 +#if 0
2320 +       unsigned int udf = 0;
2321 +#endif
2322 +
2323 +       if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
2324 +               printk("------- rt2880_eth_recv --------\n");
2325 +               printk("rx_info1=0x%x\n",
2326 +                      *(unsigned int *)&ei_local->
2327 +                      rx_ring0[rx_dma_owner_idx0].rxd_info1);
2328 +               printk("rx_info2=0x%x\n",
2329 +                      *(unsigned int *)&ei_local->
2330 +                      rx_ring0[rx_dma_owner_idx0].rxd_info2);
2331 +               printk("rx_info3=0x%x\n",
2332 +                      *(unsigned int *)&ei_local->
2333 +                      rx_ring0[rx_dma_owner_idx0].rxd_info3);
2334 +               printk("rx_info4=0x%x\n",
2335 +                      *(unsigned int *)&ei_local->
2336 +                      rx_ring0[rx_dma_owner_idx0].rxd_info4);
2337 +               printk("-------------------------------\n");
2338 +       }
2339 +       if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
2340 +           pdma_dvt_get_rx_test_config()) {
2341 +#if 0
2342 +               udf = ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6 << 5 |
2343 +                   ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4 << 4 |
2344 +                   ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F << 3 |
2345 +                   ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK << 2 |
2346 +                   ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD << 1 |
2347 +                   ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F;
2348 +#endif
2349 +               printk("------- rt2880_eth_recv --------\n");
2350 +               printk("rx_info1.PDP0=0x%x\n",
2351 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info1.PDP0);
2352 +               printk("rx_info2.DDONE_bit=0x%x\n",
2353 +                      ei_local->rx_ring0[rx_dma_owner_idx0].
2354 +                      rxd_info2.DDONE_bit);
2355 +               printk("rx_info2.LS0=0x%x\n",
2356 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS0);
2357 +               printk("rx_info2.PLEN0=0x%x\n",
2358 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN0);
2359 +               printk("rx_info2.TAG=0x%x\n",
2360 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG);
2361 +#if defined(CONFIG_ARCH_MT7623)
2362 +               printk("rx_info2.LRO_AGG_CNT=0x%x\n",
2363 +                      ei_local->rx_ring0[rx_dma_owner_idx0].
2364 +                      rxd_info2.LRO_AGG_CNT);
2365 +#else
2366 +               printk("rx_info2.LS1=0x%x\n",
2367 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS1);
2368 +#endif /* CONFIG_ARCH_MT7623 */
2369 +               printk("rx_info2.PLEN1=0x%x\n",
2370 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN1);
2371 +               printk("rx_info3.TPID=0x%x\n",
2372 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.TPID);
2373 +               printk("rx_info3.VID=0x%x\n",
2374 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID);
2375 +#if 0
2376 +               printk("rx_info4.UDF=0x%x\n", udf);
2377 +#endif
2378 +               printk("rx_info4.IP6=0x%x\n",
2379 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6);
2380 +               printk("rx_info4.IP4=0x%x\n",
2381 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4);
2382 +               printk("rx_info4.IP4F=0x%x\n",
2383 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F);
2384 +               printk("rx_info4.TACK=0x%x\n",
2385 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK);
2386 +               printk("rx_info4.L4VLD=0x%x\n",
2387 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD);
2388 +               printk("rx_info4.L4F=0x%x\n",
2389 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F);
2390 +               printk("rx_info4.SPORT=0x%x\n",
2391 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP);
2392 +               printk("rx_info4.CRSN=0x%x\n",
2393 +                      ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.CRSN);
2394 +               printk("rx_info4.FOE_Entry=0x%x\n",
2395 +                      ei_local->rx_ring0[rx_dma_owner_idx0].
2396 +                      rxd_info4.FOE_Entry);
2397 +               printk("-------------------------------\n");
2398 +       }
2399 +       if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV6)) {
2400 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6) {
2401 +                       printk("PASS!!! => rx_info4.IP6=0x%x\n",
2402 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2403 +                              rxd_info4.IP6);
2404 +                       pdma_dvt_reset_config();
2405 +               }
2406 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4)) {
2407 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4) {
2408 +                       printk("PASS!!! => rx_info4.IP4=0x%x\n",
2409 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2410 +                              rxd_info4.IP4);
2411 +                       pdma_dvt_reset_config();
2412 +               }
2413 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4F)) {
2414 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F) {
2415 +                       printk("PASS!!! => rx_info4.IP4F=0x%x\n",
2416 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2417 +                              rxd_info4.IP4F);
2418 +                       pdma_dvt_reset_config();
2419 +               }
2420 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4VLD)) {
2421 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD) {
2422 +                       printk("PASS!!! => rx_info4.L4VLD=0x%x\n",
2423 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2424 +                              rxd_info4.L4VLD);
2425 +                       pdma_dvt_reset_config();
2426 +               }
2427 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4F)) {
2428 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F) {
2429 +                       printk("PASS!!! => rx_info4.L4F=0x%x\n",
2430 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2431 +                              rxd_info4.L4F);
2432 +                       pdma_dvt_reset_config();
2433 +               }
2434 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_SPORT)) {
2435 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP == 1) {
2436 +                       g_pdma_dev_lanport++;
2437 +               } else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP ==
2438 +                          2) {
2439 +                       g_pdma_dev_wanport++;
2440 +               }
2441 +               if (g_pdma_dev_lanport && g_pdma_dev_wanport) {
2442 +                       printk("PASS!!! => g_pdma_dev_lanport=0x%x, "
2443 +                              "g_pdma_dev_wanport=0x%x\n",
2444 +                              g_pdma_dev_lanport, g_pdma_dev_wanport);
2445 +
2446 +                       g_pdma_dev_lanport = 0;
2447 +                       g_pdma_dev_wanport = 0;
2448 +                       pdma_dvt_reset_config();
2449 +               }
2450 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_OFF)) {
2451 +               if (!ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID) {
2452 +                       printk("PASS!!! => rxd_info3.VID=0x%x\n",
2453 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2454 +                              rxd_info3.VID);
2455 +                       pdma_dvt_reset_config();
2456 +               }
2457 +       } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_ON)) {
2458 +               printk("RX data: (PDP0=%x)\n",
2459 +                      (unsigned int)ei_local->
2460 +                      netrx0_skbuf[rx_dma_owner_idx0]->data);
2461 +
2462 +               skb_dump(ei_local->netrx0_skbuf[rx_dma_owner_idx0]);
2463 +
2464 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID &&
2465 +                   ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG) {
2466 +                       printk("PASS!!! => rxd_info2.TAG=0x%x\n",
2467 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2468 +                              rxd_info2.TAG);
2469 +                       printk("PASS!!! => rxd_info3.VID=0x%x\n",
2470 +                              ei_local->rx_ring0[rx_dma_owner_idx0].
2471 +                              rxd_info3.VID);
2472 +                       pdma_dvt_reset_config();
2473 +               }
2474 +       }
2475 +}
2476 +
2477 +void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
2478 +                           unsigned long tx_cpu_owner_idx0)
2479 +{
2480 +       if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
2481 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xE007;    /* VLAN_TAG = 0x1E007 */
2482 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
2483 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0x0000;    /* VLAN_TAG = 0x10000 */
2484 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
2485 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xFFFF;    /* VLAN_TAG = 0x1FFFF */
2486 +       }
2487 +}
2488 +
2489 +void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
2490 +                           unsigned long tx_cpu_owner_idx0)
2491 +{
2492 +       if (PDMA_TEST_RX_UDF == pdma_dvt_get_rx_test_config()) {
2493 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4;      /* PPE */
2494 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF = 0x2F;
2495 +       }
2496 +       if (pdma_dvt_get_show_config() & PDMA_SHOW_TX_DESC) {
2497 +               printk("------- rt2880_eth_send --------\n");
2498 +               printk("tx_info1=%x\n",
2499 +                      *(unsigned int *)&ei_local->
2500 +                      tx_ring0[tx_cpu_owner_idx0].txd_info1);
2501 +               printk("tx_info2=%x\n",
2502 +                      *(unsigned int *)&ei_local->
2503 +                      tx_ring0[tx_cpu_owner_idx0].txd_info2);
2504 +               printk("tx_info3=%x\n",
2505 +                      *(unsigned int *)&ei_local->
2506 +                      tx_ring0[tx_cpu_owner_idx0].txd_info3);
2507 +               printk("tx_info4=%x\n",
2508 +                      *(unsigned int *)&ei_local->
2509 +                      tx_ring0[tx_cpu_owner_idx0].txd_info4);
2510 +               printk("--------------------------------\n");
2511 +       }
2512 +       if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_TX_DESC) ||
2513 +           pdma_dvt_get_tx_test_config()) {
2514 +               printk("------- rt2880_eth_send --------\n");
2515 +               printk("tx_info1.SDP0=%x\n",
2516 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0);
2517 +               printk("tx_info2.DDONE_bit=%x\n",
2518 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].
2519 +                      txd_info2.DDONE_bit);
2520 +               printk("tx_info2.LS0_bit=%x\n",
2521 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit);
2522 +               printk("tx_info2.SDL0=%x\n",
2523 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0);
2524 +               printk("tx_info2.BURST_bit=%x\n",
2525 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].
2526 +                      txd_info2.BURST_bit);
2527 +               printk("tx_info2.LS1_bit=%x\n",
2528 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit);
2529 +               printk("tx_info2.SDL1=%x\n",
2530 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1);
2531 +               printk("tx_info3.SDP1=%x\n",
2532 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1);
2533 +               printk("tx_info4.TUI_CO=%x\n",
2534 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO);
2535 +               printk("tx_info4.TSO=%x\n",
2536 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO);
2537 +               printk("tx_info4.FPORT=%x\n",
2538 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT);
2539 +               printk("tx_info4.UDF=%x\n",
2540 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF);
2541 +               printk("tx_info4.RESV=%x\n",
2542 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.RESV);
2543 +               printk("tx_info4.VLAN_TAG=%x\n",
2544 +                      ei_local->tx_ring0[tx_cpu_owner_idx0].
2545 +                      txd_info4.VLAN_TAG);
2546 +               printk("--------------------------------\n");
2547 +       }
2548 +       if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_LAN_SPORT)) {
2549 +               if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 1) {
2550 +                       printk("PASS!!! => txd_info4.FPORT=0x%x\n",
2551 +                              ei_local->tx_ring0[tx_cpu_owner_idx0].
2552 +                              txd_info4.FPORT);
2553 +                       pdma_dvt_reset_config();
2554 +               }
2555 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_WAN_SPORT)) {
2556 +               if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 2) {
2557 +                       printk("PASS!!! => txd_info4.FPORT=0x%x\n",
2558 +                              ei_local->tx_ring0[tx_cpu_owner_idx0].
2559 +                              txd_info4.FPORT);
2560 +                       pdma_dvt_reset_config();
2561 +               }
2562 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
2563 +               if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2564 +                       printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2565 +                              ei_local->tx_ring0[tx_cpu_owner_idx0].
2566 +                              txd_info4.VLAN_TAG);
2567 +                       /* pdma_dvt_reset_config(); */
2568 +               }
2569 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_OFF)) {
2570 +               if (!ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2571 +                       printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2572 +                              ei_local->tx_ring0[tx_cpu_owner_idx0].
2573 +                              txd_info4.VLAN_TAG);
2574 +                       pdma_dvt_reset_config();
2575 +               }
2576 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
2577 +               if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2578 +                       printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2579 +                              ei_local->tx_ring0[tx_cpu_owner_idx0].
2580 +                              txd_info4.VLAN_TAG);
2581 +                       /* pdma_dvt_reset_config(); */
2582 +               }
2583 +       } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
2584 +               if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
2585 +                       printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
2586 +                              ei_local->tx_ring0[tx_cpu_owner_idx0].
2587 +                              txd_info4.VLAN_TAG);
2588 +                       /* pdma_dvt_reset_config(); */
2589 +               }
2590 +       }
2591 +}
2592 +
2593 +void raeth_pdma_lro_dly_int_dvt(void)
2594 +{
2595 +       unsigned int reg_int_val;
2596 +
2597 +       reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
2598 +
2599 +       if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT0) {
2600 +               if ((reg_int_val & RX_DLY_INT)) {
2601 +                       printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2602 +                       pdma_dvt_reset_config();
2603 +               }
2604 +       } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT1) {
2605 +               if ((reg_int_val & RING1_RX_DLY_INT)) {
2606 +                       printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2607 +                       pdma_dvt_reset_config();
2608 +               }
2609 +       } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT2) {
2610 +               if ((reg_int_val & RING2_RX_DLY_INT)) {
2611 +                       printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2612 +                       pdma_dvt_reset_config();
2613 +               }
2614 +       } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT3) {
2615 +               if ((reg_int_val & RING3_RX_DLY_INT)) {
2616 +                       printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
2617 +                       pdma_dvt_reset_config();
2618 +               }
2619 +       }
2620 +}
2621 +
2622 +void pdma_dvt_set_dma_mode(void)
2623 +{
2624 +#if defined(CONFIG_RAETH_PDMA_LEGACY_MODE)
2625 +       unsigned int regVal;
2626 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW3);
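+       /* clearing bit 15 of ADMA_LRO_CTRL_DW3 presumably selects the legacy
+        * PDMA descriptor mode (only done for CONFIG_RAETH_PDMA_LEGACY_MODE builds)
+        */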
2627 +       regVal &= ~(BIT(15));
2628 +       sysRegWrite(ADMA_LRO_CTRL_DW3, regVal);
2629 +#endif /* CONFIG_RAETH_PDMA_LEGACY_MODE */
2630 +}
2631 +
2632 --- /dev/null
2633 +++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
2634 @@ -0,0 +1,75 @@
2635 +/* Show controls */
2636 +#define PDMA_SHOW_RX_DESC   (1 << 1)
2637 +#define PDMA_SHOW_TX_DESC   (1 << 2)
2638 +#define PDMA_SHOW_DETAIL_RX_DESC   (1 << 3)
2639 +#define PDMA_SHOW_DETAIL_TX_DESC   (1 << 4)
2640 +
2641 +/* Rx test controls */
2642 +#define PDMA_TEST_RX_UDF     (1 << 1)
2643 +#define PDMA_TEST_RX_IPV6    (1 << 2)
2644 +#define PDMA_TEST_RX_IPV4    (1 << 3)
2645 +#define PDMA_TEST_RX_IPV4F   (1 << 4)
2646 +#define PDMA_TEST_RX_L4VLD   (1 << 5)
2647 +#define PDMA_TEST_RX_L4F     (1 << 6)
2648 +#define PDMA_TEST_RX_SPORT   (1 << 7)
2649 +#define PDMA_TEST_RX_VID_ON  (1 << 8)
2650 +#define PDMA_TEST_RX_VID_OFF (1 << 9)
2651 +
2652 +/* Tx test controls */
2653 +#define PDMA_TEST_TX_LAN_SPORT   (1 << 1)
2654 +#define PDMA_TEST_TX_WAN_SPORT   (1 << 2)
2655 +#define PDMA_TEST_TX_VLAN_ON     (1 << 3)
2656 +#define PDMA_TEST_TX_VLAN_OFF    (1 << 4)
2657 +#define PDMA_TEST_TX_VLAN_ZERO   (1 << 5)
2658 +#define PDMA_TEST_TX_VLAN_MAX    (1 << 6)
2659 +#define PDMA_TEST_TX_PDMA_LPK    (1 << 31)
2660 +
2661 +/* Debug controls */
2662 +#define PDMA_TEST_TSO_DEBUG      (1 << 1)
2663 +
2664 +/* LRO test controls */
2665 +typedef int (*PDMA_LRO_DVT_FUNC) (void);
2666 +
2667 +#define PDMA_TEST_LRO_DISABLE           (0)
2668 +#define PDMA_TEST_LRO_FORCE_PORT        (1)
2669 +#define PDMA_TEST_LRO_AUTO_LEARN        (2)
2670 +#define PDMA_TEST_LRO_AUTO_IPV6         (3)
2671 +#define PDMA_TEST_LRO_AUTO_MYIP         (4)
2672 +#define PDMA_TEST_LRO_FORCE_AGGREGATE   (5)
2673 +#define PDMA_TEST_NON_LRO_PORT_ID       (6)
2674 +#define PDMA_TEST_NON_LRO_STAG          (7)
2675 +#define PDMA_TEST_NON_LRO_VLAN          (8)
2676 +#define PDMA_TEST_NON_LRO_TCP_ACK       (9)
2677 +#define PDMA_TEST_NON_LRO_PRI1          (10)
2678 +#define PDMA_TEST_NON_LRO_PRI2          (11)
2679 +#define PDMA_TEST_LRO_DLY_INT0          (12)
2680 +#define PDMA_TEST_LRO_DLY_INT1          (13)
2681 +#define PDMA_TEST_LRO_DLY_INT2          (14)
2682 +#define PDMA_TEST_LRO_DLY_INT3          (15)
2683 +
2684 +void skb_dump(struct sk_buff *sk);
2685 +
2686 +int pdma_dvt_show_ctrl(int par1, int par2);
2687 +int pdma_dvt_test_rx_ctrl(int par1, int par2);
2688 +int pdma_dvt_test_tx_ctrl(int par1, int par2);
2689 +int pdma_dvt_test_debug_ctrl(int par1, int par2);
2690 +int pdma_dvt_test_lro_ctrl(int par1, int par2);
2691 +
2692 +unsigned int pdma_dvt_get_show_config(void);
2693 +unsigned int pdma_dvt_get_rx_test_config(void);
2694 +unsigned int pdma_dvt_get_tx_test_config(void);
2695 +unsigned int pdma_dvt_get_debug_test_config(void);
2696 +unsigned int pdma_dvt_get_lro_test_config(void);
2697 +void pdma_dvt_reset_config(void);
2698 +
2699 +void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0);
2700 +void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
2701 +                           unsigned long tx_cpu_owner_idx0);
2702 +void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
2703 +                           unsigned long tx_cpu_owner_idx0);
2704 +
2705 +void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
2706 +                       int rx_dma_owner_idx0);
2707 +void raeth_pdma_lro_dly_int_dvt(void);
2708 +void pdma_dvt_set_dma_mode(void);
2709 +
2710 --- /dev/null
2711 +++ b/drivers/net/ethernet/raeth/ethtool_readme.txt
2712 @@ -0,0 +1,44 @@
2713 +
2714 +Ethtool readme for selecting a different PHY address.
2715 +
2716 +Before running any ethtool command, make sure the current PHY
2717 +address is the one you expect. The default PHY address is 1 (port 1).
2718 +
2719 +You can change the current PHY address to X (0~4) with the following command:
2720 +# echo X > /proc/rt2880/gmac
2721 +
2722 +The ethtool command also shows the current PHY address, as in the output below.
2723 +
2724 +# ethtool  eth2
2725 +Settings for eth2:
2726 +        Supported ports: [ TP MII ]
2727 +        Supported link modes:   10baseT/Half 10baseT/Full
2728 +                                100baseT/Half 100baseT/Full
2729 +        Supports auto-negotiation: Yes
2730 +        Advertised link modes:  10baseT/Half 10baseT/Full
2731 +                                100baseT/Half 100baseT/Full
2732 +        Advertised auto-negotiation: No
2733 +        Speed: 10Mb/s
2734 +        Duplex: Full
2735 +        Port: MII
2736 +        PHYAD: 1
2737 +        Transceiver: internal
2738 +        Auto-negotiation: off
2739 +        Current message level: 0x00000000 (0)
2740 +        Link detected: no
2741 +
2742 +
2743 +The "PHYAD" field shows the current PHY address.
2744 +
2745 +
2746 +
2747 +Usage examples
2748 +1) show port1 info
2749 +# echo 1 > /proc/rt2880/gmac           # change phy address to 1
2750 +# ethtool eth2
2751 +
2752 +2) show port0 info
2753 +# echo 0 > /proc/rt2880/gmac           # change phy address to 0
2754 +# ethtool eth2
2755 +
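+3) show info for all ports in one pass (example sketch; assumes ports 0~4 and interface eth2 as above)
+# for p in 0 1 2 3 4; do echo $p > /proc/rt2880/gmac; ethtool eth2; done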
2756 +
2757 --- /dev/null
2758 +++ b/drivers/net/ethernet/raeth/mcast.c
2759 @@ -0,0 +1,187 @@
2760 +#include <linux/config.h>
2761 +#include <linux/version.h>
2762 +#include <linux/module.h>
2763 +#include <linux/skbuff.h>
2764 +#include <linux/kernel.h>
2765 +#include <linux/init.h>
2766 +#include <linux/types.h>
2767 +#include <linux/netdevice.h>
2768 +#include <linux/if_vlan.h>
2769 +
2770 +
2771 +#define MAX_MCAST_ENTRY            16
2772 +#define AGEING_TIME        5  //Unit: Sec
2773 +#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2], \
2774 +    ((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
2775 +
2776 +//#define MCAST_DEBUG
2777 +#ifdef MCAST_DEBUG
2778 +#define MCAST_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
2779 +#else
2780 +#define MCAST_PRINT(fmt, args...) { }
2781 +#endif
2782 +
2783 +typedef struct {
2784 +    uint8_t    src_mac[6];
2785 +    uint8_t    dst_mac[6];
2786 +    uint16_t    vlan_id;
2787 +    uint32_t   valid;
2788 +    uint32_t   use_count;
2789 +    unsigned long ageout;
2790 +} mcast_entry;
2791 +
2792 +mcast_entry mcast_tbl[MAX_MCAST_ENTRY];
2793 +atomic_t mcast_entry_num=ATOMIC_INIT(0);
2794 +DECLARE_MUTEX(mtbl_lock);
2795 +
2796 +uint32_t inline is_multicast_pkt(uint8_t *mac)
2797 +{
2798 +    if(mac[0]==0x01 && mac[1]==0x00 && mac[2]==0x5E) {
2799 +       return 1;
2800 +    }else{
2801 +       return 0;
2802 +    }
2803 +}
2804 +
2805 +int32_t inline mcast_entry_get(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac) 
2806 +{
2807 +    int i=0;
2808 +
2809 +    for(i=0;i<MAX_MCAST_ENTRY;i++) {
2810 +       if( (mcast_tbl[i].vlan_id == vlan_id) &&
2811 +           memcmp(mcast_tbl[i].src_mac,src_mac, 6)==0 &&
2812 +           memcmp(mcast_tbl[i].dst_mac, dst_mac, 6)==0 &&
2813 +               mcast_tbl[i].valid == 1) {
2814 +           return i;
2815 +       }
2816 +    }
2817 +    return -1;
2818 +}
2819 +
2820 +int inline __add_mcast_entry(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
2821 +{
2822 +    int i=0;
2823 +
2824 +    // use an empty or aged-out entry
2825 +    for(i=0;i<MAX_MCAST_ENTRY;i++) {
2826 +       if( mcast_tbl[i].valid==0 ||
2827 +               time_after(jiffies, mcast_tbl[i].ageout)) {
2828 +
2829 +           if(mcast_tbl[i].valid==0) {
2830 +               atomic_inc(&mcast_entry_num);
2831 +           }
2832 +           mcast_tbl[i].vlan_id = vlan_id;
2833 +           memcpy(mcast_tbl[i].src_mac, src_mac, 6);
2834 +           memcpy(mcast_tbl[i].dst_mac, dst_mac, 6);
2835 +           mcast_tbl[i].valid=1;
2836 +           mcast_tbl[i].use_count=1;
2837 +           mcast_tbl[i].ageout=jiffies + AGEING_TIME * HZ;
2838 +          
2839 +           return 1;
2840 +       }
2841 +    }
2842 +
2843 +    MCAST_PRINT("RAETH: Multicast Table is FULL!!\n");
2844 +    return 0;
2845 +}
2846 +
2847 +int inline mcast_entry_ins(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac) 
2848 +{
2849 +    int entry_num=0, ret=0;
2850 +
2851 +    down(&mtbl_lock);
2852 +    if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
2853 +       mcast_tbl[entry_num].use_count++;
2854 +       mcast_tbl[entry_num].ageout=jiffies + AGEING_TIME * HZ;
2855 +       MCAST_PRINT("%s: Update %0X:%0X:%0X:%0X:%0X:%0X's use_count=%d\n" \
2856 +               ,__FUNCTION__, MAC_ARG(dst_mac), mcast_tbl[entry_num].use_count);
2857 +       ret = 1;
2858 +    }else { //if entry not found, create new entry.
2859 +       MCAST_PRINT("%s: Create new entry %0X:%0X:%0X:%0X:%0X:%0X\n", \
2860 +               __FUNCTION__, MAC_ARG(dst_mac));
2861 +       ret = __add_mcast_entry(vlan_id, src_mac,dst_mac);
2862 +    }
2863 +    
2864 +    up(&mtbl_lock);
2865 +    return ret;
2866 +
2867 +}
2868 +
2869 +
2870 +/*
2871 + * Return:
2872 + *         0: entry found
2873 + *         1: entry not found
2874 + */
2875 +int inline mcast_entry_del(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac) 
2876 +{
2877 +    int entry_num;
2878 +
2879 +    down(&mtbl_lock);
2880 +    if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
2881 +       if((--mcast_tbl[entry_num].use_count)==0) {
2882 +           MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X (entry_num=%d)\n", \
2883 +                   __FUNCTION__, MAC_ARG(dst_mac), entry_num);
2884 +           mcast_tbl[entry_num].valid=0;
2885 +           atomic_dec(&mcast_entry_num);
2886 +       }
2887 +       up(&mtbl_lock);
2888 +       return 0;
2889 +    }else { 
2890 +       /* this multicast packet was not sent by us, just ignore it */
2891 +       up(&mtbl_lock);
2892 +       return 1;
2893 +    }
2894 +}
2895 +
2896 +/* 
2897 + * Return
2898 + *         0: drop packet
2899 + *         1: continue
2900 + */
2901 +int32_t mcast_rx(struct sk_buff * skb)
2902 +{
2903 +    struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data-ETH_HLEN);
2904 +
2905 +    /* if we have not sent any multicast packets,
2906 +     * there is no need to check for re-injected multicast packets.
2907 +     */
2908 +    if (atomic_read(&mcast_entry_num)==0) {
2909 +       return 1;
2910 +    }
2911 +
2912 +
2913 +    if(is_multicast_pkt(eth->h_dest)) {
2914 +       MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__, \
2915 +               MAC_ARG(eth->h_dest));
2916 +
2917 +       if(ntohs(eth->h_vlan_proto)==0x8100) {
2918 +           return mcast_entry_del(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
2919 +       } else {
2920 +           return mcast_entry_del(0, eth->h_source, eth->h_dest);
2921 +       }
2922 +    }
2923 +
2924 +    return 1;
2925 +}
2926 +
2927 +
2928 +int32_t mcast_tx(struct sk_buff *skb)
2929 +{
2930 +    struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data);
2931 +
2932 +
2933 +    if(is_multicast_pkt(eth->h_dest)) {
2934 +       MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__,\
2935 +               MAC_ARG(eth->h_dest));
2936 +
2937 +       if(ntohs(eth->h_vlan_proto)==0x8100) {
2938 +           mcast_entry_ins(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
2939 +       } else {
2940 +           mcast_entry_ins(0, eth->h_source, eth->h_dest);
2941 +       }
2942 +    }
2943 +
2944 +    return 1;
2945 +}
2946 +
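For reference, a minimal sketch of how the table above is meant to be used: mcast_tx() records every multicast frame the driver transmits, and mcast_rx() returns 0 when a received multicast frame matches a recorded entry (i.e. our own frame re-injected by the switch), so the caller can drop it. The helpers below are hypothetical call sites, not part of the patch; the real hook points are presumably in raether.c, added later in this series.

/* Sketch only -- hypothetical call sites for mcast_tx()/mcast_rx(). */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int32_t mcast_tx(struct sk_buff *skb);
extern int32_t mcast_rx(struct sk_buff *skb);

/* TX side: remember every multicast frame handed to the hardware.
 * skb->data still points at the Ethernet header here.             */
static inline void example_record_mcast_tx(struct sk_buff *skb)
{
	mcast_tx(skb);			/* always returns 1, never drops the frame */
}

/* RX side: called after eth_type_trans(), so skb->data sits just past the
 * Ethernet header (mcast_rx() rewinds by ETH_HLEN internally).            */
static inline int example_filter_mcast_rx(struct sk_buff *skb)
{
	if (mcast_rx(skb) == 0) {	/* 0: matching entry -> our own frame */
		dev_kfree_skb(skb);	/* drop the re-injected copy           */
		return 1;		/* consumed                            */
	}
	return 0;			/* continue normal delivery            */
}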
2947 --- /dev/null
2948 +++ b/drivers/net/ethernet/raeth/mii_mgr.c
2949 @@ -0,0 +1,603 @@
2950 +#include <linux/module.h>
2951 +#include <linux/version.h>
2952 +#include <linux/netdevice.h>
2953 +
2954 +#include <linux/kernel.h>
2955 +#include <linux/sched.h>
2956 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
2957 +#include <asm/rt2880/rt_mmap.h>
2958 +#endif
2959 +
2960 +#include "ra2882ethreg.h"
2961 +#include "raether.h"
2962 +
2963 +
2964 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
2965 +#define PHY_CONTROL_0          0xC0   
2966 +#define PHY_CONTROL_1          0xC4   
2967 +#define MDIO_PHY_CONTROL_0     (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2968 +#define MDIO_PHY_CONTROL_1     (RALINK_ETH_SW_BASE + PHY_CONTROL_1)
2969 +
2970 +#define GPIO_MDIO_BIT          (1<<7)
2971 +#define GPIO_PURPOSE_SELECT    0x60
2972 +#define GPIO_PRUPOSE           (RALINK_SYSCTL_BASE + GPIO_PURPOSE_SELECT)
2973 +
2974 +#elif defined (CONFIG_RALINK_RT6855)  || defined (CONFIG_RALINK_RT6855A)
2975 +
2976 +#define PHY_CONTROL_0          0x7004   
2977 +#define MDIO_PHY_CONTROL_0     (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2978 +#define enable_mdio(x)
2979 +
2980 +#elif defined (CONFIG_RALINK_MT7620)
2981 +
2982 +#define PHY_CONTROL_0          0x7004   
2983 +#define MDIO_PHY_CONTROL_0     (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2984 +#define enable_mdio(x)
2985 +
2986 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
2987 +
2988 +#define PHY_CONTROL_0          0x0004   
2989 +#define MDIO_PHY_CONTROL_0     (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
2990 +#define enable_mdio(x)
2991 +
2992 +#else 
2993 +#define PHY_CONTROL_0          0x00
2994 +#define PHY_CONTROL_1          0x04
2995 +#define MDIO_PHY_CONTROL_0     (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_0)
2996 +#define MDIO_PHY_CONTROL_1     (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_1)
2997 +#define enable_mdio(x)
2998 +#endif
2999 +
3000 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3001 +void enable_mdio(int enable)
3002 +{
3003 +#if !defined (CONFIG_P5_MAC_TO_PHY_MODE) && !defined(CONFIG_GE1_RGMII_AN) && !defined(CONFIG_GE2_RGMII_AN) && \
3004 +    !defined (CONFIG_GE1_MII_AN) && !defined (CONFIG_GE2_MII_AN) && !defined (CONFIG_RALINK_MT7628)
3005 +       u32 data = sysRegRead(GPIO_PRUPOSE);
3006 +       if (enable)
3007 +               data &= ~GPIO_MDIO_BIT;
3008 +       else
3009 +               data |= GPIO_MDIO_BIT;
3010 +       sysRegWrite(GPIO_PRUPOSE, data);
3011 +#endif
3012 +}
3013 +#endif
3014 +
3015 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
3016 +
3017 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3018 +{
3019 +       u32 volatile status = 0;
3020 +       u32 rc = 0;
3021 +       unsigned long volatile t_start = jiffies;
3022 +       u32 volatile data = 0;
3023 +
3024 +       /* Enable the MDIO GPIO purpose register, and disable it on exit. */
3025 +       enable_mdio(1);
3026 +
3027 +       // make sure previous read operation is complete
3028 +       while (1) {
3029 +                       // 0 : Read/write operation complete
3030 +               if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) 
3031 +               {
3032 +                       break;
3033 +               }
3034 +               else if (time_after(jiffies, t_start + 5*HZ)) {
3035 +                       enable_mdio(0);
3036 +                       printk("\n MDIO Read operation is ongoing !!\n");
3037 +                       return rc;
3038 +               }
3039 +       }
3040 +
3041 +       data  = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
3042 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3043 +       data |= (1<<31);
3044 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3045 +       //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3046 +
3047 +
3048 +       // make sure read operation is complete
3049 +       t_start = jiffies;
3050 +       while (1) {
3051 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3052 +                       status = sysRegRead(MDIO_PHY_CONTROL_0);
3053 +                       *read_data = (u32)(status & 0x0000FFFF);
3054 +
3055 +                       enable_mdio(0);
3056 +                       return 1;
3057 +               }
3058 +               else if (time_after(jiffies, t_start+5*HZ)) {
3059 +                       enable_mdio(0);
3060 +                       printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3061 +                       return 0;
3062 +               }
3063 +       }
3064 +}
3065 +
3066 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3067 +{
3068 +       unsigned long volatile t_start=jiffies;
3069 +       u32 volatile data;
3070 +
3071 +       enable_mdio(1);
3072 +
3073 +       // make sure previous write operation is complete
3074 +       while(1) {
3075 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) 
3076 +               {
3077 +                       break;
3078 +               }
3079 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3080 +                       enable_mdio(0);
3081 +                       printk("\n MDIO Write operation ongoing\n");
3082 +                       return 0;
3083 +               }
3084 +       }
3085 +
3086 +       data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
3087 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3088 +       data |= (1<<31);
3089 +       sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3090 +       //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3091 +
3092 +       t_start = jiffies;
3093 +
3094 +       // make sure write operation is complete
3095 +       while (1) {
3096 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3097 +               {
3098 +                       enable_mdio(0);
3099 +                       return 1;
3100 +               }
3101 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3102 +                       enable_mdio(0);
3103 +                       printk("\n MDIO Write operation Time Out\n");
3104 +                       return 0;
3105 +               }
3106 +       }
3107 +}
3108 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
3109 +
3110 +u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3111 +{
3112 +       u32 volatile status = 0;
3113 +       u32 rc = 0;
3114 +       unsigned long volatile t_start = jiffies;
3115 +       u32 volatile data = 0;
3116 +
3117 +       /* Enable the MDIO GPIO purpose register, and disable it on exit. */
3118 +       enable_mdio(1);
3119 +
3120 +       // make sure previous read operation is complete
3121 +       while (1) {
3122 +                       // 0 : Read/write operation complete
3123 +               if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) 
3124 +               {
3125 +                       break;
3126 +               }
3127 +               else if (time_after(jiffies, t_start + 5*HZ)) {
3128 +                       enable_mdio(0);
3129 +                       printk("\n MDIO Read operation is ongoing !!\n");
3130 +                       return rc;
3131 +               }
3132 +       }
3133 +
3134 +       data  = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
3135 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3136 +       data |= (1<<31);
3137 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3138 +       //printk("\n Set Command [0x%08X] = [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0, data);
3139 +
3140 +
3141 +       // make sure read operation is complete
3142 +       t_start = jiffies;
3143 +       while (1) {
3144 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3145 +                       status = sysRegRead(MDIO_PHY_CONTROL_0);
3146 +                       *read_data = (u32)(status & 0x0000FFFF);
3147 +
3148 +                       enable_mdio(0);
3149 +                       return 1;
3150 +               }
3151 +               else if (time_after(jiffies, t_start+5*HZ)) {
3152 +                       enable_mdio(0);
3153 +                       printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3154 +                       return 0;
3155 +               }
3156 +       }
3157 +}
3158 +
3159 +u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3160 +{
3161 +       unsigned long volatile t_start=jiffies;
3162 +       u32 volatile data;
3163 +
3164 +       enable_mdio(1);
3165 +
3166 +       // make sure previous write operation is complete
3167 +       while(1) {
3168 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) 
3169 +               {
3170 +                       break;
3171 +               }
3172 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3173 +                       enable_mdio(0);
3174 +                       printk("\n MDIO Write operation ongoing\n");
3175 +                       return 0;
3176 +               }
3177 +       }
3178 +
3179 +       data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
3180 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3181 +       data |= (1<<31);
3182 +       sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3183 +       //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3184 +
3185 +       t_start = jiffies;
3186 +
3187 +       // make sure write operation is complete
3188 +       while (1) {
3189 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3190 +               {
3191 +                       enable_mdio(0);
3192 +                       return 1;
3193 +               }
3194 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3195 +                       enable_mdio(0);
3196 +                       printk("\n MDIO Write operation Time Out\n");
3197 +                       return 0;
3198 +               }
3199 +       }
3200 +}
3201 +
3202 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3203 +{
3204 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
3205 +        u32 low_word;
3206 +        u32 high_word;
3207 +        u32 an_status = 0;
3208 +        
3209 +       if(phy_addr==31) 
3210 +       {
3211 +               an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
3212 +               if(an_status){
3213 +                       *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
3214 +               }
3215 +               //phase1: write page address phase
3216 +                if(__mii_mgr_write(phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) {
3217 +                        //phase2: write address & read low word phase
3218 +                        if(__mii_mgr_read(phy_addr, (phy_register >> 2) & 0xF, &low_word)) {
3219 +                                //phase3: write address & read high word phase
3220 +                                if(__mii_mgr_read(phy_addr, (0x1 << 4), &high_word)) {
3221 +                                        *read_data = (high_word << 16) | (low_word & 0xFFFF);
3222 +                                       if(an_status){
3223 +                                               *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3224 +                                       }
3225 +                                       return 1;
3226 +                                }
3227 +                        }
3228 +                }
3229 +               if(an_status){
3230 +                       *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3231 +               }
3232 +        } else 
3233 +#endif
3234 +       {
3235 +                if(__mii_mgr_read(phy_addr, phy_register, read_data)) {
3236 +                        return 1;
3237 +                }
3238 +        }
3239 +
3240 +        return 0;
3241 +}
3242 +
3243 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3244 +{
3245 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
3246 +       u32 an_status = 0;
3247 +        
3248 +       if(phy_addr == 31) 
3249 +       {
3250 +               an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
3251 +               if(an_status){
3252 +                       *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
3253 +               }
3254 +               //phase1: write page address phase
3255 +                if(__mii_mgr_write(phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) {
3256 +                        //phase2: write address & read low word phase
3257 +                        if(__mii_mgr_write(phy_addr, ((phy_register >> 2) & 0xF), write_data & 0xFFFF)) {
3258 +                                //phase3: write address & read high word phase
3259 +                                if(__mii_mgr_write(phy_addr, (0x1 << 4), write_data >> 16)) {
3260 +                                       if(an_status){
3261 +                                               *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3262 +                                       }
3263 +                                   return 1;
3264 +                                }
3265 +                        }
3266 +                }
3267 +               if(an_status){
3268 +                       *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
3269 +               }
3270 +        } else 
3271 +#endif
3272 +       {
3273 +                if(__mii_mgr_write(phy_addr, phy_register, write_data)) {
3274 +                        return 1;
3275 +                }
3276 +        }
3277 +
3278 +        return 0;
3279 +}
3280 +
3281 +u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr)
3282 +{
3283 +       u32 rc = 0;
3284 +       unsigned long volatile t_start = jiffies;
3285 +       u32 volatile data = 0;
3286 +
3287 +       enable_mdio(1);
3288 +
3289 +       while (1) {
3290 +               if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3291 +               {
3292 +                       break;
3293 +               }
3294 +               else if (time_after(jiffies, t_start + 5*HZ)) {
3295 +                       enable_mdio(0);
3296 +                       printk("\n MDIO Read operation is ongoing !!\n");
3297 +                       return rc;
3298 +               }
3299 +       }
3300 +       data = (dev_addr << 25) | (port_num << 20) | (0x00 << 18) | (0x00 << 16) | reg_addr;
3301 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3302 +       data |= (1<<31);
3303 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3304 +
3305 +       t_start = jiffies;
3306 +       while (1) {
3307 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3308 +               {
3309 +                       enable_mdio(0);
3310 +                       return 1;
3311 +               }
3312 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3313 +                       enable_mdio(0);
3314 +                       printk("\n MDIO Write operation Time Out\n");
3315 +                       return 0;
3316 +               }
3317 +       }
3318 +
3319 +}
3320 +
3321 +
3322 +u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data)
3323 +{
3324 +       u32 volatile status = 0;
3325 +       u32 rc = 0;
3326 +       unsigned long volatile t_start = jiffies;
3327 +       u32 volatile data = 0;
3328 +
3329 +        // set address first
3330 +       mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
3331 +       //udelay(10);
3332 +
3333 +       enable_mdio(1);
3334 +
3335 +       while (1) {
3336 +               if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3337 +               {
3338 +                       break;
3339 +               }
3340 +               else if (time_after(jiffies, t_start + 5*HZ)) {
3341 +                       enable_mdio(0);
3342 +                       printk("\n MDIO Read operation is ongoing !!\n");
3343 +                       return rc;
3344 +               }
3345 +       }
3346 +       data = (dev_addr << 25) | (port_num << 20) | (0x03 << 18) | (0x00 << 16) | reg_addr;
3347 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3348 +       data |= (1<<31);
3349 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3350 +       t_start = jiffies;
3351 +       while (1) {
3352 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3353 +                       *read_data = (sysRegRead(MDIO_PHY_CONTROL_0) & 0x0000FFFF);
3354 +                       enable_mdio(0);
3355 +                       return 1;
3356 +               }
3357 +               else if (time_after(jiffies, t_start+5*HZ)) {
3358 +                       enable_mdio(0);
3359 +                       printk("\n Set Operation: MDIO Read operation is ongoing and Time Out!!\n");
3360 +                       return 0;
3361 +               }
3362 +               status = sysRegRead(MDIO_PHY_CONTROL_0);
3363 +       }
3364 +
3365 +}
3366 +
3367 +u32 mii_mgr_write_cl45 (u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
3368 +{
3369 +       u32 rc = 0;
3370 +       unsigned long volatile t_start = jiffies;
3371 +       u32 volatile data = 0;
3372 +
3373 +       // set address first
3374 +       mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
3375 +       //udelay(10);
3376 +
3377 +       enable_mdio(1);
3378 +       while (1) {
3379 +               if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3380 +               {
3381 +                       break;
3382 +               }
3383 +               else if (time_after(jiffies, t_start + 5*HZ)) {
3384 +                       enable_mdio(0);
3385 +                       printk("\n MDIO Read operation is ongoing !!\n");
3386 +                       return rc;
3387 +               }
3388 +       }
3389 +
3390 +       data = (dev_addr << 25) | (port_num << 20) | (0x01 << 18) | (0x00 << 16) | write_data;
3391 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3392 +       data |= (1<<31);
3393 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3394 +
3395 +       t_start = jiffies;
3396 +
3397 +       while (1) {
3398 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
3399 +               {
3400 +                       enable_mdio(0);
3401 +                       return 1;
3402 +               }
3403 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3404 +                       enable_mdio(0);
3405 +                       printk("\n MDIO Write operation Time Out\n");
3406 +                       return 0;
3407 +               }
3408 +
3409 +       }
3410 +}
3411 +
3412 +#else // not rt6855
3413 +
3414 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
3415 +{
3416 +       u32 volatile status = 0;
3417 +       u32 rc = 0;
3418 +       unsigned long volatile t_start = jiffies;
3419 +#if !defined (CONFIG_RALINK_RT3052) && !defined (CONFIG_RALINK_RT3352) && !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
3420 +       u32 volatile data = 0;
3421 +#endif
3422 +
3423 +       /* Enable the MDIO GPIO purpose register, and disable it on exit. */
3424 +       enable_mdio(1);
3425 +
3426 +       // make sure previous read operation is complete
3427 +       while (1) {
3428 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3429 +               // rd_rdy: read operation is complete
3430 +               if(!( sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1))) 
3431 +#else
3432 +                       // 0 : Read/write operation complete
3433 +               if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) 
3434 +#endif
3435 +               {
3436 +                       break;
3437 +               }
3438 +               else if (time_after(jiffies, t_start + 5*HZ)) {
3439 +                       enable_mdio(0);
3440 +                       printk("\n MDIO Read operation is ongoing !!\n");
3441 +                       return rc;
3442 +               }
3443 +       }
3444 +
3445 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3446 +       sysRegWrite(MDIO_PHY_CONTROL_0 , (1<<14) | (phy_register << 8) | (phy_addr));
3447 +#else
3448 +       data  = (phy_addr << 24) | (phy_register << 16);
3449 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3450 +       data |= (1<<31);
3451 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3452 +#endif
3453 +       //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3454 +
3455 +
3456 +       // make sure read operation is complete
3457 +       t_start = jiffies;
3458 +       while (1) {
3459 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3460 +               if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)) {
3461 +                       status = sysRegRead(MDIO_PHY_CONTROL_1);
3462 +                       *read_data = (u32)(status >>16);
3463 +
3464 +                       enable_mdio(0);
3465 +                       return 1;
3466 +               }
3467 +#else
3468 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
3469 +                       status = sysRegRead(MDIO_PHY_CONTROL_0);
3470 +                       *read_data = (u32)(status & 0x0000FFFF);
3471 +
3472 +                       enable_mdio(0);
3473 +                       return 1;
3474 +               }
3475 +#endif
3476 +               else if (time_after(jiffies, t_start+5*HZ)) {
3477 +                       enable_mdio(0);
3478 +                       printk("\n MDIO Read operation is ongoing and Time Out!!\n");
3479 +                       return 0;
3480 +               }
3481 +       }
3482 +}
3483 +
3484 +
3485 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
3486 +{
3487 +       unsigned long volatile t_start=jiffies;
3488 +       u32 volatile data;
3489 +
3490 +       enable_mdio(1);
3491 +
3492 +       // make sure previous write operation is complete
3493 +       while(1) {
3494 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3495 +               if (!(sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)))
3496 +#else
3497 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) 
3498 +#endif
3499 +               {
3500 +                       break;
3501 +               }
3502 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3503 +                       enable_mdio(0);
3504 +                       printk("\n MDIO Write operation ongoing\n");
3505 +                       return 0;
3506 +               }
3507 +       }
3508 +
3509 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3510 +       data = ((write_data & 0xFFFF) << 16);
3511 +       data |= (phy_register << 8) | (phy_addr);
3512 +       data |= (1<<13);
3513 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3514 +#else
3515 +       data = (1<<30) | (phy_addr << 24) | (phy_register << 16) | write_data;
3516 +       sysRegWrite(MDIO_PHY_CONTROL_0, data);
3517 +       data |= (1<<31);
3518 +       sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
3519 +#endif
3520 +       //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
3521 +
3522 +       t_start = jiffies;
3523 +
3524 +       // make sure write operation is complete
3525 +       while (1) {
3526 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3527 +               if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)) //wt_done ?= 1
3528 +#else
3529 +               if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
3530 +#endif
3531 +               {
3532 +                       enable_mdio(0);
3533 +                       return 1;
3534 +               }
3535 +               else if (time_after(jiffies, t_start + 5 * HZ)) {
3536 +                       enable_mdio(0);
3537 +                       printk("\n MDIO Write operation Time Out\n");
3538 +                       return 0;
3539 +               }
3540 +       }
3541 +}
3542 +
3543 +
3544 +
3545 +
3546 +#endif
3547 +
3548 +
3549 +
3550 +
3551 +EXPORT_SYMBOL(mii_mgr_write);
3552 +EXPORT_SYMBOL(mii_mgr_read);
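A minimal illustrative caller for the two exported helpers above (the function below is hypothetical and not part of the patch): it reads the standard clause-22 PHY identifier registers 2 and 3 for a given PHY address, relying on the return convention used throughout this file of 1 on success and 0 on an MDIO timeout.

/* Sketch only -- illustrative user of mii_mgr_read()/mii_mgr_write(). */
#include <linux/types.h>
#include <linux/kernel.h>

extern u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
extern u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);

static void example_dump_phy_id(u32 phy_addr)
{
	u32 id1 = 0, id2 = 0;

	/* Registers 2 and 3 hold the PHY identifier in clause-22 PHYs. */
	if (mii_mgr_read(phy_addr, 2, &id1) && mii_mgr_read(phy_addr, 3, &id2))
		printk(KERN_INFO "PHY %u: ID %04x:%04x\n", phy_addr, id1, id2);
	else
		printk(KERN_WARNING "PHY %u: MDIO read timed out\n", phy_addr);
}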
3553 --- /dev/null
3554 +++ b/drivers/net/ethernet/raeth/ra2882ethreg.h
3555 @@ -0,0 +1,1985 @@
3556 +#ifndef RA2882ETHREG_H
3557 +#define RA2882ETHREG_H
3558 +
3559 +#include <linux/mii.h>         // for struct mii_if_info in ra2882ethreg.h
3560 +#include <linux/version.h>     /* check linux version for 2.4 and 2.6 compatibility */
3561 +#include <linux/interrupt.h>   /* for "struct tasklet_struct" in linux-3.10.14 */
3562 +#if defined (CONFIG_HW_SFQ)
3563 +#include <linux/ip.h>  
3564 +#include <linux/ipv6.h>
3565 +#endif
3566 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
3567 +#include <asm/rt2880/rt_mmap.h>
3568 +#endif
3569 +#include "raether.h"
3570 +
3571 +#ifdef WORKQUEUE_BH
3572 +#include <linux/workqueue.h>
3573 +#endif // WORKQUEUE_BH //
3574 +#ifdef CONFIG_RAETH_LRO
3575 +#include <linux/inet_lro.h>
3576 +#endif
3577 +
3578 +#define MAX_PACKET_SIZE        1514
3579 +#define        MIN_PACKET_SIZE 60
3580 +#define MAX_TXD_LEN 0x3fff
3581 +
3582 +#if defined (CONFIG_ARCH_MT7623)
3583 +#define phys_to_bus(a) (a)
3584 +#else
3585 +#define phys_to_bus(a) (a & 0x1FFFFFFF)
3586 +#endif
3587 +
3588 +
3589 +
3590 +
3591 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
3592 +#define BIT(x)     ((1 << x))
3593 +#endif
3594 +/* bits range: for example BITS(16,23) = 0xFF0000
3595 + *   ==>  (BIT(m)-1)   = 0x0000FFFF     ~(BIT(m)-1)   => 0xFFFF0000
3596 + *   ==>  (BIT(n+1)-1) = 0x00FFFFFF
3597 + */
3598 +#define BITS(m,n)   (~(BIT(m)-1) & ((BIT(n) - 1) | BIT(n)))
3599 +
3600 +#define ETHER_ADDR_LEN  6
3601 +
3602 +/*  Phy Vendor ID list */
3603 +
3604 +#define EV_ICPLUS_PHY_ID0 0x0243  
3605 +#define EV_ICPLUS_PHY_ID1 0x0D90  
3606 +#define EV_MARVELL_PHY_ID0 0x0141  
3607 +#define EV_MARVELL_PHY_ID1 0x0CC2  
3608 +#define EV_VTSS_PHY_ID0 0x0007
3609 +#define EV_VTSS_PHY_ID1 0x0421
3610 +
3611 +/*
3612 +     FE_INT_STATUS
3613 +*/
3614 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3615 +    defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
3616 +    defined (CONFIG_ARCH_MT7623)
3617 +
3618 +#define RX_COHERENT      BIT(31)
3619 +#define RX_DLY_INT       BIT(30)
3620 +#define TX_COHERENT      BIT(29)
3621 +#define TX_DLY_INT       BIT(28)
3622 +#define RING3_RX_DLY_INT    BIT(27)
3623 +#define RING2_RX_DLY_INT    BIT(26)
3624 +#define RING1_RX_DLY_INT    BIT(25)
3625 +
3626 +#define ALT_RPLC_INT3    BIT(23)
3627 +#define ALT_RPLC_INT2    BIT(22)
3628 +#define ALT_RPLC_INT1    BIT(21)
3629 +
3630 +#define RX_DONE_INT3     BIT(19)
3631 +#define RX_DONE_INT2     BIT(18)
3632 +#define RX_DONE_INT1     BIT(17)
3633 +#define RX_DONE_INT0     BIT(16)
3634 +
3635 +#define TX_DONE_INT3     BIT(3)
3636 +#define TX_DONE_INT2     BIT(2)
3637 +#define TX_DONE_INT1     BIT(1)
3638 +#define TX_DONE_INT0     BIT(0)
3639 +
3640 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3641 +#define RLS_COHERENT     BIT(29)
3642 +#define RLS_DLY_INT      BIT(28)
3643 +#define RLS_DONE_INT     BIT(0)
3644 +#endif
3645 +
3646 +#else
3647 +//#define CNT_PPE_AF       BIT(31)     
3648 +//#define CNT_GDM_AF       BIT(29)
3649 +#define PSE_P2_FC       BIT(26)
3650 +#define GDM_CRC_DROP     BIT(25)
3651 +#define PSE_BUF_DROP     BIT(24)
3652 +#define GDM_OTHER_DROP  BIT(23)
3653 +#define PSE_P1_FC        BIT(22)
3654 +#define PSE_P0_FC        BIT(21)
3655 +#define PSE_FQ_EMPTY     BIT(20)
3656 +#define GE1_STA_CHG      BIT(18)
3657 +#define TX_COHERENT      BIT(17)
3658 +#define RX_COHERENT      BIT(16)
3659 +
3660 +#define TX_DONE_INT3     BIT(11)
3661 +#define TX_DONE_INT2     BIT(10)
3662 +#define TX_DONE_INT1     BIT(9)
3663 +#define TX_DONE_INT0     BIT(8)
3664 +#define RX_DONE_INT1     RX_DONE_INT0
3665 +#define RX_DONE_INT0     BIT(2)
3666 +#define TX_DLY_INT       BIT(1)
3667 +#define RX_DLY_INT       BIT(0)
3668 +#endif
3669 +
3670 +#define FE_INT_ALL             (TX_DONE_INT3 | TX_DONE_INT2 | \
3671 +                                TX_DONE_INT1 | TX_DONE_INT0 | \
3672 +                                RX_DONE_INT0 | RX_DONE_INT1 | \
3673 +                                RX_DONE_INT2 | RX_DONE_INT3)
3674 +
3675 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3676 +#define QFE_INT_ALL            (RLS_DONE_INT | RX_DONE_INT0 | RX_DONE_INT1)
3677 +#define QFE_INT_DLY_INIT       (RLS_DLY_INT | RX_DLY_INT)
3678 +
3679 +#define NUM_QDMA_PAGE      512 
3680 +#define QDMA_PAGE_SIZE      2048
3681 +#endif
3682 +/*
3683 + * SW_INT_STATUS
3684 + */
3685 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3686 +#define PORT0_QUEUE_FULL        BIT(14) //port0 queue full
3687 +#define PORT1_QUEUE_FULL        BIT(15) //port1 queue full
3688 +#define PORT2_QUEUE_FULL        BIT(16) //port2 queue full
3689 +#define PORT3_QUEUE_FULL        BIT(17) //port3 queue full
3690 +#define PORT4_QUEUE_FULL        BIT(18) //port4 queue full
3691 +#define PORT5_QUEUE_FULL        BIT(19) //port5 queue full
3692 +#define PORT6_QUEUE_FULL        BIT(20) //port6 queue full
3693 +#define SHARED_QUEUE_FULL       BIT(23) //shared queue full
3694 +#define QUEUE_EXHAUSTED         BIT(24) //global queue is used up and all packets are dropped
3695 +#define BC_STROM                BIT(25) //the device is undergoing broadcast storm
3696 +#define PORT_ST_CHG             BIT(26) //Port status change
3697 +#define UNSECURED_ALERT         BIT(27) //Intruder alert
3698 +#define ABNORMAL_ALERT          BIT(28) //Abnormal
3699 +
3700 +#define ESW_ISR                        (RALINK_ETH_SW_BASE + 0x00)
3701 +#define ESW_IMR                        (RALINK_ETH_SW_BASE + 0x04)
3702 +#define ESW_INT_ALL            (PORT_ST_CHG)
3703 +
3704 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3705 +      defined (CONFIG_RALINK_MT7620)
3706 +#define MIB_INT                 BIT(25)
3707 +#define ACL_INT                        BIT(24)
3708 +#define P5_LINK_CH             BIT(5)
3709 +#define P4_LINK_CH             BIT(4)
3710 +#define P3_LINK_CH             BIT(3)
3711 +#define P2_LINK_CH             BIT(2)
3712 +#define P1_LINK_CH             BIT(1)
3713 +#define P0_LINK_CH             BIT(0)
3714 +
3715 +#define RX_GOCT_CNT            BIT(4)
3716 +#define RX_GOOD_CNT            BIT(6)
3717 +#define TX_GOCT_CNT            BIT(17)
3718 +#define TX_GOOD_CNT            BIT(19)
3719 +
3720 +#define MSK_RX_GOCT_CNT                BIT(4)
3721 +#define MSK_RX_GOOD_CNT                BIT(6)
3722 +#define MSK_TX_GOCT_CNT                BIT(17)
3723 +#define MSK_TX_GOOD_CNT                BIT(19)
3724 +#define MSK_CNT_INT_ALL                (MSK_RX_GOCT_CNT | MSK_RX_GOOD_CNT | MSK_TX_GOCT_CNT | MSK_TX_GOOD_CNT) 
3725 +//#define MSK_CNT_INT_ALL              (MSK_RX_GOOD_CNT | MSK_TX_GOOD_CNT) 
3726 +
3727 +
3728 +#define ESW_IMR                        (RALINK_ETH_SW_BASE + 0x7000 + 0x8)
3729 +#define ESW_ISR                        (RALINK_ETH_SW_BASE + 0x7000 + 0xC)
3730 +#define ESW_INT_ALL            (P0_LINK_CH | P1_LINK_CH | P2_LINK_CH | P3_LINK_CH | P4_LINK_CH | P5_LINK_CH | ACL_INT | MIB_INT)
3731 +#define ESW_AISR               (RALINK_ETH_SW_BASE + 0x8)
3732 +#define ESW_P0_IntSn           (RALINK_ETH_SW_BASE + 0x4004)
3733 +#define ESW_P1_IntSn           (RALINK_ETH_SW_BASE + 0x4104)
3734 +#define ESW_P2_IntSn           (RALINK_ETH_SW_BASE + 0x4204)
3735 +#define ESW_P3_IntSn           (RALINK_ETH_SW_BASE + 0x4304)
3736 +#define ESW_P4_IntSn           (RALINK_ETH_SW_BASE + 0x4404)
3737 +#define ESW_P5_IntSn           (RALINK_ETH_SW_BASE + 0x4504)
3738 +#define ESW_P6_IntSn           (RALINK_ETH_SW_BASE + 0x4604)
3739 +#define ESW_P0_IntMn           (RALINK_ETH_SW_BASE + 0x4008)
3740 +#define ESW_P1_IntMn           (RALINK_ETH_SW_BASE + 0x4108)
3741 +#define ESW_P2_IntMn           (RALINK_ETH_SW_BASE + 0x4208)
3742 +#define ESW_P3_IntMn           (RALINK_ETH_SW_BASE + 0x4308)
3743 +#define ESW_P4_IntMn           (RALINK_ETH_SW_BASE + 0x4408)
3744 +#define ESW_P5_IntMn           (RALINK_ETH_SW_BASE + 0x4508)
3745 +#define ESW_P6_IntMn           (RALINK_ETH_SW_BASE + 0x4608)
3746 +
3747 +#if defined (CONFIG_RALINK_MT7620) 
3748 +#define ESW_P7_IntSn           (RALINK_ETH_SW_BASE + 0x4704)
3749 +#define ESW_P7_IntMn           (RALINK_ETH_SW_BASE + 0x4708)
3750 +#endif
3751 +
3752 +
3753 +#define ESW_PHY_POLLING                (RALINK_ETH_SW_BASE + 0x7000)
3754 +
3755 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3756 +
3757 +#define ESW_PHY_POLLING                (RALINK_ETH_SW_BASE + 0x0000)
3758 +
3759 +#define P5_LINK_CH             BIT(5)
3760 +#define P4_LINK_CH             BIT(4)
3761 +#define P3_LINK_CH             BIT(3)
3762 +#define P2_LINK_CH             BIT(2)
3763 +#define P1_LINK_CH             BIT(1)
3764 +#define P0_LINK_CH             BIT(0)
3765 +
3766 +
3767 +#endif // CONFIG_RALINK_RT3052 || CONFIG_RALINK_RT3352 || CONFIG_RALINK_RT5350 || defined (CONFIG_RALINK_MT7628)//
3768 +
3769 +#define RX_BUF_ALLOC_SIZE      2000
3770 +#define FASTPATH_HEADROOM      64
3771 +
3772 +#define ETHER_BUFFER_ALIGN     32              ///// Align on a cache line
3773 +
3774 +#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
3775 +        ((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
3776 +        ~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr))
3777 +
3778 +#ifdef CONFIG_PSEUDO_SUPPORT
3779 +typedef struct _PSEUDO_ADAPTER {
3780 +    struct net_device *RaethDev;
3781 +    struct net_device *PseudoDev;
3782 +    struct net_device_stats stat;
3783 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
3784 +       struct mii_if_info      mii_info;
3785 +#endif
3786 +
3787 +} PSEUDO_ADAPTER, PPSEUDO_ADAPTER;
3788 +
3789 +#define MAX_PSEUDO_ENTRY               1
3790 +#endif
3791 +
3792 +
3793 +
3794 +/* Register Categories Definition */
3795 +#define RAFRAMEENGINE_OFFSET   0x0000
3796 +#define RAGDMA_OFFSET          0x0020
3797 +#define RAPSE_OFFSET           0x0040
3798 +#define RAGDMA2_OFFSET         0x0060
3799 +#define RACDMA_OFFSET          0x0080
3800 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3801 +    defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
3802 +    defined (CONFIG_ARCH_MT7623)
3803 +
3804 +#define RAPDMA_OFFSET          0x0800
3805 +#define SDM_OFFSET             0x0C00
3806 +#else
3807 +#define RAPDMA_OFFSET          0x0100
3808 +#endif
3809 +#define RAPPE_OFFSET           0x0200
3810 +#define RACMTABLE_OFFSET       0x0400
3811 +#define RAPOLICYTABLE_OFFSET   0x1000
3812 +
3813 +
3814 +/* Register Map Detail */
3815 +/* RT3883 */
3816 +#define SYSCFG1                        (RALINK_SYSCTL_BASE + 0x14)
3817 +
3818 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
3819 +
3820 +/* 1. PDMA */
3821 +#define TX_BASE_PTR0           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x000)
3822 +#define TX_MAX_CNT0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x004)
3823 +#define TX_CTX_IDX0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x008)
3824 +#define TX_DTX_IDX0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00C)
3825 +
3826 +#define TX_BASE_PTR1           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x010)
3827 +#define TX_MAX_CNT1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x014)
3828 +#define TX_CTX_IDX1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x018)
3829 +#define TX_DTX_IDX1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x01C)
3830 +
3831 +#define TX_BASE_PTR2           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x020)
3832 +#define TX_MAX_CNT2            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x024)
3833 +#define TX_CTX_IDX2            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x028)
3834 +#define TX_DTX_IDX2            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x02C)
3835 +
3836 +#define TX_BASE_PTR3           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x030)
3837 +#define TX_MAX_CNT3            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x034)
3838 +#define TX_CTX_IDX3            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x038)
3839 +#define TX_DTX_IDX3            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x03C)
3840 +
3841 +#define RX_BASE_PTR0           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x100)
3842 +#define RX_MAX_CNT0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x104)
3843 +#define RX_CALC_IDX0           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x108)
3844 +#define RX_DRX_IDX0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10C)
3845 +
3846 +#define RX_BASE_PTR1           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x110)
3847 +#define RX_MAX_CNT1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x114)
3848 +#define RX_CALC_IDX1           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x118)
3849 +#define RX_DRX_IDX1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x11C)
3850 +
3851 +#define PDMA_INFO              (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x200)
3852 +#define PDMA_GLO_CFG           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x204)
3853 +#define PDMA_RST_IDX           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x208)
3854 +#define PDMA_RST_CFG           (PDMA_RST_IDX)
3855 +#define DLY_INT_CFG            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20C)
3856 +#define FREEQ_THRES            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x210)
3857 +#define INT_STATUS             (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x220)
3858 +#define FE_INT_STATUS          (INT_STATUS)
3859 +#define INT_MASK               (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x228)
3860 +#define FE_INT_ENABLE          (INT_MASK)
3861 +#define PDMA_WRR               (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
3862 +#define PDMA_SCH_CFG           (PDMA_WRR)
3863 +
3864 +#define SDM_CON                        (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x00)  //Switch DMA configuration
3865 +#define SDM_RRING              (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x04)  //Switch DMA Rx Ring
3866 +#define SDM_TRING              (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x08)  //Switch DMA Tx Ring
3867 +#define SDM_MAC_ADRL           (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x0C)  //Switch MAC address LSB
3868 +#define SDM_MAC_ADRH           (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10)  //Switch MAC Address MSB
3869 +#define SDM_TPCNT              (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x100) //Switch DMA Tx packet count
3870 +#define SDM_TBCNT              (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x104) //Switch DMA Tx byte count
3871 +#define SDM_RPCNT              (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x108) //Switch DMA rx packet count
3872 +#define SDM_RBCNT              (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10C) //Switch DMA rx byte count
3873 +#define SDM_CS_ERR             (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x110) //Switch DMA rx checksum error count
3874 +
3875 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
3876 +      defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || \
3877 +      defined (CONFIG_ARCH_MT7623)
3878 +
3879 +/* Old FE with New PDMA */
3880 +#define PDMA_RELATED            0x0800
3881 +/* 1. PDMA */
3882 +#define TX_BASE_PTR0            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x000)
3883 +#define TX_MAX_CNT0             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x004)
3884 +#define TX_CTX_IDX0             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x008)
3885 +#define TX_DTX_IDX0             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x00C)
3886 +
3887 +#define TX_BASE_PTR1            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x010)
3888 +#define TX_MAX_CNT1             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x014)
3889 +#define TX_CTX_IDX1             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x018)
3890 +#define TX_DTX_IDX1             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x01C)
3891 +
3892 +#define TX_BASE_PTR2            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x020)
3893 +#define TX_MAX_CNT2             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x024)
3894 +#define TX_CTX_IDX2             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x028)
3895 +#define TX_DTX_IDX2             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x02C)
3896 +
3897 +#define TX_BASE_PTR3            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x030)
3898 +#define TX_MAX_CNT3             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x034)
3899 +#define TX_CTX_IDX3             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x038)
3900 +#define TX_DTX_IDX3             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x03C)
3901 +
3902 +#define RX_BASE_PTR0            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x100)
3903 +#define RX_MAX_CNT0             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x104)
3904 +#define RX_CALC_IDX0            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x108)
3905 +#define RX_DRX_IDX0             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x10C)
3906 +
3907 +#define RX_BASE_PTR1            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x110)
3908 +#define RX_MAX_CNT1             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x114)
3909 +#define RX_CALC_IDX1            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x118)
3910 +#define RX_DRX_IDX1             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x11C)
3911 +
3912 +#define RX_BASE_PTR2            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x120)
3913 +#define RX_MAX_CNT2             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x124)
3914 +#define RX_CALC_IDX2            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x128)
3915 +#define RX_DRX_IDX12            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x12C)
3916 +
3917 +#define RX_BASE_PTR3            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x130)
3918 +#define RX_MAX_CNT3             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x134)
3919 +#define RX_CALC_IDX3            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x138)
3920 +#define RX_DRX_IDX3             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x13C)
3921 +
3922 +#define PDMA_INFO               (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x200)
3923 +#define PDMA_GLO_CFG            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x204)
3924 +#define PDMA_RST_IDX            (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x208)
3925 +#define PDMA_RST_CFG            (PDMA_RST_IDX)
3926 +#define DLY_INT_CFG             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x20C)
3927 +#define FREEQ_THRES             (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x210)
3928 +#define INT_STATUS              (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x220)
3929 +#define FE_INT_STATUS          (INT_STATUS)
3930 +#define INT_MASK                (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x228)
3931 +#define FE_INT_ENABLE          (INT_MASK)
3932 +#define SCH_Q01_CFG            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
3933 +#define SCH_Q23_CFG            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x284)
3934 +
3935 +#define FE_GLO_CFG          RALINK_FRAME_ENGINE_BASE + 0x00
3936 +#define FE_RST_GL           RALINK_FRAME_ENGINE_BASE + 0x04
3937 +#define FE_INT_STATUS2     RALINK_FRAME_ENGINE_BASE + 0x08
3938 +#define FE_INT_ENABLE2     RALINK_FRAME_ENGINE_BASE + 0x0c
3939 +//#define FC_DROP_STA         RALINK_FRAME_ENGINE_BASE + 0x18
3940 +#define FOE_TS_T            RALINK_FRAME_ENGINE_BASE + 0x10
3941 +
3942 +#if defined (CONFIG_RALINK_MT7620)
3943 +#define GDMA1_RELATED       0x0600
3944 +#define GDMA1_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3945 +#define GDMA1_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3946 +#define GDMA1_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3947 +#define GDMA1_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3948 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
3949 +#define GDMA1_RELATED       0x0500
3950 +#define GDMA1_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3951 +#define GDMA1_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3952 +#define GDMA1_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3953 +#define GDMA1_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3954 +
3955 +#define GDMA2_RELATED       0x1500
3956 +#define GDMA2_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
3957 +#define GDMA2_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
3958 +#define GDMA2_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
3959 +#define GDMA2_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
3960 +#else
3961 +#define GDMA1_RELATED       0x0020
3962 +#define GDMA1_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
3963 +#define GDMA1_SCH_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
3964 +#define GDMA1_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
3965 +#define GDMA1_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
3966 +#define GDMA1_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x10)
3967 +
3968 +#define GDMA2_RELATED       0x0060
3969 +#define GDMA2_FWD_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
3970 +#define GDMA2_SCH_CFG       (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
3971 +#define GDMA2_SHPR_CFG      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
3972 +#define GDMA2_MAC_ADRL      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
3973 +#define GDMA2_MAC_ADRH      (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x10)
3974 +#endif
3975 +
3976 +#if defined (CONFIG_RALINK_MT7620)
3977 +#define PSE_RELATED         0x0500
3978 +#define PSE_FQFC_CFG        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
3979 +#define PSE_IQ_CFG          (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
3980 +#define PSE_QUE_STA         (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
3981 +#else
3982 +#define PSE_RELATED         0x0040
3983 +#define PSE_FQ_CFG          (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
3984 +#define CDMA_FC_CFG         (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
3985 +#define GDMA1_FC_CFG        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
3986 +#define GDMA2_FC_CFG        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x0C)
3987 +#define CDMA_OQ_STA         (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x10)
3988 +#define GDMA1_OQ_STA        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x14)
3989 +#define GDMA2_OQ_STA        (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x18)
3990 +#define PSE_IQ_STA          (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x1C)
3991 +#endif
3992 +
3993 +
3994 +#if defined (CONFIG_RALINK_MT7620)
3995 +#define CDMA_RELATED        0x0400
3996 +#define CDMA_CSG_CFG        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
3997 +#define SMACCR0                    (RALINK_ETH_SW_BASE + 0x3FE4)
3998 +#define SMACCR1                    (RALINK_ETH_SW_BASE + 0x3FE8)
3999 +#define CKGCR               (RALINK_ETH_SW_BASE + 0x3FF0)
4000 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4001 +#define CDMA_RELATED        0x0400
4002 +#define CDMA_CSG_CFG        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) //fake definition
4003 +#define CDMP_IG_CTRL        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4004 +#define CDMP_EG_CTRL        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
4005 +#else
4006 +#define CDMA_RELATED        0x0080
4007 +#define CDMA_CSG_CFG        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
4008 +#define CDMA_SCH_CFG        (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
4009 +#define SMACCR0                    (RALINK_ETH_SW_BASE + 0x30E4)
4010 +#define SMACCR1                    (RALINK_ETH_SW_BASE + 0x30E8)
4011 +#define CKGCR               (RALINK_ETH_SW_BASE + 0x30F0)
4012 +#endif
4013 +
4014 +#define PDMA_FC_CFG        (RALINK_FRAME_ENGINE_BASE+0x100)
4015 +
4016 +
4017 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4018 +/*kurtis: add QDMA define*/
4019 +
4020 +#define CLK_CFG_0              (RALINK_SYSCTL_BASE + 0x2C)
4021 +#define PAD_RGMII2_MDIO_CFG     (RALINK_SYSCTL_BASE + 0x58)
4022 +
4023 +#define QDMA_RELATED            0x1800
4024 +#define  QTX_CFG_0          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x000)
4025 +#define  QTX_SCH_0          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x004)
4026 +#define  QTX_HEAD_0         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x008)
4027 +#define  QTX_TAIL_0         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x00C)
4028 +#define  QTX_CFG_1          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x010)
4029 +#define  QTX_SCH_1          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x014)
4030 +#define  QTX_HEAD_1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x018)
4031 +#define  QTX_TAIL_1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x01C)
4032 +#define  QTX_CFG_2          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x020)
4033 +#define  QTX_SCH_2          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x024)
4034 +#define  QTX_HEAD_2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x028)
4035 +#define  QTX_TAIL_2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x02C)
4036 +#define  QTX_CFG_3          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x030)
4037 +#define  QTX_SCH_3          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x034)
4038 +#define  QTX_HEAD_3         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x038)
4039 +#define  QTX_TAIL_3         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x03C)
4040 +#define  QTX_CFG_4          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x040)
4041 +#define  QTX_SCH_4          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x044)
4042 +#define  QTX_HEAD_4         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x048)
4043 +#define  QTX_TAIL_4         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x04C)
4044 +#define  QTX_CFG_5          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x050)
4045 +#define  QTX_SCH_5          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x054)
4046 +#define  QTX_HEAD_5         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x058)
4047 +#define  QTX_TAIL_5         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x05C)
4048 +#define  QTX_CFG_6          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x060)
4049 +#define  QTX_SCH_6          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x064)
4050 +#define  QTX_HEAD_6         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x068)
4051 +#define  QTX_TAIL_6         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x06C)
4052 +#define  QTX_CFG_7          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x070)
4053 +#define  QTX_SCH_7          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x074)
4054 +#define  QTX_HEAD_7         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x078)
4055 +#define  QTX_TAIL_7         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x07C)
4056 +#define  QTX_CFG_8          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x080)
4057 +#define  QTX_SCH_8          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x084)
4058 +#define  QTX_HEAD_8         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x088)
4059 +#define  QTX_TAIL_8         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x08C)
4060 +#define  QTX_CFG_9          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x090)
4061 +#define  QTX_SCH_9          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x094)
4062 +#define  QTX_HEAD_9         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x098)
4063 +#define  QTX_TAIL_9         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x09C)
4064 +#define  QTX_CFG_10         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A0)
4065 +#define  QTX_SCH_10         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A4)
4066 +#define  QTX_HEAD_10        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A8)
4067 +#define  QTX_TAIL_10        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0AC)
4068 +#define  QTX_CFG_11         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B0)
4069 +#define  QTX_SCH_11         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B4)
4070 +#define  QTX_HEAD_11        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B8)
4071 +#define  QTX_TAIL_11        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0BC)
4072 +#define  QTX_CFG_12         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C0)
4073 +#define  QTX_SCH_12         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C4)
4074 +#define  QTX_HEAD_12        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C8)
4075 +#define  QTX_TAIL_12        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0CC)
4076 +#define  QTX_CFG_13         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D0)
4077 +#define  QTX_SCH_13         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D4)
4078 +#define  QTX_HEAD_13        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D8)
4079 +#define  QTX_TAIL_13        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0DC)
4080 +#define  QTX_CFG_14         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E0)
4081 +#define  QTX_SCH_14         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E4)
4082 +#define  QTX_HEAD_14        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E8)
4083 +#define  QTX_TAIL_14        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0EC)
4084 +#define  QTX_CFG_15         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F0)
4085 +#define  QTX_SCH_15         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F4)
4086 +#define  QTX_HEAD_15        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F8)
4087 +#define  QTX_TAIL_15        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0FC)
4088 +#define  QRX_BASE_PTR_0     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x100)
4089 +#define  QRX_MAX_CNT_0      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x104)
4090 +#define  QRX_CRX_IDX_0      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x108)
4091 +#define  QRX_DRX_IDX_0      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x10C)
4092 +#define  QRX_BASE_PTR_1     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x110)
4093 +#define  QRX_MAX_CNT_1      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x114)
4094 +#define  QRX_CRX_IDX_1      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x118)
4095 +#define  QRX_DRX_IDX_1      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x11C)
4096 +#if defined (CONFIG_ARCH_MT7623)
4097 +#define  VQTX_TB_BASE_0     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x180)
4098 +#define  VQTX_TB_BASE_1     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x184)
4099 +#define  VQTX_TB_BASE_2     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x188)
4100 +#define  VQTX_TB_BASE_3     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x18C)
4101 +#endif
4102 +#define  QDMA_INFO          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x200)
4103 +#define  QDMA_GLO_CFG       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x204)
4104 +#define  QDMA_RST_IDX       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x208)
4105 +#define  QDMA_RST_CFG       (QDMA_RST_IDX)
4106 +#define  QDMA_DELAY_INT     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x20C)
4107 +#define  QDMA_FC_THRES      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x210)
4108 +#define  QDMA_TX_SCH        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x214)
4109 +#define  QDMA_INT_STS       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x218)
4110 +#define  QFE_INT_STATUS                  (QDMA_INT_STS)
4111 +#define  QDMA_INT_MASK      (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x21C)
4112 +#define  QFE_INT_ENABLE                  (QDMA_INT_MASK)
4113 +#define  QDMA_TRTCM         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220)
4114 +#define  QDMA_DATA0         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224)
4115 +#define  QDMA_DATA1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x228)
4116 +#define  QDMA_RED_THRES     (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x22C)
4117 +#define  QDMA_TEST          (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x230)
4118 +#define  QDMA_DMA           (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x234)
4119 +#define  QDMA_BMU           (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x238)
4120 +#define  QDMA_HRED1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x240)
4121 +#define  QDMA_HRED2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x244)
4122 +#define  QDMA_SRED1         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x248)
4123 +#define  QDMA_SRED2         (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x24C)
4124 +#define  QTX_CTX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x300)
4125 +#define  QTX_DTX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x304)
4126 +#define  QTX_FWD_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x308)
4127 +#define  QTX_CRX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x310)
4128 +#define  QTX_DRX_PTR        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x314)
4129 +#define  QTX_RLS_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x318)
4130 +#define  QDMA_FQ_HEAD       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x320)
4131 +#define  QDMA_FQ_TAIL       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x324)
4132 +#define  QDMA_FQ_CNT        (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x328)
4133 +#define  QDMA_FQ_BLEN       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x32C)
4134 +#define  QTX_Q0MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x350)
4135 +#define  QTX_Q1MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x354)
4136 +#define  QTX_Q2MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x358)
4137 +#define  QTX_Q3MIN_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x35C)
4138 +#define  QTX_Q0MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x360)
4139 +#define  QTX_Q1MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x364)
4140 +#define  QTX_Q2MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x368)
4141 +#define  QTX_Q3MAX_BK       (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x36C)
4142 +
4143 +
4144 +#endif /* MT7621/MT7623 QDMA */
4145 +
4146 +#else
4147 +
4148 +/* 1. Frame Engine Global Registers */
4149 +#define MDIO_ACCESS            (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x00)
4150 +#define MDIO_CFG               (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x04)
4151 +#define FE_GLO_CFG             (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x08)
4152 +#define FE_RST_GL              (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x0C)
4153 +#define FE_INT_STATUS          (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x10)
4154 +#define FE_INT_ENABLE          (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x14)
4155 +#define MDIO_CFG2              (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x18) //Original:FC_DROP_STA
4156 +#define FOC_TS_T               (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x1C)
4157 +
4158 +
4159 +/* 2. GDMA Registers */
4160 +#define        GDMA1_FWD_CFG           (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x00)
4161 +#define GDMA1_SCH_CFG          (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x04)
4162 +#define GDMA1_SHPR_CFG         (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x08)
4163 +#define GDMA1_MAC_ADRL         (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x0C)
4164 +#define GDMA1_MAC_ADRH         (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x10)
4165 +
4166 +#define        GDMA2_FWD_CFG           (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x00)
4167 +#define GDMA2_SCH_CFG          (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x04)
4168 +#define GDMA2_SHPR_CFG         (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x08)
4169 +#define GDMA2_MAC_ADRL         (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x0C)
4170 +#define GDMA2_MAC_ADRH         (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x10)
4171 +
4172 +/* 3. PSE */
4173 +#define PSE_FQ_CFG             (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x00)
4174 +#define CDMA_FC_CFG            (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x04)
4175 +#define GDMA1_FC_CFG           (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x08)
4176 +#define GDMA2_FC_CFG           (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x0C)
4177 +#define PDMA_FC_CFG            (RALINK_FRAME_ENGINE_BASE+0x1f0)
4178 +
4179 +/* 4. CDMA */
4180 +#define CDMA_CSG_CFG           (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x00)
4181 +#define CDMA_SCH_CFG           (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x04)
4182 +/* skip PPPoE SID and VLAN ID definitions */
4183 +
4184 +
4185 +/* 5. PDMA */
4186 +#define PDMA_GLO_CFG           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00)
4187 +#define PDMA_RST_CFG           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x04)
4188 +#define PDMA_SCH_CFG           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x08)
4189 +
4190 +#define DLY_INT_CFG            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x0C)
4191 +
4192 +#define TX_BASE_PTR0           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10)
4193 +#define TX_MAX_CNT0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x14)
4194 +#define TX_CTX_IDX0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x18)
4195 +#define TX_DTX_IDX0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x1C)
4196 +
4197 +#define TX_BASE_PTR1           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20)
4198 +#define TX_MAX_CNT1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x24)
4199 +#define TX_CTX_IDX1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x28)
4200 +#define TX_DTX_IDX1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x2C)
4201 +
4202 +#define TX_BASE_PTR2           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
4203 +#define TX_MAX_CNT2            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
4204 +#define TX_CTX_IDX2            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
4205 +#define TX_DTX_IDX2            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
4206 +
4207 +#define TX_BASE_PTR3           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x50)
4208 +#define TX_MAX_CNT3            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x54)
4209 +#define TX_CTX_IDX3            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x58)
4210 +#define TX_DTX_IDX3            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x5C)
4211 +
4212 +#define RX_BASE_PTR0           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x30)
4213 +#define RX_MAX_CNT0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x34)
4214 +#define RX_CALC_IDX0           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x38)
4215 +#define RX_DRX_IDX0            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x3C)
4216 +
4217 +#define RX_BASE_PTR1           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
4218 +#define RX_MAX_CNT1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
4219 +#define RX_CALC_IDX1           (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
4220 +#define RX_DRX_IDX1            (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
4221 +
4222 +#endif
4223 +
4224 +#define DELAY_INT_INIT         0x84048404
4225 +#define FE_INT_DLY_INIT                (TX_DLY_INT | RX_DLY_INT)
4226 +
4227 +
4228 +#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
4229 +
4230 +/* 6. Counter and Meter Table */
4231 +#define PPE_AC_BCNT0           (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x000) /* PPE Accounting Group 0 Byte Cnt */
4232 +#define PPE_AC_PCNT0           (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x004) /* PPE Accounting Group 0 Packet Cnt */
4233 +/* 0 ~ 63 */
4234 +
4235 +#define PPE_MTR_CNT0           (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x200) /* 0 ~ 63 */
4236 +/* skip... */
4237 +#define PPE_MTR_CNT63          (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x2FC)
4238 +
4239 +#define GDMA_TX_GBCNT0         (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x300) /* Transmit good byte cnt for GEport */
4240 +#define GDMA_TX_GPCNT0         (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x304) /* Transmit good pkt cnt for GEport */
4241 +#define GDMA_TX_SKIPCNT0       (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x308) /* Transmit skip cnt for GEport */
4242 +#define GDMA_TX_COLCNT0                (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x30C) /* Transmit collision cnt for GEport */
4243 +
4244 +/* updated these address mappings to match data sheet v0.26, by bobtseng, 2007.6.14 */
4245 +#define GDMA_RX_GBCNT0         (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x320)
4246 +#define GDMA_RX_GPCNT0         (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x324)
4247 +#define GDMA_RX_OERCNT0                (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x328)
4248 +#define GDMA_RX_FERCNT0        (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x32C)
4249 +#define GDMA_RX_SERCNT0                (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x330)
4250 +#define GDMA_RX_LERCNT0                (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x334)
4251 +#define GDMA_RX_CERCNT0                (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x338)
4252 +#define GDMA_RX_FCCNT1         (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x33C)
4253 +
4254 +#endif
4255 +
4256 +/* LRO global control */ 
4257 +/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
4258 +#define LRO_ALT_REFRESH_TIMER   (RALINK_FRAME_ENGINE_BASE+0x001C)
4259 +
4260 +/* LRO auto-learn table info */
4261 +#define PDMA_FE_ALT_CF8                (RALINK_FRAME_ENGINE_BASE+0x0300)
4262 +#define PDMA_FE_ALT_SGL_CFC    (RALINK_FRAME_ENGINE_BASE+0x0304)
4263 +#define PDMA_FE_ALT_SEQ_CFC    (RALINK_FRAME_ENGINE_BASE+0x0308)
4264 +
4265 +/* LRO controls */
4266 +#define ADMA_LRO_CTRL_OFFSET    0x0980
4267 +/* 
4268 + * Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN, Bit [3]:MULTIPLE_RXD_PREFETCH_EN,
4269 + * Bit [4]:RXD_PREFETCH_EN, Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN, 
4270 + * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode, Bit [22]:cr_lro_alt_rplc_mode,
4271 + * Bit [23]:cr_lro_l4_ctrl_psh_en, Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
4272 + */
4273 +#define ADMA_LRO_CTRL_DW0              (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x00)
4274 +/* Bits [31:0]:LRO_CPU_REASON */
4275 +#define ADMA_LRO_CTRL_DW1              (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x04)
4276 +/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
4277 +#define ADMA_LRO_CTRL_DW2              (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x08)
4278 +/* 
4279 + * Bits [7:0]:LRO_MAX_AGGREGATED_CNT, Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
4280 + * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
4281 + */
4282 +#define ADMA_LRO_CTRL_DW3       (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x0C)
4283 +
4284 +/* LRO RX delay interrupt configurations */
4285 +#define LRO_RX1_DLY_INT        (RALINK_FRAME_ENGINE_BASE+0x0a70)
4286 +#define LRO_RX2_DLY_INT        (RALINK_FRAME_ENGINE_BASE+0x0a74)
4287 +#define LRO_RX3_DLY_INT        (RALINK_FRAME_ENGINE_BASE+0x0a78)
4288 +
4289 +/* LRO auto-learn configurations */
4290 +#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET    0x0990
4291 +#define PDMA_LRO_ATL_OVERFLOW_ADJ   (RALINK_FRAME_ENGINE_BASE+PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
4292 +#define LRO_ALT_SCORE_DELTA   (RALINK_FRAME_ENGINE_BASE+0x0a4c)
4293 +
4294 +/* LRO agg timer configurations */
4295 +#define LRO_MAX_AGG_TIME       (RALINK_FRAME_ENGINE_BASE+0x0a5c)
4296 +
4297 +/* LRO configurations of RX ring #0 */
4298 +#define LRO_RXRING0_OFFSET          0x0b00
4299 +#define LRO_RX_RING0_DIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x04)
4300 +#define LRO_RX_RING0_DIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x08)
4301 +#define LRO_RX_RING0_DIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x0C)
4302 +#define LRO_RX_RING0_DIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x10)
4303 +#define LRO_RX_RING0_CTRL_DW1          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x28)
4304 +/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
4305 +#define LRO_RX_RING0_CTRL_DW2          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x2C)
4306 +#define LRO_RX_RING0_CTRL_DW3          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x30)
4307 +/* LRO configurations of RX ring #1 */
4308 +#define LRO_RXRING1_OFFSET          0x0b40
4309 +#define LRO_RX_RING1_STP_DTP_DW                (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x00)
4310 +#define LRO_RX_RING1_DIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x04)
4311 +#define LRO_RX_RING1_DIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x08)
4312 +#define LRO_RX_RING1_DIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x0C)
4313 +#define LRO_RX_RING1_DIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x10)
4314 +#define LRO_RX_RING1_SIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x14)
4315 +#define LRO_RX_RING1_SIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x18)
4316 +#define LRO_RX_RING1_SIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x1C)
4317 +#define LRO_RX_RING1_SIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x20)
4318 +#define LRO_RX_RING1_CTRL_DW0          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x24)
4319 +#define LRO_RX_RING1_CTRL_DW1          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x28)
4320 +#define LRO_RX_RING1_CTRL_DW2          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x2C)
4321 +#define LRO_RX_RING1_CTRL_DW3          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x30)
4322 +#define LRO_RXRING2_OFFSET          0x0b80
4323 +#define LRO_RX_RING2_STP_DTP_DW                (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x00)
4324 +#define LRO_RX_RING2_DIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x04)
4325 +#define LRO_RX_RING2_DIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x08)
4326 +#define LRO_RX_RING2_DIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x0C)
4327 +#define LRO_RX_RING2_DIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x10)
4328 +#define LRO_RX_RING2_SIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x14)
4329 +#define LRO_RX_RING2_SIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x18)
4330 +#define LRO_RX_RING2_SIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x1C)
4331 +#define LRO_RX_RING2_SIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x20)
4332 +#define LRO_RX_RING2_CTRL_DW0          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x24)
4333 +#define LRO_RX_RING2_CTRL_DW1          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x28)
4334 +#define LRO_RX_RING2_CTRL_DW2          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x2C)
4335 +#define LRO_RX_RING2_CTRL_DW3          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x30)
4336 +#define LRO_RXRING3_OFFSET          0x0bc0
4337 +#define LRO_RX_RING3_STP_DTP_DW                (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x00)
4338 +#define LRO_RX_RING3_DIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x04)
4339 +#define LRO_RX_RING3_DIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x08)
4340 +#define LRO_RX_RING3_DIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x0C)
4341 +#define LRO_RX_RING3_DIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x10)
4342 +#define LRO_RX_RING3_SIP_DW0           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x14)
4343 +#define LRO_RX_RING3_SIP_DW1           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x18)
4344 +#define LRO_RX_RING3_SIP_DW2           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x1C)
4345 +#define LRO_RX_RING3_SIP_DW3           (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x20)
4346 +#define LRO_RX_RING3_CTRL_DW0          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x24)
4347 +#define LRO_RX_RING3_CTRL_DW1          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x28)
4348 +#define LRO_RX_RING3_CTRL_DW2          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x2C)
4349 +#define LRO_RX_RING3_CTRL_DW3          (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x30)
4350 +
4351 +/* LRO RX ring mode */
4352 +#define PDMA_RX_NORMAL_MODE         (0x0)
4353 +#define PDMA_RX_PSE_MODE            (0x1)
4354 +#define PDMA_RX_FORCE_PORT          (0x2)
4355 +#define PDMA_RX_AUTO_LEARN          (0x3)
4356 +
4357 +#define ADMA_RX_RING0   (0)
4358 +#define ADMA_RX_RING1   (1)
4359 +#define ADMA_RX_RING2   (2)
4360 +#define ADMA_RX_RING3   (3)
4361 +
4362 +#define ADMA_RX_LEN0_MASK   (0x3fff)
4363 +#define ADMA_RX_LEN1_MASK   (0x3)
4364 +
4365 +#define PDMA_LRO_EN             BIT(0)
4366 +#define PDMA_LRO_IPV6_EN        BIT(1)
4367 +#define PDMA_LRO_IPV4_CSUM_UPDATE_EN    BIT(7)
4368 +#define PDMA_LRO_IPV4_CTRL_PUSH_EN     BIT(23)
4369 +#define PDMA_LRO_RXD_PREFETCH_EN        BITS(3,4)
4370 +#define PDMA_NON_LRO_MULTI_EN   BIT(2)
4371 +#define PDMA_LRO_DLY_INT_EN             BIT(5)
4372 +#define PDMA_LRO_FUSH_REQ               BITS(26,28)
4373 +#define PDMA_LRO_RELINGUISH     BITS(29,31)
4374 +#define PDMA_LRO_FREQ_PRI_ADJ   BITS(16,19)
4375 +#define PDMA_LRO_TPUT_PRE_ADJ           BITS(8,11)
4376 +#define PDMA_LRO_TPUT_PRI_ADJ           BITS(12,15)
4377 +#define PDMA_LRO_ALT_SCORE_MODE         BIT(21)
4378 +#define PDMA_LRO_RING_AGE1      BITS(22,31)
4379 +#define PDMA_LRO_RING_AGE2      BITS(0,5)
4380 +#define PDMA_LRO_RING_AGG               BITS(10,25)
4381 +#define PDMA_LRO_RING_AGG_CNT1          BITS(26,31)
4382 +#define PDMA_LRO_RING_AGG_CNT2          BITS(0,1)
4383 +#define PDMA_LRO_ALT_TICK_TIMER         BITS(16,20)
4384 +#define PDMA_LRO_LRO_MIN_RXD_SDL0       BITS(16,31)
4385 +
4386 +#define PDMA_LRO_DLY_INT_EN_OFFSET          (5)
4387 +#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET        (8)
4388 +#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET    (16)
4389 +#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET    (16)
4390 +#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET        (12)
4391 +#define PDMA_LRO_ALT_SCORE_MODE_OFFSET      (21)
4392 +#define PDMA_LRO_FUSH_REQ_OFFSET            (26)
4393 +#define PDMA_NON_LRO_MULTI_EN_OFFSET        (2)
4394 +#define PDMA_LRO_IPV6_EN_OFFSET             (1)
4395 +#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET     (3)
4396 +#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
4397 +#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET   (23)
4398 +#define PDMA_LRO_ALT_TICK_TIMER_OFFSET      (16)
4399 +
4400 +#define PDMA_LRO_TPUT_OVERFLOW_ADJ  BITS(12,31)
4401 +#define PDMA_LRO_CNT_OVERFLOW_ADJ   BITS(0,11)
4402 +
4403 +#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET   (12)
4404 +#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET    (0)
4405 +
4406 +#define PDMA_LRO_ALT_BYTE_CNT_MODE  (0)
4407 +#define PDMA_LRO_ALT_PKT_CNT_MODE   (1)
4408 +
4409 +/* LRO_RX_RING1_CTRL_DW1 offsets  */
4410 +#define PDMA_LRO_AGE_H_OFFSET           (10)
4411 +#define PDMA_LRO_RING_AGE1_OFFSET       (22)
4412 +#define PDMA_LRO_RING_AGG_CNT1_OFFSET   (26)
4413 +/* LRO_RX_RING1_CTRL_DW2 offsets  */
4414 +#define PDMA_RX_MODE_OFFSET             (6)
4415 +#define PDMA_RX_PORT_VALID_OFFSET       (8)
4416 +#define PDMA_RX_MYIP_VALID_OFFSET       (9)
4417 +#define PDMA_LRO_RING_AGE2_OFFSET       (0)
4418 +#define PDMA_LRO_RING_AGG_OFFSET        (10)
4419 +#define PDMA_LRO_RING_AGG_CNT2_OFFSET   (0)
4420 +/* LRO_RX_RING1_CTRL_DW3 offsets  */
4421 +#define PDMA_LRO_AGG_CNT_H_OFFSET       (6)
4422 +/* LRO_RX_RING1_STP_DTP_DW offsets */
4423 +#define PDMA_RX_TCP_SRC_PORT_OFFSET     (16)
4424 +#define PDMA_RX_TCP_DEST_PORT_OFFSET    (0)
4425 +/* LRO_RX_RING1_CTRL_DW0 offsets */
4426 +#define PDMA_RX_IPV4_FORCE_OFFSET       (1)
4427 +#define PDMA_RX_IPV6_FORCE_OFFSET       (0)
4428 +
4429 +#define SET_ADMA_RX_LEN0(x)    ((x)&ADMA_RX_LEN0_MASK)
4430 +#define SET_ADMA_RX_LEN1(x)    ((x)&ADMA_RX_LEN1_MASK)
4431 +
4432 +#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
4433 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
4434 +        *addr &= ~0xff;   \
4435 +        *addr |= ((x) & 0xff);  \
4436 +    }
4437 +#define SET_PDMA_LRO_FLUSH_REQ(x) \
4438 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4439 +        *addr &= ~PDMA_LRO_FUSH_REQ;   \
4440 +        *addr |= ((x) & 0x7)<<PDMA_LRO_FUSH_REQ_OFFSET;  \
4441 +    }
4442 +#define SET_PDMA_LRO_IPV6_EN(x) \
4443 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4444 +        *addr &= ~PDMA_LRO_IPV6_EN;   \
4445 +        *addr |= ((x) & 0x1)<<PDMA_LRO_IPV6_EN_OFFSET;  \
4446 +    }
4447 +#if defined(CONFIG_RAETH_HW_LRO_PREFETCH)
4448 +#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
4449 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4450 +        *addr &= ~PDMA_LRO_RXD_PREFETCH_EN;   \
4451 +        *addr |= ((x) & 0x3)<<PDMA_LRO_RXD_PREFETCH_EN_OFFSET;  \
4452 +    }
4453 +#else
4454 +#define SET_PDMA_LRO_RXD_PREFETCH_EN(x)
4455 +#endif
4456 +#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
4457 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4458 +        *addr &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN;   \
4459 +        *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET;  \
4460 +    }
4461 +#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
4462 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4463 +        *addr &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN;   \
4464 +        *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET;  \
4465 +    }
4466 +#define SET_PDMA_NON_LRO_MULTI_EN(x) \
4467 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4468 +        *addr &= ~(PDMA_NON_LRO_MULTI_EN);   \
4469 +        *addr |= ((x) & 0x1)<<PDMA_NON_LRO_MULTI_EN_OFFSET;  \
4470 +    }
4471 +#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
4472 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4473 +        *addr &= ~PDMA_LRO_FREQ_PRI_ADJ;   \
4474 +        *addr |= ((x) & 0xf)<<PDMA_LRO_FREQ_PRI_ADJ_OFFSET;  \
4475 +    }
4476 +#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
4477 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4478 +        *addr &= ~PDMA_LRO_TPUT_PRE_ADJ;   \
4479 +        *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRE_ADJ_OFFSET;  \
4480 +    }
4481 +#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
4482 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4483 +        *addr &= ~PDMA_LRO_TPUT_PRI_ADJ;   \
4484 +        *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRI_ADJ_OFFSET;  \
4485 +    }
4486 +#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
4487 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4488 +        *addr &= ~PDMA_LRO_ALT_SCORE_MODE;   \
4489 +        *addr |= ((x) & 0x1)<<PDMA_LRO_ALT_SCORE_MODE_OFFSET;  \
4490 +    }
4491 +#define SET_PDMA_LRO_DLY_INT_EN(x) \
4492 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
4493 +        *addr &= ~PDMA_LRO_DLY_INT_EN;   \
4494 +        *addr |= ((x) & 0x1)<<PDMA_LRO_DLY_INT_EN_OFFSET;  \
4495 +    }
4496 +#define SET_PDMA_LRO_BW_THRESHOLD(x) \
4497 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW2; \
4498 +        *addr = (x);  \
4499 +    }
4500 +#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
4501 +    { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
4502 +        *addr &= ~PDMA_LRO_LRO_MIN_RXD_SDL0;   \
4503 +        *addr |= ((x) & 0xffff)<<PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET;  \
4504 +    }
4505 +#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
4506 +    { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
4507 +        *addr &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ;   \
4508 +        *addr |= ((x) & 0xfffff)<<PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET;  \
4509 +    }
4510 +#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
4511 +    { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
4512 +        *addr &= ~PDMA_LRO_CNT_OVERFLOW_ADJ;   \
4513 +        *addr |= ((x) & 0xfff)<<PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET;  \
4514 +    }
4515 +#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
4516 +    { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
4517 +        *addr &= ~PDMA_LRO_ALT_TICK_TIMER;   \
4518 +        *addr |= ((x) & 0x1f)<<PDMA_LRO_ALT_TICK_TIMER_OFFSET;  \
4519 +    }
4520 +#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
4521 +    { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
4522 +        *addr &= ~0xffff;   \
4523 +        *addr |= ((x) & 0xffff);  \
4524 +    }
4525 +#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
4526 +    { volatile unsigned int *addr = (unsigned int*)LRO_MAX_AGG_TIME; \
4527 +        *addr &= ~0xffff;   \
4528 +        *addr |= ((x) & 0xffff);  \
4529 +    }
4530 +#define SET_PDMA_RXRING_MODE(x,y) \
4531 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4532 +        *addr &= ~(0x3<<PDMA_RX_MODE_OFFSET);   \
4533 +        *addr |= (y)<<PDMA_RX_MODE_OFFSET;  \
4534 +    }
4535 +#define SET_PDMA_RXRING_MYIP_VALID(x,y) \
4536 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4537 +        *addr &= ~(0x1<<PDMA_RX_MYIP_VALID_OFFSET); \
4538 +        *addr |= ((y)&0x1)<<PDMA_RX_MYIP_VALID_OFFSET;    \
4539 +    }
4540 +#define SET_PDMA_RXRING_VALID(x,y) \
4541 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4542 +        *addr &= ~(0x1<<PDMA_RX_PORT_VALID_OFFSET); \
4543 +        *addr |= ((y)&0x1)<<PDMA_RX_PORT_VALID_OFFSET;    \
4544 +    }
4545 +#define SET_PDMA_RXRING_TCP_SRC_PORT(x,y) \
4546 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
4547 +        *addr &= ~(0xffff<<PDMA_RX_TCP_SRC_PORT_OFFSET);    \
4548 +        *addr |= (y)<<PDMA_RX_TCP_SRC_PORT_OFFSET;    \
4549 +    }
4550 +#define SET_PDMA_RXRING_TCP_DEST_PORT(x,y) \
4551 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
4552 +        *addr &= ~(0xffff<<PDMA_RX_TCP_DEST_PORT_OFFSET);    \
4553 +        *addr |= (y)<<PDMA_RX_TCP_DEST_PORT_OFFSET;    \
4554 +    }
4555 +#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x,y) \
4556 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
4557 +        *addr &= ~(0x1<<PDMA_RX_IPV4_FORCE_OFFSET);    \
4558 +        *addr |= (y)<<PDMA_RX_IPV4_FORCE_OFFSET;    \
4559 +    }
4560 +#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x,y) \
4561 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
4562 +        *addr &= ~(0x1<<PDMA_RX_IPV6_FORCE_OFFSET);    \
4563 +        *addr |= (y)<<PDMA_RX_IPV6_FORCE_OFFSET;    \
4564 +    }
4565 +#define SET_PDMA_RXRING_AGE_TIME(x,y) \
4566 +    { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
4567 +      volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4568 +        *addr1 &= ~PDMA_LRO_RING_AGE1;    \
4569 +        *addr2 &= ~PDMA_LRO_RING_AGE2;    \
4570 +        *addr1 |= ((y) & 0x3ff)<<PDMA_LRO_RING_AGE1_OFFSET;    \
4571 +        *addr2 |= (((y)>>PDMA_LRO_AGE_H_OFFSET) & 0x03f)<<PDMA_LRO_RING_AGE2_OFFSET;    \
4572 +    }
4573 +#define SET_PDMA_RXRING_AGG_TIME(x,y) \
4574 +    { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
4575 +        *addr &= ~PDMA_LRO_RING_AGG;    \
4576 +        *addr |= ((y) & 0xffff)<<PDMA_LRO_RING_AGG_OFFSET;    \
4577 +    }
4578 +#define SET_PDMA_RXRING_MAX_AGG_CNT(x,y) \
4579 +    { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING1_CTRL_DW2 + (((x)-1) << 6)); \
4580 +      volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING1_CTRL_DW3 + (((x)-1) << 6)); \
4581 +        *addr1 &= ~PDMA_LRO_RING_AGG_CNT1;    \
4582 +        *addr2 &= ~PDMA_LRO_RING_AGG_CNT2;    \
4583 +        *addr1 |= ((y) & 0x3f)<<PDMA_LRO_RING_AGG_CNT1_OFFSET;    \
4584 +        *addr2 |= (((y)>>PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03)<<PDMA_LRO_RING_AGG_CNT2_OFFSET;    \
4585 +    }
4586 +
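/*
 * Illustrative sketch (not taken from the SDK sources): the SET_PDMA_* helpers
 * above program the HW LRO rings through volatile register pointers. Assuming
 * placeholder ring index, TCP ports and aggregation limits, forcing RX ring 1
 * onto a single TCP flow might combine them roughly as follows:
 */
static inline void example_hw_lro_ring1_setup(void)
{
	/* placeholder values for illustration only */
	SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);	/* force-port mode */
	SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 80);		/* TCP source port to match */
	SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 5001);		/* TCP destination port to match */
	SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
	SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
	SET_PDMA_LRO_MAX_AGG_CNT(0x20);					/* cap aggregated segments */
	SET_PDMA_LRO_MAX_AGG_TIME(0x200);				/* cap aggregation time */
}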
4587 +typedef struct _PDMA_LRO_AUTO_TLB_INFO0_    PDMA_LRO_AUTO_TLB_INFO0_T;
4588 +typedef struct _PDMA_LRO_AUTO_TLB_INFO1_    PDMA_LRO_AUTO_TLB_INFO1_T;
4589 +typedef struct _PDMA_LRO_AUTO_TLB_INFO2_    PDMA_LRO_AUTO_TLB_INFO2_T;
4590 +typedef struct _PDMA_LRO_AUTO_TLB_INFO3_    PDMA_LRO_AUTO_TLB_INFO3_T;
4591 +typedef struct _PDMA_LRO_AUTO_TLB_INFO4_    PDMA_LRO_AUTO_TLB_INFO4_T;
4592 +typedef struct _PDMA_LRO_AUTO_TLB_INFO5_    PDMA_LRO_AUTO_TLB_INFO5_T;
4593 +typedef struct _PDMA_LRO_AUTO_TLB_INFO6_    PDMA_LRO_AUTO_TLB_INFO6_T;
4594 +typedef struct _PDMA_LRO_AUTO_TLB_INFO7_    PDMA_LRO_AUTO_TLB_INFO7_T;
4595 +typedef struct _PDMA_LRO_AUTO_TLB_INFO8_    PDMA_LRO_AUTO_TLB_INFO8_T;
4596 +
4597 +struct _PDMA_LRO_AUTO_TLB_INFO0_
4598 +{
4599 +    unsigned int    DTP         : 16;
4600 +    unsigned int    STP         : 16;
4601 +};
4602 +struct _PDMA_LRO_AUTO_TLB_INFO1_
4603 +{
4604 +    unsigned int    SIP0        : 32;
4605 +};
4606 +struct _PDMA_LRO_AUTO_TLB_INFO2_
4607 +{
4608 +    unsigned int    SIP1        : 32;
4609 +};
4610 +struct _PDMA_LRO_AUTO_TLB_INFO3_
4611 +{
4612 +    unsigned int    SIP2        : 32;
4613 +};
4614 +struct _PDMA_LRO_AUTO_TLB_INFO4_
4615 +{
4616 +    unsigned int    SIP3        : 32;
4617 +};
4618 +struct _PDMA_LRO_AUTO_TLB_INFO5_
4619 +{
4620 +    unsigned int    VLAN_VID0   : 32;
4621 +};
4622 +struct _PDMA_LRO_AUTO_TLB_INFO6_
4623 +{
4624 +    unsigned int    VLAN_VID1       : 16;
4625 +    unsigned int    VLAN_VID_VLD    : 4;
4626 +    unsigned int    CNT             : 12;
4627 +};
4628 +struct _PDMA_LRO_AUTO_TLB_INFO7_
4629 +{
4630 +    unsigned int    DW_LEN          : 32;
4631 +};
4632 +struct _PDMA_LRO_AUTO_TLB_INFO8_
4633 +{
4634 +    unsigned int    DIP_ID          : 2;
4635 +    unsigned int    IPV6            : 1;
4636 +    unsigned int    IPV4            : 1;
4637 +    unsigned int    RESV            : 27;
4638 +    unsigned int    VALID           : 1;
4639 +};
4640 +struct PDMA_LRO_AUTO_TLB_INFO {
4641 +       PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
4642 +       PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
4643 +       PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
4644 +       PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
4645 +       PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
4646 +       PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
4647 +       PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
4648 +       PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
4649 +       PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
4650 +};
4651 +
4652 +#if defined (CONFIG_HW_SFQ)
4653 +#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1980)
4654 +#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1984)
4655 +#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1988)
4656 +#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + 0x198C)
4657 +#define SFQ_OFFSET 0x1A80
4658 +#define VQTX_GLO (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET)
4659 +#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x0C)
4660 +#define VQTX_NUM (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x10)
4661 +#define VQTX_SCH (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x18)
4662 +#define VQTX_HASH_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x20)
4663 +#define VQTX_HASH_SD (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x24)
4664 +#define VQTX_VLD_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x30)
4665 +#define VQTX_MIB_IF (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x3C)
4666 +#define VQTX_MIB_PCNT (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x40)
4667 +#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x44)
4668 +#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x48)
4669 +
4670 +#define VQTX_MIB_EN (1<<17) 
4671 +#define VQTX_NUM_0  (4<<0)
4672 +#define VQTX_NUM_1  (4<<4)
4673 +#define VQTX_NUM_2  (4<<8)
4674 +#define VQTX_NUM_3  (4<<12)
4675 +
4676 +/*=========================================
4677 +      SFQ Table Format define
4678 +=========================================*/
4679 +typedef struct _SFQ_INFO1_  SFQ_INFO1_T;
4680 +
4681 +struct _SFQ_INFO1_
4682 +{
4683 +    unsigned int    VQHPTR;
4684 +};
4685 +//-------------------------------------------------
4686 +typedef struct _SFQ_INFO2_    SFQ_INFO2_T;
4687 +
4688 +struct _SFQ_INFO2_
4689 +{
4690 +    unsigned int    VQTPTR;
4691 +};
4692 +//-------------------------------------------------
4693 +typedef struct _SFQ_INFO3_  SFQ_INFO3_T;
4694 +
4695 +struct _SFQ_INFO3_
4696 +{
4697 +    unsigned int    QUE_DEPTH:16;
4698 +    unsigned int    DEFICIT_CNT:16;
4699 +};
4700 +//-------------------------------------------------
4701 +typedef struct _SFQ_INFO4_    SFQ_INFO4_T;
4702 +
4703 +struct _SFQ_INFO4_
4704 +{
4705 +       unsigned int    RESV; 
4706 +};
4707 +//-------------------------------------------------
4708 +
4709 +typedef struct _SFQ_INFO5_    SFQ_INFO5_T;
4710 +
4711 +struct _SFQ_INFO5_
4712 +{
4713 +       unsigned int    PKT_CNT; 
4714 +};
4715 +//-------------------------------------------------
4716 +
4717 +typedef struct _SFQ_INFO6_    SFQ_INFO6_T;
4718 +
4719 +struct _SFQ_INFO6_
4720 +{
4721 +       unsigned int    BYTE_CNT; 
4722 +};
4723 +//-------------------------------------------------
4724 +
4725 +typedef struct _SFQ_INFO7_    SFQ_INFO7_T;
4726 +
4727 +struct _SFQ_INFO7_
4728 +{
4729 +       unsigned int    BYTE_CNT; 
4730 +};
4731 +//-------------------------------------------------
4732 +
4733 +typedef struct _SFQ_INFO8_    SFQ_INFO8_T;
4734 +
4735 +struct _SFQ_INFO8_
4736 +{
4737 +               unsigned int    RESV; 
4738 +};
4739 +
4740 +
4741 +struct SFQ_table {
4742 +       SFQ_INFO1_T sfq_info1;
4743 +       SFQ_INFO2_T sfq_info2;
4744 +       SFQ_INFO3_T sfq_info3;
4745 +       SFQ_INFO4_T sfq_info4;
4746 +       SFQ_INFO5_T sfq_info5;
4747 +       SFQ_INFO6_T sfq_info6;
4748 +       SFQ_INFO7_T sfq_info7;
4749 +       SFQ_INFO8_T sfq_info8;
4750 +
4751 +};
4752 +#endif
4753 +#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
4754 +#define FE_GDM_RXID1_OFFSET        (0x0130)
4755 +#define FE_GDM_RXID1               (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID1_OFFSET)
4756 +#define GDM_VLAN_PRI7_RXID_SEL     BITS(30,31)
4757 +#define GDM_VLAN_PRI6_RXID_SEL     BITS(28,29)
4758 +#define GDM_VLAN_PRI5_RXID_SEL     BITS(26,27)
4759 +#define GDM_VLAN_PRI4_RXID_SEL     BITS(24,25)
4760 +#define GDM_VLAN_PRI3_RXID_SEL     BITS(22,23)
4761 +#define GDM_VLAN_PRI2_RXID_SEL     BITS(20,21)
4762 +#define GDM_VLAN_PRI1_RXID_SEL     BITS(18,19)
4763 +#define GDM_VLAN_PRI0_RXID_SEL     BITS(16,17)
4764 +#define GDM_TCP_ACK_RXID_SEL       BITS(4,5)
4765 +#define GDM_TCP_ACK_WZPC           BIT(3)
4766 +#define GDM_RXID_PRI_SEL           BITS(0,2)
4767 +
4768 +#define FE_GDM_RXID2_OFFSET        (0x0134)
4769 +#define FE_GDM_RXID2               (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID2_OFFSET)
4770 +#define GDM_STAG7_RXID_SEL         BITS(30,31)
4771 +#define GDM_STAG6_RXID_SEL         BITS(28,29)
4772 +#define GDM_STAG5_RXID_SEL         BITS(26,27)
4773 +#define GDM_STAG4_RXID_SEL         BITS(24,25)
4774 +#define GDM_STAG3_RXID_SEL         BITS(22,23)
4775 +#define GDM_STAG2_RXID_SEL         BITS(20,21)
4776 +#define GDM_STAG1_RXID_SEL         BITS(18,19)
4777 +#define GDM_STAG0_RXID_SEL         BITS(16,17)
4778 +#define GDM_PID2_RXID_SEL          BITS(2,3)
4779 +#define GDM_PID1_RXID_SEL          BITS(0,1)
4780 +
4781 +#define GDM_PRI_PID              (0)
4782 +#define GDM_PRI_VLAN_PID         (1)
4783 +#define GDM_PRI_ACK_PID          (2)
4784 +#define GDM_PRI_VLAN_ACK_PID     (3)
4785 +#define GDM_PRI_ACK_VLAN_PID     (4)
4786 +
4787 +#define SET_GDM_VLAN_PRI_RXID_SEL(x,y) \
4788 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4789 +        *addr &= ~(0x03 << (((x) << 1)+16));    \
4790 +        *addr |= ((y) & 0x3) << (((x) << 1)+16); \
4791 +    }
4792 +#define SET_GDM_TCP_ACK_RXID_SEL(x) \
4793 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4794 +        *addr &= ~(GDM_TCP_ACK_RXID_SEL);    \
4795 +        *addr |= ((x) & 0x3) << 4; \
4796 +    }
4797 +#define SET_GDM_TCP_ACK_WZPC(x) \
4798 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4799 +        *addr &= ~(GDM_TCP_ACK_WZPC);    \
4800 +        *addr |= ((x) & 0x1) << 3; \
4801 +    }
4802 +#define SET_GDM_RXID_PRI_SEL(x) \
4803 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
4804 +        *addr &= ~(GDM_RXID_PRI_SEL);    \
4805 +        *addr |= (x) & 0x7; \
4806 +    }
4807 +#define GDM_STAG_RXID_SEL(x,y) \
4808 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4809 +        *addr &= ~(0x03 << (((x) << 1)+16));    \
4810 +        *addr |= ((y) & 0x3) << (((x) << 1)+16); \
4811 +    }
4812 +#define SET_GDM_PID2_RXID_SEL(x) \
4813 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4814 +        *addr &= ~(GDM_PID2_RXID_SEL);    \
4815 +        *addr |= ((x) & 0x3) << 2; \
4816 +    }
4817 +#define SET_GDM_PID1_RXID_SEL(x) \
4818 +    { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
4819 +        *addr &= ~(GDM_PID1_RXID_SEL);    \
4820 +        *addr |= ((x) & 0x3); \
4821 +    }
4822 +#endif  /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
4823 +/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
4824 +#define        PORT0_PKCOUNT           (0xb01100e8)
4825 +#define        PORT1_PKCOUNT           (0xb01100ec)
4826 +#define        PORT2_PKCOUNT           (0xb01100f0)
4827 +#define        PORT3_PKCOUNT           (0xb01100f4)
4828 +#define        PORT4_PKCOUNT           (0xb01100f8)
4829 +#define        PORT5_PKCOUNT           (0xb01100fc)
4830 +
4831 +#if defined (CONFIG_ARCH_MT7623)
4832 +#include "sync_write.h"
4833 +#define sysRegRead(phys)            (*(volatile unsigned int *)((phys)))
4834 +#define sysRegWrite(phys, val)      mt65xx_reg_sync_writel((val), (phys))
4835 +#else
4836 +#define PHYS_TO_K1(physaddr) KSEG1ADDR(physaddr)
4837 +#define sysRegRead(phys)        (*(volatile unsigned int *)PHYS_TO_K1(phys))
4838 +#define sysRegWrite(phys, val)  ((*(volatile unsigned int *)PHYS_TO_K1(phys)) = (val))
4839 +#endif
4840 +
4841 +#define u_long unsigned long
4842 +#define u32    unsigned int
4843 +#define u16    unsigned short
4844 +
4845 +
4846 +/* ====================================== */
4847 +#define GDM1_DISPAD       BIT(18)
4848 +#define GDM1_DISCRC       BIT(17)
4849 +
4850 +// GDMA1 unicast frames destination port
4851 +#define GDM1_ICS_EN       (0x1 << 22)
4852 +#define GDM1_TCS_EN       (0x1 << 21)
4853 +#define GDM1_UCS_EN       (0x1 << 20)
4854 +#define GDM1_JMB_EN       (0x1 << 19)
4855 +#define GDM1_STRPCRC              (0x1 << 16)
4856 +#define GDM1_UFRC_P_CPU     (0 << 12)
4857 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4858 +#define GDM1_UFRC_P_PPE     (4 << 12)
4859 +#else
4860 +#define GDM1_UFRC_P_PPE     (6 << 12)
4861 +#endif
4862 +
4863 +// GDMA1 broadcast MAC address frames
4864 +#define GDM1_BFRC_P_CPU     (0 << 8)
4865 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4866 +#define GDM1_BFRC_P_PPE     (4 << 8)
4867 +#else
4868 +#define GDM1_BFRC_P_PPE     (6 << 8)
4869 +#endif
4870 +
4871 +// GDMA1 multicast MAC address frames
4872 +#define GDM1_MFRC_P_CPU     (0 << 4)
4873 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4874 +#define GDM1_MFRC_P_PPE     (4 << 4)
4875 +#else
4876 +#define GDM1_MFRC_P_PPE     (6 << 4)
4877 +#endif
4878 +
4879 +// GDMA1 other MAC address frames destination port
4880 +#define GDM1_OFRC_P_CPU     (0 << 0)
4881 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4882 +#define GDM1_OFRC_P_PPE     (4 << 0)
4883 +#else
4884 +#define GDM1_OFRC_P_PPE     (6 << 0)
4885 +#endif
4886 +
4887 +#if defined (CONFIG_RALINK_RT6856) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
4888 +/* checksum generator registers are removed */
4889 +#define ICS_GEN_EN          (0 << 2)
4890 +#define UCS_GEN_EN          (0 << 1)
4891 +#define TCS_GEN_EN          (0 << 0)
4892 +#else
4893 +#define ICS_GEN_EN          (1 << 2)
4894 +#define UCS_GEN_EN          (1 << 1)
4895 +#define TCS_GEN_EN          (1 << 0)
4896 +#endif
4897 +
4898 +// MDIO_CFG bits
4899 +#define MDIO_CFG_GP1_FC_TX     (1 << 11)
4900 +#define MDIO_CFG_GP1_FC_RX     (1 << 10)
4901 +
4902 +/* ====================================== */
4903 +/* ====================================== */
4904 +#define GP1_LNK_DWN     BIT(9) 
4905 +#define GP1_AN_FAIL     BIT(8) 
4906 +/* ====================================== */
4907 +/* ====================================== */
4908 +#define PSE_RESET       BIT(0)
4909 +/* ====================================== */
4910 +#define PST_DRX_IDX3       BIT(19)
4911 +#define PST_DRX_IDX2       BIT(18)
4912 +#define PST_DRX_IDX1       BIT(17)
4913 +#define PST_DRX_IDX0       BIT(16)
4914 +#define PST_DTX_IDX3       BIT(3)
4915 +#define PST_DTX_IDX2       BIT(2)
4916 +#define PST_DTX_IDX1       BIT(1)
4917 +#define PST_DTX_IDX0       BIT(0)
4918 +
4919 +#define RX_2B_OFFSET     BIT(31)
4920 +#define DESC_32B_EN      BIT(8)
4921 +#define TX_WB_DDONE       BIT(6)
4922 +#define RX_DMA_BUSY       BIT(3)
4923 +#define TX_DMA_BUSY       BIT(1)
4924 +#define RX_DMA_EN         BIT(2)
4925 +#define TX_DMA_EN         BIT(0)
4926 +
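/*
 * Illustrative sketch (not taken from the SDK sources): enabling the PDMA
 * engines amounts to setting the TX/RX enable bits in PDMA_GLO_CFG through the
 * sysRegRead()/sysRegWrite() accessors defined above. A minimal, hypothetical
 * helper might look like this:
 */
static inline void example_pdma_enable(void)
{
	unsigned int reg = sysRegRead(PDMA_GLO_CFG);

	reg |= TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN;	/* write-back done + RX/TX DMA on */
	sysRegWrite(PDMA_GLO_CFG, reg);
}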
4927 +#define PDMA_BT_SIZE_4DWORDS     (0<<4)
4928 +#define PDMA_BT_SIZE_8DWORDS     (1<<4)
4929 +#define PDMA_BT_SIZE_16DWORDS    (2<<4)
4930 +#define PDMA_BT_SIZE_32DWORDS    (3<<4)
4931 +
4932 +#define ADMA_RX_BT_SIZE_4DWORDS                (0<<11)
4933 +#define ADMA_RX_BT_SIZE_8DWORDS                (1<<11)
4934 +#define ADMA_RX_BT_SIZE_16DWORDS       (2<<11)
4935 +#define ADMA_RX_BT_SIZE_32DWORDS       (3<<11)
4936 +
4937 +/* Register bits.
4938 + */
4939 +
4940 +#define MACCFG_RXEN            (1<<2)
4941 +#define MACCFG_TXEN            (1<<3)
4942 +#define MACCFG_PROMISC         (1<<18)
4943 +#define MACCFG_RXMCAST         (1<<19)
4944 +#define MACCFG_FDUPLEX         (1<<20)
4945 +#define MACCFG_PORTSEL         (1<<27)
4946 +#define MACCFG_HBEATDIS                (1<<28)
4947 +
4948 +
4949 +#define DMACTL_SR              (1<<1)  /* Start/Stop Receive */
4950 +#define DMACTL_ST              (1<<13) /* Start/Stop Transmission Command */
4951 +
4952 +#define DMACFG_SWR             (1<<0)  /* Software Reset */
4953 +#define DMACFG_BURST32         (32<<8)
4954 +
4955 +#define DMASTAT_TS             0x00700000      /* Transmit Process State */
4956 +#define DMASTAT_RS             0x000e0000      /* Receive Process State */
4957 +
4958 +#define MACCFG_INIT            0 //(MACCFG_FDUPLEX) // | MACCFG_PORTSEL)
4959 +
4960 +
4961 +
4962 +/* Descriptor bits.
4963 + */
4964 +#define R_OWN          0x80000000      /* Own Bit */
4965 +#define RD_RER         0x02000000      /* Receive End Of Ring */
4966 +#define RD_LS          0x00000100      /* Last Descriptor */
4967 +#define RD_ES          0x00008000      /* Error Summary */
4968 +#define RD_CHAIN       0x01000000      /* Chained */
4969 +
4970 +/* Word 0 */
4971 +#define T_OWN          0x80000000      /* Own Bit */
4972 +#define TD_ES          0x00008000      /* Error Summary */
4973 +
4974 +/* Word 1 */
4975 +#define TD_LS          0x40000000      /* Last Segment */
4976 +#define TD_FS          0x20000000      /* First Segment */
4977 +#define TD_TER         0x08000000      /* Transmit End Of Ring */
4978 +#define TD_CHAIN       0x01000000      /* Chained */
4979 +
4980 +
4981 +#define TD_SET         0x08000000      /* Setup Packet */
4982 +
4983 +
4984 +#define POLL_DEMAND 1
4985 +
4986 +#define RSTCTL (0x34)
4987 +#define RSTCTL_RSTENET1        (1<<19)
4988 +#define RSTCTL_RSTENET2        (1<<20)
4989 +
4990 +#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG                0xff908000
4991 +#define INIT_VALUE_OF_PSE_FQFC_CFG             0x80504000
4992 +#define INIT_VALUE_OF_FORCE_100_FD             0x1001BC01
4993 +#define INIT_VALUE_OF_FORCE_1000_FD            0x1F01DC01
4994 +
4995 +// Define Whole FE Reset Register
4996 +#define RSTCTRL                        (RALINK_SYSCTL_BASE + 0x34)
4997 +#define RT2880_AGPIOCFG_REG    (RALINK_SYSCTL_BASE + 0x3C)
4998 +
4999 +/*=========================================
5000 +      PDMA RX Descriptor Format define
5001 +=========================================*/
5002 +
5003 +//-------------------------------------------------
5004 +typedef struct _PDMA_RXD_INFO1_  PDMA_RXD_INFO1_T;
5005 +
5006 +struct _PDMA_RXD_INFO1_
5007 +{
5008 +    unsigned int    PDP0;
5009 +};
5010 +//-------------------------------------------------
5011 +typedef struct _PDMA_RXD_INFO2_    PDMA_RXD_INFO2_T;
5012 +
5013 +struct _PDMA_RXD_INFO2_
5014 +{
5015 +#if defined (CONFIG_ARCH_MT7623)
5016 +    unsigned int    PLEN1                 : 2;
5017 +    unsigned int    LRO_AGG_CNT           : 8;
5018 +    unsigned int    REV                   : 5;
5019 +#else
5020 +    unsigned int    PLEN1                 : 14;
5021 +    unsigned int    LS1                   : 1;
5022 +#endif  /* CONFIG_ARCH_MT7623 */
5023 +    unsigned int    TAG                   : 1;
5024 +    unsigned int    PLEN0                 : 14;
5025 +    unsigned int    LS0                   : 1;
5026 +    unsigned int    DDONE_bit             : 1;
5027 +};
5028 +//-------------------------------------------------
5029 +typedef struct _PDMA_RXD_INFO3_  PDMA_RXD_INFO3_T;
5030 +
5031 +struct _PDMA_RXD_INFO3_
5032 +{
5033 +    unsigned int    VID:16;
5034 +    unsigned int    TPID:16;
5035 +};
5036 +//-------------------------------------------------
5037 +typedef struct _PDMA_RXD_INFO4_    PDMA_RXD_INFO4_T;
5038 +
5039 +struct _PDMA_RXD_INFO4_
5040 +{
5041 +#if defined (CONFIG_RALINK_MT7620)
5042 +    unsigned int    FOE_Entry           : 14;
5043 +    unsigned int    CRSN               : 5;
5044 +    unsigned int    SPORT              : 3;
5045 +    unsigned int    L4F                        : 1;
5046 +    unsigned int    L4VLD              : 1;
5047 +    unsigned int    TACK               : 1;
5048 +    unsigned int    IP4F               : 1;
5049 +    unsigned int    IP4                        : 1;
5050 +    unsigned int    IP6                        : 1;
5051 +    unsigned int    UN_USE1            : 4;
5052 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5053 +    unsigned int    FOE_Entry           : 14;
5054 +    unsigned int    CRSN               : 5;
5055 +    unsigned int    SP                 : 4;
5056 +    unsigned int    L4F                        : 1;
5057 +    unsigned int    L4VLD              : 1;
5058 +    unsigned int    TACK               : 1;
5059 +    unsigned int    IP4F               : 1;
5060 +    unsigned int    IP4                        : 1;
5061 +    unsigned int    IP6                        : 1;
5062 +    unsigned int    UN_USE1            : 3;
5063 +#else
5064 +    unsigned int    FOE_Entry           : 14;
5065 +    unsigned int    FVLD                : 1;
5066 +    unsigned int    UN_USE1             : 1;
5067 +    unsigned int    AI                  : 8;
5068 +    unsigned int    SP                  : 3;
5069 +    unsigned int    AIS                 : 1;
5070 +    unsigned int    L4F                 : 1;
5071 +    unsigned int    IPF                  : 1;
5072 +    unsigned int    L4FVLD_bit           : 1;
5073 +    unsigned int    IPFVLD_bit           : 1;
5074 +#endif
5075 +};
5076 +
5077 +
5078 +struct PDMA_rxdesc {
5079 +       PDMA_RXD_INFO1_T rxd_info1;
5080 +       PDMA_RXD_INFO2_T rxd_info2;
5081 +       PDMA_RXD_INFO3_T rxd_info3;
5082 +       PDMA_RXD_INFO4_T rxd_info4;
5083 +#ifdef CONFIG_32B_DESC
5084 +       unsigned int     rxd_info5;
5085 +       unsigned int     rxd_info6;
5086 +       unsigned int     rxd_info7;
5087 +       unsigned int     rxd_info8;
5088 +#endif
5089 +};
5090 +
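/*
 * Illustrative sketch (not taken from the SDK sources): the DMA engine hands an
 * RX descriptor back to software by setting DDONE_bit in rxd_info2, with the
 * received length in PLEN0. A hypothetical polling check could be:
 */
static inline int example_rx_desc_done(struct PDMA_rxdesc *rxd, unsigned int *len)
{
	if (!rxd->rxd_info2.DDONE_bit)
		return 0;			/* still owned by the DMA engine */

	*len = rxd->rxd_info2.PLEN0;		/* payload length of buffer 0 */
	return 1;
}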
5091 +/*=========================================
5092 +      PDMA TX Descriptor Format define
5093 +=========================================*/
5094 +//-------------------------------------------------
5095 +typedef struct _PDMA_TXD_INFO1_  PDMA_TXD_INFO1_T;
5096 +
5097 +struct _PDMA_TXD_INFO1_
5098 +{
5099 +    unsigned int    SDP0;
5100 +};
5101 +//-------------------------------------------------
5102 +typedef struct _PDMA_TXD_INFO2_    PDMA_TXD_INFO2_T;
5103 +
5104 +struct _PDMA_TXD_INFO2_
5105 +{
5106 +    unsigned int    SDL1                  : 14;
5107 +    unsigned int    LS1_bit               : 1;
5108 +    unsigned int    BURST_bit             : 1;
5109 +    unsigned int    SDL0                  : 14;
5110 +    unsigned int    LS0_bit               : 1;
5111 +    unsigned int    DDONE_bit             : 1;
5112 +};
5113 +//-------------------------------------------------
5114 +typedef struct _PDMA_TXD_INFO3_  PDMA_TXD_INFO3_T;
5115 +
5116 +struct _PDMA_TXD_INFO3_
5117 +{
5118 +    unsigned int    SDP1;
5119 +};
5120 +//-------------------------------------------------
5121 +typedef struct _PDMA_TXD_INFO4_    PDMA_TXD_INFO4_T;
5122 +
5123 +struct _PDMA_TXD_INFO4_
5124 +{
5125 +#if defined (CONFIG_RALINK_MT7620)
5126 +    unsigned int    VPRI_VIDX           : 8;
5127 +    unsigned int    SIDX                : 4;
5128 +    unsigned int    INSP                : 1;
5129 +    unsigned int    RESV               : 2;
5130 +    unsigned int    UDF                : 5;
5131 +    unsigned int    FP_BMAP                    : 8;
5132 +    unsigned int    TSO                        : 1;
5133 +    unsigned int    TUI_CO             : 3;
5134 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5135 +    unsigned int    VLAN_TAG           :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
5136 +    unsigned int    RESV                : 2;
5137 +    unsigned int    UDF                 : 6;
5138 +    unsigned int    FPORT               : 3;
5139 +    unsigned int    TSO                        : 1;
5140 +    unsigned int    TUI_CO             : 3;
5141 +#else
5142 +    unsigned int    VPRI_VIDX           : 8;
5143 +    unsigned int    SIDX                : 4;
5144 +    unsigned int    INSP                : 1;
5145 +    unsigned int    RESV               : 1;
5146 +    unsigned int    UN_USE3             : 2;
5147 +    unsigned int    QN                  : 3;
5148 +    unsigned int    UN_USE2             : 1;
5149 +    unsigned int    UDF                        : 4;
5150 +    unsigned int    PN                  : 3;
5151 +    unsigned int    UN_USE1             : 1;
5152 +    unsigned int    TSO                        : 1;
5153 +    unsigned int    TUI_CO             : 3;
5154 +#endif
5155 +};
5156 +
5157 +
5158 +struct PDMA_txdesc {
5159 +       PDMA_TXD_INFO1_T txd_info1;
5160 +       PDMA_TXD_INFO2_T txd_info2;
5161 +       PDMA_TXD_INFO3_T txd_info3;
5162 +       PDMA_TXD_INFO4_T txd_info4;
5163 +#ifdef CONFIG_32B_DESC
5164 +       unsigned int     txd_info5;
5165 +       unsigned int     txd_info6;
5166 +       unsigned int     txd_info7;
5167 +       unsigned int     txd_info8;
5168 +#endif
5169 +};
5170 +
5171 +
5172 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
5173 +/*=========================================
5174 +      QDMA TX Descriptor Format define
5175 +=========================================*/
5176 +//-------------------------------------------------
5177 +typedef struct _QDMA_TXD_INFO1_  QDMA_TXD_INFO1_T;
5178 +
5179 +struct _QDMA_TXD_INFO1_
5180 +{
5181 +    unsigned int    SDP;
5182 +};
5183 +//-------------------------------------------------
5184 +typedef struct _QDMA_TXD_INFO2_    QDMA_TXD_INFO2_T;
5185 +
5186 +struct _QDMA_TXD_INFO2_
5187 +{
5188 +    unsigned int    NDP;
5189 +};
5190 +//-------------------------------------------------
5191 +typedef struct _QDMA_TXD_INFO3_  QDMA_TXD_INFO3_T;
5192 +
5193 +struct _QDMA_TXD_INFO3_
5194 +{
5195 +    unsigned int    QID                   : 4;
5196 +#if defined (CONFIG_HW_SFQ)
5197 +    //unsigned int    VQID                  : 10;  
5198 +    unsigned int    PROT                   : 3;
5199 +    unsigned int    IPOFST                   : 7;              
5200 +#else
5201 +    unsigned int    RESV                  : 10;
5202 +#endif
5203 +    unsigned int    SWC_bit               : 1; 
5204 +    unsigned int    BURST_bit             : 1;
5205 +    unsigned int    SDL                   : 14;
5206 +    unsigned int    LS_bit               : 1;
5207 +    unsigned int    OWN_bit             : 1;
5208 +};
5209 +//-------------------------------------------------
5210 +typedef struct _QDMA_TXD_INFO4_    QDMA_TXD_INFO4_T;
5211 +
5212 +struct _QDMA_TXD_INFO4_
5213 +{
5214 +    unsigned int    VLAN_TAG           :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
5215 +#if defined (CONFIG_RALINK_MT7621)
5216 +    unsigned int    RESV                : 2;
5217 +    unsigned int    UDF                 : 6;
5218 +#elif defined(CONFIG_ARCH_MT7623)
5219 +         unsigned int    VQID0               : 1;
5220 +         unsigned int    RESV                : 7;
5221 +#endif
5222 +    unsigned int    FPORT               : 3;
5223 +    unsigned int    TSO                        : 1;
5224 +    unsigned int    TUI_CO             : 3;
5225 +};
5226 +
5227 +
5228 +struct QDMA_txdesc {
5229 +       QDMA_TXD_INFO1_T txd_info1;
5230 +       QDMA_TXD_INFO2_T txd_info2;
5231 +       QDMA_TXD_INFO3_T txd_info3;
5232 +       QDMA_TXD_INFO4_T txd_info4;
5233 +#ifdef CONFIG_32B_DESC
5234 +       unsigned int     txd_info5;
5235 +       unsigned int     txd_info6;
5236 +       unsigned int     txd_info7;
5237 +       unsigned int     txd_info8;
5238 +#endif
5239 +};
5240 +#endif
5241 +
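+/*
+ * On the MIPS-based Ralink SoCs the addresses handed to the DMA engine may
+ * still carry the KSEG0/KSEG1 segment bits, so phys_to_bus() masks off the
+ * top three bits to recover the physical bus address; on the ARM-based
+ * MT7623 the DMA address is used as-is, so the macro is a no-op.
+ */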
5242 +#if defined (CONFIG_ARCH_MT7623)
5243 +#define phys_to_bus(a) (a)
5244 +#else
5245 +#define phys_to_bus(a) (a & 0x1FFFFFFF)
5246 +#endif
5247 +
5248 +#define PHY_Enable_Auto_Nego           0x1000
5249 +#define PHY_Restart_Auto_Nego          0x0200
5250 +
5251 +/* PHY_STAT_REG = 1; */
5252 +#define PHY_Auto_Neco_Comp     0x0020
5253 +#define PHY_Link_Status                0x0004
5254 +
5255 +/* PHY_AUTO_NEGO_REG = 4; */
5256 +#define PHY_Cap_10_Half  0x0020
5257 +#define PHY_Cap_10_Full  0x0040
5258 +#define        PHY_Cap_100_Half 0x0080
5259 +#define        PHY_Cap_100_Full 0x0100
5260 +
5261 +/* proc definition */
5262 +
5263 +#if !defined (CONFIG_RALINK_RT6855) && !defined(CONFIG_RALINK_RT6855A) && \
5264 +    !defined (CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7621) && \
5265 +    !defined (CONFIG_ARCH_MT7623)
5266 +#define CDMA_OQ_STA    (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x4c)
5267 +#define GDMA1_OQ_STA   (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x50)
5268 +#define PPE_OQ_STA     (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x54)
5269 +#define PSE_IQ_STA     (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x58)
5270 +#endif
5271 +
5272 +#define PROCREG_CONTROL_FILE      "/var/run/procreg_control"
5273 +#if defined (CONFIG_RALINK_RT2880)
5274 +#define PROCREG_DIR             "rt2880"
5275 +#elif defined (CONFIG_RALINK_RT3052)
5276 +#define PROCREG_DIR             "rt3052"
5277 +#elif defined (CONFIG_RALINK_RT3352)
5278 +#define PROCREG_DIR             "rt3352"
5279 +#elif defined (CONFIG_RALINK_RT5350)
5280 +#define PROCREG_DIR             "rt5350"
5281 +#elif defined (CONFIG_RALINK_RT2883)
5282 +#define PROCREG_DIR             "rt2883"
5283 +#elif defined (CONFIG_RALINK_RT3883)
5284 +#define PROCREG_DIR             "rt3883"
5285 +#elif defined (CONFIG_RALINK_RT6855)
5286 +#define PROCREG_DIR             "rt6855"
5287 +#elif defined (CONFIG_RALINK_MT7620)
5288 +#define PROCREG_DIR             "mt7620"
5289 +#elif defined (CONFIG_RALINK_MT7621)
5290 +#define PROCREG_DIR             "mt7621"
5291 +#elif defined (CONFIG_ARCH_MT7623)
5292 +#define PROCREG_DIR             "mt7623"
5293 +#elif defined (CONFIG_RALINK_MT7628)
5294 +#define PROCREG_DIR             "mt7628"
5295 +#elif defined (CONFIG_RALINK_RT6855A)
5296 +#define PROCREG_DIR             "rt6855a"
5297 +#else
5298 +#define PROCREG_DIR             "rt2880"
5299 +#endif
5300 +#define PROCREG_SKBFREE                "skb_free"
5301 +#define PROCREG_TXRING         "tx_ring"
5302 +#define PROCREG_RXRING         "rx_ring"
5303 +#define PROCREG_RXRING1                "rx_ring1"
5304 +#define PROCREG_RXRING2                "rx_ring2"
5305 +#define PROCREG_RXRING3                "rx_ring3"
5306 +#define PROCREG_NUM_OF_TXD     "num_of_txd"
5307 +#define PROCREG_TSO_LEN                "tso_len"
5308 +#define PROCREG_LRO_STATS      "lro_stats"
5309 +#define PROCREG_HW_LRO_STATS   "hw_lro_stats"
5310 +#define PROCREG_HW_LRO_AUTO_TLB        "hw_lro_auto_tlb"
5311 +#define PROCREG_GMAC           "gmac"
5312 +#define PROCREG_GMAC2           "gmac2"
5313 +#define PROCREG_CP0            "cp0"
5314 +#define PROCREG_RAQOS          "qos"
5315 +#define PROCREG_READ_VAL       "regread_value"
5316 +#define PROCREG_WRITE_VAL      "regwrite_value"
5317 +#define PROCREG_ADDR           "reg_addr"
5318 +#define PROCREG_CTL            "procreg_control"
5319 +#define PROCREG_RXDONE_INTR    "rxdone_intr_count"
5320 +#define PROCREG_ESW_INTR       "esw_intr_count"
5321 +#define PROCREG_ESW_CNT                "esw_cnt"
5322 +#define PROCREG_SNMP           "snmp"
5323 +#if defined (TASKLET_WORKQUEUE_SW)
5324 +#define PROCREG_SCHE           "schedule"
5325 +#endif
5326 +#define PROCREG_QDMA            "qdma"
5327 +#if defined(CONFIG_RAETH_PDMA_DVT)
5328 +#define PROCREG_PDMA_DVT               "pdma_dvt"
5329 +#endif  //#if defined(CONFIG_RAETH_PDMA_DVT)
5330 +struct rt2880_reg_op_data {
5331 +  char name[64];
5332 +  unsigned int reg_addr;
5333 +  unsigned int op;
5334 +  unsigned int reg_value;
5335 +};        
5336 +
5337 +#ifdef CONFIG_RAETH_LRO
5338 +struct lro_counters {
5339 +        u32 lro_aggregated;
5340 +        u32 lro_flushed;
5341 +        u32 lro_no_desc;
5342 +};
5343 +
5344 +struct lro_para_struct {
5345 +       unsigned int lan_ip1;
5346 +};
5347 +
5348 +#endif // CONFIG_RAETH_LRO //
5349 +
5350 +
5351 +#if defined (CONFIG_HW_SFQ)
5352 +typedef struct {
5353 +       //layer2 header
5354 +       uint8_t dmac[6];
5355 +       uint8_t smac[6];
5356 +
5357 +       //vlan header 
5358 +       uint16_t vlan_tag;
5359 +       uint16_t vlan1_gap;
5360 +       uint16_t vlan1;
5361 +       uint16_t vlan2_gap;
5362 +       uint16_t vlan2;
5363 +       uint16_t vlan_layer;
5364 +
5365 +       //pppoe header
5366 +       uint32_t pppoe_gap;
5367 +       uint16_t ppp_tag;
5368 +       uint16_t pppoe_sid;
5369 +
5370 +       //layer3 header
5371 +       uint16_t eth_type;
5372 +       struct iphdr iph;
5373 +       struct ipv6hdr ip6h;
5374 +
5375 +       //layer4 header
5376 +       struct tcphdr th;
5377 +       struct udphdr uh;
5378 +
5379 +       uint32_t pkt_type;
5380 +       uint8_t is_mcast;
5381 +
5382 +} ParseResult;
5383 +#endif
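+/*
+ * Per-adapter private data for the raether driver: PDMA (and, with
+ * CONFIG_RAETH_QDMA, QDMA) descriptor rings, the sk_buff bookkeeping arrays
+ * for those rings, deferred-work/tasklet and NAPI state, statistics and the
+ * ethtool/MII information. One instance lives behind netdev_priv().
+ */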
5384 +typedef struct end_device
5385 +{
5386 +
5387 +    unsigned int        tx_cpu_owner_idx0;
5388 +    unsigned int        rx_cpu_owner_idx0;
5389 +    unsigned int        fe_int_status;
5390 +    unsigned int        tx_full; 
5391 +    
5392 +#if !defined (CONFIG_RAETH_QDMA)
5393 +    unsigned int       phy_tx_ring0;
5394 +#else
5395 +    /* QDMA Tx  PTR */
5396 +    struct sk_buff *free_skb[NUM_TX_DESC];
5397 +    unsigned int tx_dma_ptr;
5398 +    unsigned int tx_cpu_ptr;
5399 +    unsigned int free_txd_num;
5400 +    unsigned int free_txd_head;
5401 +    unsigned int free_txd_tail;        
5402 +    struct QDMA_txdesc *txd_pool;
5403 +    dma_addr_t phy_txd_pool;
5404 +    unsigned int txd_pool_info[NUM_TX_DESC];
5405 +    struct QDMA_txdesc *free_head;
5406 +    unsigned int phy_free_head;
5407 +    unsigned int *free_page_head;
5408 +    unsigned int phy_free_page_head;
5409 +    struct PDMA_rxdesc *qrx_ring;
5410 +    unsigned int phy_qrx_ring;
5411 +#ifdef CONFIG_RAETH_PDMATX_QDMARX      /* QDMA RX */
5412 +    unsigned int phy_tx_ring0;
5413 +#endif
5414 +#endif
5415 +
5416 +    unsigned int       phy_rx_ring0, phy_rx_ring1, phy_rx_ring2, phy_rx_ring3;
5417 +
5418 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || \
5419 +    defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || \
5420 +    defined(CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || \
5421 +    defined(CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628)  || \
5422 +    defined (CONFIG_ARCH_MT7623)
5423 +    // send a signal to the user application when the link status changes
5424 +    struct work_struct  kill_sig_wq;
5425 +#endif
5426 +
5427 +    struct work_struct  reset_task;
5428 +#ifdef WORKQUEUE_BH
5429 +    struct work_struct  rx_wq;
5430 +#else
5431 +#if defined (TASKLET_WORKQUEUE_SW)
5432 +    struct work_struct  rx_wq;
5433 +#endif
5434 +    struct              tasklet_struct     rx_tasklet;
5435 +    struct              tasklet_struct     tx_tasklet;
5436 +#endif // WORKQUEUE_BH //
5437 +
5438 +#if defined(CONFIG_RAETH_QOS)
5439 +    struct             sk_buff *          skb_free[NUM_TX_RINGS][NUM_TX_DESC];
5440 +    unsigned int       free_idx[NUM_TX_RINGS];
5441 +#else
5442 +    struct             sk_buff*           skb_free[NUM_TX_DESC];
5443 +    unsigned int       free_idx;
5444 +#endif
5445 +
5446 +    struct              net_device_stats stat;  /* The new statistics table. */
5447 +    spinlock_t          page_lock;              /* Page register locks */
5448 +    struct PDMA_txdesc *tx_ring0;
5449 +#if defined(CONFIG_RAETH_QOS)
5450 +    struct PDMA_txdesc *tx_ring1;
5451 +    struct PDMA_txdesc *tx_ring2;
5452 +    struct PDMA_txdesc *tx_ring3;
5453 +#endif
5454 +    struct PDMA_rxdesc *rx_ring0;
5455 +    struct sk_buff     *netrx0_skbuf[NUM_RX_DESC];
5456 +#if defined (CONFIG_RAETH_HW_LRO)
5457 +    struct PDMA_rxdesc *rx_ring3;
5458 +    struct sk_buff     *netrx3_skbuf[NUM_RX_DESC];
5459 +    struct PDMA_rxdesc *rx_ring2;
5460 +    struct sk_buff     *netrx2_skbuf[NUM_RX_DESC];
5461 +    struct PDMA_rxdesc *rx_ring1;
5462 +    struct sk_buff     *netrx1_skbuf[NUM_RX_DESC];
5463 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
5464 +    struct PDMA_rxdesc *rx_ring1;
5465 +    struct sk_buff     *netrx1_skbuf[NUM_RX_DESC];
5466 +#if defined(CONFIG_ARCH_MT7623)
5467 +    struct PDMA_rxdesc *rx_ring2;
5468 +    struct sk_buff     *netrx2_skbuf[NUM_RX_DESC];
5469 +    struct PDMA_rxdesc *rx_ring3;
5470 +    struct sk_buff     *netrx3_skbuf[NUM_RX_DESC];
5471 +#endif  /* CONFIG_ARCH_MT7623 */
5472 +#endif
5473 +#ifdef CONFIG_RAETH_NAPI
5474 +    atomic_t irq_sem;
5475 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5476 +    struct napi_struct napi;
5477 +#endif
5478 +#endif
5479 +#ifdef CONFIG_PSEUDO_SUPPORT
5480 +    struct net_device *PseudoDev;
5481 +    unsigned int isPseudo;
5482 +#endif
5483 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
5484 +       struct mii_if_info      mii_info;
5485 +#endif
5486 +#ifdef CONFIG_RAETH_LRO
5487 +    struct lro_counters lro_counters;
5488 +    struct net_lro_mgr lro_mgr;
5489 +    struct net_lro_desc lro_arr[8];
5490 +#endif
5491 +#ifdef CONFIG_RAETH_HW_VLAN_RX
5492 +    struct vlan_group *vlgrp;
5493 +#endif
5494 +#if defined (CONFIG_RAETH_HW_LRO)
5495 +    struct work_struct hw_lro_wq;
5496 +    unsigned int hw_lro_pkt_interval[3];
5497 +    unsigned int hw_lro_alpha;  /* 0 < packet interval alpha <= 10 */
5498 +    unsigned int hw_lro_fix_setting;  /* 0: dynamic AGG/AGE time, 1: fixed AGG/AGE time */
5499 +#endif  /* CONFIG_RAETH_HW_LRO */
5500 +} END_DEVICE, *pEND_DEVICE;
5501 +
5502 +
5503 +#define RAETH_VERSION  "v3.1"
5504 +
5505 +#endif
5506 +
5507 +#define DMA_GLO_CFG PDMA_GLO_CFG
5508 +
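+/*
+ * GDM forwarding target: when QDMA handles the RX path, frames from the
+ * GMACs are steered towards the QDMA PSE port (0x5555 appears to select
+ * that port for every frame class); otherwise they are forwarded to PDMA
+ * (0x0000).
+ */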
5509 +#if defined(CONFIG_RAETH_QDMATX_QDMARX) 
5510 +#define GDMA1_FWD_PORT 0x5555
5511 +#define GDMA2_FWD_PORT 0x5555
5512 +#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
5513 +#define GDMA1_FWD_PORT 0x5555
5514 +#define GDMA2_FWD_PORT 0x5555
5515 +#else
5516 +#define GDMA1_FWD_PORT 0x0000
5517 +#define GDMA2_FWD_PORT 0x0000
5518 +#endif
5519 +
5520 +#if defined(CONFIG_RAETH_QDMATX_QDMARX) 
5521 +#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
5522 +#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
5523 +#elif defined(CONFIG_RAETH_PDMATX_QDMARX) 
5524 +#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
5525 +#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
5526 +#else
5527 +#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
5528 +#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
5529 +#endif
5530 +#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
5531 +#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
5532 +#define RAETH_FE_INT_STATUS FE_INT_STATUS
5533 +#define RAETH_FE_INT_ALL FE_INT_ALL
5534 +#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
5535 +#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
5536 +#define RAETH_FE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3)
5537 +#define QFE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3)
5538 +#define RAETH_TX_DLY_INT TX_DLY_INT
5539 +#define RAETH_TX_DONE_INT0 TX_DONE_INT0
5540 +#define RAETH_DLY_INT_CFG DLY_INT_CFG
5541 --- /dev/null
5542 +++ b/drivers/net/ethernet/raeth/ra_ethtool.c
5543 @@ -0,0 +1,515 @@
5544 +#include <linux/module.h>
5545 +#include <linux/version.h>
5546 +
5547 +#include <linux/kernel.h>
5548 +#include <linux/sched.h>
5549 +
5550 +#include <linux/netdevice.h>
5551 +#include <linux/etherdevice.h>
5552 +#include <linux/skbuff.h>
5553 +#include <linux/if_ether.h>
5554 +#include <linux/ethtool.h>
5555 +
5556 +#include "ra2882ethreg.h"
5557 +#include "raether.h"
5558 +#include "ra_mac.h"
5559 +#include "ra_ethtool.h"
5560 +
5561 +#define RAETHER_DRIVER_NAME            "raether"
5562 +#define RA_NUM_STATS                   4
5563 +
5564 +
5565 +static struct {
5566 +    const char str[ETH_GSTRING_LEN];
5567 +} ethtool_stats_keys[] = {
5568 +    { "statistic1" },
5569 +    { "statistic2" },
5570 +    { "statistic3" },
5571 +    { "statistic4" },
5572 +};
5573 +
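+/*
+ * Look up the primary raether net_device by DEV_NAME and return the PHY
+ * address recorded in its mii_info, or 0 if the device is not registered.
+ * Note that dev_get_by_name() takes a reference that is not dropped here.
+ */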
5574 +unsigned char get_current_phy_address(void)
5575 +{
5576 +       struct net_device *cur_dev_p;
5577 +       END_DEVICE *ei_local;
5578 +#if 0
5579 +       for(cur_dev_p=dev_base; cur_dev_p!=NULL; cur_dev_p=cur_dev_p->next){
5580 +               if (strncmp(cur_dev_p->name, DEV_NAME /* "eth2" usually */, 4) == 0)
5581 +                       break;
5582 +       }
5583 +#else
5584 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5585 +       cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
5586 +#else
5587 +       cur_dev_p = dev_get_by_name(DEV_NAME);
5588 +#endif
5589 +#endif 
5590 +       if(!cur_dev_p)
5591 +               return 0;
5592 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5593 +       ei_local = netdev_priv(cur_dev_p);
5594 +#else
5595 +       ei_local = cur_dev_p->priv;
5596 +#endif 
5597 +       return ei_local->mii_info.phy_id;
5598 +}
5599 +#if 0
5600 +static u32 et_get_tx_csum(struct net_device *dev)
5601 +{
5602 +       return (sysRegRead(GDMA1_FWD_CFG) & GDM1_DISCRC) ? 0 : 1;       // a pitfall here, "0" means to enable.
5603 +}
5604 +
5605 +static u32 et_get_rx_csum(struct net_device *dev)
5606 +{
5607 +       return (sysRegRead(GDMA1_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
5608 +}
5609 +
5610 +static int et_set_tx_csum(struct net_device *dev, u32 data)
5611 +{
5612 +       int value;
5613 +       //printk("et_set_tx_csum(): data = %d\n", data);
5614 +
5615 +       value = sysRegRead(GDMA1_FWD_CFG);
5616 +       if(data)
5617 +               value |= GDM1_DISCRC;
5618 +       else
5619 +               value &= ~GDM1_DISCRC;
5620 +
5621 +       sysRegWrite(GDMA1_FWD_CFG, value);
5622 +    return 0;
5623 +}
5624 +
5625 +static int et_set_rx_csum(struct net_device *dev, u32 data)
5626 +{
5627 +       int value;
5628 +       //printk("et_set_rx_csum(): data = %d\n", data);
5629 +
5630 +       value = sysRegRead(GDMA1_FWD_CFG);
5631 +       if(data)
5632 +               value |= GDM1_STRPCRC;
5633 +       else
5634 +               value &= ~GDM1_STRPCRC;
5635 +
5636 +       sysRegWrite(GDMA1_FWD_CFG, value);
5637 +    return 0;
5638 +}
5639 +#endif
5640 +
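+/* Standard clause-22 MII registers used below: register 0 is the basic mode
+ * control register (BMCR) and register 4 is the auto-negotiation
+ * advertisement register (ANAR); bit 10 of ANAR advertises pause frames. */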
5641 +#define MII_CR_ADDR                    0x00
5642 +#define MII_CR_MR_AUTONEG_ENABLE       (1 << 12)
5643 +#define MII_CR_MR_RESTART_NEGOTIATION  (1 << 9)
5644 +
5645 +#define AUTO_NEGOTIATION_ADVERTISEMENT 0x04
5646 +#define AN_PAUSE                       (1 << 10)
5647 +
5648 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5649 +static void et_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5650 +{
5651 +       int mii_an_reg;
5652 +       int mdio_cfg_reg;
5653 +       END_DEVICE *ei_local = dev->priv;
5654 +
5655 +       // get mii auto-negotiation register
5656 +       mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5657 +       epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; // get autoneg_enable flag bit
5658 +       
5659 +       mdio_cfg_reg = sysRegRead(MDIO_CFG);
5660 +       epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
5661 +       epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
5662 +
5663 +       //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5664 +}
5665 +
5666 +static int et_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5667 +{
5668 +       int mdio_cfg_reg;
5669 +       int mii_an_reg;
5670 +       END_DEVICE *ei_local = dev->priv;
5671 +
5672 +       //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5673 +
5674 +       // auto-neg pause
5675 +       mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5676 +       if(epause->autoneg)
5677 +               mii_an_reg |= AN_PAUSE;
5678 +       else
5679 +               mii_an_reg &= ~AN_PAUSE;
5680 +       mii_mgr_write(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
5681 +
5682 +       // tx/rx pause
5683 +       mdio_cfg_reg = sysRegRead(MDIO_CFG);
5684 +       if(epause->tx_pause)
5685 +               mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
5686 +       else
5687 +               mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
5688 +       if(epause->rx_pause)
5689 +               mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
5690 +       else
5691 +               mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
5692 +       sysRegWrite(MDIO_CFG, mdio_cfg_reg);
5693 +
5694 +       return 0;
5695 +}
5696 +
5697 +static int et_nway_reset(struct net_device *dev)
5698 +{
5699 +       END_DEVICE *ei_local = dev->priv;
5700 +       return mii_nway_restart(&ei_local->mii_info);
5701 +}
5702 +#endif
5703 +
5704 +static u32 et_get_link(struct net_device *dev)
5705 +{
5706 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5707 +        END_DEVICE *ei_local = netdev_priv(dev);
5708 +#else  
5709 +       END_DEVICE *ei_local = dev->priv;
5710 +#endif 
5711 +       return mii_link_ok(&ei_local->mii_info);
5712 +}
5713 +
5714 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5715 +static int et_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5716 +{
5717 +       END_DEVICE *ei_local = dev->priv;
5718 +       int rc;
5719 +       rc = mii_ethtool_sset(&ei_local->mii_info, cmd);
5720 +       return rc;
5721 +}
5722 +#endif
5723 +
5724 +static int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5725 +{
5726 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5727 +        END_DEVICE *ei_local = netdev_priv(dev);
5728 +#else
5729 +        END_DEVICE *ei_local = dev->priv;
5730 +#endif
5731 +       mii_ethtool_gset(&ei_local->mii_info, cmd);
5732 +       return 0;
5733 +}
5734 +
5735 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5736 +static u32 et_get_msglevel(struct net_device *dev)
5737 +{
5738 +       return 0;
5739 +}
5740 +
5741 +static void et_set_msglevel(struct net_device *dev, u32 datum)
5742 +{
5743 +       return;
5744 +}
5745 +
5746 +static void et_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5747 +{
5748 +       //END_DEVICE *ei_local = dev->priv;
5749 +       strcpy(info->driver, RAETHER_DRIVER_NAME);
5750 +       strcpy(info->version, RAETH_VERSION);
5751 +       strcpy(info->bus_info, "n/a");
5752 +       info->n_stats = RA_NUM_STATS;
5753 +       info->eedump_len = 0;
5754 +       info->regdump_len = 0;
5755 +}
5756 +
5757 +static int et_get_stats_count(struct net_device *dev)
5758 +{
5759 +       return RA_NUM_STATS;
5760 +}
5761 +
5762 +static void et_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
5763 +{
5764 +//     END_DEVICE *ei_local = dev->priv;
5765 +       data[0] = 0;//np->xstats.early_rx;
5766 +       data[1] = 0;//np->xstats.tx_buf_mapped;
5767 +       data[2] = 0;//np->xstats.tx_timeouts;
5768 +       data[3] = 0;//np->xstats.rx_lost_in_ring;
5769 +}
5770 +
5771 +static void et_get_strings(struct net_device *dev, u32 stringset, u8 *data)
5772 +{
5773 +       memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
5774 +}
5775 +#endif
5776 +
5777 +/*
5778 + *     mii_mgr_read wrapper for mii.o ethtool
5779 + */
5780 +int mdio_read(struct net_device *dev, int phy_id, int location)
5781 +{
5782 +       unsigned int result;
5783 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5784 +        END_DEVICE *ei_local = netdev_priv(dev);
5785 +#else
5786 +        END_DEVICE *ei_local = dev->priv;
5787 +#endif
5788 +       mii_mgr_read( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, &result);
5789 +       //printk("\n%s mii.o query= phy_id:%d, address:%d retval:%x\n", dev->name, phy_id, location, result);
5790 +       return (int)result;
5791 +}
5792 +
5793 +/*
5794 + *     mii_mgr_write wrapper for mii.o ethtool
5795 + */
5796 +void mdio_write(struct net_device *dev, int phy_id, int location, int value)
5797 +{
5798 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5799 +               END_DEVICE *ei_local = netdev_priv(dev);
5800 +#else
5801 +               END_DEVICE *ei_local = dev->priv;
5802 +#endif
5803 +       //printk("mii.o write= phy_id:%d, address:%d value:%x\n", phy_id, location, value);
5804 +       mii_mgr_write( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
5805 +       return;
5806 +}
5807 +
5808 +struct ethtool_ops ra_ethtool_ops = {
5809 +
5810 +       .get_settings           = et_get_settings,
5811 +        .get_link               = et_get_link,
5812 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)        
5813 +       .get_drvinfo            = et_get_drvinfo,
5814 +       .set_settings           = et_set_settings,
5815 +       .get_pauseparam         = et_get_pauseparam,
5816 +       .set_pauseparam         = et_set_pauseparam,
5817 +//     .get_rx_csum            = et_get_rx_csum,
5818 +//     .set_rx_csum            = et_set_rx_csum,
5819 +//     .get_tx_csum            = et_get_tx_csum,
5820 +//     .set_tx_csum            = et_set_tx_csum,
5821 +       .nway_reset             = et_nway_reset,
5822 +       .get_msglevel           = et_get_msglevel,
5823 +       .set_msglevel           = et_set_msglevel,
5824 +       .get_strings            = et_get_strings,
5825 +       .get_stats_count        = et_get_stats_count,
5826 +       .get_ethtool_stats      = et_get_ethtool_stats,
5827 +/*     .get_regs_len           = et_get_regs_len,
5828 +       .get_regs               = et_get_regs,
5829 +*/
5830 +#endif 
5831 +};
5832 +
5833 +#ifdef CONFIG_PSEUDO_SUPPORT
5834 +/*
5835 + * We cannot simply re-use the raether functions here because it is hard to
5836 + * tell where the call comes from: eth2 or eth3?
5837 + *
5838 + * This code adds around 950 bytes.
5839 + */
5840 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5841 +static void et_virt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5842 +{
5843 +       // PSEUDO_ADAPTER *pseudo = dev->priv;
5844 +       return et_get_drvinfo(dev, info);
5845 +}
5846 +
5847 +static void et_virt_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5848 +{
5849 +       int mii_an_reg, mdio_cfg_reg;
5850 +       PSEUDO_ADAPTER *pseudo = dev->priv;
5851 +
5852 +       // get mii auto-negotiation register
5853 +       mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5854 +       epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; // get autoneg_enable flag bit
5855 +       
5856 +       mdio_cfg_reg = sysRegRead(MDIO_CFG);
5857 +       epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
5858 +       epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
5859 +
5860 +       //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5861 +}
5862 +
5863 +static int et_virt_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5864 +{
5865 +       int mdio_cfg_reg;
5866 +       int mii_an_reg;
5867 +       PSEUDO_ADAPTER *pseudo = dev->priv;
5868 +
5869 +       //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
5870 +       // auto-neg pause
5871 +       mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
5872 +       if(epause->autoneg)
5873 +               mii_an_reg |= AN_PAUSE;
5874 +       else
5875 +               mii_an_reg &= ~AN_PAUSE;
5876 +       mii_mgr_write(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
5877 +
5878 +       // tx/rx pause
5879 +       mdio_cfg_reg = sysRegRead(MDIO_CFG);
5880 +       if(epause->tx_pause)
5881 +               mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
5882 +       else
5883 +               mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
5884 +       if(epause->rx_pause)
5885 +               mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
5886 +       else
5887 +               mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
5888 +       sysRegWrite(MDIO_CFG, mdio_cfg_reg);
5889 +
5890 +       return 0;
5891 +}
5892 +
5893 +static u32 et_virt_get_tx_csum(struct net_device *dev)
5894 +{
5895 +       return (sysRegRead(GDMA2_FWD_CFG) & GDM1_DISCRC) ? 0 : 1;       // a pitfall here, "0" means to enable.
5896 +}
5897 +
5898 +static u32 et_virt_get_rx_csum(struct net_device *dev)
5899 +{
5900 +       return (sysRegRead(GDMA2_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
5901 +}
5902 +
5903 +static int et_virt_set_tx_csum(struct net_device *dev, u32 data)
5904 +{
5905 +       int value;
5906 +       //printk("et_set_tx_csum(): data = %d\n", data);
5907 +       value = sysRegRead(GDMA2_FWD_CFG);
5908 +       if(data)
5909 +               value |= GDM1_DISCRC;
5910 +       else
5911 +               value &= ~GDM1_DISCRC;
5912 +       sysRegWrite(GDMA2_FWD_CFG, value);      /* virtual (pseudo) interface uses GDMA2 */
5913 +       return 0;
5914 +}
5915 +
5916 +static int et_virt_set_rx_csum(struct net_device *dev, u32 data)
5917 +{
5918 +       int value;
5919 +       //printk("et_set_rx_csum(): data = %d\n", data);
5920 +       value = sysRegRead(GDMA2_FWD_CFG);
5921 +       if(data)
5922 +               value |= GDM1_STRPCRC;
5923 +       else
5924 +               value &= ~GDM1_STRPCRC;
5925 +       sysRegWrite(GDMA2_FWD_CFG, value);      /* virtual (pseudo) interface uses GDMA2 */
5926 +       return 0;
5927 +}
5928 +
5929 +static int et_virt_nway_reset(struct net_device *dev)
5930 +{
5931 +       PSEUDO_ADAPTER *pseudo  = dev->priv;
5932 +       return mii_nway_restart(&pseudo->mii_info);
5933 +}
5934 +#endif
5935 +
5936 +static u32 et_virt_get_link(struct net_device *dev)
5937 +{
5938 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5939 +        PSEUDO_ADAPTER *pseudo  = netdev_priv(dev);
5940 +#else
5941 +        PSEUDO_ADAPTER *pseudo  = dev->priv;
5942 +#endif
5943 +       return mii_link_ok(&pseudo->mii_info);
5944 +}
5945 +
5946 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5947 +static int et_virt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5948 +{
5949 +       PSEUDO_ADAPTER *pseudo = dev->priv;
5950 +       int rc = mii_ethtool_sset(&pseudo->mii_info, cmd);
5951 +       return rc;
5952 +}
5953 +#endif
5954 +
5955 +static int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5956 +{
5957 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
5958 +        PSEUDO_ADAPTER *pseudo  = netdev_priv(dev);
5959 +#else
5960 +        PSEUDO_ADAPTER *pseudo  = dev->priv;
5961 +#endif
5962 +       mii_ethtool_gset(&pseudo->mii_info, cmd);
5963 +       return 0;
5964 +}
5965 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
5966 +static u32 et_virt_get_msglevel(struct net_device *dev)
5967 +{
5968 +       return 0;
5969 +}
5970 +
5971 +static void et_virt_set_msglevel(struct net_device *dev, u32 datum)
5972 +{
5973 +       return;
5974 +}
5975 +
5976 +static void et_virt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
5977 +{
5978 +//     PSEUDO_ADAPTER *pseudo = dev->priv;
5979 +       data[0] = 0;//np->xstats.early_rx;
5980 +       data[1] = 0;//np->xstats.tx_buf_mapped;
5981 +       data[2] = 0;//np->xstats.tx_timeouts;
5982 +       data[3] = 0;//np->xstats.rx_lost_in_ring;
5983 +}
5984 +
5985 +/* for virtual interface dedicated */
5986 +#define RA_VIRT_NUM_STATS                      4
5987 +static struct {
5988 +    const char str[ETH_GSTRING_LEN];
5989 +} ethtool_stats_keys_2[] = {
5990 +    { "statistic1" },
5991 +    { "statistic2" },
5992 +    { "statistic3" },
5993 +    { "statistic4" },
5994 +};
5995 +
5996 +static int et_virt_get_stats_count(struct net_device *dev)
5997 +{
5998 +       return RA_VIRT_NUM_STATS;
5999 +}
6000 +
6001 +static void et_virt_get_strings(struct net_device *dev, u32 stringset, u8 *data)
6002 +{
6003 +       memcpy(data, ethtool_stats_keys_2, sizeof(ethtool_stats_keys_2));
6004 +}
6005 +#endif
6006 +
6007 +struct ethtool_ops ra_virt_ethtool_ops = {
6008 +       .get_settings           = et_virt_get_settings,
6009 +       .get_link               = et_virt_get_link,
6010 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)        
6011 +       .get_drvinfo            = et_virt_get_drvinfo,
6012 +       .set_settings           = et_virt_set_settings,
6013 +       .get_pauseparam         = et_virt_get_pauseparam,
6014 +       .set_pauseparam         = et_virt_set_pauseparam,
6015 +       .get_rx_csum            = et_virt_get_rx_csum,
6016 +       .set_rx_csum            = et_virt_set_rx_csum,
6017 +       .get_tx_csum            = et_virt_get_tx_csum,
6018 +       .set_tx_csum            = et_virt_set_tx_csum,
6019 +       .nway_reset             = et_virt_nway_reset,
6020 +       .get_msglevel           = et_virt_get_msglevel,
6021 +       .set_msglevel           = et_virt_set_msglevel,
6022 +       .get_strings            = et_virt_get_strings,
6023 +       .get_stats_count        = et_virt_get_stats_count,
6024 +       .get_ethtool_stats      = et_virt_get_ethtool_stats,
6025 +/*     .get_regs_len           = et_virt_get_regs_len,
6026 +       .get_regs               = et_virt_get_regs,
6027 +*/
6028 +#endif 
6029 +};
6030 +
6031 +int mdio_virt_read(struct net_device *dev, int phy_id, int location)
6032 +{
6033 +       unsigned int result;
6034 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6035 +        PSEUDO_ADAPTER *pseudo  = netdev_priv(dev);
6036 +#else
6037 +        PSEUDO_ADAPTER *pseudo  = dev->priv;
6038 +#endif
6039 +       mii_mgr_read( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, &result);
6040 +//     printk("%s mii.o query= phy_id:%d, address:%d retval:%d\n", dev->name, phy_id, location, result);
6041 +       return (int)result;
6042 +}
6043 +
6044 +void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value)
6045 +{
6046 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
6047 +        PSEUDO_ADAPTER *pseudo  = netdev_priv(dev);
6048 +#else
6049 +        PSEUDO_ADAPTER *pseudo  = dev->priv;
6050 +#endif
6051 +//     printk("mii.o write= phy_id:%d, address:%d value:%d\n", phy_id, location, value);
6052 +       mii_mgr_write( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
6053 +       return;
6054 +}
6055 +
6056 +#endif /* CONFIG_PSEUDO_SUPPORT */
6057 +
6058 +
6059 --- /dev/null
6060 +++ b/drivers/net/ethernet/raeth/ra_ethtool.h
6061 @@ -0,0 +1,13 @@
6062 +#ifndef RA_ETHTOOL_H
6063 +#define RA_ETHTOOL_H
6064 +
6065 +/* ethtool related */
6066 +unsigned char get_current_phy_address(void);
6067 +int mdio_read(struct net_device *dev, int phy_id, int location);
6068 +void mdio_write(struct net_device *dev, int phy_id, int location, int value);
6069 +
6070 +/* for pseudo interface */
6071 +int mdio_virt_read(struct net_device *dev, int phy_id, int location);
6072 +void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value);
6073 +
6074 +#endif
6075 --- /dev/null
6076 +++ b/drivers/net/ethernet/raeth/ra_ioctl.h
6077 @@ -0,0 +1,102 @@
6078 +#ifndef _RAETH_IOCTL_H
6079 +#define _RAETH_IOCTL_H
6080 +
6081 +/* ioctl commands */
6082 +#define RAETH_ESW_REG_READ             0x89F1
6083 +#define RAETH_ESW_REG_WRITE            0x89F2
6084 +#define RAETH_MII_READ                 0x89F3
6085 +#define RAETH_MII_WRITE                        0x89F4
6086 +#define RAETH_ESW_INGRESS_RATE         0x89F5
6087 +#define RAETH_ESW_EGRESS_RATE          0x89F6
6088 +#define RAETH_ESW_PHY_DUMP             0x89F7
6089 +#define RAETH_QDMA_REG_READ            0x89F8
6090 +#define RAETH_QDMA_REG_WRITE           0x89F9
6091 +#define RAETH_QDMA_QUEUE_MAPPING        0x89FA
6092 +#define RAETH_QDMA_READ_CPU_CLK         0x89FB
6093 +#define RAETH_MII_READ_CL45             0x89FC
6094 +#define RAETH_MII_WRITE_CL45            0x89FD
6095 +#if defined(CONFIG_HW_SFQ)
6096 +#define RAETH_QDMA_SFQ_WEB_ENABLE       0x89FE
6097 +#endif
6098 +
6099 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6100 +    defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
6101 +    defined (CONFIG_ARCH_MT7623)
6102 +
6103 +#define REG_ESW_WT_MAC_MFC              0x10
6104 +#define REG_ESW_ISC                     0x18
6105 +#define REG_ESW_WT_MAC_ATA1             0x74
6106 +#define REG_ESW_WT_MAC_ATA2             0x78
6107 +#define REG_ESW_WT_MAC_ATWD             0x7C
6108 +#define REG_ESW_WT_MAC_ATC              0x80 
6109 +
6110 +#define REG_ESW_TABLE_TSRA1            0x84
6111 +#define REG_ESW_TABLE_TSRA2            0x88
6112 +#define REG_ESW_TABLE_ATRD             0x8C
6113 +
6114 +
6115 +#define REG_ESW_VLAN_VTCR              0x90
6116 +#define REG_ESW_VLAN_VAWD1             0x94
6117 +#define REG_ESW_VLAN_VAWD2             0x98
6118 +
6119 +
6120 +#define REG_ESW_VLAN_ID_BASE           0x100
6121 +
6122 +//#define REG_ESW_VLAN_ID_BASE         0x50
6123 +#define REG_ESW_VLAN_MEMB_BASE         0x70
6124 +#define REG_ESW_TABLE_SEARCH           0x24
6125 +#define REG_ESW_TABLE_STATUS0          0x28
6126 +#define REG_ESW_TABLE_STATUS1          0x2C
6127 +#define REG_ESW_TABLE_STATUS2          0x30
6128 +#define REG_ESW_WT_MAC_AD0             0x34
6129 +#define REG_ESW_WT_MAC_AD1             0x38
6130 +#define REG_ESW_WT_MAC_AD2             0x3C
6131 +
6132 +#else
6133 +/* rt3052 embedded ethernet switch registers */
6134 +#define REG_ESW_VLAN_ID_BASE           0x50
6135 +#define REG_ESW_VLAN_MEMB_BASE         0x70
6136 +#define REG_ESW_TABLE_SEARCH           0x24
6137 +#define REG_ESW_TABLE_STATUS0          0x28
6138 +#define REG_ESW_TABLE_STATUS1          0x2C
6139 +#define REG_ESW_TABLE_STATUS2          0x30
6140 +#define REG_ESW_WT_MAC_AD0             0x34
6141 +#define REG_ESW_WT_MAC_AD1             0x38
6142 +#define REG_ESW_WT_MAC_AD2             0x3C
6143 +#endif
6144 +
6145 +
6146 +#if defined(CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6147 +#define REG_ESW_MAX                    0x16C
6148 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6149 +      defined (CONFIG_RALINK_MT7620)
6150 +#define REG_ESW_MAX                    0x7FFFF
6151 +#else //RT305x, RT3350
6152 +#define REG_ESW_MAX                    0xFC
6153 +#endif
6154 +#define REG_HQOS_MAX                   0x3FFF
6155 +
6156 +
6157 +typedef struct rt3052_esw_reg {
6158 +       unsigned int off;
6159 +       unsigned int val;
6160 +} esw_reg;
6161 +
6162 +typedef struct ralink_mii_ioctl_data {
6163 +       __u32   phy_id;
6164 +       __u32   reg_num;
6165 +       __u32   val_in;
6166 +       __u32   val_out;
6167 +       __u32   port_num;
6168 +       __u32   dev_addr;
6169 +       __u32   reg_addr;
6170 +} ra_mii_ioctl_data;
6171 +
6172 +typedef struct rt335x_esw_reg {
6173 +       unsigned int on_off;
6174 +       unsigned int port;
6175 +       unsigned int bw;/*Mbps*/
6176 +} esw_rate;
6177 +
6178 +
6179 +#endif
6180 --- /dev/null
6181 +++ b/drivers/net/ethernet/raeth/ra_mac.c
6182 @@ -0,0 +1,2645 @@
6183 +#include <linux/module.h>
6184 +#include <linux/version.h>
6185 +#include <linux/kernel.h>
6186 +#include <linux/sched.h>
6187 +#include <linux/types.h>
6188 +#include <linux/fcntl.h>
6189 +#include <linux/interrupt.h>
6190 +#include <linux/ptrace.h>
6191 +#include <linux/ioport.h>
6192 +#include <linux/in.h>
6193 +#include <linux/slab.h>
6194 +#include <linux/string.h>
6195 +#include <linux/signal.h>
6196 +#include <linux/irq.h>
6197 +#include <linux/ctype.h>
6198 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,4)
6199 +#include <asm/system.h>
6200 +#include <linux/mca.h>
6201 +#endif
6202 +#include <asm/io.h>
6203 +#include <asm/bitops.h>
6204 +#include <asm/io.h>
6205 +#include <asm/dma.h>
6206 +
6207 +#include <asm/rt2880/surfboardint.h>   /* for cp0 reg access, added by bobtseng */
6208 +
6209 +#include <linux/errno.h>
6210 +#include <linux/init.h>
6211 +
6212 +#include <linux/netdevice.h>
6213 +#include <linux/etherdevice.h>
6214 +#include <linux/skbuff.h>
6215 +
6216 +#include <linux/init.h>
6217 +#include <linux/module.h>
6218 +#include <linux/proc_fs.h>
6219 +#include <asm/uaccess.h>
6220 +
6221 +#include <linux/seq_file.h>
6222 +
6223 +
6224 +#if defined(CONFIG_RAETH_LRO)
6225 +#include <linux/inet_lro.h>
6226 +#endif
6227 +
6228 +#include "ra2882ethreg.h"
6229 +#include "raether.h"
6230 +#include "ra_mac.h"
6231 +#include "ra_ethtool.h"
6232 +#if defined(CONFIG_RAETH_PDMA_DVT)
6233 +#include "dvt/raether_pdma_dvt.h"
6234 +#endif  //#if defined(CONFIG_RAETH_PDMA_DVT)
6235 +
6236 +extern struct net_device *dev_raether;
6237 +
6238 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6239 +    defined (CONFIG_RALINK_MT7620)
6240 +extern unsigned short p0_rx_good_cnt;
6241 +extern unsigned short p0_tx_good_cnt;
6242 +extern unsigned short p1_rx_good_cnt;
6243 +extern unsigned short p1_tx_good_cnt;
6244 +extern unsigned short p2_rx_good_cnt;
6245 +extern unsigned short p2_tx_good_cnt;
6246 +extern unsigned short p3_rx_good_cnt;
6247 +extern unsigned short p3_tx_good_cnt;
6248 +extern unsigned short p4_rx_good_cnt;
6249 +extern unsigned short p4_tx_good_cnt;
6250 +extern unsigned short p5_rx_good_cnt;
6251 +extern unsigned short p5_tx_good_cnt;
6252 +extern unsigned short p6_rx_good_cnt;
6253 +extern unsigned short p6_tx_good_cnt;
6254 +
6255 +extern unsigned short p0_rx_byte_cnt;
6256 +extern unsigned short p1_rx_byte_cnt;
6257 +extern unsigned short p2_rx_byte_cnt;
6258 +extern unsigned short p3_rx_byte_cnt;
6259 +extern unsigned short p4_rx_byte_cnt;
6260 +extern unsigned short p5_rx_byte_cnt;
6261 +extern unsigned short p6_rx_byte_cnt;
6262 +extern unsigned short p0_tx_byte_cnt;
6263 +extern unsigned short p1_tx_byte_cnt;
6264 +extern unsigned short p2_tx_byte_cnt;
6265 +extern unsigned short p3_tx_byte_cnt;
6266 +extern unsigned short p4_tx_byte_cnt;
6267 +extern unsigned short p5_tx_byte_cnt;
6268 +extern unsigned short p6_tx_byte_cnt;
6269 +
6270 +#if defined(CONFIG_RALINK_MT7620)
6271 +extern unsigned short p7_rx_good_cnt;
6272 +extern unsigned short p7_tx_good_cnt;
6273 +extern unsigned short p7_rx_byte_cnt;
6274 +extern unsigned short p7_tx_byte_cnt;
6275 +#endif
6276 +#endif
6277 +
6278 +
6279 +
6280 +#if defined(CONFIG_RAETH_TSO)
6281 +int txd_cnt[MAX_SKB_FRAGS/2 + 1];
6282 +int tso_cnt[16];
6283 +#endif
6284 +
6285 +#if defined(CONFIG_RAETH_LRO)
6286 +#define MAX_AGGR 64
6287 +#define MAX_DESC  8
6288 +int lro_stats_cnt[MAX_AGGR + 1];
6289 +int lro_flush_cnt[MAX_AGGR + 1];
6290 +int lro_len_cnt1[16];
6291 +//int lro_len_cnt2[16];
6292 +int aggregated[MAX_DESC];
6293 +int lro_aggregated;
6294 +int lro_flushed;
6295 +int lro_nodesc;
6296 +int force_flush;
6297 +int tot_called1;
6298 +int tot_called2;
6299 +#endif
6300 +
6301 +#if defined(CONFIG_RAETH_HW_LRO)
6302 +#define HW_LRO_RING_NUM 3
6303 +#define MAX_HW_LRO_AGGR 64
6304 +unsigned int hw_lro_agg_num_cnt[HW_LRO_RING_NUM][MAX_HW_LRO_AGGR + 1];
6305 +unsigned int hw_lro_agg_size_cnt[HW_LRO_RING_NUM][16];
6306 +unsigned int hw_lro_tot_agg_cnt[HW_LRO_RING_NUM];
6307 +unsigned int hw_lro_tot_flush_cnt[HW_LRO_RING_NUM];
6308 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
6309 +unsigned int hw_lro_agg_flush_cnt[HW_LRO_RING_NUM];
6310 +unsigned int hw_lro_age_flush_cnt[HW_LRO_RING_NUM];
6311 +unsigned int hw_lro_seq_flush_cnt[HW_LRO_RING_NUM];
6312 +unsigned int hw_lro_timestamp_flush_cnt[HW_LRO_RING_NUM];
6313 +unsigned int hw_lro_norule_flush_cnt[HW_LRO_RING_NUM];
6314 +#endif  /* CONFIG_RAETH_HW_LRO_REASON_DBG */
6315 +#endif  /* CONFIG_RAETH_HW_LRO */
6316 +
6317 +#if defined(CONFIG_RAETH_QDMA)
6318 +extern unsigned int M2Q_table[64];
6319 +extern struct QDMA_txdesc *free_head; 
6320 +#endif
6321 +#if defined (CONFIG_ARCH_MT7623)
6322 +extern struct SFQ_table *sfq0;
6323 +extern struct SFQ_table *sfq1;
6324 +extern struct SFQ_table *sfq2;
6325 +extern struct SFQ_table *sfq3;
6326 +#endif
6327 +
6328 +#if defined(CONFIG_USER_SNMPD)
6329 +
6330 +static int ra_snmp_seq_show(struct seq_file *seq, void *v)
6331 +{
6332 +#if !defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7628)
6333 +
6334 +       seq_printf(seq, "rx counters: %x %x %x %x %x %x %x\n", sysRegRead(GDMA_RX_GBCNT0), sysRegRead(GDMA_RX_GPCNT0),sysRegRead(GDMA_RX_OERCNT0), sysRegRead(GDMA_RX_FERCNT0), sysRegRead(GDMA_RX_SERCNT0), sysRegRead(GDMA_RX_LERCNT0), sysRegRead(GDMA_RX_CERCNT0));
6335 +
6336 +       seq_printf(seq, "fc config: %x %x %x %x\n", sysRegRead(CDMA_FC_CFG), sysRegRead(GDMA1_FC_CFG), PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
6337 +
6338 +       seq_printf(seq, "scheduler: %x %x %x\n", sysRegRead(GDMA1_SCH_CFG), sysRegRead(GDMA2_SCH_CFG), sysRegRead(PDMA_SCH_CFG));
6339 +
6340 +#endif
6341 +       seq_printf(seq, "ports: %x %x %x %x %x %x\n", sysRegRead(PORT0_PKCOUNT), sysRegRead(PORT1_PKCOUNT), sysRegRead(PORT2_PKCOUNT), sysRegRead(PORT3_PKCOUNT), sysRegRead(PORT4_PKCOUNT), sysRegRead(PORT5_PKCOUNT));
6342 +
6343 +       return 0;
6344 +}
6345 +
6346 +static int ra_snmp_seq_open(struct inode *inode, struct file *file)
6347 +{
6348 +       return single_open(file, ra_snmp_seq_show, NULL);
6349 +}
6350 +
6351 +static const struct file_operations ra_snmp_seq_fops = {
6352 +       .owner   = THIS_MODULE,
6353 +       .open    = ra_snmp_seq_open,
6354 +       .read    = seq_read,
6355 +       .llseek  = seq_lseek,
6356 +       .release = single_release
6357 +};
6358 +#endif
6359 +
6360 +
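+/*
+ * enable_auto_negotiate() programs the PHY auto-polling logic
+ * (ESW_PHY_POLLING on the newer SoCs, MDIO_CFG/MDIO_CFG2 on the older ones)
+ * with the PHY address range to poll, so the MAC follows the PHY link state
+ * automatically.
+ */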
6361 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_100PHY) || \
6362 +    defined (CONFIG_P5_MAC_TO_PHY_MODE) || defined (CONFIG_RAETH_GMAC2)
6363 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
6364 +    defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)  || \
6365 +    defined (CONFIG_ARCH_MT7623)
6366 +void enable_auto_negotiate(int unused)
6367 +{
6368 +       u32 regValue;
6369 +#if !defined (CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
6370 +       u32 addr = CONFIG_MAC_TO_GIGAPHY_MODE_ADDR;
6371 +#endif
6372 +
6373 +#if defined (CONFIG_RALINK_MT7621)
6374 +       //enable MDIO mode all the time
6375 +       regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
6376 +       regValue &= ~(0x3 << 12);
6377 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
6378 +#endif
6379 +       
6380 +       /* FIXME: we don't know how to deal with PHY end addr */
6381 +       regValue = sysRegRead(ESW_PHY_POLLING);
6382 +       regValue |= (1<<31);
6383 +       regValue &= ~(0x1f);
6384 +       regValue &= ~(0x1f<<8);
6385 +#if defined (CONFIG_RALINK_MT7620)
6386 +       regValue |= ((addr-1) << 0);//setup PHY address for auto polling (Start Addr).
6387 +       regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
6388 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
6389 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)|| defined (CONFIG_GE_RGMII_INTERNAL_P4_AN) || defined (CONFIG_GE2_RGMII_AN)
6390 +       regValue |= ((CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2-1)&0x1f << 0);//setup PHY address for auto polling (Start Addr).
6391 +       regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
6392 +#else
6393 +       regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 0);//setup PHY address for auto polling (Start Addr).
6394 +       regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
6395 +#endif
6396 +#else
6397 +       regValue |= (addr << 0);// setup PHY address for auto polling (start Addr).
6398 +       regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
6399 +#endif
6400 +
6401 +       /*kurtis: AN is strange*/
6402 +       sysRegWrite(ESW_PHY_POLLING, regValue);
6403 +
6404 +#if defined (CONFIG_P4_MAC_TO_PHY_MODE)
6405 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;
6406 +#endif
6407 +#if defined (CONFIG_P5_MAC_TO_PHY_MODE)
6408 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x56330;
6409 +#endif
6410 +}
6411 +#elif defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT3883) || \
6412 +      defined (CONFIG_RALINK_RT3052) || defined(CONFIG_RALINK_RT3352)
6413 +
6414 +void enable_auto_negotiate(int ge)
6415 +{
6416 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
6417 +        u32 regValue = sysRegRead(0xb01100C8);
6418 +#else
6419 +       u32 regValue;
6420 +       regValue = (ge == 2)? sysRegRead(MDIO_CFG2) : sysRegRead(MDIO_CFG);
6421 +#endif
6422 +
6423 +        regValue &= 0xe0ff7fff;                 // clear auto polling related field:
6424 +                                                // (MD_PHY1ADDR & GP1_FRC_EN).
6425 +        regValue |= 0x20000000;                 // force to enable MDC/MDIO auto polling.
6426 +
6427 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_GE2_MII_AN)
6428 +       if(ge==2) {
6429 +           regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 24);               // setup PHY address for auto polling.
6430 +       }
6431 +#endif
6432 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_GE1_MII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
6433 +       if(ge==1) {
6434 +           regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 24);               // setup PHY address for auto polling.
6435 +       }
6436 +#endif
6437 +
6438 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
6439 +       sysRegWrite(0xb01100C8, regValue);
6440 +#else
6441 +       if (ge == 2)
6442 +               sysRegWrite(MDIO_CFG2, regValue);
6443 +       else
6444 +               sysRegWrite(MDIO_CFG, regValue);
6445 +#endif
6446 +}
6447 +#endif
6448 +#endif
6449 +void ra2880stop(END_DEVICE *ei_local)
6450 +{
6451 +       unsigned int regValue;
6452 +       printk("ra2880stop()...");
6453 +
6454 +       regValue = sysRegRead(DMA_GLO_CFG);
6455 +       regValue &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
6456 +       sysRegWrite(DMA_GLO_CFG, regValue);
6457 +       
6458 +       printk("Done\n");       
6459 +       // printk("Done0x%x...\n", readreg(DMA_GLO_CFG));
6460 +}
6461 +
6462 +void ei_irq_clear(void)
6463 +{
6464 +        sysRegWrite(FE_INT_STATUS, 0xFFFFFFFF);
6465 +}
6466 +
6467 +void rt2880_gmac_hard_reset(void)
6468 +{
6469 +#if !defined (CONFIG_RALINK_RT6855A)
6470 +       //FIXME
6471 +       sysRegWrite(RSTCTRL, RALINK_FE_RST);
6472 +       sysRegWrite(RSTCTRL, 0);
6473 +#endif
6474 +}
6475 +
6476 +void ra2880EnableInterrupt()
6477 +{
6478 +       unsigned int regValue = sysRegRead(FE_INT_ENABLE);
6479 +       RAETH_PRINT("FE_INT_ENABLE -- : 0x%08x\n", regValue);
6480 +//     regValue |= (RX_DONE_INT0 | TX_DONE_INT0);
6481 +               
6482 +       sysRegWrite(FE_INT_ENABLE, regValue);
6483 +}
6484 +
6485 +void ra2880MacAddressSet(unsigned char p[6])
6486 +{
6487 +        unsigned long regValue;
6488 +
6489 +       regValue = (p[0] << 8) | (p[1]);
6490 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6491 +        sysRegWrite(SDM_MAC_ADRH, regValue);
6492 +       printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRH));
6493 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
6494 +        sysRegWrite(GDMA1_MAC_ADRH, regValue);
6495 +       printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
6496 +
6497 +       /* To keep consistency between RT6855 and RT62806, the GSW should keep this register as well. */
6498 +        sysRegWrite(SMACCR1, regValue);
6499 +       printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
6500 +#elif defined (CONFIG_RALINK_MT7620)
6501 +        sysRegWrite(SMACCR1, regValue);
6502 +       printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
6503 +#else
6504 +        sysRegWrite(GDMA1_MAC_ADRH, regValue);
6505 +       printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
6506 +#endif
6507 +
6508 +        regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
6509 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6510 +        sysRegWrite(SDM_MAC_ADRL, regValue);
6511 +       printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRL));           
6512 +#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
6513 +        sysRegWrite(GDMA1_MAC_ADRL, regValue);
6514 +       printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));         
6515 +
6516 +       /* To keep consistency between RT6855 and RT62806, the GSW should keep this register as well. */
6517 +        sysRegWrite(SMACCR0, regValue);
6518 +       printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
6519 +#elif defined (CONFIG_RALINK_MT7620)
6520 +        sysRegWrite(SMACCR0, regValue);
6521 +       printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
6522 +#else
6523 +        sysRegWrite(GDMA1_MAC_ADRL, regValue);
6524 +       printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));         
6525 +#endif
6526 +
6527 +        return;
6528 +}
6529 +
6530 +#ifdef CONFIG_PSEUDO_SUPPORT
6531 +void ra2880Mac2AddressSet(unsigned char p[6])
6532 +{
6533 +        unsigned long regValue;
6534 +
6535 +       regValue = (p[0] << 8) | (p[1]);
6536 +        sysRegWrite(GDMA2_MAC_ADRH, regValue);
6537 +
6538 +        regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
6539 +        sysRegWrite(GDMA2_MAC_ADRL, regValue);
6540 +
6541 +       printk("GDMA2_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRH));
6542 +       printk("GDMA2_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRL));         
6543 +        return;
6544 +}
6545 +#endif
6546 +
6547 +/**
6548 + * ethtool_init - called by raeth_probe to initialize ethtool/MII support
6549 + * @dev: device pointer
6550 + *
6551 + * ethtool_init initializes the mii_if_info embedded in the END_DEVICE structure
6552 + *
6553 + */
6554 +void ethtool_init(struct net_device *dev)
6555 +{
6556 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
6557 +       END_DEVICE *ei_local = netdev_priv(dev);
6558 +
6559 +       // init mii structure
6560 +       ei_local->mii_info.dev = dev;
6561 +       ei_local->mii_info.mdio_read = mdio_read;
6562 +       ei_local->mii_info.mdio_write = mdio_write;
6563 +       ei_local->mii_info.phy_id_mask = 0x1f;
6564 +       ei_local->mii_info.reg_num_mask = 0x1f;
6565 +       ei_local->mii_info.supports_gmii = mii_check_gmii_support(&ei_local->mii_info);
6566 +       // TODO:   phy_id: 0~4
6567 +       ei_local->mii_info.phy_id = 1;
6568 +#endif
6569 +       return;
6570 +}
6571 +
6572 +/*
6573 + *     Routine Name : get_ring_usage(mode, index)
6574 + *     Description: calculate ring usage for tx/rx rings
6575 + *     Mode 1 : Tx Ring
6576 + *     Mode 2 : Rx Ring
6577 + */
6578 +int get_ring_usage(int mode, int i)
6579 +{
6580 +       unsigned long tx_ctx_idx, tx_dtx_idx, tx_usage;
6581 +       unsigned long rx_calc_idx, rx_drx_idx, rx_usage;
6582 +
6583 +       struct PDMA_rxdesc* rxring;
6584 +       struct PDMA_txdesc* txring;
6585 +
6586 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
6587 +
6588 +
6589 +       if (mode == 2 ) {
6590 +               /* cpu point to the next descriptor of rx dma ring */
6591 +               rx_calc_idx = *(unsigned long*)RX_CALC_IDX0;
6592 +               rx_drx_idx = *(unsigned long*)RX_DRX_IDX0;
6593 +               rxring = (struct PDMA_rxdesc*)RX_BASE_PTR0;
6594 +               
6595 +               rx_usage = (rx_drx_idx - rx_calc_idx -1 + NUM_RX_DESC) % NUM_RX_DESC;
6596 +               if ( rx_calc_idx == rx_drx_idx ) {
6597 +                   if ( rxring[rx_drx_idx].rxd_info2.DDONE_bit == 1)
6598 +                     rx_usage = NUM_RX_DESC;   /* ring full */
6599 +                 else
6600 +                     rx_usage = 0;             /* ring empty */
6601 +               }
6602 +               return rx_usage;
6603 +       }
6604 +
6605 +       
6606 +       switch (i) {
6607 +               case 0:
6608 +                               tx_ctx_idx = *(unsigned long*)TX_CTX_IDX0;
6609 +                               tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
6610 +                               txring = ei_local->tx_ring0;
6611 +                               break;
6612 +#if defined(CONFIG_RAETH_QOS)
6613 +               case 1:
6614 +                               tx_ctx_idx = *(unsigned long*)TX_CTX_IDX1;
6615 +                               tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
6616 +                               txring = ei_local->tx_ring1;
6617 +                               break;
6618 +               case 2:
6619 +                               tx_ctx_idx = *(unsigned long*)TX_CTX_IDX2;
6620 +                               tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
6621 +                               txring = ei_local->tx_ring2;
6622 +                               break;
6623 +               case 3:
6624 +                               tx_ctx_idx = *(unsigned long*)TX_CTX_IDX3;
6625 +                               tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
6626 +                               txring = ei_local->tx_ring3;
6627 +                               break;
6628 +#endif
6629 +               default:
6630 +                       printk("get_tx_idx failed %d %d\n", mode, i);
6631 +                       return 0;
6632 +       };
6633 +
6634 +       tx_usage = (tx_ctx_idx - tx_dtx_idx + NUM_TX_DESC) % NUM_TX_DESC;
6635 +       if ( tx_ctx_idx == tx_dtx_idx ) {
6636 +               if ( txring[tx_ctx_idx].txd_info2.DDONE_bit == 1)
6637 +                       tx_usage = 0;
6638 +               else
6639 +                       tx_usage = NUM_TX_DESC;
6640 +       }
6641 +       return tx_usage;
6642 +
6643 +}
6644 +
6645 +#if defined(CONFIG_RAETH_QOS)
6646 +void dump_qos(struct seq_file *s)
6647 +{
6648 +       int usage;
6649 +       int i;
6650 +
6651 +       seq_printf(s, "\n-----Raeth QOS -----\n\n");
6652 +
6653 +       for ( i = 0; i < 4; i++)  {
6654 +               usage = get_ring_usage(1,i);
6655 +               seq_printf(s, "Tx Ring%d Usage : %d/%d\n", i, usage, NUM_TX_DESC);
6656 +       }
6657 +
6658 +       usage = get_ring_usage(2,0);
6659 +       seq_printf(s, "RX Usage : %d/%d\n\n", usage, NUM_RX_DESC);
6660 +#if defined  (CONFIG_RALINK_MT7620)
6661 +       seq_printf(s, "PSE_FQFC_CFG(0x%08x)  : 0x%08x\n", PSE_FQFC_CFG, sysRegRead(PSE_FQFC_CFG));
6662 +       seq_printf(s, "PSE_IQ_CFG(0x%08x)  : 0x%08x\n", PSE_IQ_CFG, sysRegRead(PSE_IQ_CFG));
6663 +       seq_printf(s, "PSE_QUE_STA(0x%08x)  : 0x%08x\n", PSE_QUE_STA, sysRegRead(PSE_QUE_STA));
6664 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
6665 +
6666 +#else
6667 +       seq_printf(s, "GDMA1_FC_CFG(0x%08x)  : 0x%08x\n", GDMA1_FC_CFG, sysRegRead(GDMA1_FC_CFG));
6668 +       seq_printf(s, "GDMA2_FC_CFG(0x%08x)  : 0x%08x\n", GDMA2_FC_CFG, sysRegRead(GDMA2_FC_CFG));
6669 +       seq_printf(s, "PDMA_FC_CFG(0x%08x)  : 0x%08x\n", PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
6670 +       seq_printf(s, "PSE_FQ_CFG(0x%08x)  : 0x%08x\n", PSE_FQ_CFG, sysRegRead(PSE_FQ_CFG));
6671 +#endif
6672 +       seq_printf(s, "\n\nTX_CTX_IDX0    : 0x%08x\n", sysRegRead(TX_CTX_IDX0));        
6673 +       seq_printf(s, "TX_DTX_IDX0    : 0x%08x\n", sysRegRead(TX_DTX_IDX0));
6674 +       seq_printf(s, "TX_CTX_IDX1    : 0x%08x\n", sysRegRead(TX_CTX_IDX1));    
6675 +       seq_printf(s, "TX_DTX_IDX1    : 0x%08x\n", sysRegRead(TX_DTX_IDX1));
6676 +       seq_printf(s, "TX_CTX_IDX2    : 0x%08x\n", sysRegRead(TX_CTX_IDX2));    
6677 +       seq_printf(s, "TX_DTX_IDX2    : 0x%08x\n", sysRegRead(TX_DTX_IDX2));
6678 +       seq_printf(s, "TX_CTX_IDX3    : 0x%08x\n", sysRegRead(TX_CTX_IDX3));
6679 +       seq_printf(s, "TX_DTX_IDX3    : 0x%08x\n", sysRegRead(TX_DTX_IDX3));
6680 +       seq_printf(s, "RX_CALC_IDX0   : 0x%08x\n", sysRegRead(RX_CALC_IDX0));
6681 +       seq_printf(s, "RX_DRX_IDX0    : 0x%08x\n", sysRegRead(RX_DRX_IDX0));
6682 +
6683 +       seq_printf(s, "\n------------------------------\n\n");
6684 +}
6685 +#endif
6686 +
6687 +void dump_reg(struct seq_file *s)
6688 +{
6689 +       int fe_int_enable;
6690 +       int rx_usage;
6691 +       int dly_int_cfg;
6692 +       int rx_base_ptr0;
6693 +       int rx_max_cnt0;
6694 +       int rx_calc_idx0;
6695 +       int rx_drx_idx0;
6696 +#if !defined (CONFIG_RAETH_QDMA)
6697 +       int tx_usage;
6698 +       int tx_base_ptr[4];
6699 +       int tx_max_cnt[4];
6700 +       int tx_ctx_idx[4];
6701 +       int tx_dtx_idx[4];
6702 +       int i;
6703 +#endif
6704 +
6705 +       fe_int_enable = sysRegRead(FE_INT_ENABLE);
6706 +        rx_usage = get_ring_usage(2,0);
6707 +
6708 +       dly_int_cfg = sysRegRead(DLY_INT_CFG);
6709 +       
6710 +#if !defined (CONFIG_RAETH_QDMA)
6711 +       tx_usage = get_ring_usage(1,0);
6712 +
6713 +       tx_base_ptr[0] = sysRegRead(TX_BASE_PTR0);
6714 +       tx_max_cnt[0] = sysRegRead(TX_MAX_CNT0);
6715 +       tx_ctx_idx[0] = sysRegRead(TX_CTX_IDX0);
6716 +       tx_dtx_idx[0] = sysRegRead(TX_DTX_IDX0);
6717 +       
6718 +       tx_base_ptr[1] = sysRegRead(TX_BASE_PTR1);
6719 +       tx_max_cnt[1] = sysRegRead(TX_MAX_CNT1);
6720 +       tx_ctx_idx[1] = sysRegRead(TX_CTX_IDX1);
6721 +       tx_dtx_idx[1] = sysRegRead(TX_DTX_IDX1);
6722 +
6723 +       tx_base_ptr[2] = sysRegRead(TX_BASE_PTR2);
6724 +       tx_max_cnt[2] = sysRegRead(TX_MAX_CNT2);
6725 +       tx_ctx_idx[2] = sysRegRead(TX_CTX_IDX2);
6726 +       tx_dtx_idx[2] = sysRegRead(TX_DTX_IDX2);
6727 +       
6728 +       tx_base_ptr[3] = sysRegRead(TX_BASE_PTR3);
6729 +       tx_max_cnt[3] = sysRegRead(TX_MAX_CNT3);
6730 +       tx_ctx_idx[3] = sysRegRead(TX_CTX_IDX3);
6731 +       tx_dtx_idx[3] = sysRegRead(TX_DTX_IDX3);
6732 +#endif
6733 +
6734 +       rx_base_ptr0 = sysRegRead(RX_BASE_PTR0);
6735 +       rx_max_cnt0 = sysRegRead(RX_MAX_CNT0);
6736 +       rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
6737 +       rx_drx_idx0 = sysRegRead(RX_DRX_IDX0);
6738 +
6739 +       seq_printf(s, "\n\nFE_INT_ENABLE  : 0x%08x\n", fe_int_enable);
6740 +#if !defined (CONFIG_RAETH_QDMA)
6741 +       seq_printf(s, "TxRing PktCnt: %d/%d\n", tx_usage, NUM_TX_DESC);
6742 +#endif
6743 +       seq_printf(s, "RxRing PktCnt: %d/%d\n\n", rx_usage, NUM_RX_DESC);
6744 +       seq_printf(s, "DLY_INT_CFG    : 0x%08x\n", dly_int_cfg);
6745 +
6746 +#if !defined (CONFIG_RAETH_QDMA)       
6747 +       for(i=0;i<4;i++) {
6748 +               seq_printf(s, "TX_BASE_PTR%d   : 0x%08x\n", i, tx_base_ptr[i]); 
6749 +               seq_printf(s, "TX_MAX_CNT%d    : 0x%08x\n", i, tx_max_cnt[i]);  
6750 +               seq_printf(s, "TX_CTX_IDX%d     : 0x%08x\n", i, tx_ctx_idx[i]);
6751 +               seq_printf(s, "TX_DTX_IDX%d     : 0x%08x\n", i, tx_dtx_idx[i]);
6752 +       }
6753 +#endif
6754 +
6755 +       seq_printf(s, "RX_BASE_PTR0   : 0x%08x\n", rx_base_ptr0);       
6756 +       seq_printf(s, "RX_MAX_CNT0    : 0x%08x\n", rx_max_cnt0);        
6757 +       seq_printf(s, "RX_CALC_IDX0   : 0x%08x\n", rx_calc_idx0);
6758 +       seq_printf(s, "RX_DRX_IDX0    : 0x%08x\n", rx_drx_idx0);
6759 +       
6760 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
6761 +       seq_printf(s, "The current PHY address selected by ethtool is %d\n", get_current_phy_address());
6762 +#endif
6763 +
6764 +#if defined (CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
6765 +       seq_printf(s, "GDMA_RX_FCCNT1(0x%08x)     : 0x%08x\n\n", GDMA_RX_FCCNT1, sysRegRead(GDMA_RX_FCCNT1));   
6766 +#endif
6767 +}
6768 +
6769 +#if 0
6770 +void dump_cp0(void)
6771 +{
6772 +       printk("CP0 Register dump --\n");
6773 +       printk("CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
6774 +       printk("CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
6775 +       printk("CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
6776 +       printk("CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
6777 +       printk("CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
6778 +       printk("CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
6779 +       printk("CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
6780 +       printk("CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
6781 +       printk("CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
6782 +       printk("CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
6783 +       printk("CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
6784 +       printk("CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
6785 +       printk("CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
6786 +       printk("CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
6787 +       printk("CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
6788 +       printk("CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
6789 +       printk("CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
6790 +       printk("CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
6791 +       printk("CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
6792 +       printk("CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
6793 +       printk("CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
6794 +       printk("CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
6795 +       printk("CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
6796 +       printk("CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
6797 +       printk("CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
6798 +       printk("CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
6799 +       printk("CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
6800 +       printk("CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
6801 +       printk("CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
6802 +       printk("CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
6803 +       printk("CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
6804 +       printk("CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
6805 +       printk("CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
6806 +}
6807 +#endif
6808 +
6809 +struct proc_dir_entry *procRegDir;
6810 +static struct proc_dir_entry *procGmac, *procSysCP0, *procTxRing, *procRxRing, *procSkbFree;
6811 +#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
6812 +static struct proc_dir_entry *procGmac2;
6813 +#endif
6814 +#if defined(CONFIG_USER_SNMPD)
6815 +static struct proc_dir_entry *procRaSnmp;
6816 +#endif
6817 +#if defined(CONFIG_RAETH_TSO)
6818 +static struct proc_dir_entry *procNumOfTxd, *procTsoLen;
6819 +#endif
6820 +
6821 +#if defined(CONFIG_RAETH_LRO)
6822 +static struct proc_dir_entry *procLroStats;
6823 +#endif
6824 +#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
6825 +static struct proc_dir_entry *procRxRing1, *procRxRing2, *procRxRing3;
6826 +static struct proc_dir_entry *procHwLroStats, *procHwLroAutoTlb;
6827 +static const HWLRO_DBG_FUNC hw_lro_dbg_func[] =
6828 +{
6829 +    [0] = hwlro_agg_cnt_ctrl,
6830 +    [1] = hwlro_agg_time_ctrl,
6831 +    [2] = hwlro_age_time_ctrl,
6832 +    [3] = hwlro_pkt_int_alpha_ctrl,
6833 +    [4] = hwlro_threshold_ctrl,
6834 +    [5] = hwlro_fix_setting_switch_ctrl,
6835 +};
6836 +#endif  /* CONFIG_RAETH_HW_LRO */
6837 +#if defined (TASKLET_WORKQUEUE_SW)
6838 +static struct proc_dir_entry *procSCHE;
6839 +#endif
6840 +
6841 +#if defined(CONFIG_RAETH_PDMA_DVT)
6842 +static struct proc_dir_entry *procPdmaDvt;
6843 +
6844 +static const PDMA_DBG_FUNC pdma_dvt_dbg_func[] =
6845 +{
6846 +    [0] = pdma_dvt_show_ctrl,
6847 +    [1] = pdma_dvt_test_rx_ctrl,
6848 +    [2] = pdma_dvt_test_tx_ctrl,
6849 +    [3] = pdma_dvt_test_debug_ctrl,
6850 +    [4] = pdma_dvt_test_lro_ctrl,
6851 +};
6852 +#endif  //#if defined(CONFIG_RAETH_PDMA_DVT)
6853 +
6854 +int RegReadMain(struct seq_file *seq, void *v)
6855 +{
6856 +       dump_reg(seq);
6857 +       return 0;
6858 +}
6859 +
6860 +static void *seq_SkbFree_start(struct seq_file *seq, loff_t *pos)
6861 +{
6862 +       if (*pos < NUM_TX_DESC)
6863 +               return pos;
6864 +       return NULL;
6865 +}
6866 +
6867 +static void *seq_SkbFree_next(struct seq_file *seq, void *v, loff_t *pos)
6868 +{
6869 +       (*pos)++;
6870 +       if (*pos >= NUM_TX_DESC)
6871 +               return NULL;
6872 +       return pos;
6873 +}
6874 +
6875 +static void seq_SkbFree_stop(struct seq_file *seq, void *v)
6876 +{
6877 +       /* Nothing to do */
6878 +}
6879 +
6880 +static int seq_SkbFree_show(struct seq_file *seq, void *v)
6881 +{
6882 +       int i = *(loff_t *) v;
6883 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
6884 +
6885 +       seq_printf(seq, "%d: %08x\n",i,  *(int *)&ei_local->skb_free[i]);
6886 +
6887 +       return 0;
6888 +}
6889 +
6890 +static const struct seq_operations seq_skb_free_ops = {
6891 +       .start = seq_SkbFree_start,
6892 +       .next  = seq_SkbFree_next,
6893 +       .stop  = seq_SkbFree_stop,
6894 +       .show  = seq_SkbFree_show
6895 +};
6896 +
6897 +static int skb_free_open(struct inode *inode, struct file *file)
6898 +{
6899 +       return seq_open(file, &seq_skb_free_ops);
6900 +}
6901 +
6902 +static const struct file_operations skb_free_fops = {
6903 +       .owner          = THIS_MODULE,
6904 +       .open           = skb_free_open,
6905 +       .read           = seq_read,
6906 +       .llseek         = seq_lseek,
6907 +       .release        = seq_release
6908 +};
6909 +
6910 +#if defined (CONFIG_RAETH_QDMA)
6911 +int QDMARead(struct seq_file *seq, void *v)
6912 +{
6913 +       unsigned int temp,i;
6914 +       unsigned int sw_fq, hw_fq;
6915 +       unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
6916 +       unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head, queue_tail;
6917 +       struct net_device *dev = dev_raether;
6918 +        END_DEVICE *ei_local = netdev_priv(dev);
6919 +
6920 +       seq_printf(seq, "==== General Information ====\n");
6921 +       temp = sysRegRead(QDMA_FQ_CNT);
6922 +       sw_fq = (temp&0xFFFF0000)>>16;
6923 +       hw_fq = (temp&0x0000FFFF);
6924 +       seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq, NUM_TX_DESC, hw_fq,NUM_QDMA_PAGE);
6925 +       seq_printf(seq, "SW TXD virtual start address: 0x%08x\n", ei_local->txd_pool);
6926 +       seq_printf(seq, "HW TXD virtual start address: 0x%08x\n\n", free_head);
6927 +
6928 +       seq_printf(seq, "==== Scheduler Information ====\n");
6929 +       temp = sysRegRead(QDMA_TX_SCH);
6930 +       max_en = (temp&0x00000800)>>11;
6931 +       max_rate = (temp&0x000007F0)>>4;
6932 +       for(i=0;i<(temp&0x0000000F);i++)
6933 +               max_rate *= 10;
6934 +       seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n", max_en, max_rate);
6935 +       max_en = (temp&0x08000000)>>27;
6936 +       max_rate = (temp&0x07F00000)>>20;
6937 +               for(i=0;i<((temp&0x000F0000)>>16);i++)
6938 +               max_rate *= 10;
6939 +       seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n", max_en, max_rate);
6940 +
6941 +       seq_printf(seq, "==== Physical Queue Information ====\n");
6942 +       for (queue = 0; queue < 16; queue++){
6943 +               temp = sysRegRead(QTX_CFG_0 + 0x10 * queue);
6944 +               tx_des_cnt = (temp & 0xffff0000) >> 16;
6945 +               hw_resv = (temp & 0xff00) >> 8;
6946 +               sw_resv = (temp & 0xff);
6947 +               temp = sysRegRead(QTX_CFG_0 +(0x10 * queue) + 0x4);
6948 +               sch = (temp >> 31) + 1 ;
6949 +               min_en = (temp & 0x8000000) >> 27;
6950 +               min_rate = (temp & 0x7f00000) >> 20;
6951 +               for (i = 0; i< (temp & 0xf0000) >> 16; i++)
6952 +                       min_rate *= 10;
6953 +               max_en = (temp & 0x800) >> 11;
6954 +               max_rate = (temp & 0x7f0) >> 4;
6955 +               for (i = 0; i< (temp & 0xf); i++)
6956 +                       max_rate *= 10;
6957 +               weight = (temp & 0xf000) >> 12;
6958 +               queue_head = sysRegRead(QTX_HEAD_0 + 0x10 * queue);
6959 +               queue_tail = sysRegRead(QTX_TAIL_0 + 0x10 * queue);
6960 +
6961 +               seq_printf(seq, "Queue#%d Information:\n", queue);
6962 +               seq_printf(seq, "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n", tx_des_cnt, queue_head, queue_tail);
6963 +               seq_printf(seq, "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n", hw_resv, sw_resv, sch, weight);
6964 +               seq_printf(seq, "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n", min_en, min_rate, max_en, max_rate);
6965 +       }
6966 +#if defined (CONFIG_ARCH_MT7623) && defined(CONFIG_HW_SFQ)
6967 +       seq_printf(seq, "==== Virtual Queue Information ====\n");
6968 +       seq_printf(seq, "VQTX_TB_BASE_0:0x%08x;VQTX_TB_BASE_1:0x%08x;VQTX_TB_BASE_2:0x%08x;VQTX_TB_BASE_3:0x%08x\n", \
6969 +                       sfq0, sfq1, sfq2, sfq3);
6970 +       temp = sysRegRead(VQTX_NUM);
6971 +       seq_printf(seq, "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n", \
6972 +                       temp&0xF, (temp&0xF0)>>4, (temp&0xF00)>>8, (temp&0xF000)>>12);
6973 +
6974 +#endif
6975 +
6976 +       seq_printf(seq, "==== Flow Control Information ====\n");
6977 +       temp = sysRegRead(QDMA_FC_THRES);
6978 +       seq_printf(seq, "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n", \
6979 +                       (temp&0x1000000)>>24, (temp&0x2000000)>>25, (temp&0x30000000)>>28);
6980 +       seq_printf(seq, "WH_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n", \
6981 +                       (temp&0x10000)>>16, (temp&0x2000)>>17, (temp&0x300000)>>20);
6982 +#if defined (CONFIG_ARCH_MT7623)
6983 +       seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n", \
6984 +                       (temp&0xC0000000)>>30, (temp&0x08000000)>>27);
6985 +       seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n", \
6986 +                       (temp&0xC00000)>>22, (temp&0x080000)>>19);
6987 +#endif
6988 +
6989 +       seq_printf(seq, "\n==== FSM Information\n");
6990 +       temp = sysRegRead(QDMA_DMA);
6991 +#if defined (CONFIG_ARCH_MT7623)
6992 +       seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp&0x0F000000)>>24);
6993 +#endif
6994 +       seq_printf(seq, "FQ_FSM:0x%01x\n", (temp&0x000F0000)>>16);
6995 +       seq_printf(seq, "TX_FSM:0x%01x\n", (temp&0x00000F00)>>12);
6996 +       seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp&0x0000000f));
6997 +
6998 +       seq_printf(seq, "==== M2Q Information ====\n");
6999 +       for (i = 0; i < 64; i+=8){
7000 +               seq_printf(seq, " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
7001 +                               i, M2Q_table[i], i+1, M2Q_table[i+1], i+2, M2Q_table[i+2], i+3, M2Q_table[i+3],
7002 +                               i+4, M2Q_table[i+4], i+5, M2Q_table[i+5], i+6, M2Q_table[i+6], i+7, M2Q_table[i+7]);
7003 +       }
7004 +
7005 +       return 0;
7006 +
7007 +}
7008 +
7009 +static int qdma_open(struct inode *inode, struct file *file)
7010 +{
7011 +       return single_open(file, QDMARead, NULL);
7012 +}
7013 +
7014 +static const struct file_operations qdma_fops = {
7015 +       .owner          = THIS_MODULE,
7016 +       .open           = qdma_open,
7017 +       .read           = seq_read,
7018 +       .llseek         = seq_lseek,
7019 +       .release        = single_release
7020 +};
7021 +#endif
7022 +
7023 +int TxRingRead(struct seq_file *seq, void *v)
7024 +{
7025 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
7026 +       struct PDMA_txdesc *tx_ring;
7027 +       int i = 0;
7028 +
7029 +       tx_ring = kmalloc(sizeof(struct PDMA_txdesc) * NUM_TX_DESC, GFP_KERNEL);
7030 +        if(tx_ring==NULL){
7031 +               seq_printf(seq, " allocate temp tx_ring fail.\n");
7032 +               return 0;
7033 +       }
7034 +
7035 +       for (i=0; i < NUM_TX_DESC; i++) {
7036 +               tx_ring[i] = ei_local->tx_ring0[i];
7037 +        }
7038 +       
7039 +       for (i=0; i < NUM_TX_DESC; i++) {
7040 +#ifdef CONFIG_32B_DESC
7041 +               seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i,  *(int *)&tx_ring[i].txd_info1, 
7042 +                               *(int *)&tx_ring[i].txd_info2, *(int *)&tx_ring[i].txd_info3, 
7043 +                               *(int *)&tx_ring[i].txd_info4, *(int *)&tx_ring[i].txd_info5, 
7044 +                               *(int *)&tx_ring[i].txd_info6, *(int *)&tx_ring[i].txd_info7,
7045 +                               *(int *)&tx_ring[i].txd_info8);
7046 +#else
7047 +               seq_printf(seq, "%d: %08x %08x %08x %08x\n",i,  *(int *)&tx_ring[i].txd_info1, *(int *)&tx_ring[i].txd_info2, 
7048 +                               *(int *)&tx_ring[i].txd_info3, *(int *)&tx_ring[i].txd_info4);
7049 +#endif
7050 +       }
7051 +
7052 +       kfree(tx_ring);
7053 +       return 0;
7054 +}
7055 +
7056 +static int tx_ring_open(struct inode *inode, struct file *file)
7057 +{
7058 +#if !defined (CONFIG_RAETH_QDMA)
7059 +       return single_open(file, TxRingRead, NULL);
7060 +#else
7061 +       return single_open(file, QDMARead, NULL);
7062 +#endif
7063 +}
7064 +
7065 +static const struct file_operations tx_ring_fops = {
7066 +       .owner          = THIS_MODULE,
7067 +       .open           = tx_ring_open,
7068 +       .read           = seq_read,
7069 +       .llseek         = seq_lseek,
7070 +       .release        = single_release
7071 +};
7072 +
7073 +int RxRingRead(struct seq_file *seq, void *v)
7074 +{
7075 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
7076 +       struct PDMA_rxdesc *rx_ring;
7077 +       int i = 0;
7078 +
7079 +       rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_RX_DESC, GFP_KERNEL);
7080 +       if(rx_ring==NULL){
7081 +               seq_printf(seq, " allocate temp rx_ring fail.\n");
7082 +               return 0;
7083 +       }
7084 +
7085 +       for (i=0; i < NUM_RX_DESC; i++) {
7086 +               memcpy(&rx_ring[i], &ei_local->rx_ring0[i], sizeof(struct PDMA_rxdesc));
7087 +       }
7088 +       
7089 +       for (i=0; i < NUM_RX_DESC; i++) {
7090 +#ifdef CONFIG_32B_DESC
7091 +               seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i,  *(int *)&rx_ring[i].rxd_info1,
7092 +                               *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
7093 +                               *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
7094 +                               *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
7095 +                               *(int *)&rx_ring[i].rxd_info8);
7096 +#else
7097 +               seq_printf(seq, "%d: %08x %08x %08x %08x\n",i,  *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2, 
7098 +                               *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
7099 +#endif
7100 +        }
7101 +
7102 +       kfree(rx_ring);
7103 +       return 0;
7104 +}
7105 +
7106 +static int rx_ring_open(struct inode *inode, struct file *file)
7107 +{
7108 +       return single_open(file, RxRingRead, NULL);
7109 +}
7110 +
7111 +static const struct file_operations rx_ring_fops = {
7112 +       .owner          = THIS_MODULE,
7113 +       .open           = rx_ring_open,
7114 +       .read           = seq_read,
7115 +       .llseek         = seq_lseek,
7116 +       .release        = single_release
7117 +};
7118 +
7119 +#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
7120 +int RxLRORingRead(struct seq_file *seq, void *v, struct PDMA_rxdesc *rx_ring_p)
7121 +{
7122 +       struct PDMA_rxdesc *rx_ring;
7123 +       int i = 0;
7124 +
7125 +       rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC, GFP_KERNEL);
7126 +       if(rx_ring==NULL){
7127 +               seq_printf(seq, " allocate temp rx_ring fail.\n");
7128 +               return 0;
7129 +       }
7130 +
7131 +       for (i=0; i < NUM_LRO_RX_DESC; i++) {
7132 +               memcpy(&rx_ring[i], &rx_ring_p[i], sizeof(struct PDMA_rxdesc));
7133 +       }
7134 +       
7135 +       for (i=0; i < NUM_LRO_RX_DESC; i++) {
7136 +#ifdef CONFIG_32B_DESC
7137 +               seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i,  *(int *)&rx_ring[i].rxd_info1,
7138 +                               *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
7139 +                               *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
7140 +                               *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
7141 +                               *(int *)&rx_ring[i].rxd_info8);
7142 +#else
7143 +               seq_printf(seq, "%d: %08x %08x %08x %08x\n",i,  *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2, 
7144 +                               *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
7145 +#endif
7146 +    }
7147 +
7148 +       kfree(rx_ring);
7149 +       return 0;
7150 +}
7151 +
7152 +int RxRing1Read(struct seq_file *seq, void *v)
7153 +{
7154 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
7155 +    RxLRORingRead(seq, v, ei_local->rx_ring1);
7156 +
7157 +    return 0;
7158 +}
7159 +
7160 +int RxRing2Read(struct seq_file *seq, void *v)
7161 +{
7162 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
7163 +    RxLRORingRead(seq, v, ei_local->rx_ring2);
7164 +
7165 +    return 0;
7166 +}
7167 +
7168 +int RxRing3Read(struct seq_file *seq, void *v)
7169 +{
7170 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
7171 +    RxLRORingRead(seq, v, ei_local->rx_ring3);
7172 +
7173 +    return 0;
7174 +}
7175 +
7176 +static int rx_ring1_open(struct inode *inode, struct file *file)
7177 +{
7178 +       return single_open(file, RxRing1Read, NULL);
7179 +}
7180 +
7181 +static int rx_ring2_open(struct inode *inode, struct file *file)
7182 +{
7183 +       return single_open(file, RxRing2Read, NULL);
7184 +}
7185 +
7186 +static int rx_ring3_open(struct inode *inode, struct file *file)
7187 +{
7188 +       return single_open(file, RxRing3Read, NULL);
7189 +}
7190 +
7191 +static const struct file_operations rx_ring1_fops = {
7192 +       .owner          = THIS_MODULE,
7193 +       .open           = rx_ring1_open,
7194 +       .read           = seq_read,
7195 +       .llseek         = seq_lseek,
7196 +       .release        = single_release
7197 +};
7198 +
7199 +static const struct file_operations rx_ring2_fops = {
7200 +       .owner          = THIS_MODULE,
7201 +       .open           = rx_ring2_open,
7202 +       .read           = seq_read,
7203 +       .llseek         = seq_lseek,
7204 +       .release        = single_release
7205 +};
7206 +
7207 +static const struct file_operations rx_ring3_fops = {
7208 +       .owner          = THIS_MODULE,
7209 +       .open           = rx_ring3_open,
7210 +       .read           = seq_read,
7211 +       .llseek         = seq_lseek,
7212 +       .release        = single_release
7213 +};
7214 +#endif  /* CONFIG_RAETH_HW_LRO */
7215 +
7216 +#if defined(CONFIG_RAETH_TSO)
7217 +
7218 +int NumOfTxdUpdate(int num_of_txd)
7219 +{
7220 +
7221 +       txd_cnt[num_of_txd]++;
7222 +
7223 +       return 0;       
7224 +}
7225 +
7226 +static void *seq_TsoTxdNum_start(struct seq_file *seq, loff_t *pos)
7227 +{
7228 +       seq_printf(seq, "TXD | Count\n");
7229 +       if (*pos < (MAX_SKB_FRAGS/2 + 1))
7230 +               return pos;
7231 +       return NULL;
7232 +}
7233 +
7234 +static void *seq_TsoTxdNum_next(struct seq_file *seq, void *v, loff_t *pos)
7235 +{
7236 +       (*pos)++;
7237 +       if (*pos >= (MAX_SKB_FRAGS/2 + 1))
7238 +               return NULL;
7239 +       return pos;
7240 +}
7241 +
7242 +static void seq_TsoTxdNum_stop(struct seq_file *seq, void *v)
7243 +{
7244 +       /* Nothing to do */
7245 +}
7246 +
7247 +static int seq_TsoTxdNum_show(struct seq_file *seq, void *v)
7248 +{
7249 +       int i = *(loff_t *) v;
7250 +       seq_printf(seq, "%d: %d\n",i , txd_cnt[i]);
7251 +
7252 +       return 0;
7253 +}
7254 +
7255 +ssize_t NumOfTxdWrite(struct file *file, const char __user *buffer, 
7256 +                     size_t count, loff_t *data)
7257 +{
7258 +       memset(txd_cnt, 0, sizeof(txd_cnt));
7259 +        printk("clear txd cnt table\n");
7260 +
7261 +       return count;
7262 +}
7263 +
7264 +int TsoLenUpdate(int tso_len)
7265 +{
7266 +
7267 +       if(tso_len > 70000) {
7268 +               tso_cnt[14]++;
7269 +       }else if(tso_len >  65000) {
7270 +               tso_cnt[13]++;
7271 +       }else if(tso_len >  60000) {
7272 +               tso_cnt[12]++;
7273 +       }else if(tso_len >  55000) {
7274 +               tso_cnt[11]++;
7275 +       }else if(tso_len >  50000) {
7276 +               tso_cnt[10]++;
7277 +       }else if(tso_len >  45000) {
7278 +               tso_cnt[9]++;
7279 +       }else if(tso_len > 40000) {
7280 +               tso_cnt[8]++;
7281 +       }else if(tso_len > 35000) {
7282 +               tso_cnt[7]++;
7283 +       }else if(tso_len > 30000) {
7284 +               tso_cnt[6]++;
7285 +       }else if(tso_len > 25000) {
7286 +               tso_cnt[5]++;
7287 +       }else if(tso_len > 20000) {
7288 +               tso_cnt[4]++;
7289 +       }else if(tso_len > 15000) {
7290 +               tso_cnt[3]++;
7291 +       }else if(tso_len > 10000) {
7292 +               tso_cnt[2]++;
7293 +       }else if(tso_len > 5000) {
7294 +               tso_cnt[1]++;
7295 +       }else {
7296 +               tso_cnt[0]++;
7297 +       }
7298 +
7299 +       return 0;       
7300 +}
7301 +
7302 +ssize_t TsoLenWrite(struct file *file, const char __user *buffer,
7303 +                   size_t count, loff_t *data)
7304 +{
7305 +       memset(tso_cnt, 0, sizeof(tso_cnt));
7306 +        printk("clear tso cnt table\n");
7307 +
7308 +       return count;
7309 +}
7310 +
7311 +static void *seq_TsoLen_start(struct seq_file *seq, loff_t *pos)
7312 +{
7313 +       seq_printf(seq, " Length  | Count\n");
7314 +       if (*pos < 15)
7315 +               return pos;
7316 +       return NULL;
7317 +}
7318 +
7319 +static void *seq_TsoLen_next(struct seq_file *seq, void *v, loff_t *pos)
7320 +{
7321 +       (*pos)++;
7322 +       if (*pos >= 15)
7323 +               return NULL;
7324 +       return pos;
7325 +}
7326 +
7327 +static void seq_TsoLen_stop(struct seq_file *seq, void *v)
7328 +{
7329 +       /* Nothing to do */
7330 +}
7331 +
7332 +static int seq_TsoLen_show(struct seq_file *seq, void *v)
7333 +{
7334 +       int i = *(loff_t *) v;
7335 +
7336 +       seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, tso_cnt[i]);
7337 +
7338 +       return 0;
7339 +}
7340 +
7341 +static const struct seq_operations seq_tso_txd_num_ops = {
7342 +       .start = seq_TsoTxdNum_start,
7343 +       .next  = seq_TsoTxdNum_next,
7344 +       .stop  = seq_TsoTxdNum_stop,
7345 +       .show  = seq_TsoTxdNum_show
7346 +};
7347 +
7348 +static int tso_txd_num_open(struct inode *inode, struct file *file)
7349 +{
7350 +       return seq_open(file, &seq_tso_txd_num_ops);
7351 +}
7352 +
7353 +static struct file_operations tso_txd_num_fops = {
7354 +       .owner          = THIS_MODULE,
7355 +       .open           = tso_txd_num_open,
7356 +       .read           = seq_read,
7357 +       .llseek         = seq_lseek,
7358 +       .write          = NumOfTxdWrite,
7359 +       .release        = seq_release
7360 +};
7361 +
7362 +static const struct seq_operations seq_tso_len_ops = {
7363 +       .start = seq_TsoLen_start,
7364 +       .next  = seq_TsoLen_next,
7365 +       .stop  = seq_TsoLen_stop,
7366 +       .show  = seq_TsoLen_show
7367 +};
7368 +
7369 +static int tso_len_open(struct inode *inode, struct file *file)
7370 +{
7371 +       return seq_open(file, &seq_tso_len_ops);
7372 +}
7373 +
7374 +static struct file_operations tso_len_fops = {
7375 +       .owner          = THIS_MODULE,
7376 +       .open           = tso_len_open,
7377 +       .read           = seq_read,
7378 +       .llseek         = seq_lseek,
7379 +       .write          = TsoLenWrite,
7380 +       .release        = seq_release
7381 +};
7382 +#endif
7383 +
7384 +#if defined(CONFIG_RAETH_LRO)
7385 +static int LroLenUpdate(struct net_lro_desc *lro_desc)
7386 +{
7387 +       int len_idx;
7388 +
7389 +       if(lro_desc->ip_tot_len > 65000) {
7390 +               len_idx = 13;
7391 +       }else if(lro_desc->ip_tot_len > 60000) {
7392 +               len_idx = 12;
7393 +       }else if(lro_desc->ip_tot_len > 55000) {
7394 +               len_idx = 11;
7395 +       }else if(lro_desc->ip_tot_len > 50000) {
7396 +               len_idx = 10;
7397 +       }else if(lro_desc->ip_tot_len > 45000) {
7398 +               len_idx = 9;
7399 +       }else if(lro_desc->ip_tot_len > 40000) {
7400 +               len_idx = 8;
7401 +       }else if(lro_desc->ip_tot_len > 35000) {
7402 +               len_idx = 7;
7403 +       }else if(lro_desc->ip_tot_len > 30000) {
7404 +               len_idx = 6;
7405 +       }else if(lro_desc->ip_tot_len > 25000) {
7406 +               len_idx = 5;
7407 +       }else if(lro_desc->ip_tot_len > 20000) {
7408 +               len_idx = 4;
7409 +       }else if(lro_desc->ip_tot_len > 15000) {
7410 +               len_idx = 3;
7411 +       }else if(lro_desc->ip_tot_len > 10000) {
7412 +               len_idx = 2;
7413 +       }else if(lro_desc->ip_tot_len > 5000) {
7414 +               len_idx = 1;
7415 +       }else {
7416 +               len_idx = 0;
7417 +       }
7418 +
7419 +       return len_idx;
7420 +}
7421 +int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed)
7422 +{
7423 +       struct net_lro_desc *tmp;
7424 +       int len_idx;
7425 +       int i, j; 
7426 +       
7427 +       if (all_flushed) {
7428 +               for (i=0; i< MAX_DESC; i++) {
7429 +                       tmp = & lro_mgr->lro_arr[i];
7430 +                       if (tmp->pkt_aggr_cnt !=0) {
7431 +                               for(j=0; j<=MAX_AGGR; j++) {
7432 +                                       if(tmp->pkt_aggr_cnt == j) {
7433 +                                               lro_flush_cnt[j]++;
7434 +                                       }
7435 +                               }
7436 +                               len_idx = LroLenUpdate(tmp);
7437 +                               lro_len_cnt1[len_idx]++;
7438 +                               tot_called1++;
7439 +                       }
7440 +                       aggregated[i] = 0;
7441 +               }
7442 +       } else {
7443 +               if (lro_flushed != lro_mgr->stats.flushed) {
7444 +                       if (lro_aggregated != lro_mgr->stats.aggregated) {
7445 +                               for (i=0; i<MAX_DESC; i++) {
7446 +                                       tmp = &lro_mgr->lro_arr[i];
7447 +                                       if ((aggregated[i]!= tmp->pkt_aggr_cnt) 
7448 +                                                       && (tmp->pkt_aggr_cnt == 0)) {
7449 +                                               aggregated[i] ++;
7450 +                                               for (j=0; j<=MAX_AGGR; j++) {
7451 +                                                       if (aggregated[i] == j) {
7452 +                                                               lro_stats_cnt[j] ++;
7453 +                                                       }
7454 +                                               }
7455 +                                               aggregated[i] = 0;
7456 +                                               //len_idx = LroLenUpdate(tmp);
7457 +                                               //lro_len_cnt2[len_idx]++;
7458 +                                               tot_called2++;
7459 +                                       }
7460 +                               }
7461 +                       } else {
7462 +                               for (i=0; i<MAX_DESC; i++) {
7463 +                                       tmp = &lro_mgr->lro_arr[i];
7464 +                                       if ((aggregated[i] != 0) && (tmp->pkt_aggr_cnt==0)) {
7465 +                                               for (j=0; j<=MAX_AGGR; j++) {
7466 +                                                       if (aggregated[i] == j) {
7467 +                                                               lro_stats_cnt[j] ++;
7468 +                                                       }
7469 +                                               }
7470 +                                               aggregated[i] = 0;
7471 +                                               //len_idx = LroLenUpdate(tmp);
7472 +                                               //lro_len_cnt2[len_idx]++;
7473 +                                               force_flush ++;
7474 +                                               tot_called2++;
7475 +                                       }
7476 +                               }
7477 +                       }
7478 +               } else {
7479 +                       if (lro_aggregated != lro_mgr->stats.aggregated) {
7480 +                               for (i=0; i<MAX_DESC; i++) {
7481 +                                       tmp = &lro_mgr->lro_arr[i];
7482 +                                       if (tmp->active) {
7483 +                                               if (aggregated[i] != tmp->pkt_aggr_cnt)
7484 +                                                       aggregated[i] = tmp->pkt_aggr_cnt;
7485 +                                       } else
7486 +                                               aggregated[i] = 0;
7487 +                               }
7488 +                       } 
7489 +               }
7490 +
7491 +       }
7492 +
7493 +       lro_aggregated = lro_mgr->stats.aggregated;
7494 +       lro_flushed = lro_mgr->stats.flushed;
7495 +       lro_nodesc = lro_mgr->stats.no_desc;
7496 +
7497 +       return 0;
7498 +               
7499 +}
7500 +
7501 +
7502 +ssize_t LroStatsWrite(struct file *file, const char __user *buffer, 
7503 +                     size_t count, loff_t *data)
7504 +{
7505 +       memset(lro_stats_cnt, 0, sizeof(lro_stats_cnt));
7506 +       memset(lro_flush_cnt, 0, sizeof(lro_flush_cnt));
7507 +       memset(lro_len_cnt1, 0, sizeof(lro_len_cnt1));
7508 +       //memset(lro_len_cnt2, 0, sizeof(lro_len_cnt2));
7509 +       memset(aggregated, 0, sizeof(aggregated));
7510 +       lro_aggregated = 0;
7511 +       lro_flushed = 0;
7512 +       lro_nodesc = 0;
7513 +       force_flush = 0;
7514 +       tot_called1 = 0;
7515 +       tot_called2 = 0;
7516 +        printk("clear lro  cnt table\n");
7517 +
7518 +       return count;
7519 +}
7520 +
7521 +int LroStatsRead(struct seq_file *seq, void *v)
7522 +{
7523 +       int i;
7524 +       int tot_cnt=0;
7525 +       int tot_aggr=0;
7526 +       int ave_aggr=0;
7527 +       
7528 +       seq_printf(seq, "LRO statistic dump:\n");
7529 +       seq_printf(seq, "Cnt:   Kernel | Driver\n");
7530 +       for(i=0; i<=MAX_AGGR; i++) {
7531 +               tot_cnt = tot_cnt + lro_stats_cnt[i] + lro_flush_cnt[i];
7532 +               seq_printf(seq, " %d :      %d        %d\n", i, lro_stats_cnt[i], lro_flush_cnt[i]);
7533 +               tot_aggr = tot_aggr + i * (lro_stats_cnt[i] + lro_flush_cnt[i]);
7534 +       }
7535 +       ave_aggr = lro_flushed ? lro_aggregated/lro_flushed : 0;
7536 +       seq_printf(seq, "Total aggregated pkt: %d\n", lro_aggregated);
7537 +       seq_printf(seq, "Flushed pkt: %d  %d\n", lro_flushed, force_flush);
7538 +       seq_printf(seq, "Average flush cnt:  %d\n", ave_aggr);
7539 +       seq_printf(seq, "No descriptor pkt: %d\n\n\n", lro_nodesc);
7540 +
7541 +       seq_printf(seq, "Driver flush pkt len:\n");
7542 +       seq_printf(seq, " Length  | Count\n");
7543 +       for(i=0; i<15; i++) {
7544 +               seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, lro_len_cnt1[i]);
7545 +       }
7546 +       seq_printf(seq, "Kernel flush: %d;  Driver flush: %d\n", tot_called2, tot_called1);
7547 +       return 0;
7548 +}
7549 +
7550 +static int lro_stats_open(struct inode *inode, struct file *file)
7551 +{
7552 +       return single_open(file, LroStatsRead, NULL);
7553 +}
7554 +
7555 +static struct file_operations lro_stats_fops = {
7556 +       .owner          = THIS_MODULE,
7557 +       .open           = lro_stats_open,
7558 +       .read           = seq_read,
7559 +       .llseek         = seq_lseek,
7560 +       .write          = LroStatsWrite,
7561 +       .release        = single_release
7562 +};
7563 +#endif
7564 +
7565 +int getnext(const char *src, int separator, char *dest)
7566 +{
7567 +    char *c;
7568 +    int len;
7569 +
7570 +    if ( (src == NULL) || (dest == NULL) ) {
7571 +        return -1;
7572 +    }
7573 +
7574 +    c = strchr(src, separator);
7575 +    if (c == NULL) {
7576 +        strcpy(dest, src);
7577 +        return -1;
7578 +    }
7579 +    len = c - src;
7580 +    strncpy(dest, src, len);
7581 +    dest[len] = '\0';
7582 +    return len + 1;
7583 +}
7584 +
7585 +int str_to_ip(unsigned int *ip, const char *str)
7586 +{
7587 +    int len;
7588 +    const char *ptr = str;
7589 +    char buf[128];
7590 +    unsigned char c[4];
7591 +    int i;
7592 +
7593 +    for (i = 0; i < 3; ++i) {
7594 +        if ((len = getnext(ptr, '.', buf)) == -1) {
7595 +            return 1; /* parse error */
7596 +        }
7597 +        c[i] = simple_strtoul(buf, NULL, 10);
7598 +        ptr += len;
7599 +    }
7600 +    c[3] = simple_strtoul(ptr, NULL, 0);
7601 +    *ip = (c[0]<<24) + (c[1]<<16) + (c[2]<<8) + c[3];
7602 +    return 0;
7603 +}
7604 +
7605 +#if defined(CONFIG_RAETH_HW_LRO)
7606 +static int HwLroLenUpdate(unsigned int agg_size)
7607 +{
7608 +       int len_idx;
7609 +
7610 +       if(agg_size > 65000) {
7611 +               len_idx = 13;
7612 +       }else if(agg_size > 60000) {
7613 +               len_idx = 12;
7614 +       }else if(agg_size > 55000) {
7615 +               len_idx = 11;
7616 +       }else if(agg_size > 50000) {
7617 +               len_idx = 10;
7618 +       }else if(agg_size > 45000) {
7619 +               len_idx = 9;
7620 +       }else if(agg_size > 40000) {
7621 +               len_idx = 8;
7622 +       }else if(agg_size > 35000) {
7623 +               len_idx = 7;
7624 +       }else if(agg_size > 30000) {
7625 +               len_idx = 6;
7626 +       }else if(agg_size > 25000) {
7627 +               len_idx = 5;
7628 +       }else if(agg_size > 20000) {
7629 +               len_idx = 4;
7630 +       }else if(agg_size > 15000) {
7631 +               len_idx = 3;
7632 +       }else if(agg_size > 10000) {
7633 +               len_idx = 2;
7634 +       }else if(agg_size > 5000) {
7635 +               len_idx = 1;
7636 +       }else {
7637 +               len_idx = 0;
7638 +       }
7639 +
7640 +       return len_idx;
7641 +}
7642 +
7643 +int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size)
7644 +{
7645 +    if( (ring_num > 0) && (ring_num < 4) )
7646 +    {
7647 +        hw_lro_agg_size_cnt[ring_num-1][HwLroLenUpdate(agg_size)]++;
7648 +        hw_lro_agg_num_cnt[ring_num-1][agg_cnt]++;
7649 +        hw_lro_tot_flush_cnt[ring_num-1]++;
7650 +        hw_lro_tot_agg_cnt[ring_num-1] += agg_cnt;
7651 +    }
7652 +
7653 +    return 0;
7654 +}
7655 +
7656 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7657 +int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason)
7658 +{
7659 +    if( (ring_num > 0) && (ring_num < 4) )
7660 +    {
7661 +#if 1
7662 +        if ( (flush_reason & 0x7) == HW_LRO_AGG_FLUSH )
7663 +            hw_lro_agg_flush_cnt[ring_num-1]++;
7664 +        else if ( (flush_reason & 0x7) == HW_LRO_AGE_FLUSH )
7665 +            hw_lro_age_flush_cnt[ring_num-1]++;
7666 +        else if ( (flush_reason & 0x7) == HW_LRO_NOT_IN_SEQ_FLUSH )
7667 +            hw_lro_seq_flush_cnt[ring_num-1]++;
7668 +        else if ( (flush_reason & 0x7) == HW_LRO_TIMESTAMP_FLUSH )
7669 +            hw_lro_timestamp_flush_cnt[ring_num-1]++;
7670 +        else if ( (flush_reason & 0x7) == HW_LRO_NON_RULE_FLUSH )
7671 +            hw_lro_norule_flush_cnt[ring_num-1]++;
7672 +#else
7673 +        if ( flush_reason & BIT(4) )
7674 +            hw_lro_agg_flush_cnt[ring_num-1]++;
7675 +        else if ( flush_reason & BIT(3) )
7676 +            hw_lro_age_flush_cnt[ring_num-1]++;
7677 +        else if ( flush_reason & BIT(2) )
7678 +            hw_lro_seq_flush_cnt[ring_num-1]++;
7679 +        else if ( flush_reason & BIT(1) )
7680 +            hw_lro_timestamp_flush_cnt[ring_num-1]++;
7681 +        else if ( flush_reason & BIT(0) )
7682 +            hw_lro_norule_flush_cnt[ring_num-1]++;
7683 +#endif
7684 +    }
7685 +
7686 +    return 0;
7687 +}
7688 +#endif  /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7689 +
7690 +ssize_t HwLroStatsWrite(struct file *file, const char __user *buffer, 
7691 +                     size_t count, loff_t *data)
7692 +{
7693 +    memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
7694 +    memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
7695 +    memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
7696 +    memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
7697 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7698 +    memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
7699 +    memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
7700 +    memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
7701 +    memset(hw_lro_timestamp_flush_cnt, 0, sizeof(hw_lro_timestamp_flush_cnt));
7702 +    memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
7703 +#endif  /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7704 +
7705 +    printk("clear hw lro cnt table\n");
7706 +
7707 +       return count;
7708 +}
7709 +
7710 +int HwLroStatsRead(struct seq_file *seq, void *v)
7711 +{
7712 +       int i;
7713 +       
7714 +       seq_printf(seq, "HW LRO statistic dump:\n");
7715 +
7716 +    /* Agg number count */
7717 +       seq_printf(seq, "Cnt:   RING1 | RING2 | RING3 | Total\n");
7718 +       for(i=0; i<=MAX_HW_LRO_AGGR; i++) {
7719 +               seq_printf(seq, " %d :      %d        %d        %d        %d\n", 
7720 +            i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
7721 +            hw_lro_agg_num_cnt[0][i]+hw_lro_agg_num_cnt[1][i]+hw_lro_agg_num_cnt[2][i]);
7722 +       }
7723 +
7724 +    /* Total agg count */
7725 +    seq_printf(seq, "Total agg:   RING1 | RING2 | RING3 | Total\n");
7726 +    seq_printf(seq, "                %d      %d      %d      %d\n", 
7727 +        hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1], hw_lro_tot_agg_cnt[2],
7728 +        hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2]);
7729 +
7730 +    /* Total flush count */
7731 +    seq_printf(seq, "Total flush:   RING1 | RING2 | RING3 | Total\n");
7732 +    seq_printf(seq, "                %d      %d      %d      %d\n", 
7733 +        hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1], hw_lro_tot_flush_cnt[2],
7734 +        hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]);
7735 +
7736 +    /* Avg agg count */
7737 +    seq_printf(seq, "Avg agg:   RING1 | RING2 | RING3 | Total\n");
7738 +    seq_printf(seq, "                %d      %d      %d      %d\n", 
7739 +        (hw_lro_tot_flush_cnt[0]) ? hw_lro_tot_agg_cnt[0]/hw_lro_tot_flush_cnt[0] : 0,
7740 +        (hw_lro_tot_flush_cnt[1]) ? hw_lro_tot_agg_cnt[1]/hw_lro_tot_flush_cnt[1] : 0,
7741 +        (hw_lro_tot_flush_cnt[2]) ? hw_lro_tot_agg_cnt[2]/hw_lro_tot_flush_cnt[2] : 0,
7742 +        (hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]) ? \
7743 +        ((hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2])/(hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2])) : 0
7744 +    );
7745 +
7746 +    /*  Statistics of aggregation size counts */
7747 +       seq_printf(seq, "HW LRO flush pkt len:\n");
7748 +       seq_printf(seq, " Length  | RING1  | RING2  | RING3  | Total\n");
7749 +       for(i=0; i<15; i++) {
7750 +               seq_printf(seq, "%d~%d: %d      %d      %d      %d\n", i*5000, (i+1)*5000, 
7751 +            hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
7752 +            hw_lro_agg_size_cnt[0][i]+hw_lro_agg_size_cnt[1][i]+hw_lro_agg_size_cnt[2][i]);
7753 +       }
7754 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
7755 +    seq_printf(seq, "Flush reason:   RING1 | RING2 | RING3 | Total\n");
7756 +    seq_printf(seq, "AGG timeout:      %d      %d      %d      %d\n", 
7757 +        hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1], hw_lro_agg_flush_cnt[2],
7758 +        (hw_lro_agg_flush_cnt[0]+hw_lro_agg_flush_cnt[1]+hw_lro_agg_flush_cnt[2])
7759 +    );
7760 +    seq_printf(seq, "AGE timeout:      %d      %d      %d      %d\n", 
7761 +        hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1], hw_lro_age_flush_cnt[2],
7762 +        (hw_lro_age_flush_cnt[0]+hw_lro_age_flush_cnt[1]+hw_lro_age_flush_cnt[2])
7763 +    );
7764 +    seq_printf(seq, "Not in-sequence:  %d      %d      %d      %d\n", 
7765 +        hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1], hw_lro_seq_flush_cnt[2],
7766 +        (hw_lro_seq_flush_cnt[0]+hw_lro_seq_flush_cnt[1]+hw_lro_seq_flush_cnt[2])
7767 +    );
7768 +    seq_printf(seq, "Timestamp:        %d      %d      %d      %d\n", 
7769 +        hw_lro_timestamp_flush_cnt[0], hw_lro_timestamp_flush_cnt[1], hw_lro_timestamp_flush_cnt[2],
7770 +        (hw_lro_timestamp_flush_cnt[0]+hw_lro_timestamp_flush_cnt[1]+hw_lro_timestamp_flush_cnt[2])
7771 +    );
7772 +    seq_printf(seq, "No LRO rule:      %d      %d      %d      %d\n", 
7773 +        hw_lro_norule_flush_cnt[0], hw_lro_norule_flush_cnt[1], hw_lro_norule_flush_cnt[2],
7774 +        (hw_lro_norule_flush_cnt[0]+hw_lro_norule_flush_cnt[1]+hw_lro_norule_flush_cnt[2])
7775 +    );
7776 +#endif  /* CONFIG_RAETH_HW_LRO_REASON_DBG */
7777 +    
7778 +       return 0;
7779 +}
7780 +
7781 +static int hw_lro_stats_open(struct inode *inode, struct file *file)
7782 +{
7783 +       return single_open(file, HwLroStatsRead, NULL);
7784 +}
7785 +
7786 +static struct file_operations hw_lro_stats_fops = {
7787 +       .owner          = THIS_MODULE,
7788 +       .open           = hw_lro_stats_open,
7789 +       .read           = seq_read,
7790 +       .llseek         = seq_lseek,
7791 +       .write          = HwLroStatsWrite,
7792 +       .release        = single_release
7793 +};
7794 +
7795 +int hwlro_agg_cnt_ctrl(int par1, int par2)
7796 +{
7797 +    SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, par2);
7798 +    SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, par2);
7799 +    SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, par2);
7800 +    return 0;
7801 +}
7802 +
7803 +int hwlro_agg_time_ctrl(int par1, int par2)
7804 +{
7805 +    SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, par2);
7806 +    SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, par2);
7807 +    SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, par2);
7808 +    return 0;
7809 +}
7810 +
7811 +int hwlro_age_time_ctrl(int par1, int par2)
7812 +{
7813 +    SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, par2);
7814 +    SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, par2);
7815 +    SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, par2);
7816 +    return 0;
7817 +}
7818 +
7819 +int hwlro_pkt_int_alpha_ctrl(int par1, int par2)
7820 +{
7821 +    END_DEVICE *ei_local = netdev_priv(dev_raether);
7822 +
7823 +    ei_local->hw_lro_alpha = par2;
7824 +    printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_alpha = %d\n", ei_local->hw_lro_alpha);
7825 +
7826 +    return 0;
7827 +}
7828 +
7829 +int hwlro_threshold_ctrl(int par1, int par2)
7830 +{
7831 +    /* bandwidth threshold setting */
7832 +    SET_PDMA_LRO_BW_THRESHOLD(par2);
7833 +    return 0;
7834 +}
7835 +
7836 +int hwlro_fix_setting_switch_ctrl(int par1, int par2)
7837 +{
7838 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
7839 +    END_DEVICE *ei_local = netdev_priv(dev_raether);
7840 +
7841 +    ei_local->hw_lro_fix_setting = par2;
7842 +    printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_fix_setting = %d\n", ei_local->hw_lro_fix_setting);
7843 +#endif  /* CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG */
7844 +
7845 +    return 0;
7846 +}
7847 +
7848 +ssize_t HwLroAutoTlbWrite(struct file *file, const char __user *buffer, 
7849 +                     size_t count, loff_t *data)
7850 +{
7851 +    char buf[32];
7852 +    char *pBuf;
7853 +    int len = count;
7854 +    int x = 0,y = 0;
7855 +    char *pToken = NULL;
7856 +    char *pDelimiter = " \t";
7857 +
7858 +    printk("[HwLroAutoTlbWrite]write parameter len = %d\n\r", (int)len);
7859 +    if(len >= sizeof(buf)){
7860 +        printk("input handling fail!\n");
7861 +        len = sizeof(buf) - 1;
7862 +        return -1;
7863 +    }
7864 +    
7865 +    if(copy_from_user(buf, buffer, len)){
7866 +        return -EFAULT;
7867 +    }
7868 +    buf[len] = '\0';
7869 +    printk("[HwLroAutoTlbWrite]write parameter data = %s\n\r", buf);
7870 +
7871 +    pBuf = buf;
7872 +    pToken = strsep(&pBuf, pDelimiter);
7873 +    x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
7874 +
7875 +    pToken = strsep(&pBuf, "\t\n ");
7876 +    if(pToken != NULL){
7877 +        y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
7878 +        printk("y = 0x%08x \n\r", y);
7879 +    }
7880 +
7881 +    if ( (sizeof(hw_lro_dbg_func)/sizeof(hw_lro_dbg_func[0]) > x) && NULL != hw_lro_dbg_func[x])
7882 +    {
7883 +        (*hw_lro_dbg_func[x])(x, y);
7884 +    }
7885 +
7886 +       return count;
7887 +}
7888 +
7889 +void HwLroAutoTlbDump(struct seq_file *seq, unsigned int index)
7890 +{
7891 +    int i;
7892 +    struct PDMA_LRO_AUTO_TLB_INFO   pdma_lro_auto_tlb;
7893 +    unsigned int tlb_info[9];
7894 +    unsigned int dw_len, cnt, priority;
7895 +    unsigned int entry;
7896 +
7897 +    if( index > 4 )
7898 +        index = index - 1;
7899 +    entry = (index * 9) + 1;
7900 +
7901 +    /* read valid entries of the auto-learn table */
7902 +    sysRegWrite( PDMA_FE_ALT_CF8, entry );
7903 +
7904 +    //seq_printf(seq, "\nEntry = %d\n", entry);
7905 +    for(i=0; i<9; i++){
7906 +        tlb_info[i] = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
7907 +        //seq_printf(seq, "tlb_info[%d] = 0x%x\n", i, tlb_info[i]);
7908 +    }
7909 +    memcpy(&pdma_lro_auto_tlb, tlb_info, sizeof(struct PDMA_LRO_AUTO_TLB_INFO));
7910 +
7911 +    dw_len = pdma_lro_auto_tlb.auto_tlb_info7.DW_LEN;
7912 +    cnt = pdma_lro_auto_tlb.auto_tlb_info6.CNT;
7913 +
7914 +    if ( sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_ALT_SCORE_MODE )  /* packet count */
7915 +        priority = cnt;
7916 +    else    /* byte count */
7917 +        priority = dw_len;
7918 +
7919 +    /* dump valid entries of the auto-learn table */
7920 +    if( index >= 4 )
7921 +        seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
7922 +    else
7923 +        seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
7924 +    if( pdma_lro_auto_tlb.auto_tlb_info8.IPV4 ){
7925 +        seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv4)\n", 
7926 +            pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
7927 +            pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
7928 +            pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
7929 +            pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
7930 +    }
7931 +    else{        
7932 +        seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n", 
7933 +            pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
7934 +            pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
7935 +            pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
7936 +            pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
7937 +    }
7938 +    seq_printf(seq, "DIP_ID = %d\n", pdma_lro_auto_tlb.auto_tlb_info8.DIP_ID);
7939 +    seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n", 
7940 +        pdma_lro_auto_tlb.auto_tlb_info0.STP, 
7941 +        pdma_lro_auto_tlb.auto_tlb_info0.DTP);
7942 +    seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d \n", 
7943 +        pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0,
7944 +        (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 12),
7945 +        (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 24),
7946 +        pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1);
7947 +    seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
7948 +    seq_printf(seq, "PRIORITY = %d\n", priority);
7949 +}
7950 +
7951 +int HwLroAutoTlbRead(struct seq_file *seq, void *v)
7952 +{
7953 +       int i;
7954 +    unsigned int regVal;
7955 +    unsigned int regOp1, regOp2, regOp3, regOp4;
7956 +    unsigned int agg_cnt, agg_time, age_time;
7957 +
7958 +    /* Read valid entries of the auto-learn table */
7959 +    sysRegWrite(PDMA_FE_ALT_CF8, 0);
7960 +    regVal = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
7961 +
7962 +    seq_printf(seq, "HW LRO Auto-learn Table: (PDMA_LRO_ALT_CFC_RSEQ_DBG=0x%x)\n", regVal);
7963 +
7964 +    for(i = 7; i >= 0; i--)
7965 +    {
7966 +        if( regVal & (1 << i) )
7967 +            HwLroAutoTlbDump(seq, i);
7968 +    }
7969 +    
7970 +    /* Read the agg_time/age_time/agg_cnt of LRO rings */
7971 +    seq_printf(seq, "\nHW LRO Ring Settings\n");
7972 +    for(i = 1; i <= 3; i++) 
7973 +    {
7974 +        regOp1 = sysRegRead( LRO_RX_RING0_CTRL_DW1 + (i * 0x40) );
7975 +        regOp2 = sysRegRead( LRO_RX_RING0_CTRL_DW2 + (i * 0x40) );
7976 +        regOp3 = sysRegRead( LRO_RX_RING0_CTRL_DW3 + (i * 0x40) );
7977 +        regOp4 = sysRegRead( ADMA_LRO_CTRL_DW2 );
7978 +        agg_cnt = ((regOp3 & 0x03) << PDMA_LRO_AGG_CNT_H_OFFSET) | ((regOp2 >> PDMA_LRO_RING_AGG_CNT1_OFFSET) & 0x3f);
7979 +        agg_time = (regOp2 >> PDMA_LRO_RING_AGG_OFFSET) & 0xffff;
7980 +        age_time = ((regOp2 & 0x03f) << PDMA_LRO_AGE_H_OFFSET) | ((regOp1 >> PDMA_LRO_RING_AGE1_OFFSET) & 0x3ff);
7981 +        seq_printf(seq, "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n", 
7982 +            i, agg_cnt, agg_time, age_time, regOp4);
7983 +    }
7984 +
7985 +       return 0;
7986 +}
7987 +
7988 +static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
7989 +{
7990 +       return single_open(file, HwLroAutoTlbRead, NULL);
7991 +}
7992 +
7993 +static struct file_operations hw_lro_auto_tlb_fops = {
7994 +       .owner          = THIS_MODULE,
7995 +       .open           = hw_lro_auto_tlb_open,
7996 +       .read           = seq_read,
7997 +       .llseek         = seq_lseek,
7998 +       .write          = HwLroAutoTlbWrite,
7999 +       .release        = single_release
8000 +};
8001 +#endif  /* CONFIG_RAETH_HW_LRO */
8002 +
8003 +#if defined (CONFIG_MIPS)
8004 +int CP0RegRead(struct seq_file *seq, void *v)
8005 +{
8006 +       seq_printf(seq, "CP0 Register dump --\n");
8007 +       seq_printf(seq, "CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
8008 +       seq_printf(seq, "CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
8009 +       seq_printf(seq, "CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
8010 +       seq_printf(seq, "CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
8011 +       seq_printf(seq, "CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
8012 +       seq_printf(seq, "CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
8013 +       seq_printf(seq, "CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
8014 +       seq_printf(seq, "CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
8015 +       seq_printf(seq, "CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
8016 +       seq_printf(seq, "CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
8017 +       seq_printf(seq, "CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
8018 +       seq_printf(seq, "CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
8019 +       seq_printf(seq, "CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
8020 +       seq_printf(seq, "CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
8021 +       seq_printf(seq, "CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
8022 +       seq_printf(seq, "CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
8023 +       seq_printf(seq, "CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
8024 +       seq_printf(seq, "CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
8025 +       seq_printf(seq, "CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
8026 +       seq_printf(seq, "CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
8027 +       seq_printf(seq, "CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
8028 +       seq_printf(seq, "CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
8029 +       seq_printf(seq, "CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
8030 +       seq_printf(seq, "CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
8031 +       seq_printf(seq, "CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
8032 +       seq_printf(seq, "CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
8033 +       seq_printf(seq, "CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
8034 +       seq_printf(seq, "CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
8035 +       seq_printf(seq, "CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
8036 +       seq_printf(seq, "CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
8037 +       seq_printf(seq, "CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
8038 +       seq_printf(seq, "CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
8039 +       seq_printf(seq, "CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
8040 +
8041 +       return 0;
8042 +}
8043 +
8044 +static int cp0_reg_open(struct inode *inode, struct file *file)
8045 +{
8046 +       return single_open(file, CP0RegRead, NULL);
8047 +}
8048 +
8049 +static const struct file_operations cp0_reg_fops = {
8050 +       .owner          = THIS_MODULE,
8051 +       .open           = cp0_reg_open,
8052 +       .read           = seq_read,
8053 +       .llseek         = seq_lseek,
8054 +       .release        = single_release
8055 +};
8056 +#endif
8057 +
8058 +#if defined(CONFIG_RAETH_QOS)
8059 +static struct proc_dir_entry *procRaQOS, *procRaFeIntr, *procRaEswIntr;
8060 +extern uint32_t num_of_rxdone_intr;
8061 +extern uint32_t num_of_esw_intr;
8062 +
8063 +int RaQOSRegRead(struct seq_file *seq, void *v)
8064 +{
8065 +       dump_qos(seq);
8066 +       return 0;
8067 +}
8068 +
8069 +static int raeth_qos_open(struct inode *inode, struct file *file)
8070 +{
8071 +       return single_open(file, RaQOSRegRead, NULL);
8072 +}
8073 +
8074 +static const struct file_operations raeth_qos_fops = {
8075 +       .owner          = THIS_MODULE,
8076 +       .open           = raeth_qos_open,
8077 +       .read           = seq_read,
8078 +       .llseek         = seq_lseek,
8079 +       .release        = single_release
8080 +};
8081 +#endif
8082 +
8083 +static struct proc_dir_entry *procEswCnt;
8084 +
8085 +int EswCntRead(struct seq_file *seq, void *v)
8086 +{
8087 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
8088 +       unsigned int pkt_cnt = 0;
8089 +       int i = 0;
8090 +#endif
8091 +       seq_printf(seq, "\n               <<CPU>>                        \n");
8092 +       seq_printf(seq, "                   |                            \n");
8093 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
8094 +       seq_printf(seq, "+-----------------------------------------------+\n");
8095 +       seq_printf(seq, "|                <<PDMA>>                      |\n");
8096 +       seq_printf(seq, "+-----------------------------------------------+\n");
8097 +#else
8098 +       seq_printf(seq, "+-----------------------------------------------+\n");
8099 +       seq_printf(seq, "|                <<PSE>>                       |\n");
8100 +       seq_printf(seq, "+-----------------------------------------------+\n");
8101 +       seq_printf(seq, "                  |                             \n");
8102 +       seq_printf(seq, "+-----------------------------------------------+\n");
8103 +       seq_printf(seq, "|                <<GDMA>>                      |\n");
8104 +#if defined (CONFIG_RALINK_MT7620)
8105 +       seq_printf(seq, "| GDMA1_TX_GPCNT  : %010u (Tx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1304));     
8106 +       seq_printf(seq, "| GDMA1_RX_GPCNT  : %010u (Rx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1324));     
8107 +       seq_printf(seq, "|                                              |\n");
8108 +       seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (skip)               |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1308));     
8109 +       seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x130c));     
8110 +       seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1328));     
8111 +       seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x132c));     
8112 +       seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1330));     
8113 +       seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1334));     
8114 +       seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1338)); 
8115 +       seq_printf(seq, "| GDMA1_RX_FCCNT  : %010u (flow control)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x133c));     
8116 +
8117 +       seq_printf(seq, "|                                              |\n");
8118 +       seq_printf(seq, "| GDMA2_TX_GPCNT  : %010u (Tx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1344));     
8119 +       seq_printf(seq, "| GDMA2_RX_GPCNT  : %010u (Rx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1364));     
8120 +       seq_printf(seq, "|                                              |\n");
8121 +       seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip)               |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1348));     
8122 +       seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x134c));     
8123 +       seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1368));     
8124 +       seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x136c));     
8125 +       seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1370));     
8126 +       seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1374));     
8127 +       seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1378)); 
8128 +       seq_printf(seq, "| GDMA2_RX_FCCNT  : %010u (flow control)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x137c));     
8129 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8130 +       seq_printf(seq, "| GDMA1_RX_GBCNT  : %010u (Rx Good Bytes)      |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2400));     
8131 +       seq_printf(seq, "| GDMA1_RX_GPCNT  : %010u (Rx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2408));     
8132 +       seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error)     |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2410));     
8133 +       seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2414));     
8134 +       seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2418));     
8135 +       seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x241C));     
8136 +       seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error)     |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2420));     
8137 +       seq_printf(seq, "| GDMA1_RX_FCCNT  : %010u (flow control)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2424));     
8138 +       seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count)        |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2428));     
8139 +       seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count)    |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x242C));     
8140 +       seq_printf(seq, "| GDMA1_TX_GBCNT  : %010u (Tx Good Bytes)      |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2430));     
8141 +       seq_printf(seq, "| GDMA1_TX_GPCNT  : %010u (Tx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2438));     
8142 +       seq_printf(seq, "|                                              |\n");
8143 +       seq_printf(seq, "| GDMA2_RX_GBCNT  : %010u (Rx Good Bytes)      |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2440));     
8144 +       seq_printf(seq, "| GDMA2_RX_GPCNT  : %010u (Rx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2448));     
8145 +       seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error)     |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2450));     
8146 +       seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2454));     
8147 +       seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2458));     
8148 +       seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x245C));     
8149 +       seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error)     |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2460));     
8150 +       seq_printf(seq, "| GDMA2_RX_FCCNT  : %010u (flow control)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2464));     
8151 +       seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip)               |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2468));     
8152 +       seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x246C));     
8153 +       seq_printf(seq, "| GDMA2_TX_GBCNT  : %010u (Tx Good Bytes)      |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2470));     
8154 +       seq_printf(seq, "| GDMA2_TX_GPCNT  : %010u (Tx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2478));     
8155 +#else
8156 +       seq_printf(seq, "| GDMA_TX_GPCNT1  : %010u (Tx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x704));      
8157 +       seq_printf(seq, "| GDMA_RX_GPCNT1  : %010u (Rx Good Pkts)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x724));      
8158 +       seq_printf(seq, "|                                              |\n");
8159 +       seq_printf(seq, "| GDMA_TX_SKIPCNT1: %010u (skip)               |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x708));      
8160 +       seq_printf(seq, "| GDMA_TX_COLCNT1 : %010u (collision)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x70c));      
8161 +       seq_printf(seq, "| GDMA_RX_OERCNT1 : %010u (overflow)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x728));      
8162 +       seq_printf(seq, "| GDMA_RX_FERCNT1 : %010u (FCS error)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x72c));      
8163 +       seq_printf(seq, "| GDMA_RX_SERCNT1 : %010u (too short)  |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x730));      
8164 +       seq_printf(seq, "| GDMA_RX_LERCNT1 : %010u (too long)   |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x734));      
8165 +       seq_printf(seq, "| GDMA_RX_CERCNT1 : %010u (l3/l4 checksum)     |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x738));      
8166 +       seq_printf(seq, "| GDMA_RX_FCCNT1  : %010u (flow control)       |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x73c));      
8167 +
8168 +#endif
8169 +       seq_printf(seq, "+-----------------------------------------------+\n");
8170 +#endif
8171 +
8172 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8173 +    defined (CONFIG_RALINK_MT7620)
8174 +
8175 +       seq_printf(seq, "                      ^                          \n");
8176 +       seq_printf(seq, "                      | Port6 Rx:%010u Good Pkt   \n", ((p6_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4620)&0xFFFF)));
8177 +       seq_printf(seq, "                      | Port6 Rx:%010u Bad Pkt    \n", sysRegRead(RALINK_ETH_SW_BASE+0x4620)>>16);
8178 +       seq_printf(seq, "                      | Port6 Tx:%010u Good Pkt   \n", ((p6_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4610)&0xFFFF)));
8179 +       seq_printf(seq, "                      | Port6 Tx:%010u Bad Pkt    \n", sysRegRead(RALINK_ETH_SW_BASE+0x4610)>>16);
8180 +#if defined (CONFIG_RALINK_MT7620)
8181 +
8182 +       seq_printf(seq, "                      | Port7 Rx:%010u Good Pkt   \n", ((p7_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4720)&0xFFFF)));
8183 +       seq_printf(seq, "                      | Port7 Rx:%010u Bad Pkt    \n", sysRegRead(RALINK_ETH_SW_BASE+0x4720)>>16);
8184 +       seq_printf(seq, "                      | Port7 Tx:%010u Good Pkt   \n", ((p7_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4710)&0xFFFF)));
8185 +       seq_printf(seq, "                      | Port7 Tx:%010u Bad Pkt    \n", sysRegRead(RALINK_ETH_SW_BASE+0x4710)>>16);
8186 +#endif
8187 +       seq_printf(seq, "+---------------------v-------------------------+\n");
8188 +       seq_printf(seq, "|                    P6                        |\n");
8189 +       seq_printf(seq, "|        <<10/100/1000 Embedded Switch>>        |\n");
8190 +       seq_printf(seq, "|     P0    P1    P2     P3     P4     P5       |\n");
8191 +       seq_printf(seq, "+-----------------------------------------------+\n");
8192 +       seq_printf(seq, "       |     |     |     |       |      |        \n");
8193 +#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623) 
8194 +       /* no built-in switch */
8195 +#else
8196 +       seq_printf(seq, "                      ^                          \n");
8197 +       seq_printf(seq, "                      | Port6 Rx:%08u Good Pkt   \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)&0xFFFF);
8198 +       seq_printf(seq, "                      | Port6 Tx:%08u Good Pkt   \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)>>16);
8199 +       seq_printf(seq, "+---------------------v-------------------------+\n");
8200 +       seq_printf(seq, "|                    P6                        |\n");
8201 +       seq_printf(seq, "|           <<10/100 Embedded Switch>>         |\n");
8202 +       seq_printf(seq, "|     P0    P1    P2     P3     P4     P5       |\n");
8203 +       seq_printf(seq, "+-----------------------------------------------+\n");
8204 +       seq_printf(seq, "       |     |     |     |       |      |        \n");
8205 +#endif
8206 +
8207 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8208 +    defined (CONFIG_RALINK_MT7620)
8209 +
8210 +       seq_printf(seq, "Port0 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p0_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4020)&0xFFFF)), ((p0_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4010)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4020)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4010)>>16);
8211 +
8212 +       seq_printf(seq, "Port1 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p1_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4120)&0xFFFF)), ((p1_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4110)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4120)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4110)>>16);
8213 +
8214 +       seq_printf(seq, "Port2 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p2_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4220)&0xFFFF)), ((p2_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4210)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4220)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4210)>>16);
8215 +
8216 +       seq_printf(seq, "Port3 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p3_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4320)&0xFFFF)), ((p3_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4310)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4320)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4310)>>16);
8217 +
8218 +       seq_printf(seq, "Port4 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p4_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4420)&0xFFFF)), ((p4_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4410)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4420)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4410)>>16);
8219 +
8220 +       seq_printf(seq, "Port5 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p5_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4520)&0xFFFF)), ((p5_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4510)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4520)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4510)>>16);
8221 +
8222 +       seq_printf(seq, "Port0 KBytes RX=%010u Tx=%010u \n", ((p0_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4028) >> 10)), ((p0_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4018) >> 10)));
8223 +
8224 +       seq_printf(seq, "Port1 KBytes RX=%010u Tx=%010u \n", ((p1_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4128) >> 10)), ((p1_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4118) >> 10)));
8225 +
8226 +       seq_printf(seq, "Port2 KBytes RX=%010u Tx=%010u \n", ((p2_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4228) >> 10)), ((p2_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4218) >> 10)));
8227 +
8228 +       seq_printf(seq, "Port3 KBytes RX=%010u Tx=%010u \n", ((p3_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4328) >> 10)), ((p3_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4318) >> 10)));
8229 +
8230 +       seq_printf(seq, "Port4 KBytes RX=%010u Tx=%010u \n", ((p4_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4428) >> 10)), ((p4_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4418) >> 10)));
8231 +
8232 +       seq_printf(seq, "Port5 KBytes RX=%010u Tx=%010u \n", ((p5_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4528) >> 10)), ((p5_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4518) >> 10)));
8233 +
8234 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
8235 +#define DUMP_EACH_PORT(base)                                   \
8236 +       for(i=0; i < 7;i++) {                                   \
8237 +               mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
8238 +               seq_printf(seq, "%8u ", pkt_cnt);                       \
8239 +       }                                                       \
8240 +       seq_printf(seq, "\n");
8241 +       seq_printf(seq, "========================================[MT7530] READ CLEAR========================\n");
8242 +
8243 +               seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
8244 +               seq_printf(seq, "Tx Drop Packet      :"); DUMP_EACH_PORT(0x4000);
8245 +               //seq_printf(seq, "Tx CRC Error        :"); DUMP_EACH_PORT(0x4004);
8246 +               seq_printf(seq, "Tx Unicast Packet   :"); DUMP_EACH_PORT(0x4008);
8247 +               seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
8248 +               seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
8249 +               //seq_printf(seq, "Tx Collision Event  :"); DUMP_EACH_PORT(0x4014);
8250 +               seq_printf(seq, "Tx Pause Packet     :"); DUMP_EACH_PORT(0x402C);
8251 +               seq_printf(seq, "Rx Drop Packet      :"); DUMP_EACH_PORT(0x4060);
8252 +               seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
8253 +               seq_printf(seq, "Rx Unicast Packet   :"); DUMP_EACH_PORT(0x4068);
8254 +               seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
8255 +               seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
8256 +               seq_printf(seq, "Rx Alignment Error  :"); DUMP_EACH_PORT(0x4074);
8257 +               seq_printf(seq, "Rx CRC Error       :"); DUMP_EACH_PORT(0x4078);
8258 +               seq_printf(seq, "Rx Undersize Error  :"); DUMP_EACH_PORT(0x407C);
8259 +               //seq_printf(seq, "Rx Fragment Error   :"); DUMP_EACH_PORT(0x4080);
8260 +               //seq_printf(seq, "Rx Oversize Error   :"); DUMP_EACH_PORT(0x4084);
8261 +               //seq_printf(seq, "Rx Jabber Error     :"); DUMP_EACH_PORT(0x4088);
8262 +               seq_printf(seq, "Rx Pause Packet     :"); DUMP_EACH_PORT(0x408C);
8263 +               mii_mgr_write(31, 0x4fe0, 0xf0);
8264 +               mii_mgr_write(31, 0x4fe0, 0x800000f0);
8265 +#endif
8266 +
8267 +
8268 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
8269 +       seq_printf(seq, "Port0 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x150)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x150)>>16);
8270 +
8271 +       seq_printf(seq, "Port1 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x154)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x154)>>16);
8272 +
8273 +       seq_printf(seq, "Port2 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x158)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x158)>>16);
8274 +
8275 +       seq_printf(seq, "Port3 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x15C)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x15c)>>16);
8276 +
8277 +       seq_printf(seq, "Port4 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x160)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x160)>>16);
8278 +
8279 +       seq_printf(seq, "Port5 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x164)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x164)>>16);
8280 +#elif defined (CONFIG_RALINK_RT3883)
8281 +       /* no built-in switch */
8282 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
8283 +
8284 +#define DUMP_EACH_PORT(base)                                   \
8285 +       for(i=0; i < 7;i++) {                                   \
8286 +               mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
8287 +               seq_printf(seq, "%8u ", pkt_cnt);                       \
8288 +       }                                                       \
8289 +       seq_printf(seq, "\n");
8290 +
8291 +#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
8292 +       if(sysRegRead(0xbe00000c) & (1<<16))//MCM
8293 +#endif
8294 +       {
8295 +               seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
8296 +               seq_printf(seq, "Tx Drop Packet      :"); DUMP_EACH_PORT(0x4000);
8297 +               seq_printf(seq, "Tx CRC Error        :"); DUMP_EACH_PORT(0x4004);
8298 +               seq_printf(seq, "Tx Unicast Packet   :"); DUMP_EACH_PORT(0x4008);
8299 +               seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
8300 +               seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
8301 +               seq_printf(seq, "Tx Collision Event  :"); DUMP_EACH_PORT(0x4014);
8302 +               seq_printf(seq, "Tx Pause Packet     :"); DUMP_EACH_PORT(0x402C);
8303 +               seq_printf(seq, "Rx Drop Packet      :"); DUMP_EACH_PORT(0x4060);
8304 +               seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
8305 +               seq_printf(seq, "Rx Unicast Packet   :"); DUMP_EACH_PORT(0x4068);
8306 +               seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
8307 +               seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
8308 +               seq_printf(seq, "Rx Alignment Error  :"); DUMP_EACH_PORT(0x4074);
8309 +               seq_printf(seq, "Rx CRC Error       :"); DUMP_EACH_PORT(0x4078);
8310 +               seq_printf(seq, "Rx Undersize Error  :"); DUMP_EACH_PORT(0x407C);
8311 +               seq_printf(seq, "Rx Fragment Error   :"); DUMP_EACH_PORT(0x4080);
8312 +               seq_printf(seq, "Rx Oversize Error   :"); DUMP_EACH_PORT(0x4084);
8313 +               seq_printf(seq, "Rx Jabber Error     :"); DUMP_EACH_PORT(0x4088);
8314 +               seq_printf(seq, "Rx Pause Packet     :"); DUMP_EACH_PORT(0x408C);
8315 +               mii_mgr_write(31, 0x4fe0, 0xf0);
8316 +               mii_mgr_write(31, 0x4fe0, 0x800000f0);
8317 +       } 
8318 +#if defined (CONFIG_RALINK_MT7621)     /* TODO: need to update to use MT7530 compiler flag */
8319 +       else {
8320 +               seq_printf(seq, "no built-in switch\n");
8321 +       }
8322 +#endif
8323 +
8324 +#else /* RT305x, RT3352 */
8325 +       seq_printf(seq, "Port0: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16);
8326 +       seq_printf(seq, "Port1: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16);
8327 +       seq_printf(seq, "Port2: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16);
8328 +       seq_printf(seq, "Port3: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16);
8329 +       seq_printf(seq, "Port4: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16);
8330 +       seq_printf(seq, "Port5: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16);
8331 +#endif
8332 +       seq_printf(seq, "\n");
8333 +
8334 +       return 0;
8335 +}
8336 +
8337 +static int switch_count_open(struct inode *inode, struct file *file)
8338 +{
8339 +       return single_open(file, EswCntRead, NULL);
8340 +}
8341 +
8342 +static const struct file_operations switch_count_fops = {
8343 +       .owner          = THIS_MODULE,
8344 +       .open           = switch_count_open,
8345 +       .read           = seq_read,
8346 +       .llseek         = seq_lseek,
8347 +       .release        = single_release
8348 +};
8349 +
8350 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
8351 +/*
8352 + * proc write procedure
8353 + */
8354 +static ssize_t change_phyid(struct file *file, const char __user *buffer, 
8355 +                           size_t count, loff_t *data)
8356 +{
8357 +       char buf[32];
8358 +       struct net_device *cur_dev_p;
8359 +       END_DEVICE *ei_local;
8360 +       char if_name[64];
8361 +       unsigned int phy_id;
8362 +
8363 +       if (count > sizeof(buf) - 1)
8364 +               count = sizeof(buf) - 1;        /* keep buf NUL-terminated */
8365 +       memset(buf, 0, 32);
8366 +       if (copy_from_user(buf, buffer, count))
8367 +               return -EFAULT;
8368 +
8369 +       /* determine interface name */
8370 +    strcpy(if_name, DEV_NAME); /* "eth2" by default */
8371 +    if(isalpha(buf[0]))
8372 +               sscanf(buf, "%s %d", if_name, &phy_id);
8373 +       else
8374 +               phy_id = simple_strtol(buf, 0, 10);
8375 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8376 +       cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
8377 +#else
8378 +       cur_dev_p = dev_get_by_name(DEV_NAME);
8379 +#endif
8380 +       if (cur_dev_p == NULL)
8381 +               return -EFAULT;
8382 +
8383 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8384 +       ei_local = netdev_priv(cur_dev_p);
8385 +#else
8386 +       ei_local = cur_dev_p->priv;
8387 +#endif 
8388 +       ei_local->mii_info.phy_id = (unsigned char)phy_id;
8389 +       return count;
8390 +}
8391 +
8392 +#if defined(CONFIG_PSEUDO_SUPPORT)
8393 +static ssize_t change_gmac2_phyid(struct file *file, const char __user *buffer, 
8394 +                                 size_t count, loff_t *data)
8395 +{
8396 +       char buf[32];
8397 +       struct net_device *cur_dev_p;
8398 +       PSEUDO_ADAPTER *pPseudoAd;
8399 +       char if_name[64];
8400 +       unsigned int phy_id;
8401 +
8402 +       if (count > sizeof(buf) - 1)
8403 +               count = sizeof(buf) - 1;        /* keep buf NUL-terminated */
8404 +       memset(buf, 0, 32);
8405 +       if (copy_from_user(buf, buffer, count))
8406 +               return -EFAULT;
8407 +       /* determine interface name */
8408 +       strcpy(if_name, DEV2_NAME);  /* "eth3" by default */
8409 +       if(isalpha(buf[0]))
8410 +               sscanf(buf, "%s %d", if_name, &phy_id);
8411 +       else
8412 +               phy_id = simple_strtol(buf, 0, 10);
8413 +
8414 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8415 +       cur_dev_p = dev_get_by_name(&init_net, DEV2_NAME);
8416 +#else
8417 +       cur_dev_p = dev_get_by_name(DEV2_NAME);
8418 +#endif
8419 +       if (cur_dev_p == NULL)
8420 +               return -EFAULT;
8421 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
8422 +        pPseudoAd = netdev_priv(cur_dev_p);    
8423 +#else
8424 +       pPseudoAd = cur_dev_p->priv;
8425 +#endif
8426 +       pPseudoAd->mii_info.phy_id = (unsigned char)phy_id;
8427 +       return count;
8428 +}      
8429 +
8430 +static struct file_operations gmac2_fops = {
8431 +       .owner          = THIS_MODULE,
8432 +       .write          = change_gmac2_phyid
8433 +};
8434 +#endif
8435 +#endif
8436 +
8437 +static int gmac_open(struct inode *inode, struct file *file)
8438 +{
8439 +       return single_open(file, RegReadMain, NULL);
8440 +}
8441 +
8442 +static struct file_operations gmac_fops = {
8443 +       .owner          = THIS_MODULE,
8444 +       .open           = gmac_open,
8445 +       .read           = seq_read,
8446 +       .llseek         = seq_lseek,
8447 +#if defined (CONFIG_ETHTOOL)
8448 +       .write          = change_phyid,
8449 +#endif
8450 +       .release        = single_release
8451 +};
8452 +
8453 +#if defined (TASKLET_WORKQUEUE_SW)
8454 +extern int init_schedule;
8455 +extern int working_schedule;
8456 +static int ScheduleRead(struct seq_file *seq, void *v)
8457 +{
8458 +       if (init_schedule == 1)
8459 +               seq_printf(seq, "Initialize Raeth with workqueque<%d>\n", init_schedule);
8460 +       else
8461 +               seq_printf(seq, "Initialize Raeth with tasklet<%d>\n", init_schedule);
8462 +       if (working_schedule == 1)
8463 +               seq_printf(seq, "Raeth is running at workqueque<%d>\n", working_schedule);
8464 +       else
8465 +               seq_printf(seq, "Raeth is running at tasklet<%d>\n", working_schedule);
8466 +
8467 +       return 0;
8468 +}
8469 +
8470 +static ssize_t ScheduleWrite(struct file *file, const char __user *buffer, 
8471 +                     size_t count, loff_t *data)
8472 +{
8473 +       char buf[2] = {0};
8474 +       int old;
8475 +
8476 +       if (!count || copy_from_user(buf, buffer, 1))   /* only the first character (0/1) is meaningful */
8477 +               return -EFAULT;
8478 +       old = init_schedule;
8479 +       init_schedule = simple_strtol(buf, 0, 10);
8480 +       printk("Changed Raeth init schedule from <%d> to <%d>; the running schedule is unchanged until re-init\n",
8481 +               old, init_schedule);
8482 +
8483 +       return count;
8484 +}
8485 +
8486 +static int schedule_switch_open(struct inode *inode, struct file *file)
8487 +{
8488 +       return single_open(file, ScheduleRead, NULL);
8489 +}
8490 +
8491 +static const struct file_operations schedule_sw_fops = {
8492 +       .owner          = THIS_MODULE,
8493 +       .open           = schedule_switch_open,
8494 +       .read           = seq_read,
8495 +       .write          = ScheduleWrite,
8496 +       .llseek         = seq_lseek,
8497 +       .release        = single_release
8498 +};
8499 +#endif
8500 +
8501 +#if defined(CONFIG_RAETH_PDMA_DVT)
8502 +static int PdmaDvtRead(struct seq_file *seq, void *v)
8503 +{
8504 +    seq_printf(seq, "g_pdma_dvt_show_config = 0x%x\n", pdma_dvt_get_show_config());
8505 +    seq_printf(seq, "g_pdma_dvt_rx_test_config = 0x%x\n", pdma_dvt_get_rx_test_config());
8506 +    seq_printf(seq, "g_pdma_dvt_tx_test_config = 0x%x\n", pdma_dvt_get_tx_test_config());
8507 +    
8508 +       return 0;
8509 +}
8510 +
8511 +static int PdmaDvtOpen(struct inode *inode, struct file *file)
8512 +{
8513 +       return single_open(file, PdmaDvtRead, NULL);
8514 +}
8515 +
8516 +static ssize_t PdmaDvtWrite(struct file *file, const char __user *buffer, 
8517 +                     size_t count, loff_t *data)
8518 +{
8519 +       char buf[32];
8520 +    char *pBuf;
8521 +    int len = count;
8522 +    int x = 0,y = 0;
8523 +    char *pToken = NULL;
8524 +    char *pDelimiter = " \t";
8525 +
8526 +    printk("write parameter len = %d\n\r", (int)len);
8527 +    if(len >= sizeof(buf)){
8528 +        printk("input handling fail!\n");
8529 +        len = sizeof(buf) - 1;
8530 +        return -1;
8531 +    }
8532 +    
8533 +    if(copy_from_user(buf, buffer, len)){
8534 +        return -EFAULT;
8535 +    }
8536 +    buf[len] = '\0';
8537 +    printk("write parameter data = %s\n\r", buf);
8538 +
8539 +    pBuf = buf;
8540 +    pToken = strsep(&pBuf, pDelimiter);
8541 +    x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
8542 +
8543 +    pToken = strsep(&pBuf, "\t\n ");
8544 +    if(pToken != NULL){
8545 +        y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
8546 +        printk("y = 0x%08x \n\r", y);
8547 +    }
8548 +
8549 +    if ( (sizeof(pdma_dvt_dbg_func)/sizeof(pdma_dvt_dbg_func[0]) > x) && NULL != pdma_dvt_dbg_func[x])
8550 +    {
8551 +        (*pdma_dvt_dbg_func[x])(x, y);
8552 +    }
8553 +    else
8554 +    {
8555 +        printk("no handler defined for command id(0x%08x)\n\r", x);
8556 +    }
8557 +    
8558 +    printk("x(0x%08x), y(0x%08x)\n", x, y);
8559 +
8560 +    return len;
8561 +}
8562 +
8563 +static const struct file_operations pdma_dev_sw_fops = {
8564 +       .owner          = THIS_MODULE,
8565 +       .open       = PdmaDvtOpen,
8566 +       .read           = seq_read,
8567 +       .write          = PdmaDvtWrite, .release = single_release
8568 +};
8569 +#endif  //#if defined(CONFIG_RAETH_PDMA_DVT)
8570 +
8571 +int debug_proc_init(void)
8572 +{
8573 +    if (procRegDir == NULL)
8574 +       procRegDir = proc_mkdir(PROCREG_DIR, NULL);
8575 +   
8576 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8577 +    if ((procGmac = create_proc_entry(PROCREG_GMAC, 0, procRegDir)))
8578 +           procGmac->proc_fops = &gmac_fops;
8579 +    else
8580 +#else
8581 +    if (!(procGmac = proc_create(PROCREG_GMAC, 0, procRegDir, &gmac_fops)))
8582 +#endif
8583 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC);
8584 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
8585 +#if defined(CONFIG_PSEUDO_SUPPORT)
8586 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8587 +    if ((procGmac2 = create_proc_entry(PROCREG_GMAC2, 0, procRegDir)))
8588 +           procGmac2->proc_fops = &gmac2_fops;
8589 +    else
8590 +#else
8591 +    if (!(procGmac2 = proc_create(PROCREG_GMAC2, 0, procRegDir, &gmac2_fops)))
8592 +#endif
8593 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC2);
8594 +#endif 
8595 +#endif
8596 +
8597 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8598 +    if ((procSkbFree = create_proc_entry(PROCREG_SKBFREE, 0, procRegDir)))
8599 +           procSkbFree->proc_fops = &skb_free_fops;
8600 +    else
8601 +#else
8602 +    if (!(procSkbFree = proc_create(PROCREG_SKBFREE, 0, procRegDir, &skb_free_fops)))
8603 +#endif
8604 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_SKBFREE);
8605 +
8606 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8607 +    if ((procTxRing = create_proc_entry(PROCREG_TXRING, 0, procRegDir)))
8608 +           procTxRing->proc_fops = &tx_ring_fops;
8609 +    else
8610 +#else
8611 +    if (!(procTxRing = proc_create(PROCREG_TXRING, 0, procRegDir, &tx_ring_fops)))
8612 +#endif
8613 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
8614 +    
8615 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8616 +    if ((procRxRing = create_proc_entry(PROCREG_RXRING, 0, procRegDir)))
8617 +           procRxRing->proc_fops = &rx_ring_fops;
8618 +    else
8619 +#else
8620 +    if (!(procRxRing = proc_create(PROCREG_RXRING, 0, procRegDir, &rx_ring_fops)))
8621 +#endif
8622 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
8623 +
8624 +#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
8625 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8626 +    if ((procRxRing1 = create_proc_entry(PROCREG_RXRING1, 0, procRegDir)))
8627 +           procRxRing1->proc_fops = &rx_ring1_fops;
8628 +    else
8629 +#else
8630 +    if (!(procRxRing1 = proc_create(PROCREG_RXRING1, 0, procRegDir, &rx_ring1_fops)))
8631 +#endif
8632 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
8633 +
8634 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8635 +    if ((procRxRing2 = create_proc_entry(PROCREG_RXRING2, 0, procRegDir)))
8636 +           procRxRing2->proc_fops = &rx_ring2_fops;
8637 +    else
8638 +#else
8639 +    if (!(procRxRing2 = proc_create(PROCREG_RXRING2, 0, procRegDir, &rx_ring2_fops)))
8640 +#endif
8641 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
8642 +
8643 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8644 +    if ((procRxRing3 = create_proc_entry(PROCREG_RXRING3, 0, procRegDir)))
8645 +           procRxRing3->proc_fops = &rx_ring3_fops;
8646 +    else
8647 +#else
8648 +    if (!(procRxRing3 = proc_create(PROCREG_RXRING3, 0, procRegDir, &rx_ring3_fops)))
8649 +#endif
8650 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
8651 +#endif  /* CONFIG_RAETH_HW_LRO */
8652 +
8653 +#if defined (CONFIG_MIPS)
8654 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8655 +    if ((procSysCP0 = create_proc_entry(PROCREG_CP0, 0, procRegDir)))
8656 +           procSysCP0->proc_fops = &cp0_reg_fops;
8657 +    else
8658 +#else
8659 +    if (!(procSysCP0 = proc_create(PROCREG_CP0, 0, procRegDir, &cp0_reg_fops)))
8660 +#endif
8661 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_CP0);
8662 +#endif
8663 +
8664 +#if defined(CONFIG_RAETH_TSO)
8665 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8666 +    if ((procNumOfTxd = create_proc_entry(PROCREG_NUM_OF_TXD, 0, procRegDir)))
8667 +           procNumOfTxd->proc_fops = &tso_txd_num_fops;
8668 +    else
8669 +#else
8670 +    if (!(procNumOfTxd = proc_create(PROCREG_NUM_OF_TXD, 0, procRegDir, &tso_txd_num_fops)))
8671 +#endif
8672 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_NUM_OF_TXD);
8673 +    
8674 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8675 +    if ((procTsoLen = create_proc_entry(PROCREG_TSO_LEN, 0, procRegDir)))
8676 +           procTsoLen->proc_fops = &tso_len_fops;
8677 +    else
8678 +#else
8679 +    if (!(procTsoLen = proc_create(PROCREG_TSO_LEN, 0, procRegDir, &tso_len_fops)))
8680 +#endif
8681 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_TSO_LEN);
8682 +#endif
8683 +
8684 +#if defined(CONFIG_RAETH_LRO)
8685 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8686 +    if ((procLroStats = create_proc_entry(PROCREG_LRO_STATS, 0, procRegDir)))
8687 +           procLroStats->proc_fops = &lro_stats_fops;
8688 +    else
8689 +#else
8690 +    if (!(procLroStats = proc_create(PROCREG_LRO_STATS, 0, procRegDir, &lro_stats_fops)))
8691 +#endif
8692 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_LRO_STATS);
8693 +#endif
8694 +
8695 +#if defined(CONFIG_RAETH_HW_LRO)
8696 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8697 +    if ((procHwLroStats = create_proc_entry(PROCREG_HW_LRO_STATS, 0, procRegDir)))
8698 +           procHwLroStats->proc_fops = &hw_lro_stats_fops;
8699 +    else
8700 +#else
8701 +    if (!(procHwLroStats = proc_create(PROCREG_HW_LRO_STATS, 0, procRegDir, &hw_lro_stats_fops)))
8702 +#endif
8703 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
8704 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8705 +    if ((procHwLroAutoTlb = create_proc_entry(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir)))
8706 +           procHwLroAutoTlb->proc_fops = &hw_lro_auto_tlb_fops;
8707 +    else
8708 +#else
8709 +    if (!(procHwLroAutoTlb = proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir, &hw_lro_auto_tlb_fops)))
8710 +#endif
8711 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_AUTO_TLB);
8712 +#endif  /* CONFIG_RAETH_HW_LRO */
8713 +
8714 +#if defined(CONFIG_RAETH_QOS)
8715 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8716 +    if ((procRaQOS = create_proc_entry(PROCREG_RAQOS, 0, procRegDir)))
8717 +           procRaQOS->proc_fops = &raeth_qos_fops;
8718 +    else
8719 +#else
8720 +    if (!(procRaQOS = proc_create(PROCREG_RAQOS, 0, procRegDir, &raeth_qos_fops)))
8721 +#endif
8722 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_RAQOS);
8723 +#endif
8724 +
8725 +#if defined(CONFIG_USER_SNMPD)
8726 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8727 +    if ((procRaSnmp = create_proc_entry(PROCREG_SNMP, S_IRUGO, procRegDir)))
8728 +           procRaSnmp->proc_fops = &ra_snmp_seq_fops;
8729 +    else
8730 +#else
8731 +    if (!(procRaSnmp = proc_create(PROCREG_SNMP, S_IRUGO, procRegDir, &ra_snmp_seq_fops)))
8732 +#endif
8733 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_SNMP);
8734 +#endif
8735 +
8736 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8737 +    if ((procEswCnt = create_proc_entry(PROCREG_ESW_CNT, 0, procRegDir)))
8738 +           procEswCnt->proc_fops = &switch_count_fops;
8739 +    else
8740 +#else
8741 +    if (!(procEswCnt = proc_create(PROCREG_ESW_CNT, 0, procRegDir, &switch_count_fops)))
8742 +#endif
8743 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
8744 +
8745 +#if defined (TASKLET_WORKQUEUE_SW)
8746 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8747 +    if ((procSCHE = create_proc_entry(PROCREG_SCHE, 0, procRegDir)))
8748 +           procSCHE->proc_fops = &schedule_sw_fops;
8749 +    else
8750 +#else
8751 +    if (!(procSCHE = proc_create(PROCREG_SCHE, 0, procRegDir, &schedule_sw_fops)))
8752 +#endif
8753 +           printk("!! FAIL to create %s PROC !!\n", PROCREG_SCHE);
8754 +#endif
8755 +
8756 +#if defined(CONFIG_RAETH_PDMA_DVT)
8757 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
8758 +    if ((procPdmaDvt = create_proc_entry(PROCREG_PDMA_DVT, 0, procRegDir)))
8759 +        procPdmaDvt->proc_fops = &pdma_dev_sw_fops;
8760 +    else
8761 +#else
8762 +    if (!(procPdmaDvt = proc_create(PROCREG_PDMA_DVT, 0, procRegDir, &pdma_dev_sw_fops )))
8763 +#endif
8764 +        printk("!! FAIL to create %s PROC !!\n", PROCREG_PDMA_DVT);
8765 +#endif  //#if defined(CONFIG_RAETH_PDMA_DVT)
8766 +
8767 +    printk(KERN_ALERT "PROC INIT OK!\n");
8768 +    return 0;
8769 +}
8770 +
8771 +void debug_proc_exit(void)
8772 +{
8773 +
8774 +    if (procSysCP0)
8775 +       remove_proc_entry(PROCREG_CP0, procRegDir);
8776 +
8777 +    if (procGmac)
8778 +       remove_proc_entry(PROCREG_GMAC, procRegDir);
8779 +#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
8780 +    if (procGmac2)
8781 +        remove_proc_entry(PROCREG_GMAC2, procRegDir);
8782 +#endif
8783 +    if (procSkbFree)
8784 +       remove_proc_entry(PROCREG_SKBFREE, procRegDir);
8785 +
8786 +    if (procTxRing)
8787 +       remove_proc_entry(PROCREG_TXRING, procRegDir);
8788 +    
8789 +    if (procRxRing)
8790 +       remove_proc_entry(PROCREG_RXRING, procRegDir);
8791 +   
8792 +#if defined(CONFIG_RAETH_TSO)
8793 +    if (procNumOfTxd)
8794 +       remove_proc_entry(PROCREG_NUM_OF_TXD, procRegDir);
8795 +    
8796 +    if (procTsoLen)
8797 +       remove_proc_entry(PROCREG_TSO_LEN, procRegDir);
8798 +#endif
8799 +
8800 +#if defined(CONFIG_RAETH_LRO)
8801 +    if (procLroStats)
8802 +       remove_proc_entry(PROCREG_LRO_STATS, procRegDir);
8803 +#endif
8804 +
8805 +#if defined(CONFIG_RAETH_QOS)
8806 +    if (procRaQOS)
8807 +       remove_proc_entry(PROCREG_RAQOS, procRegDir);
8808 +    if (procRaFeIntr)
8809 +       remove_proc_entry(PROCREG_RXDONE_INTR, procRegDir);
8810 +    if (procRaEswIntr)
8811 +       remove_proc_entry(PROCREG_ESW_INTR, procRegDir);
8812 +#endif
8813 +
8814 +#if defined(CONFIG_USER_SNMPD)
8815 +    if (procRaSnmp)
8816 +       remove_proc_entry(PROCREG_SNMP, procRegDir);
8817 +#endif
8818 +
8819 +    if (procEswCnt)
8820 +       remove_proc_entry(PROCREG_ESW_CNT, procRegDir);
8821 +    
8822 +    //if (procRegDir)
8823 +       //remove_proc_entry(PROCREG_DIR, 0);
8824 +       
8825 +    printk(KERN_ALERT "proc exit\n");
8826 +}
8827 +EXPORT_SYMBOL(procRegDir);
8828 --- /dev/null
8829 +++ b/drivers/net/ethernet/raeth/ra_mac.h
8830 @@ -0,0 +1,57 @@
8831 +#ifndef RA_MAC_H
8832 +#define RA_MAC_H
8833 +
8834 +void ra2880stop(END_DEVICE *ei_local);
8835 +void ra2880MacAddressSet(unsigned char p[6]);
8836 +void ra2880Mac2AddressSet(unsigned char p[6]);
8837 +void ethtool_init(struct net_device *dev);
8838 +
8839 +void ra2880EnableInterrupt(void);
8840 +
8841 +void dump_qos(struct seq_file *s);
8842 +void dump_reg(struct seq_file *s);
8843 +void dump_cp0(void);
8844 +
8845 +int debug_proc_init(void);
8846 +void debug_proc_exit(void);
8847 +
8848 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
8849 +           defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
8850 +void enable_auto_negotiate(int unused);
8851 +#else
8852 +void enable_auto_negotiate(int ge);
8853 +#endif
8854 +
8855 +void rt2880_gmac_hard_reset(void);
8856 +
8857 +int TsoLenUpdate(int tso_len);
8858 +int NumOfTxdUpdate(int num_of_txd);
8859 +
8860 +#ifdef CONFIG_RAETH_LRO
8861 +int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed);
8862 +#endif
8863 +#ifdef CONFIG_RAETH_HW_LRO
8864 +int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size);
8865 +#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
8866 +#define HW_LRO_AGG_FLUSH        (1)
8867 +#define HW_LRO_AGE_FLUSH        (2)
8868 +#define HW_LRO_NOT_IN_SEQ_FLUSH (3)
8869 +#define HW_LRO_TIMESTAMP_FLUSH  (4)
8870 +#define HW_LRO_NON_RULE_FLUSH   (5)
8871 +int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason);
8872 +#endif  /* CONFIG_RAETH_HW_LRO_REASON_DBG */
8873 +typedef int (*HWLRO_DBG_FUNC)(int par1, int par2);
8874 +int hwlro_agg_cnt_ctrl(int par1, int par2);
8875 +int hwlro_agg_time_ctrl(int par1, int par2);
8876 +int hwlro_age_time_ctrl(int par1, int par2);
8877 +int hwlro_pkt_int_alpha_ctrl(int par1, int par2);
8878 +int hwlro_threshold_ctrl(int par1, int par2);
8879 +int hwlro_fix_setting_switch_ctrl(int par1, int par2);
8880 +#endif  /* CONFIG_RAETH_HW_LRO */
8881 +int getnext(const char *src, int separator, char *dest);
8882 +int str_to_ip(unsigned int *ip, const char *str);
8883 +
8884 +#if defined(CONFIG_RAETH_PDMA_DVT)
8885 +typedef int (*PDMA_DBG_FUNC)(int par1, int par2);
8886 +#endif  //#if defined(CONFIG_RAETH_PDMA_DVT)
8887 +#endif
8888 --- /dev/null
8889 +++ b/drivers/net/ethernet/raeth/ra_netlink.c
8890 @@ -0,0 +1,142 @@
8891 +// for netlink header
8892 +#include <asm/types.h>
8893 +#include <net/sock.h>
8894 +#include <linux/socket.h>
8895 +#include <linux/netlink.h>
8896 +#include <linux/skbuff.h>
8897 +#include <linux/net.h>
8898 +#include <linux/version.h>
8899 +
8900 +#include "csr_netlink.h"
8901 +#include "ra2882ethreg.h"
8902 +#include "ra_netlink.h"
8903 +
8904 +static struct sock *csr_msg_socket = NULL; // kernel-side netlink socket used for CSR register messages
8905 +unsigned int flags;
8906 +
8907 +void rt2880_csr_receiver(struct sock *sk, int len)
8908 +{
8909 +       struct sk_buff *skb;
8910 +       int err;
8911 +       struct nlmsghdr *nlh;
8912 +       unsigned int reg_value = 0;
8913 +       CSR_MSG *csrmsg;
8914 +       RAETH_PRINT("csr netlink receiver!\n");
8915 +       skb = skb_recv_datagram(sk, 0, 1, &err);
8916 +
8917 +       RAETH_PRINT("error no : %d\n", err);
8918 +       
8919 +       if (skb == NULL) {
8920 +               printk("rt2880_csr_receiver(): No data received, error!\n");
8921 +               return;
8922 +       }
8923 +       
8924 +       nlh = (struct nlmsghdr*)skb->data;
8925 +       
8926 +       csrmsg = NLMSG_DATA(nlh);
8927 +
8928 +       if (csrmsg->enable == CSR_READ ) {
8929 +               reg_value = sysRegRead(csrmsg->address);
8930 +#if 0
8931 +               printk("raeth -- 0x%08x: 0x%08x\n", csrmsg->address, reg_value);
8932 +#endif
8933 +       } else if ( csrmsg->enable == CSR_WRITE ) {
8934 +               sysRegWrite(csrmsg->address, csrmsg->default_value);
8935 +               reg_value = sysRegRead(csrmsg->address);
8936 +       } else if ( csrmsg->enable == CSR_TEST ) {
8937 +               reg_value = sysRegRead(csrmsg->address);
8938 +               printk("0x%08x: 0x%08x\n", (unsigned int)csrmsg->address, reg_value);   
8939 +       }
8940 +       else
8941 +               printk("drv: Command format error!\n");
8942 +
8943 +       csrmsg->default_value = reg_value;
8944 +               
8945 +       RAETH_PRINT("drv: rt2880_csr_msgsend() - msg to send!\n");              
8946 +       
8947 +       err = rt2880_csr_msgsend(csrmsg);       
8948 +       
8949 +       if ( err == -2 )
8950 +               printk("drv: msg send error!\n");
8951 +
8952 +       skb_free_datagram(sk, skb);
8953 +}
8954 +
8955 +int rt2880_csr_msgsend(CSR_MSG* csrmsg)
8956 +{
8957 +       struct sk_buff *skb;
8958 +       struct nlmsghdr *nlh = NULL;
8959 +       size_t size = 0;
8960 +       struct sock *send_syncnl = csr_msg_socket;
8961 +
8962 +       CSR_MSG* csr_reg;
8963 +       if (send_syncnl == NULL) {
8964 +               printk("drv: netlink_kernel_create() failed!\n");
8965 +               return -1;
8966 +       }
8967 +               
8968 +       size = NLMSG_SPACE(sizeof(CSR_MSG));
8969 +       skb = alloc_skb(size, GFP_ATOMIC);
8970 +       
8971 +       if(!skb)
8972 +       {
8973 +               printk("rt2880_csr_msgsend() : error! msg structure not available\n");
8974 +               return -1;
8975 +       }
8976 +       
8977 +       nlh = NLMSG_PUT(skb, 0, 0, RALINK_CSR_GROUP, size - sizeof(struct nlmsghdr));
8978 +       
8979 +       if (!nlh)
8980 +       {
8981 +               printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
8982 +               return -1;
8983 +       }
8984 +
8985 +       csr_reg = NLMSG_DATA(nlh);
8986 +       if (!csr_reg)
8987 +       {
8988 +               printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
8989 +               return -1;
8990 +       }
8991 +
8992 +       csr_reg->address        = csrmsg->address;
8993 +       csr_reg->default_value  = csrmsg->default_value;
8994 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
8995 +       NETLINK_CB(skb).dst_group = RALINK_CSR_GROUP;
8996 +#else
8997 +       NETLINK_CB(skb).dst_groups = RALINK_CSR_GROUP;
8998 +#endif
8999 +       netlink_broadcast(send_syncnl, skb, 0, RALINK_CSR_GROUP, GFP_ATOMIC);
9000 +       return 0;
9001 +
9002 +nlmsg_failure:
9003 +       return -2;      
9004 +}
9005 +
9006 +int csr_netlink_init()
9007 +{
9008 +
9009 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9010 +       csr_msg_socket = netlink_kernel_create(NETLINK_CSR, RALINK_CSR_GROUP, rt2880_csr_receiver, THIS_MODULE);
9011 +#else
9012 +       csr_msg_socket = netlink_kernel_create(NETLINK_CSR, rt2880_csr_receiver);
9013 +#endif
9014 +
9015 +       if ( csr_msg_socket == NULL )
9016 +               printk("unable to create netlink socket!\n");
9017 +       else
9018 +               printk("Netlink init ok!\n");
9019 +       return 0;
9020 +}
9021 +
9022 +void csr_netlink_end()
9023 +{
9024 +       if (csr_msg_socket != NULL){
9025 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
9026 +               sock_release(csr_msg_socket->sk_socket);
9027 +#else
9028 +               sock_release(csr_msg_socket->socket);
9029 +#endif
9030 +               printk("Netlink end...\n");
9031 +       }
9032 +}
9033 --- /dev/null
9034 +++ b/drivers/net/ethernet/raeth/ra_netlink.h
9035 @@ -0,0 +1,10 @@
9036 +#ifndef RA_NETLINK
9037 +#define RA_NETLINK
9038 +
9039 +#include "csr_netlink.h"
9040 +int rt2880_csr_msgsend(CSR_MSG* csrmsg);
9041 +void rt2880_csr_receiver(struct sock *sk, int len);
9042 +int csr_netlink_init(void);
9043 +void csr_netlink_end(void);
9044 +
9045 +#endif
9046 --- /dev/null
9047 +++ b/drivers/net/ethernet/raeth/ra_qos.c
9048 @@ -0,0 +1,655 @@
9049 +#include <asm/io.h>
9050 +#include <linux/pci.h>
9051 +#include <linux/netdevice.h>
9052 +#include <linux/etherdevice.h>
9053 +#include <linux/net.h>
9054 +#include <linux/in.h>
9055 +#include "ra_qos.h"
9056 +#include "raether.h"
9057 +#include "ra2882ethreg.h"
9058 +
9059 +#include <asm/types.h>
9060 +#include <net/sock.h>
9061 +#include <linux/socket.h>
9062 +#include <linux/skbuff.h>
9063 +#include <linux/net.h>
9064 +#include <linux/if_vlan.h>
9065 +#include <linux/ip.h>
9066 +
9067 +
9068 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9069 +#include "../../../net/nat/hw_nat/ra_nat.h"
9070 +#endif
9071 +
9072 +#define CONTI_TX_SEND_MAX_SIZE 1440
9073 +
9074 +/* 
9075 + * set tx queue # to descriptor
9076 + */
9077 +void rt3052_tx_queue_init(unsigned long data)
9078 +{
9079 +       /* define qos p */
9080 +       
9081 +}
9082 +
9083 +void rt3052_pse_port0_fc_clear(unsigned long data)
9084 +{
9085 +       /* clear FE_INT_STATUS.PSE_P0_FC */
9086 +       
9087 +}
9088 +
9089 +inline int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx)
9090 +{
9091 +       switch (ring_no) {
9092 +               case RING0:
9093 +                       *idx = *(unsigned long*)TX_CTX_IDX0;
9094 +                       break;
9095 +               case RING1:
9096 +                       *idx = *(unsigned long*)TX_CTX_IDX1;
9097 +                       break;
9098 +               case RING2:
9099 +                       *idx = *(unsigned long*)TX_CTX_IDX2;
9100 +                       break;
9101 +               case RING3:
9102 +                       *idx = *(unsigned long*)TX_CTX_IDX3;
9103 +                       break;
9104 +               default:
9105 +                       printk("set_tx_ctx_idex error\n");
9106 +                       return -1;
9107 +       };
9108 +       return 0;
9109 +}
9110 +
9111 +inline int set_tx_ctx_idx(unsigned int ring_no, unsigned int idx)
9112 +{
9113 +       switch (ring_no ) {
9114 +               case RING0:
9115 +                       *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32)idx);
9116 +                       break;
9117 +               case RING1:
9118 +                       *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32)idx);
9119 +                       break;
9120 +               case RING2:
9121 +                       *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32)idx);
9122 +                       break;
9123 +               case RING3:
9124 +                       *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32)idx);
9125 +                       break;
9126 +               default:
9127 +                       printk("set_tx_ctx_idex error\n");
9128 +                       return -1;
9129 +       };
9130 +
9131 +       return 1;
9132 +}
9133 +
9134 +void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc)
9135 +{
9136 +       switch (ring_no) {
9137 +               case RING0:
9138 +                       *tx_desc = ei_local->tx_ring0;
9139 +                       *tx_dtx_idx      = *(unsigned long*)TX_DTX_IDX0;
9140 +                       break;
9141 +               case RING1:
9142 +                       *tx_desc = ei_local->tx_ring1;
9143 +                       *tx_dtx_idx      = *(unsigned long*)TX_DTX_IDX1;
9144 +                       break;
9145 +               case RING2:
9146 +                       *tx_desc = ei_local->tx_ring2;
9147 +                       *tx_dtx_idx      = *(unsigned long*)TX_DTX_IDX2;
9148 +                       break;
9149 +               case RING3:
9150 +                       *tx_desc = ei_local->tx_ring3;
9151 +                       *tx_dtx_idx      = *(unsigned long*)TX_DTX_IDX3;
9152 +                       break;
9153 +               default:
9154 +                       printk("ring_no input error... %d\n", ring_no);
9155 +       };
9156 +}
9157 +
9158 +int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned pn)
9159 +{
9160 +       END_DEVICE* ei_local = netdev_priv(dev);
9161 +       struct PDMA_txdesc* tx_desc;
9162 +       unsigned int tx_cpu_owner_idx, tx_dtx_idx;
9163 +
9164 +       unsigned int    length=skb->len;
9165 +       int ret;
9166 +       unsigned long flags;
9167 +
9168 +       //printk("fe_qos_packet_send: ring_no=%d qn=%d pn=%d\n", ring_no, qn, pn);
9169 +
9170 +       switch ( ring_no ) {
9171 +               case 0:
9172 +                       tx_desc = ei_local->tx_ring0;
9173 +                       tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX0;
9174 +                       tx_dtx_idx       = *(unsigned long*)TX_DTX_IDX0;
9175 +                       break;
9176 +               case 1:
9177 +                       tx_desc = ei_local->tx_ring1;
9178 +                       tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX1;
9179 +                       tx_dtx_idx       = *(unsigned long*)TX_DTX_IDX1;
9180 +                       break;
9181 +               case 2:
9182 +                       tx_desc = ei_local->tx_ring2;
9183 +                       tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX2;
9184 +                       tx_dtx_idx       = *(unsigned long*)TX_DTX_IDX2;
9185 +                       break;
9186 +               case 3:
9187 +                       tx_desc = ei_local->tx_ring3;
9188 +                       tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX3;
9189 +                       tx_dtx_idx       = *(unsigned long*)TX_DTX_IDX3;
9190 +                       break;
9191 +               default:
9192 +                       printk("ring_no input error... %d\n", ring_no);
9193 +                       return -1;
9194 +       };
9195 +
9196 +       //printk("tx_cpu_owner_idx=%d tx_dtx_idx=%d\n", tx_cpu_owner_idx, tx_dtx_idx);
9197 +
9198 +       if(tx_desc == NULL) {
9199 +               printk("%s : txdesc is NULL\n", dev->name);
9200 +               return -1;
9201 +       }
9202 +
9203 +       tx_desc[tx_cpu_owner_idx].txd_info1.SDP0 = virt_to_phys(skb->data);
9204 +       tx_desc[tx_cpu_owner_idx].txd_info2.SDL0 = length;
9205 +       tx_desc[tx_cpu_owner_idx].txd_info2.DDONE_bit = 0;
9206 +       tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
9207 +       tx_desc[tx_cpu_owner_idx].txd_info4.QN = qn;
9208 +
9209 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
9210 +       ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.TCO = 1; 
9211 +       ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.UCO = 1; 
9212 +       ei_local->tx_ring0[tx_cpu_owner_idx].txd_info4.ICO = 1; 
9213 +#endif
9214 +
9215 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) 
9216 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
9217 +           tx_desc[tx_cpu_owner_idx].txd_info4.PN = 6; /* PPE */
9218 +       } else {
9219 +           tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn; 
9220 +       }
9221 +       
9222 +#endif
9223 +
9224 +       spin_lock_irqsave(&ei_local->page_lock, flags);
9225 +       ei_local->skb_free[ring_no][tx_cpu_owner_idx] = skb;
9226 +       tx_cpu_owner_idx = (tx_cpu_owner_idx +1) % NUM_TX_DESC;
9227 +       ret = set_tx_ctx_idx(ring_no, tx_cpu_owner_idx);
9228 +       spin_unlock_irqrestore(&ei_local->page_lock, flags);
9229 +
9230 +       ei_local->stat.tx_packets++;
9231 +       ei_local->stat.tx_bytes += length;
9232 +
9233 +#ifdef CONFIG_RAETH_NAPI
9234 +       switch ( ring_no ) {
9235 +               case 0:
9236 +                       if ( ei_local->tx0_full == 1) {
9237 +                               ei_local->tx0_full = 0;
9238 +                               netif_wake_queue(dev);
9239 +                       }
9240 +                       break;
9241 +               case 1:
9242 +                       if ( ei_local->tx1_full == 1) {
9243 +                               ei_local->tx1_full = 0;
9244 +                               netif_wake_queue(dev);
9245 +                       }
9246 +                       break;
9247 +               case 2:
9248 +                       if ( ei_local->tx2_full == 1) {
9249 +                               ei_local->tx2_full = 0;
9250 +                               netif_wake_queue(dev);
9251 +                       }
9252 +                       break;
9253 +               case 3:
9254 +                       if ( ei_local->tx3_full == 1) {
9255 +                               ei_local->tx3_full = 0;
9256 +                               netif_wake_queue(dev);
9257 +                       }
9258 +                       break;
9259 +               default :
9260 +                       printk("ring_no input error %d\n", ring_no);
9261 +       };
9262 +#endif
9263 +       return length;
9264 +}
9265 +
9266 +int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn)
9267 +{
9268 +       END_DEVICE* ei_local = netdev_priv(dev);
9269 +       struct PDMA_txdesc *tx_desc;
9270 +       unsigned int tx_cpu_owner_idx = 0;
9271 +       int i;
9272 +       unsigned int phy_tx_ring;
9273 +
9274 +       // sanity check
9275 +       if ( ring_no > 3 ){
9276 +               printk("%s : ring_no - %d, please under 4...\n", dev->name, ring_no);
9277 +               return 0;
9278 +       }
9279 +
9280 +       if ( pn > 2 ){
9281 +               printk("%s : pn - %d, please under 2...\n", dev->name, pn);
9282 +               return 0;
9283 +       }
9284 +
9285 +       tx_desc = pci_alloc_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), &phy_tx_ring);
9286 +       ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx;
9287 +       
9288 +       switch (ring_no) {
9289 +               case 0:
9290 +                       ei_local->tx_ring0 = tx_desc;
9291 +                       ei_local->phy_tx_ring0 = phy_tx_ring;
9292 +                       break;
9293 +               case 1:
9294 +                       ei_local->phy_tx_ring1 = phy_tx_ring;
9295 +                       ei_local->tx_ring1 = tx_desc;
9296 +                       break;
9297 +               case 2:
9298 +                       ei_local->phy_tx_ring2 = phy_tx_ring;
9299 +                       ei_local->tx_ring2 = tx_desc;
9300 +                       break;
9301 +               case 3:
9302 +                       ei_local->phy_tx_ring3 = phy_tx_ring;
9303 +                       ei_local->tx_ring3 = tx_desc;
9304 +                       break;
9305 +               default:
9306 +                       printk("ring_no input error! %d\n", ring_no);
9307 +                       pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), tx_desc, phy_tx_ring);
9308 +                       return 0;
9309 +       };      
9310 +
9311 +       if ( tx_desc == NULL)
9312 +       {
9313 +               printk("tx desc allocation failed!\n");
9314 +               return 0;
9315 +       }
9316 +
9317 +       for( i = 0; i < NUM_TX_DESC; i++) {
9318 +               memset( &tx_desc[i], 0, sizeof(struct PDMA_txdesc));
9319 +               tx_desc[i].txd_info2.LS0_bit = 1;
9320 +               tx_desc[i].txd_info2.DDONE_bit = 1;
9321 +               tx_desc[i].txd_info4.PN = pn;
9322 +               tx_desc[i].txd_info4.QN = qn;
9323 +       }
9324 +
9325 +       switch ( ring_no ) {
9326 +               case 0 :
9327 +                       *(unsigned long*)TX_BASE_PTR0 = phys_to_bus((u32) phy_tx_ring);
9328 +                       *(unsigned long*)TX_MAX_CNT0  = cpu_to_le32((u32)NUM_TX_DESC);
9329 +                       *(unsigned long*)TX_CTX_IDX0  = cpu_to_le32((u32) tx_cpu_owner_idx);
9330 +                       sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
9331 +                       break;
9332 +               case 1 :
9333 +                       *(unsigned long*)TX_BASE_PTR1 = phys_to_bus((u32) phy_tx_ring);
9334 +                       *(unsigned long*)TX_MAX_CNT1  = cpu_to_le32((u32)NUM_TX_DESC);
9335 +                       *(unsigned long*)TX_CTX_IDX1  = cpu_to_le32((u32) tx_cpu_owner_idx);
9336 +                       sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX1);
9337 +                       break;
9338 +               case 2 :
9339 +                       *(unsigned long*)TX_BASE_PTR2 = phys_to_bus((u32) phy_tx_ring);
9340 +                       *(unsigned long*)TX_MAX_CNT2  = cpu_to_le32((u32)NUM_TX_DESC);
9341 +                       *(unsigned long*)TX_CTX_IDX2  = cpu_to_le32((u32) tx_cpu_owner_idx);
9342 +                       sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX2);
9343 +                       break;
9344 +               case 3 :
9345 +                       *(unsigned long*)TX_BASE_PTR3 = phys_to_bus((u32) phy_tx_ring);
9346 +                       *(unsigned long*)TX_MAX_CNT3  = cpu_to_le32((u32)NUM_TX_DESC);
9347 +                       *(unsigned long*)TX_CTX_IDX3  = cpu_to_le32((u32) tx_cpu_owner_idx);
9348 +                       sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX3);
9349 +                       break;
9350 +               default :
9351 +                       printk("tx descriptor init failed %d\n", ring_no);
9352 +                       return 0;
9353 +       };
9354 +       return 1;
9355 +}
9356 +
9357 +/*
9358 +   DSCP | AC | WMM_AC (Access Category)
9359 +   ------+----+--------
9360 +   00-07|  1 |  BE
9361 +   24-31|  1 |  BE
9362 +   08-15|  0 |  BG
9363 +   16-23|  0 |  BG
9364 +   32-39|  2 |  VI
9365 +   40-47|  2 |  VI
9366 +   48-55|  3 |  VO
9367 +   56-63|  3 |  VO 
9368 +
9369 +          |    TOS    |
9370 +     DSCP |(bit5~bit7)|  WMM  
9371 +   -------+-----------+-------
9372 +    0x00  |    000    |   BE
9373 +    0x18  |    011    |   BE
9374 +    0x08  |    001    |   BG
9375 +    0x10  |    010    |   BG
9376 +    0x20  |    100    |   VI
9377 +    0x28  |    101    |   VI
9378 +    0x30  |    110    |   VO
9379 +    0x38  |    111    |   VO
9380 +
9381 +    Note: BE should map to AC1, but the Linux kernel maps it to AC0.
9382 +
9383 + */
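+/*
+ * Worked example from the table above: a packet with TOS 0xB8 (DSCP 46)
+ * has TOS bits 7:5 = 101b = 5, so DscpToAcMap[5] = 2 and the frame is
+ * classified as VI (video) when DSCP-based mapping is enabled.
+ */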
9384 +
9385 +int  pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no)
9386 +{
9387 +#if defined(CONFIG_RALINK_RT2880)
9388 +    /* RT2880 -- Assume using 1 Ring (Ring0), Queue 0, and Port 0 */
9389 +    *port_no   = 0;
9390 +    *ring_no   = 0;
9391 +    *queue_no  = 0;
9392 +#else
9393 +    unsigned int ac=0;
9394 +    unsigned int bridge_traffic=0, lan_traffic=0;
9395 +    struct iphdr *iph=NULL;
9396 +    struct vlan_ethhdr *veth=NULL;
9397 +    unsigned int vlan_id=0;
9398 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9399 +    static char DscpToAcMap[8]={1,0,0,1,2,2,3,3};
9400 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9401 +    static char VlanPriToAcMap[8]={1,0,0,1,2,2,3,3};
9402 +#endif
9403 +
9404 +    /* Bridge:: {BG,BE,VI,VO} */
9405 +    /* GateWay:: WAN: {BG,BE,VI,VO}, LAN: {BG,BE,VI,VO} */
9406 +#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
9407 +    /* 
9408 +     * 1) Bridge: 
9409 +     *    1.1) GMAC1 ONLY:
9410 +     *                 VO/VI->Ring3, BG/BE->Ring2 
9411 +     *    1.2) GMAC1+GMAC2: 
9412 +     *                 GMAC1:: VO/VI->Ring3, BG/BE->Ring2 
9413 +     *                 GMAC2:: VO/VI->Ring1, BG/BE->Ring0 
9414 +     * 2) GateWay:
9415 +     *    2.1) GMAC1 ONLY:
9416 +     *        GMAC1:: LAN:VI/VO->Ring2, BE/BK->Ring2
9417 +     *                WAN:VI/VO->Ring3, BE/BK->Ring3
9418 +     *    2.2)GMAC1+GMAC2: 
9419 +     *        GMAC1:: LAN:VI/VO/BE/BK->Ring2, WAN:VI/VO/BE/BK->Ring3
9420 +     *        GMAC2:: VI/VO->Ring1, BE/BK->Ring0
9421 +     */
9422 +    static unsigned char AcToRing_BridgeMap[4] = {2, 2, 3, 3}; 
9423 +    static unsigned char AcToRing_GE1Map[2][4] = {{3, 3, 3, 3},{2, 2, 2, 2}}; 
9424 +    static unsigned char AcToRing_GE2Map[4] = {0, 0, 1, 1};
9425 +#elif defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT2883) || \
9426 +      defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || \
9427 +      defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9428 +      defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
9429 +      defined (CONFIG_RALINK_MT7628) || \
9430 +     (defined (CONFIG_RALINK_RT3883) && !defined(CONFIG_RAETH_GMAC2))
9431 +    /* 
9432 +     * 1) Bridge: VO->Ring3, VI->Ring2, BG->Ring1, BE->Ring0 
9433 +     * 2) GateWay:
9434 +     *    2.1) GMAC1:: LAN:VI/VO->Ring1, BE/BK->Ring0
9435 +     *                WAN:VI/VO->Ring3, BE/BK->Ring2
9436 +     */ 
9437 +    static unsigned char AcToRing_BridgeMap[4] = {0, 1, 2, 3}; 
9438 +    static unsigned char AcToRing_GE1Map[2][4] = {{2, 2, 3, 3},{0, 0, 1, 1}}; 
9439 +#endif  // CONFIG_RALINK_RT2883
9440 +
9441 +    /* 
9442 +     * Set queue no - QN field in TX Descriptor
9443 +     * always use queue 3 for the packet from CPU to GMAC 
9444 +     */
9445 +    *queue_no = 3; 
9446 +
9447 +    /* Get access category */
9448 +    veth = (struct vlan_ethhdr *)(skb->data);
9449 +    if(veth->h_vlan_proto == htons(ETH_P_8021Q)) { // VLAN traffic
9450 +       iph= (struct iphdr *)(skb->data + VLAN_ETH_HLEN); 
9451 +
9452 +       vlan_id = ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK;
9453 +       if(vlan_id==1) { //LAN
9454 +           lan_traffic = 1;
9455 +       } else { //WAN
9456 +           lan_traffic = 0;
9457 +       }
9458 +
9459 +       if (veth->h_vlan_encapsulated_proto == htons(ETH_P_IP)) { //IPv4 
9460 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9461 +           ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
9462 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9463 +           ac = VlanPriToAcMap[skb->priority];
9464 +#endif
9465 +       }else { //Ipv6, ARP ...etc
9466 +           ac = 0;
9467 +       }
9468 +    }else { // non-VLAN traffic
9469 +       if (veth->h_vlan_proto == htons(ETH_P_IP)) { //IPv4
9470 +#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
9471 +           iph= (struct iphdr *)(skb->data + ETH_HLEN);
9472 +           ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
9473 +#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
9474 +           ac= VlanPriToAcMap[skb->priority];
9475 +#endif
9476 +       }else { // IPv6, ARP ...etc
9477 +           ac = 0;
9478 +       }
9479 +
9480 +       bridge_traffic=1;
9481 +    }
9482 +    
9483 +
9484 +    /* Set Tx Ring no */
9485 +    if(gmac_no==1) { //GMAC1
9486 +       if(bridge_traffic) { //Bridge Mode
9487 +           *ring_no = AcToRing_BridgeMap[ac];
9488 +       }else { //GateWay Mode
9489 +           *ring_no = AcToRing_GE1Map[lan_traffic][ac];
9490 +       }
9491 +    }else { //GMAC2
9492 +#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
9493 +       *ring_no = AcToRing_GE2Map[ac];
9494 +#endif
9495 +    }
9496 +
9497 +
9498 +    /* Set Port No - PN field in Tx Descriptor*/
9499 +#if defined(CONFIG_RAETH_GMAC2)
9500 +    *port_no = gmac_no;
9501 +#else
9502 +    if(bridge_traffic) {
9503 +       *port_no = 1;
9504 +    }else {
9505 +       if(lan_traffic==1) { //LAN use VP1
9506 +           *port_no = 1;
9507 +       }else { //WAN use VP2
9508 +           *port_no = 2;
9509 +       }
9510 +    }
9511 +#endif // CONFIG_RAETH_GMAC2 //
9512 +
9513 +#endif
9514 +
9515 +    return 1;
9516 +
9517 +}
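+/*
+ * Illustrative call sequence only (the real call sites live elsewhere in
+ * the driver, not in this file): classify the skb first, then queue it on
+ * the ring/queue/port the classifier picked.
+ *
+ *     int ring_no, queue_no, port_no;
+ *     pkt_classifier(skb, gmac_no, &ring_no, &queue_no, &port_no);
+ *     fe_qos_packet_send(dev, skb, ring_no, queue_no, port_no);
+ */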
9518 +
9519 +
9520 +/*
9521 + *  Routine Description : 
9522 + *  Hi/Li Rings and Queues definition for QoS Purpose
9523 + *
9524 + *  Related registers: (for details, refer to p.106 of RT3052_DS_20080226.doc)
9525 + *  Priority High/Low Definition - PDMA_FC_CFG, GDMA1_FC_CFG, GDMA2_FC_CFG
9526 + *  Bit 28 -  Allows high priority Q to share low priority Q's reserved pages
9527 + *  Bit 27:24 -  Px high priority definition bitmap 
9528 + *  Weight Configuration - GDMA1_SCH_CFG, GDMA2_SCH_CFG, PDMA_SCH_CFG -> default 3210
9529 + *
9530 + * Parameter: 
9531 + *     NONE
9532 + *     
9533 +*/
9534 +#define PSE_P1_LQ_FULL (1<<2)
9535 +#define PSE_P1_HQ_FULL (1<<3)
9536 +#define PSE_P2_LQ_FULL (1<<4)
9537 +#define PSE_P2_HQ_FULL (1<<5)
9538 +
9539 +#define HIGH_QUEUE(queue)   (1<<(queue))
9540 +#define LOW_QUEUE(queue)    (0<<(queue))
9541 +#define PAGES_SHARING      (1<<28)
9542 +#define RSEV_PAGE_COUNT_HQ  0x10 /* Reserved page count for high priority Q */
9543 +#define RSEV_PAGE_COUNT_LQ  0x10 /* Reserved page count for low priority Q */
9544 +#define VIQ_FC_ASRT        0x10 /* Virtual input Q FC assertion threshold */
9545 +
9546 +#define QUEUE_WEIGHT_1     0
9547 +#define QUEUE_WEIGHT_2     1
9548 +#define QUEUE_WEIGHT_4     2
9549 +#define QUEUE_WEIGHT_8     3
9550 +#define QUEUE_WEIGHT_16     4
9551 +
9552 +#define WRR_SCH                    0 /*WRR */
9553 +#define STRICT_PRI_SCH     1 /* Strict Priority */
9554 +#define MIX_SCH                    2 /* Mixed : Q3>WRR(Q2,Q1,Q0) */
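+/*
+ * For reference, the WRR programming in set_scheduler_weight() below encodes
+ * to GDMA1_SCH_CFG = GDMA2_SCH_CFG = 0x00004321 (mode 0 = WRR, Q3:Q2:Q1:Q0
+ * weighted 16:8:4:2) and, on chips that take the generic branch,
+ * PDMA_SCH_CFG = 0x00004242 (ring 3:2:1:0 weighted 16:4:16:4).
+ */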
9555 +
9556 +/*
9557 + *           Ring3  Ring2  Ring1  Ring0
9558 + *            |  |   |  |  |  |   |  |
9559 + *            |  |   |  |  |  |   |  |
9560 + *        --------------------------------
9561 + *        |         WRR Scheduler        |
9562 + *        --------------------------------
9563 + *                       |
9564 + *    ---------------------------------------
9565 + *    |                 PDMA                |
9566 + *    ---------------------------------------
9567 + *     |Q3||Q2||Q1||Q0|    |Q3||Q2||Q1||Q0|
9568 + *     |  ||  ||  ||  |    |  ||  ||  ||  |
9569 + *    ------------------- -------------------
9570 + *    |      GDMA2      | |     GDMA1       |
9571 + *    ------------------- -------------------
9572 + *              |                      |
9573 + *      ------------------------------------
9574 + *      |              GMAC                |
9575 + *      ------------------------------------
9576 + *                       |
9577 + *
9578 + */
9579 +void set_scheduler_weight(void)
9580 +{
9581 +#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
9582 +    /* 
9583 +     * STEP1: Queue scheduling configuration 
9584 +     */
9585 +    *(unsigned long *)GDMA1_SCH_CFG = (WRR_SCH << 24) | 
9586 +       (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
9587 +       (QUEUE_WEIGHT_8 << 8) |  /* queue 2 weight */
9588 +       (QUEUE_WEIGHT_4  << 4) |  /* queue 1 weight */
9589 +       (QUEUE_WEIGHT_2  << 0);   /* queue 0 weight */
9590 +
9591 +    *(unsigned long *)GDMA2_SCH_CFG = (WRR_SCH << 24) | 
9592 +       (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
9593 +       (QUEUE_WEIGHT_8 << 8) |  /* queue 2 weight */
9594 +       (QUEUE_WEIGHT_4  << 4) |  /* queue 1 weight */
9595 +       (QUEUE_WEIGHT_2  << 0);   /* queue 0 weight */
9596 +    
9597 +#endif
9598 +    /* 
9599 +     * STEP2: Ring scheduling configuration 
9600 +     */
9601 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9602 +    defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
9603 +    /* MIN_RATE_RATIO0=0, MAX_RATE_ULMT0=1, Weight0=1 */
9604 +    *(unsigned long *)SCH_Q01_CFG =  (0 << 10) | (1<<14) | (0 << 12);
9605 +    /* MIN_RATE_RATIO1=0, MAX_RATE_ULMT1=1, Weight1=4 */
9606 +    *(unsigned long *)SCH_Q01_CFG |= (0 << 26) | (1<<30) | (2 << 28);
9607 +
9608 +    /* MIN_RATE_RATIO2=0, MAX_RATE_ULMT2=1, Weight2=1 */
9609 +    *(unsigned long *)SCH_Q23_CFG =  (0 << 10) | (1<<14) | (0 << 12);
9610 +    /* MIN_RATE_RATIO3=0, MAX_RATE_ULMT3=1, Weight3=4 */
9611 +    *(unsigned long *)SCH_Q23_CFG |= (0 << 26) | (1<<30) | (2 << 28);
9612 +#else
9613 +    *(unsigned long *)PDMA_SCH_CFG = (WRR_SCH << 24) | 
9614 +       (QUEUE_WEIGHT_16 << 12) | /* ring 3 weight */
9615 +       (QUEUE_WEIGHT_4 << 8) |  /* ring 2 weight */
9616 +       (QUEUE_WEIGHT_16 << 4) |  /* ring 1 weight */
9617 +       (QUEUE_WEIGHT_4 << 0);   /* ring 0 weight */
9618 +#endif
9619 +}
9620 +
9621 +/*
9622 + * Routine Description : 
9623 + *     Bucket sizes and related values come from the ASIC designer;
9624 + *     please check with Max Lee before updating them.
9625 + *
9626 + *     Related Registers
9627 + *       FE_GLO_CFG - initialize clock rate for rate limiting
9628 + *       PDMA_FC_CFG - pause mechanism for rings (refer to p.116 of the datasheet)
9629 + *       :
9630 + * Parameter: 
9631 + *     NONE
9632 + */
9633 +/*
9634 + *     Bit 29:24 - Q3 flow control pause condition
9635 + *     Bit 21:16 - Q2 flow control pause condition
9636 + *     Bit 13:8  - Q1 flow control pause condition
9637 + *     Bit 5:0   - Q0 flow control pause condition
9638 + *
9639 + *     detail bitmap -
9640 + *       Bit[5] - Pause Qx when PSE p2 HQ full
9641 + *       Bit[4] - Pause Qx when PSE p2 LQ full
9642 + *       Bit[3] - Pause Qx when PSE p1 HQ full
9643 + *       Bit[2] - Pause Qx when PSE p1 LQ full
9644 + *       Bit[1] - Pause Qx when PSE p0 HQ full
9645 + *       Bit[0] - Pause Qx when PSE p0 LQ full
9646 + */
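+/*
+ * For reference, the generic branch below resolves to
+ * PDMA_FC_CFG = (0x20 << 24) | (0x20 << 16) | (0x08 << 8) | 0x08 = 0x20200808,
+ * i.e. the Q3/Q2 fields pause on "PSE P2 HQ full" and Q1/Q0 on "PSE P1 HQ full".
+ */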
9647 +void set_schedule_pause_condition(void)
9648 +{
9649 +#if defined (CONFIG_RALINK_MT7620)
9650 +    
9651 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
9652 +    *(unsigned long *)SDM_TRING = (0xC << 28) | (0x3 << 24) | (0xC << 4) | 0x3;
9653 +#else
9654 +    /* 
9655 +     * STEP1: Set each queue's priority (high or low)
9656 +     *
9657 +     * Set queue 3 as high queue in GMAC1/GMAC2 
9658 +     */        
9659 +    *(unsigned long *)GDMA1_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) | 
9660 +                                     LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
9661 +                                     (RSEV_PAGE_COUNT_HQ << 16) |
9662 +                                     (RSEV_PAGE_COUNT_LQ <<8) |
9663 +                                     VIQ_FC_ASRT | PAGES_SHARING;
9664 +
9665 +    *(unsigned long *)GDMA2_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) | 
9666 +                                     LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
9667 +                                     (RSEV_PAGE_COUNT_HQ << 16) |
9668 +                                     (RSEV_PAGE_COUNT_LQ <<8) |
9669 +                                     VIQ_FC_ASRT | PAGES_SHARING;
9670 +    
9671 +    /* 
9672 +     * STEP2: Set flow control pause condition 
9673 +     *
9674 +     * The CPU always uses queue 3, and queue 3 is a high queue.
9675 +     * If P2(GMAC2) high queue is full, pause ring3/ring2
9676 +     * If P1(GMAC1) high queue is full, pause ring1/ring0
9677 +     */
9678 +    *(unsigned long *)PDMA_FC_CFG =  ( PSE_P2_HQ_FULL << 24 ) | /* queue 3 */
9679 +       ( PSE_P2_HQ_FULL << 16 ) | /* queue 2 */
9680 +       ( PSE_P1_HQ_FULL << 8 ) |  /* queue 1 */
9681 +       ( PSE_P1_HQ_FULL << 0 );  /* queue 0 */
9682 +#endif
9683 +    
9684 +}
9685 +
9686 +
9687 +void set_output_shaper(void)
9688 +{
9689 +#define GDMA1_TOKEN_RATE       16  /* unit=64bits/ms */
9690 +#define GDMA2_TOKEN_RATE       16  /* unit=64bits/ms */
9691 +
9692 +#if 0
9693 +    *(unsigned long *)GDMA1_SHPR_CFG =  (1 << 24) | /* output shaper enable */
9694 +                                       (128 << 16) | /* bucket size (unit=1KB) */
9695 +                                       (GDMA1_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
9696 +#endif
9697 +
9698 +#if 0
9699 +    *(unsigned long *)GDMA2_SHPR_CFG =  (1 << 24) | /* output shaper enable */
9700 +                                       (128 << 16) | /* bucket size (unit=1KB) */
9701 +                                       (GDMA2_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
9702 +#endif
9703 +}
9704 --- /dev/null
9705 +++ b/drivers/net/ethernet/raeth/ra_qos.h
9706 @@ -0,0 +1,18 @@
9707 +#ifndef RA_QOS_H
9708 +#define        RA_QOS_H
9709 +
9710 +#include "ra2882ethreg.h"
9711 +#define RING0  0
9712 +#define RING1  1
9713 +#define RING2  2
9714 +#define RING3  3
9715 +void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc);
9716 +int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx);
9717 +int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn);
9718 +int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned int pn);
9719 +
9720 +int  pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no);
9721 +void set_schedule_pause_condition(void);
9722 +void set_scheduler_weight(void);
9723 +void set_output_shaper(void);
9724 +#endif
9725 --- /dev/null
9726 +++ b/drivers/net/ethernet/raeth/ra_rfrw.c
9727 @@ -0,0 +1,66 @@
9728 +#include <linux/module.h>
9729 +#include <linux/version.h>
9730 +#include <linux/kernel.h>
9731 +#include <linux/sched.h>
9732 +#include <linux/types.h>
9733 +#include <linux/fcntl.h>
9734 +#include <linux/interrupt.h>
9735 +#include <linux/ptrace.h>
9736 +#include <linux/ioport.h>
9737 +#include <linux/in.h>
9738 +#include <linux/slab.h>
9739 +#include <linux/string.h>
9740 +#include <linux/signal.h>
9741 +#include <linux/irq.h>
9742 +#include <linux/netdevice.h>
9743 +#include <linux/etherdevice.h>
9744 +#include <linux/skbuff.h>
9745 +
9746 +#include "ra2882ethreg.h"
9747 +#include "raether.h"
9748 +#include "ra_mac.h"
9749 +
9750 +#define RF_CSR_CFG      0xb0180500
9751 +#define RF_CSR_KICK     (1<<17)
9752 +int rw_rf_reg(int write, int reg, int *data)
9753 +{
9754 +        unsigned long    rfcsr, i = 0;
9755 +
9756 +        while (1) {
9757 +                rfcsr =  sysRegRead(RF_CSR_CFG);
9758 +                if (! (rfcsr & (u32)RF_CSR_KICK) )
9759 +                        break;
9760 +                if (++i > 10000) {
9761 +                        printk("Warning: Abort rw rf register: too busy\n");
9762 +                        return -1;
9763 +                }
9764 +        }
9765 +
9766 +        rfcsr = (u32)(RF_CSR_KICK | ((reg&0x3f) << 8) | (*data & 0xff));
9767 +        if (write)
9768 +                rfcsr |= 0x10000;
9769 +
9770 +         sysRegWrite(RF_CSR_CFG, cpu_to_le32(rfcsr));
9771 +
9772 +        i = 0;
9773 +        while (1) {
9774 +                rfcsr =  sysRegRead(RF_CSR_CFG);
9775 +                if (! (rfcsr & (u32)RF_CSR_KICK) )
9776 +                        break;
9777 +                if (++i > 10000) {
9778 +                        printk("Warning: still busy\n");
9779 +                        return -1;
9780 +                }
9781 +        }
9782 +
9783 +        rfcsr =  sysRegRead(RF_CSR_CFG);
9784 +
9785 +        if (((rfcsr&0x1f00) >> 8) != (reg & 0x1f)) {
9786 +                printk("Error: rw register failed\n");
9787 +                return -1;
9788 +        }
9789 +        *data = (int)(rfcsr & 0xff);
9790 +
9791 +        return 0;
9792 +}
9793 +
9794 --- /dev/null
9795 +++ b/drivers/net/ethernet/raeth/ra_rfrw.h
9796 @@ -0,0 +1,6 @@
9797 +#ifndef RA_RFRW_H
9798 +#define RA_RFRW_H
9799 +
9800 +int rw_rf_reg(int write, int reg, int *data);
9801 +
9802 +#endif
9803 --- /dev/null
9804 +++ b/drivers/net/ethernet/raeth/raether.c
9805 @@ -0,0 +1,6401 @@
9806 +#include <linux/module.h>
9807 +#include <linux/version.h>
9808 +#include <linux/kernel.h>
9809 +#include <linux/types.h>
9810 +#include <linux/pci.h>
9811 +#include <linux/init.h>
9812 +#include <linux/skbuff.h>
9813 +#include <linux/if_vlan.h>
9814 +#include <linux/if_ether.h>
9815 +#include <linux/fs.h>
9816 +#include <asm/uaccess.h>
9817 +#include <asm/rt2880/surfboardint.h>
9818 +#include <linux/platform_device.h>
9819 +#if defined (CONFIG_RAETH_TSO)
9820 +#include <linux/tcp.h>
9821 +#include <net/ipv6.h>
9822 +#include <linux/ip.h>
9823 +#include <net/ip.h>
9824 +#include <net/tcp.h>
9825 +#include <linux/in.h>
9826 +#include <linux/ppp_defs.h>
9827 +#include <linux/if_pppox.h>
9828 +#endif
9829 +#if defined (CONFIG_RAETH_LRO)
9830 +#include <linux/inet_lro.h>
9831 +#endif
9832 +#include <linux/delay.h>
9833 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
9834 +#include <linux/sched.h>
9835 +#endif
9836 +
9837 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
9838 +#include <asm/rt2880/rt_mmap.h>
9839 +#else
9840 +#include <linux/libata-compat.h>
9841 +#endif
9842 +
9843 +#include "ra2882ethreg.h"
9844 +#include "raether.h"
9845 +#include "ra_mac.h"
9846 +#include "ra_ioctl.h"
9847 +#include "ra_rfrw.h"
9848 +#ifdef CONFIG_RAETH_NETLINK
9849 +#include "ra_netlink.h"
9850 +#endif
9851 +#if defined (CONFIG_RAETH_QOS)
9852 +#include "ra_qos.h"
9853 +#endif
9854 +
9855 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
9856 +#include "../../../net/nat/hw_nat/ra_nat.h"
9857 +#endif
9858 +#if defined(CONFIG_RAETH_PDMA_DVT)
9859 +#include "dvt/raether_pdma_dvt.h"
9860 +#endif  /* CONFIG_RAETH_PDMA_DVT */
9861 +
9862 +static int fe_irq = 0;
9863 +
9864 +#if defined (TASKLET_WORKQUEUE_SW)
9865 +int init_schedule;
9866 +int working_schedule;
9867 +#endif
9868 +
9869 +#ifdef CONFIG_RAETH_NAPI
9870 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
9871 +static int raeth_clean(struct napi_struct *napi, int budget);
9872 +#else
9873 +static int raeth_clean(struct net_device *dev, int *budget);
9874 +#endif
9875 +
9876 +static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do);
9877 +#else
9878 +static int rt2880_eth_recv(struct net_device* dev);
9879 +#endif
9880 +
9881 +#if !defined(CONFIG_RA_NAT_NONE)
9882 +/* bruce+
9883 + */
9884 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
9885 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
9886 +#endif
9887 +
9888 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
9889 +/* Qwert+
9890 + */
9891 +#include <asm/mipsregs.h>
9892 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
9893 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
9894 +#endif /* CONFIG_RA_CLASSIFIER */
9895 +
9896 +#if defined (CONFIG_RALINK_RT3052_MP2)
9897 +int32_t mcast_rx(struct sk_buff * skb);
9898 +int32_t mcast_tx(struct sk_buff * skb);
9899 +#endif
9900 +
9901 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf) 
9902 +{
9903 +       /* TODO */
9904 +       return 0;
9905 +}
9906 +
9907 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
9908 +void setup_internal_gsw(void);
9909 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
9910 +void apll_xtal_enable(void);
9911 +#define REGBIT(x, n)              (x << n)
9912 +#endif
9913 +#endif
9914 +
9915 +#if defined (CONFIG_MT7623_FPGA)
9916 +void setup_fpga_gsw(void);
9917 +#endif
9918 +
9919 +/* gmac driver feature set config */
9920 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
9921 +#undef DELAY_INT
9922 +#else
9923 +#if defined     (CONFIG_ARCH_MT7623)
9924 +#undef DELAY_INT
9925 +#else
9926 +#define DELAY_INT       1
9927 +#endif
9928 +#endif
9929 +
9930 +//#define CONFIG_UNH_TEST
9931 +/* end of config */
9932 +
9933 +#if defined (CONFIG_RAETH_JUMBOFRAME)
9934 +#define        MAX_RX_LENGTH   4096
9935 +#else
9936 +#define        MAX_RX_LENGTH   1536
9937 +#endif
9938 +
9939 +struct net_device              *dev_raether;
9940 +
9941 +static int rx_dma_owner_idx; 
9942 +static int rx_dma_owner_idx0;
9943 +#if defined (CONFIG_RAETH_HW_LRO)
9944 +static int rx_dma_owner_lro1;
9945 +static int rx_dma_owner_lro2;
9946 +static int rx_dma_owner_lro3;
9947 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
9948 +static int rx_dma_owner_idx1;
9949 +#if defined(CONFIG_ARCH_MT7623)
9950 +static int rx_dma_owner_idx2;
9951 +static int rx_dma_owner_idx3;
9952 +#endif  /* CONFIG_ARCH_MT7623 */
9953 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
9954 +int rx_calc_idx1;
9955 +#endif
9956 +#endif
9957 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
9958 +int rx_calc_idx0;
9959 +#endif
9960 +static int pending_recv;
9961 +static struct PDMA_rxdesc      *rx_ring;
9962 +unsigned long tx_ring_full=0;
9963 +
9964 +#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
9965 +    defined(CONFIG_RALINK_MT7620)
9966 +unsigned short p0_rx_good_cnt = 0;
9967 +unsigned short p1_rx_good_cnt = 0;
9968 +unsigned short p2_rx_good_cnt = 0;
9969 +unsigned short p3_rx_good_cnt = 0;
9970 +unsigned short p4_rx_good_cnt = 0;
9971 +unsigned short p5_rx_good_cnt = 0;
9972 +unsigned short p6_rx_good_cnt = 0;
9973 +unsigned short p0_tx_good_cnt = 0;
9974 +unsigned short p1_tx_good_cnt = 0;
9975 +unsigned short p2_tx_good_cnt = 0;
9976 +unsigned short p3_tx_good_cnt = 0;
9977 +unsigned short p4_tx_good_cnt = 0;
9978 +unsigned short p5_tx_good_cnt = 0;
9979 +unsigned short p6_tx_good_cnt = 0;
9980 +
9981 +unsigned short p0_rx_byte_cnt = 0;
9982 +unsigned short p1_rx_byte_cnt = 0;
9983 +unsigned short p2_rx_byte_cnt = 0;
9984 +unsigned short p3_rx_byte_cnt = 0;
9985 +unsigned short p4_rx_byte_cnt = 0;
9986 +unsigned short p5_rx_byte_cnt = 0;
9987 +unsigned short p6_rx_byte_cnt = 0;
9988 +unsigned short p0_tx_byte_cnt = 0;
9989 +unsigned short p1_tx_byte_cnt = 0;
9990 +unsigned short p2_tx_byte_cnt = 0;
9991 +unsigned short p3_tx_byte_cnt = 0;
9992 +unsigned short p4_tx_byte_cnt = 0;
9993 +unsigned short p5_tx_byte_cnt = 0;
9994 +unsigned short p6_tx_byte_cnt = 0;
9995 +
9996 +#if defined(CONFIG_RALINK_MT7620)
9997 +unsigned short p7_rx_good_cnt = 0;
9998 +unsigned short p7_tx_good_cnt = 0;
9999 +
10000 +unsigned short p7_rx_byte_cnt = 0;
10001 +unsigned short p7_tx_byte_cnt = 0;
10002 +#endif
10003 +#endif
10004 +
10005 +
10006 +
10007 +
10008 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
10009 +#include "ra_ethtool.h"
10010 +extern struct ethtool_ops      ra_ethtool_ops;
10011 +#ifdef CONFIG_PSEUDO_SUPPORT
10012 +extern struct ethtool_ops      ra_virt_ethtool_ops;
10013 +#endif // CONFIG_PSEUDO_SUPPORT //
10014 +#endif // CONFIG_ETHTOOL //
10015 +
10016 +#ifdef CONFIG_RALINK_VISTA_BASIC
10017 +int is_switch_175c = 1;
10018 +#endif
10019 +
10020 +unsigned int M2Q_table[64] = {0};
10021 +unsigned int lan_wan_separate = 0;
10022 +
10023 +#if defined(CONFIG_HW_SFQ)
10024 +unsigned int web_sfq_enable = 0;
10025 +EXPORT_SYMBOL(web_sfq_enable);
10026 +#endif
10027 +
10028 +EXPORT_SYMBOL(M2Q_table);
10029 +EXPORT_SYMBOL(lan_wan_separate);
10030 +#if defined (CONFIG_RAETH_LRO)
10031 +unsigned int lan_ip;
10032 +struct lro_para_struct lro_para; 
10033 +int lro_flush_needed;
10034 +extern char const *nvram_get(int index, char *name);
10035 +#endif
10036 +
10037 +#define KSEG1                   0xa0000000
10038 +#if defined (CONFIG_MIPS)
10039 +#define PHYS_TO_VIRT(x)         ((void *)((x) | KSEG1))
10040 +#define VIRT_TO_PHYS(x)         ((unsigned long)(x) & ~KSEG1)
10041 +#else
10042 +#define PHYS_TO_VIRT(x)         phys_to_virt(x)
10043 +#define VIRT_TO_PHYS(x)         virt_to_phys(x)
10044 +#endif
10045 +
10046 +extern int fe_dma_init(struct net_device *dev);
10047 +extern int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no);
10048 +extern void ei_xmit_housekeeping(unsigned long unused);
10049 +extern inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no);
10050 +#if defined (CONFIG_RAETH_HW_LRO)
10051 +extern int fe_hw_lro_init(struct net_device *dev);
10052 +#endif  /* CONFIG_RAETH_HW_LRO */
10053 +
10054 +#if 0 
10055 +void skb_dump(struct sk_buff* sk) {
10056 +        unsigned int i;
10057 +
10058 +        printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
10059 +                sk->dev?sk->dev->name:"ip stack",sk->len,sk->truesize,
10060 +                skb_headroom(sk),skb_tailroom(sk));
10061 +
10062 +        //for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) {
10063 +        for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) {
10064 +                if((i % 20) == 0)
10065 +                        printk("\n");
10066 +                if(i==(unsigned int)sk->data) printk("{");
10067 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
10068 +                if(i==(unsigned int)sk->transport_header) printk("#");
10069 +                if(i==(unsigned int)sk->network_header) printk("|");
10070 +                if(i==(unsigned int)sk->mac_header) printk("*");
10071 +#else
10072 +                if(i==(unsigned int)sk->h.raw) printk("#");
10073 +                if(i==(unsigned int)sk->nh.raw) printk("|");
10074 +                if(i==(unsigned int)sk->mac.raw) printk("*");
10075 +#endif
10076 +                printk("%02X-",*((unsigned char*)i));
10077 +                if(i==(unsigned int)sk->tail) printk("}");
10078 +        }
10079 +        printk("\n");
10080 +}
10081 +#endif
10082 +
10083 +
10084 +
10085 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10086 +int isICPlusGigaPHY(int ge)
10087 +{
10088 +       u32 phy_id0 = 0, phy_id1 = 0;
10089 +
10090 +#ifdef CONFIG_GE2_RGMII_AN
10091 +       if (ge == 2) {
10092 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10093 +                       printk("\n Read PhyID 1 is Fail!!\n");
10094 +                       phy_id0 =0;
10095 +               }
10096 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10097 +                       printk("\n Read PhyID 1 is Fail!!\n");
10098 +                       phy_id1 = 0;
10099 +               }
10100 +       }
10101 +       else
10102 +#endif
10103 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10104 +       {
10105 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10106 +                       printk("\n Read PhyID 0 is Fail!!\n");
10107 +                       phy_id0 =0;
10108 +               }
10109 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10110 +                       printk("\n Read PhyID 0 is Fail!!\n");
10111 +                       phy_id1 = 0;
10112 +               }
10113 +       }
10114 +#endif
10115 +
10116 +       if ((phy_id0 == EV_ICPLUS_PHY_ID0) && ((phy_id1 & 0xfff0) == EV_ICPLUS_PHY_ID1))
10117 +               return 1;
10118 +       return 0;
10119 +}
10120 +
10121 +
10122 +int isMarvellGigaPHY(int ge)
10123 +{
10124 +       u32 phy_id0 = 0, phy_id1 = 0;
10125 +
10126 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
10127 +       if (ge == 2) {
10128 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10129 +                       printk("\n Read PhyID 1 is Fail!!\n");
10130 +                       phy_id0 =0;
10131 +               }
10132 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10133 +                       printk("\n Read PhyID 1 is Fail!!\n");
10134 +                       phy_id1 = 0;
10135 +               }
10136 +       }
10137 +       else
10138 +#endif
10139 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10140 +       {
10141 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10142 +                       printk("\n Read PhyID 0 is Fail!!\n");
10143 +                       phy_id0 =0;
10144 +               }
10145 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10146 +                       printk("\n Read PhyID 0 is Fail!!\n");
10147 +                       phy_id1 = 0;
10148 +               }
10149 +       }
10150 +#endif
10151 +               ;
10152 +       if ((phy_id0 == EV_MARVELL_PHY_ID0) && (phy_id1 == EV_MARVELL_PHY_ID1))
10153 +               return 1;
10154 +       return 0;
10155 +}
10156 +
10157 +int isVtssGigaPHY(int ge)
10158 +{
10159 +       u32 phy_id0 = 0, phy_id1 = 0;
10160 +
10161 +#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
10162 +       if (ge == 2) {
10163 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
10164 +                       printk("\n Read PhyID 1 is Fail!!\n");
10165 +                       phy_id0 =0;
10166 +               }
10167 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
10168 +                       printk("\n Read PhyID 1 is Fail!!\n");
10169 +                       phy_id1 = 0;
10170 +               }
10171 +       }
10172 +       else
10173 +#endif
10174 +#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
10175 +       {
10176 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
10177 +                       printk("\n Read PhyID 0 is Fail!!\n");
10178 +                       phy_id0 =0;
10179 +               }
10180 +               if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
10181 +                       printk("\n Read PhyID 0 is Fail!!\n");
10182 +                       phy_id1 = 0;
10183 +               }
10184 +       }
10185 +#endif
10186 +               ;
10187 +       if ((phy_id0 == EV_VTSS_PHY_ID0) && (phy_id1 == EV_VTSS_PHY_ID1))
10188 +               return 1;
10189 +       return 0;
10190 +}
10191 +#endif
10192 +
10193 +/*
10194 + * Set the hardware MAC address.
10195 + */
10196 +static int ei_set_mac_addr(struct net_device *dev, void *p)
10197 +{
10198 +       struct sockaddr *addr = p;
10199 +
10200 +       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10201 +
10202 +       if(netif_running(dev))
10203 +               return -EBUSY;
10204 +
10205 +        ra2880MacAddressSet(addr->sa_data);
10206 +       return 0;
10207 +}
10208 +
10209 +#ifdef CONFIG_PSEUDO_SUPPORT
10210 +static int ei_set_mac2_addr(struct net_device *dev, void *p)
10211 +{
10212 +       struct sockaddr *addr = p;
10213 +
10214 +       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10215 +
10216 +       if(netif_running(dev))
10217 +               return -EBUSY;
10218 +
10219 +        ra2880Mac2AddressSet(addr->sa_data);
10220 +       return 0;
10221 +}
10222 +#endif
10223 +
10224 +void set_fe_dma_glo_cfg(void)
10225 +{
10226 +        int dma_glo_cfg=0;
10227 +#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
10228 +    defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
10229 +        int fe_glo_cfg=0;
10230 +#endif
10231 +
10232 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) 
10233 +       dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_32DWORDS);
10234 +#elif defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621)
10235 +       dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS);
10236 +#elif defined (CONFIG_ARCH_MT7623)
10237 +       dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS | ADMA_RX_BT_SIZE_32DWORDS);
10238 +#else 
10239 +       dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_4DWORDS);
10240 +#endif
10241 +
10242 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10243 +       dma_glo_cfg |= (RX_2B_OFFSET);
10244 +#endif
10245 +
10246 +#if defined (CONFIG_32B_DESC)
10247 +       dma_glo_cfg |= (DESC_32B_EN);
10248 +#endif
10249 +       sysRegWrite(DMA_GLO_CFG, dma_glo_cfg);
10250 +#ifdef CONFIG_RAETH_QDMA       
10251 +       sysRegWrite(QDMA_GLO_CFG, dma_glo_cfg);
10252 +#endif
10253 +
10254 +       /* only the following chipset need to set it */
10255 +#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
10256 +    defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
10257 +       //set 1us timer count in unit of clock cycle
10258 +       fe_glo_cfg = sysRegRead(FE_GLO_CFG);
10259 +       fe_glo_cfg &= ~(0xff << 8); //clear bit8-bit15
10260 +       fe_glo_cfg |= (((get_surfboard_sysclk()/1000000)) << 8);
10261 +       sysRegWrite(FE_GLO_CFG, fe_glo_cfg);
10262 +#endif
10263 +}
10264 +
10265 +int forward_config(struct net_device *dev)
10266 +{
10267 +       
10268 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
10269 +
10270 +       /* RT5350: No GDMA, PSE, CDMA, PPE */
10271 +       unsigned int sdmVal;
10272 +       sdmVal = sysRegRead(SDM_CON);
10273 +
10274 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10275 +       sdmVal |= 0x7<<16; // UDPCS, TCPCS, IPCS=1
10276 +#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
10277 +
10278 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10279 +       sdmVal |= 0x1<<20; // TCI_81XX
10280 +#endif // CONFIG_RAETH_SPECIAL_TAG //
10281 +
10282 +       sysRegWrite(SDM_CON, sdmVal);
10283 +
10284 +#else //Non RT5350 chipset
10285 +
10286 +       unsigned int    regVal, regCsg;
10287 +
10288 +#ifdef CONFIG_PSEUDO_SUPPORT
10289 +       unsigned int    regVal2;
10290 +#endif
10291 +
10292 +#ifdef CONFIG_RAETH_HW_VLAN_TX
10293 +#if defined(CONFIG_RALINK_MT7620)
10294 +       /* frame engine will push a VLAN tag according to the VIDX field in the Tx descriptor. */
10295 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x430) = 0x00010000;
10296 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x434) = 0x00030002;
10297 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x438) = 0x00050004;
10298 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x43C) = 0x00070006;
10299 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x440) = 0x00090008;
10300 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x444) = 0x000b000a;
10301 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x448) = 0x000d000c;
10302 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x44C) = 0x000f000e;
10303 +#else
10304 +       /* 
10305 +        * VLAN_IDX 0 = VLAN_ID 0
10306 +        * .........
10307 +        * VLAN_IDX 15 = VLAN ID 15
10308 +        *
10309 +        */
10310 +       /* frame engine will push a VLAN tag according to the VIDX field in the Tx descriptor. */
10311 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xa8) = 0x00010000;
10312 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xac) = 0x00030002;
10313 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb0) = 0x00050004;
10314 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb4) = 0x00070006;
10315 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb8) = 0x00090008;
10316 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xbc) = 0x000b000a;
10317 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc0) = 0x000d000c;
10318 +       *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc4) = 0x000f000e;
10319 +#endif
10320 +#endif
10321 +
10322 +       regVal = sysRegRead(GDMA1_FWD_CFG);
10323 +       regCsg = sysRegRead(CDMA_CSG_CFG);
10324 +
10325 +#ifdef CONFIG_PSEUDO_SUPPORT
10326 +       regVal2 = sysRegRead(GDMA2_FWD_CFG);
10327 +#endif
10328 +
10329 +       //set unicast/multicast/broadcast frame to cpu
10330 +#if defined (CONFIG_RALINK_MT7620)
10331 +       /* GDMA1 frames destination port is port0 CPU*/
10332 +       regVal &= ~0x7;
10333 +#else
10334 +       regVal &= ~0xFFFF;
10335 +       regVal |= GDMA1_FWD_PORT;
10336 +#endif
10337 +       regCsg &= ~0x7;
10338 +
10339 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10340 +       regVal |= (1 << 24); //GDM1_TCI_81xx
10341 +#endif
10342 +
10343 +
10344 +#ifdef CONFIG_RAETH_HW_VLAN_TX
10345 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
10346 +       dev->features |= NETIF_F_HW_VLAN_TX;
10347 +#else
10348 +       dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
10349 +#endif
10350 +#endif
10351 +#ifdef CONFIG_RAETH_HW_VLAN_RX
10352 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
10353 +       dev->features |= NETIF_F_HW_VLAN_RX;
10354 +#else
10355 +       dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
10356 +#endif
10357 +#endif
10358 +
10359 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10360 +       //enable ipv4 header checksum check
10361 +       regVal |= GDM1_ICS_EN;
10362 +       regCsg |= ICS_GEN_EN;
10363 +
10364 +       //enable tcp checksum check
10365 +       regVal |= GDM1_TCS_EN;
10366 +       regCsg |= TCS_GEN_EN;
10367 +
10368 +       //enable udp checksum check
10369 +       regVal |= GDM1_UCS_EN;
10370 +       regCsg |= UCS_GEN_EN;
10371 +
10372 +#ifdef CONFIG_PSEUDO_SUPPORT
10373 +       regVal2 &= ~0xFFFF;
10374 +       regVal2 |= GDMA2_FWD_PORT;
10375 +  
10376 +       regVal2 |= GDM1_ICS_EN;
10377 +       regVal2 |= GDM1_TCS_EN;
10378 +       regVal2 |= GDM1_UCS_EN;
10379 +#endif
10380 +
10381 +#if defined (CONFIG_RAETH_HW_LRO) 
10382 +    dev->features |= NETIF_F_HW_CSUM;
10383 +#else
10384 +       dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
10385 +#endif  /* CONFIG_RAETH_HW_LRO */
10386 +//#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
10387 +//     dev->vlan_features |= NETIF_F_IP_CSUM;
10388 +//#endif
10389 +
10390 +#if defined(CONFIG_RALINK_MT7620)
10391 +#if defined (CONFIG_RAETH_TSO)
10392 +       if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
10393 +               dev->features |= NETIF_F_SG;
10394 +               dev->features |= NETIF_F_TSO;
10395 +       }
10396 +#endif // CONFIG_RAETH_TSO //
10397 +
10398 +#if defined (CONFIG_RAETH_TSOV6)
10399 +       if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
10400 +               dev->features |= NETIF_F_TSO6;
10401 +               dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
10402 +       }
10403 +#endif // CONFIG_RAETH_TSOV6 //
10404 +#else
10405 +#if defined (CONFIG_RAETH_TSO)
10406 +       dev->features |= NETIF_F_SG;
10407 +       dev->features |= NETIF_F_TSO;
10408 +#endif // CONFIG_RAETH_TSO //
10409 +
10410 +#if defined (CONFIG_RAETH_TSOV6)
10411 +       dev->features |= NETIF_F_TSO6;
10412 +       dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
10413 +#endif // CONFIG_RAETH_TSOV6 //
10414 +#endif // CONFIG_RALINK_MT7620 //
10415 +#else // Checksum offload disabled
10416 +
10417 +       //disable ipv4 header checksum check
10418 +       regVal &= ~GDM1_ICS_EN;
10419 +       regCsg &= ~ICS_GEN_EN;
10420 +
10421 +       //disable tcp checksum check
10422 +       regVal &= ~GDM1_TCS_EN;
10423 +       regCsg &= ~TCS_GEN_EN;
10424 +
10425 +       //disable udp checksum check
10426 +       regVal &= ~GDM1_UCS_EN;
10427 +       regCsg &= ~UCS_GEN_EN;
10428 +
10429 +#ifdef CONFIG_PSEUDO_SUPPORT
10430 +       regVal2 &= ~GDM1_ICS_EN;
10431 +       regVal2 &= ~GDM1_TCS_EN;
10432 +       regVal2 &= ~GDM1_UCS_EN;
10433 +#endif
10434 +
10435 +       dev->features &= ~NETIF_F_IP_CSUM; /* disable checksum TCP/UDP over IPv4 */
10436 +#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
10437 +
10438 +#ifdef CONFIG_RAETH_JUMBOFRAME
10439 +       regVal |= GDM1_JMB_EN;
10440 +#ifdef CONFIG_PSEUDO_SUPPORT
10441 +       regVal2 |= GDM1_JMB_EN;
10442 +#endif
10443 +#endif
10444 +
10445 +       sysRegWrite(GDMA1_FWD_CFG, regVal);
10446 +       sysRegWrite(CDMA_CSG_CFG, regCsg);
10447 +#ifdef CONFIG_PSEUDO_SUPPORT
10448 +       sysRegWrite(GDMA2_FWD_CFG, regVal2);
10449 +#endif
10450 +
10451 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
10452 +        dev->vlan_features = dev->features;
10453 +#endif
10454 +
10455 +/*
10456 + *     PSE_FQ_CFG register definition -
10457 + *
10458 + *     Define max free queue page count in PSE. (31:24)
10459 + *     RT2883/RT3883 - 0xff908000 (255 pages)
10460 + *     RT3052 - 0x80504000 (128 pages)
10461 + *     RT2880 - 0x80504000 (128 pages)
10462 + *
10463 + *     Each page holds 128 bytes.
10464 + *
10465 + *     23:16 - free queue flow control release threshold
10466 + *     15:8  - free queue flow control assertion threshold
10467 + *     7:0   - free queue empty threshold
10468 + *
10469 + *     The register affects QOS correctness in frame engine!
10470 + */
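+/*
+ * Decoding the example values quoted above: 0xff908000 = up to 0xff (255)
+ * free pages, FC release threshold 0x90, FC assertion threshold 0x80,
+ * empty threshold 0x00; 0x80504000 = 128 pages with thresholds 0x50/0x40/0x00.
+ */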
10471 +
10472 +#if defined(CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
10473 +       sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_RT2883_PSE_FQ_CFG));
10474 +#elif defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) ||  \
10475 +      defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
10476 +      defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
10477 +      defined (CONFIG_RALINK_MT7628) || defined(CONFIG_ARCH_MT7623)
10478 +        /*use default value*/
10479 +#else
10480 +       sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_PSE_FQFC_CFG));
10481 +#endif
10482 +
10483 +       /*
10484 +        *FE_RST_GLO register definition -
10485 +        *Bit 0: PSE Reset
10486 +        *Reset PSE after re-programming PSE_FQ_CFG.
10487 +        */
10488 +       regVal = 0x1;
10489 +       sysRegWrite(FE_RST_GL, regVal);
10490 +       sysRegWrite(FE_RST_GL, 0);      // update for RSTCTL issue
10491 +
10492 +       regCsg = sysRegRead(CDMA_CSG_CFG);
10493 +       printk("CDMA_CSG_CFG = %0X\n",regCsg);
10494 +       regVal = sysRegRead(GDMA1_FWD_CFG);
10495 +       printk("GDMA1_FWD_CFG = %0X\n",regVal);
10496 +
10497 +#ifdef CONFIG_PSEUDO_SUPPORT
10498 +       regVal = sysRegRead(GDMA2_FWD_CFG);
10499 +       printk("GDMA2_FWD_CFG = %0X\n",regVal);
10500 +#endif
10501 +#endif
10502 +       return 1;
10503 +}
10504 +
10505 +#ifdef CONFIG_RAETH_LRO
10506 +static int
10507 +rt_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
10508 +                       u64 *hdr_flags, void *priv)
10509 +{
10510 +        struct iphdr *iph = NULL;
10511 +       int    vhdr_len = 0;
10512 +
10513 +        /*
10514 +         * Make sure this packet is IPv4 TCP addressed to the local LAN
10515 +         * IP; a single 802.1Q tag, if present, is skipped over.
10516 +         */
10517 +       if (skb->protocol == 0x0081) {
10518 +               vhdr_len = VLAN_HLEN;
10519 +       }
10520 +
10521 +       iph = (struct iphdr *)(skb->data + vhdr_len);
10522 +       if (iph->daddr != lro_para.lan_ip1) {
10523 +               return -1;
10524 +       }
10525 +
10526 +       if(iph->protocol != IPPROTO_TCP) {
10527 +               return -1;
10528 +       } else {
10529 +               *iphdr = iph;
10530 +               *tcph = skb->data + (iph->ihl << 2) + vhdr_len;
10531 +               *hdr_flags = LRO_IPV4 | LRO_TCP;
10532 +
10533 +               lro_flush_needed = 1;
10534 +               return 0;
10535 +       }
10536 +}
10537 +#endif // CONFIG_RAETH_LRO //
10538 +
10539 +#ifdef CONFIG_RAETH_NAPI
10540 +static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do)
10541 +#else
10542 +static int rt2880_eth_recv(struct net_device* dev)
10543 +#endif
10544 +{
10545 +       struct sk_buff  *skb, *rx_skb;
10546 +       unsigned int    length = 0;
10547 +       unsigned long   RxProcessed;
10548 +
10549 +       int bReschedule = 0;
10550 +       END_DEVICE*     ei_local = netdev_priv(dev);
10551 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) || defined (CONFIG_RAETH_HW_LRO)
10552 +       int rx_ring_no=0;
10553 +#endif
10554 +
10555 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10556 +       struct vlan_ethhdr *veth=NULL;
10557 +#endif
10558 +
10559 +#ifdef CONFIG_PSEUDO_SUPPORT
10560 +       PSEUDO_ADAPTER *pAd;
10561 +#endif
10562 +
10563 +       RxProcessed = 0;
10564 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10565 +       rx_dma_owner_idx0 = (rx_calc_idx0 + 1) % NUM_RX_DESC;
10566 +#else
10567 +       rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
10568 +#endif
10569 +
10570 +#if defined (CONFIG_32B_DESC)
10571 +       dma_cache_sync(NULL, &ei_local->rx_ring0[rx_dma_owner_idx0], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
10572 +#endif
10573 +#if defined (CONFIG_RAETH_HW_LRO)
10574 +       rx_dma_owner_lro1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
10575 +       rx_dma_owner_lro2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
10576 +       rx_dma_owner_lro3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
10577 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10578 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10579 +       rx_dma_owner_idx1 = (rx_calc_idx1 + 1) % NUM_RX_DESC;
10580 +#else
10581 +       rx_dma_owner_idx1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_RX_DESC;
10582 +#endif  /* CONFIG_RAETH_RW_PDMAPTR_FROM_VAR */
10583 +#if defined(CONFIG_ARCH_MT7623)
10584 +    rx_dma_owner_idx2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_RX_DESC;
10585 +    rx_dma_owner_idx3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_RX_DESC;
10586 +#endif
10587 +#if defined (CONFIG_32B_DESC)
10588 +       dma_cache_sync(NULL, &ei_local->rx_ring1[rx_dma_owner_idx1], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
10589 +#endif
10590 +#endif
10591 +       for ( ; ; ) {
10592 +
10593 +
10594 +#ifdef CONFIG_RAETH_NAPI
10595 +                if(*work_done >= work_to_do)
10596 +                        break;
10597 +                (*work_done)++;
10598 +#else
10599 +               if (RxProcessed++ > NUM_RX_MAX_PROCESS)
10600 +                {
10601 +                        // need to reschedule rx handle
10602 +                        bReschedule = 1;
10603 +                        break;
10604 +                }
10605 +#endif
10606 +
10607 +
10608 +#if defined (CONFIG_RAETH_HW_LRO)
10609 +               if (ei_local->rx_ring3[rx_dma_owner_lro3].rxd_info2.DDONE_bit == 1)  {
10610 +                   rx_ring = ei_local->rx_ring3;
10611 +                   rx_dma_owner_idx = rx_dma_owner_lro3;
10612 +               //    printk("rx_dma_owner_lro3=%x\n",rx_dma_owner_lro3);
10613 +                   rx_ring_no=3;
10614 +               }
10615 +               else if (ei_local->rx_ring2[rx_dma_owner_lro2].rxd_info2.DDONE_bit == 1)  {
10616 +                   rx_ring = ei_local->rx_ring2;
10617 +                   rx_dma_owner_idx = rx_dma_owner_lro2;
10618 +               //    printk("rx_dma_owner_lro2=%x\n",rx_dma_owner_lro2);
10619 +                   rx_ring_no=2;
10620 +               }
10621 +               else if (ei_local->rx_ring1[rx_dma_owner_lro1].rxd_info2.DDONE_bit == 1)  {
10622 +                   rx_ring = ei_local->rx_ring1;
10623 +                   rx_dma_owner_idx = rx_dma_owner_lro1;
10624 +               //    printk("rx_dma_owner_lro1=%x\n",rx_dma_owner_lro1);
10625 +                   rx_ring_no=1;
10626 +               } 
10627 +               else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1)  {
10628 +                   rx_ring = ei_local->rx_ring0;
10629 +                   rx_dma_owner_idx = rx_dma_owner_idx0;
10630 +                //   printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
10631 +                   rx_ring_no=0;
10632 +               } else {
10633 +                   break;
10634 +               }
10635 +    #if defined (CONFIG_RAETH_HW_LRO_DBG)
10636 +        HwLroStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.LRO_AGG_CNT, \
10637 +            (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0);
10638 +    #endif
10639 +    #if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
10640 +        HwLroFlushStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.REV);
10641 +    #endif
10642 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10643 +               if (ei_local->rx_ring1[rx_dma_owner_idx1].rxd_info2.DDONE_bit == 1)  {
10644 +                   rx_ring = ei_local->rx_ring1;
10645 +                   rx_dma_owner_idx = rx_dma_owner_idx1;
10646 +               //    printk("rx_dma_owner_idx1=%x\n",rx_dma_owner_idx1);
10647 +                   rx_ring_no=1;
10648 +               }
10649 +#if defined(CONFIG_ARCH_MT7623)
10650 +        else if (ei_local->rx_ring2[rx_dma_owner_idx2].rxd_info2.DDONE_bit == 1)  {
10651 +            rx_ring = ei_local->rx_ring2;
10652 +            rx_dma_owner_idx = rx_dma_owner_idx2;
10653 +        //    printk("rx_dma_owner_idx2=%x\n",rx_dma_owner_idx2);
10654 +            rx_ring_no=2;
10655 +        }
10656 +        else if (ei_local->rx_ring3[rx_dma_owner_idx3].rxd_info2.DDONE_bit == 1)  {
10657 +                   rx_ring = ei_local->rx_ring3;
10658 +                   rx_dma_owner_idx = rx_dma_owner_idx3;
10659 +               //    printk("rx_dma_owner_idx3=%x\n",rx_dma_owner_idx3);
10660 +                   rx_ring_no=3;
10661 +               }               
10662 +#endif  /* CONFIG_ARCH_MT7623 */
10663 +        else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1)  {
10664 +                   rx_ring = ei_local->rx_ring0;
10665 +                   rx_dma_owner_idx = rx_dma_owner_idx0;
10666 +                //   printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
10667 +                   rx_ring_no=0;
10668 +               } else {
10669 +                   break;
10670 +               }
10671 +#else
10672 +
10673 +               if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1)  {
10674 +                   rx_ring = ei_local->rx_ring0;
10675 +                   rx_dma_owner_idx = rx_dma_owner_idx0;
10676 +               } else {
10677 +                   break;
10678 +               }
10679 +#endif
10680 +
10681 +#if defined (CONFIG_32B_DESC)
10682 +               prefetch(&rx_ring[(rx_dma_owner_idx + 1) % NUM_RX_DESC]);
10683 +#endif
10684 +               /* skb processing */
10685 +#if defined (CONFIG_RAETH_HW_LRO)
10686 +        length = (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
10687 +#else
10688 +               length = rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
10689 +#endif  /* CONFIG_RAETH_HW_LRO */
10690 +
10691 +#if defined (CONFIG_ARCH_MT7623)
10692 +               dma_unmap_single(NULL, rx_ring[rx_dma_owner_idx].rxd_info1.PDP0, length, DMA_FROM_DEVICE);
10693 +#endif
10694 +
10695 +#if defined (CONFIG_RAETH_HW_LRO)
10696 +               if(rx_ring_no==3) {
10697 +                   rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
10698 +                   rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
10699 +               }
10700 +               else if(rx_ring_no==2) {
10701 +                   rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
10702 +                   rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
10703 +               }
10704 +               else if(rx_ring_no==1) {
10705 +                   rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
10706 +                   rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
10707 +               } 
10708 +               else {
10709 +                   rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10710 +                   rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10711 +               }
10712 +    #if defined(CONFIG_RAETH_PDMA_DVT)
10713 +        raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
10714 +    #endif  /* CONFIG_RAETH_PDMA_DVT */
10715 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10716 +               if(rx_ring_no==1) {
10717 +                   rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
10718 +                   rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
10719 +               } 
10720 +#if defined(CONFIG_ARCH_MT7623)
10721 +               else if(rx_ring_no==2) {
10722 +                   rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
10723 +                   rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
10724 +               }
10725 +        else if(rx_ring_no==3) {
10726 +                   rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
10727 +                   rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
10728 +               }
10729 +#endif  /* CONFIG_ARCH_MT7623 */
10730 +        else {
10731 +                   rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10732 +                   rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10733 +               }
10734 +    #if defined(CONFIG_RAETH_PDMA_DVT)
10735 +        raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
10736 +    #endif  /* CONFIG_RAETH_PDMA_DVT */
10737 +#else
10738 +               rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
10739 +               rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
10740 +    #if defined(CONFIG_RAETH_PDMA_DVT)
10741 +        raeth_pdma_rx_desc_dvt( ei_local, rx_dma_owner_idx0 );
10742 +    #endif  /* CONFIG_RAETH_PDMA_DVT */
10743 +#endif
10744 +               rx_skb->len     = length;
10745 +/*TODO*/
10746 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10747 +               rx_skb->data += NET_IP_ALIGN;
10748 +#endif
10749 +               rx_skb->tail    = rx_skb->data + length;
10750 +
10751 +#ifdef CONFIG_PSEUDO_SUPPORT
10752 +               if(rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10753 +                   if(ei_local->PseudoDev!=NULL) {
10754 +                       rx_skb->dev       = ei_local->PseudoDev;
10755 +                       rx_skb->protocol  = eth_type_trans(rx_skb,ei_local->PseudoDev);
10756 +                   }else {
10757 +                       printk("ERROR: PseudoDev is not initialized yet, but a packet was received from GMAC2\n");
10758 +                   }
10759 +               }else{
10760 +                   rx_skb->dev           = dev;
10761 +                   rx_skb->protocol      = eth_type_trans(rx_skb,dev);
10762 +               }
10763 +#else
10764 +               rx_skb->dev       = dev;
10765 +               rx_skb->protocol  = eth_type_trans(rx_skb,dev);
10766 +#endif
10767 +
10768 +#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
10769 +#if defined (CONFIG_PDMA_NEW)
10770 +               if(rx_ring[rx_dma_owner_idx].rxd_info4.L4VLD) {
10771 +                       rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
10772 +               }else {
10773 +                   rx_skb->ip_summed = CHECKSUM_NONE;
10774 +               }
10775 +#else
10776 +               if(rx_ring[rx_dma_owner_idx].rxd_info4.IPFVLD_bit) {
10777 +                       rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
10778 +               }else { 
10779 +                   rx_skb->ip_summed = CHECKSUM_NONE;
10780 +               }
10781 +#endif
10782 +#else
10783 +                   rx_skb->ip_summed = CHECKSUM_NONE;
10784 +#endif
10785 +
10786 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
10787 +               /* Qwert+
10788 +                */
10789 +               if(ra_classifier_hook_rx!= NULL)
10790 +               {
10791 +#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
10792 +                       ra_classifier_hook_rx(rx_skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
10793 +#else                  
10794 +                       ra_classifier_hook_rx(rx_skb, read_c0_count());
10795 +#endif                 
10796 +               }
10797 +#endif /* CONFIG_RA_CLASSIFIER */
10798 +
10799 +#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
10800 +               if(ra_sw_nat_hook_rx != NULL) {
10801 +                   FOE_MAGIC_TAG(rx_skb)= FOE_MAGIC_GE;
10802 +                   *(uint32_t *)(FOE_INFO_START_ADDR(rx_skb)+2) = *(uint32_t *)&rx_ring[rx_dma_owner_idx].rxd_info4;
10803 +                   FOE_ALG(rx_skb) = 0;
10804 +               }
10805 +#endif
10806 +
10807 +               /* We have to check that the free memory size is big enough
10808 +                * before passing the packet to the CPU. */
10809 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
10810 +#if defined (CONFIG_RAETH_HW_LRO)
10811 +            if( rx_ring != ei_local->rx_ring0 )
10812 +                skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10813 +            else
10814 +#endif  /* CONFIG_RAETH_HW_LRO */
10815 +                skb = skbmgr_dev_alloc_skb2k();
10816 +#else
10817 +#if defined (CONFIG_RAETH_HW_LRO)
10818 +        if( rx_ring != ei_local->rx_ring0 )
10819 +            skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10820 +        else
10821 +#endif  /* CONFIG_RAETH_HW_LRO */
10822 +               skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
10823 +#endif
10824 +
10825 +               if (unlikely(skb == NULL))
10826 +               {
10827 +                       printk(KERN_ERR "skb not available...\n");
10828 +#ifdef CONFIG_PSEUDO_SUPPORT
10829 +                       if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10830 +                               if (ei_local->PseudoDev != NULL) {
10831 +                                       pAd = netdev_priv(ei_local->PseudoDev);
10832 +                                       pAd->stat.rx_dropped++;
10833 +                               }
10834 +                       } else
10835 +#endif
10836 +                               ei_local->stat.rx_dropped++;
10837 +                        bReschedule = 1;
10838 +                       break;
10839 +               }
10840 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10841 +               skb_reserve(skb, NET_IP_ALIGN);
10842 +#endif
10843 +
10844 +#if defined (CONFIG_RAETH_SPECIAL_TAG)
10845 +               // port0: 0x8100 => 0x8100 0001
10846 +               // port1: 0x8101 => 0x8100 0002
10847 +               // port2: 0x8102 => 0x8100 0003
10848 +               // port3: 0x8103 => 0x8100 0004
10849 +               // port4: 0x8104 => 0x8100 0005
10850 +               // port5: 0x8105 => 0x8100 0006
10851 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
10852 +               veth = (struct vlan_ethhdr *)(rx_skb->mac_header);
10853 +#else
10854 +               veth = (struct vlan_ethhdr *)(rx_skb->mac.raw);
10855 +#endif
10856 +               /* do not check 0x81 due to the MT7530 spec */
10857 +               //if((veth->h_vlan_proto & 0xFF) == 0x81) 
10858 +               {
10859 +                   veth->h_vlan_TCI = htons( (((veth->h_vlan_proto >> 8) & 0xF) + 1) );
10860 +                   rx_skb->protocol = veth->h_vlan_proto = htons(ETH_P_8021Q);
10861 +               }
10862 +#endif
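+               /* Worked example of the special-tag rewrite above, assuming a
+                * little-endian CPU (the values come from the port mapping comment,
+                * not from measured traffic): a frame tagged by the switch as port 3
+                * carries the tag bytes 0x81 0x03, which h_vlan_proto reads as 0x0381;
+                * ((0x0381 >> 8) & 0xF) + 1 == 4, so the header becomes a standard
+                * 802.1Q one with proto 0x8100 and VID 4, i.e. the
+                * "port3: 0x8103 => 0x8100 0004" case. */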
10863 +
10864 +/* ra_sw_nat_hook_rx return 1 --> continue
10865 + * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
10866 + */
10867 +#if !defined(CONFIG_RA_NAT_NONE)
10868 +         if((ra_sw_nat_hook_rx == NULL) || 
10869 +           (ra_sw_nat_hook_rx!= NULL && ra_sw_nat_hook_rx(rx_skb)))
10870 +#endif
10871 +         {
10872 +#if defined (CONFIG_RALINK_RT3052_MP2)
10873 +              if(mcast_rx(rx_skb)==0) {
10874 +                  kfree_skb(rx_skb);
10875 +              }else
10876 +#endif
10877 +#if defined (CONFIG_RAETH_LRO)
10878 +              if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY) {
10879 +                      lro_receive_skb(&ei_local->lro_mgr, rx_skb, NULL);
10880 +                      //LroStatsUpdate(&ei_local->lro_mgr,0);
10881 +              } else
10882 +#endif
10883 +#ifdef CONFIG_RAETH_NAPI
10884 +                netif_receive_skb(rx_skb);
10885 +#else
10886 +#ifdef CONFIG_RAETH_HW_VLAN_RX
10887 +               if(ei_local->vlgrp && rx_ring[rx_dma_owner_idx].rxd_info2.TAG) {
10888 +                       vlan_hwaccel_rx(rx_skb, ei_local->vlgrp, rx_ring[rx_dma_owner_idx].rxd_info3.VID);
10889 +               } else {
10890 +                       netif_rx(rx_skb);
10891 +               }
10892 +#else
10893 +#ifdef CONFIG_RAETH_CPU_LOOPBACK
10894 +                skb_push(rx_skb,ETH_HLEN);
10895 +                ei_start_xmit(rx_skb, dev, 1);
10896 +#else          
10897 +                netif_rx(rx_skb);
10898 +#endif
10899 +#endif
10900 +#endif
10901 +         }
10902 +
10903 +#ifdef CONFIG_PSEUDO_SUPPORT
10904 +               if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
10905 +                       if (ei_local->PseudoDev != NULL) {
10906 +                               pAd = netdev_priv(ei_local->PseudoDev);
10907 +                               pAd->stat.rx_packets++;
10908 +                               pAd->stat.rx_bytes += length;
10909 +                       }
10910 +               } else
10911 +#endif
10912 +               {
10913 +                       ei_local->stat.rx_packets++;
10914 +                       ei_local->stat.rx_bytes += length;
10915 +               }
10916 +
10917 +
10918 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
10919 +#if defined (CONFIG_RAETH_HW_LRO)
10920 +        if( rx_ring != ei_local->rx_ring0 ){
10921 +            rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
10922 +            rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
10923 +        }
10924 +        else
10925 +#endif  /* CONFIG_RAETH_HW_LRO */
10926 +               rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = MAX_RX_LENGTH;
10927 +               rx_ring[rx_dma_owner_idx].rxd_info2.LS0 = 0;
10928 +#endif
10929 +               rx_ring[rx_dma_owner_idx].rxd_info2.DDONE_bit = 0;
10930 +#if defined (CONFIG_RAETH_HW_LRO)
10931 +        if( rx_ring != ei_local->rx_ring0 )
10932 +            rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
10933 +        else
10934 +#endif  /* CONFIG_RAETH_HW_LRO */
10935 +               rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
10936 +#ifdef CONFIG_32B_DESC
10937 +               dma_cache_sync(NULL, &rx_ring[rx_dma_owner_idx], sizeof(struct PDMA_rxdesc), DMA_TO_DEVICE);
10938 +#endif
10939 +               /* Move the pointer to the next RXD to be allocated */
10940 +#if defined (CONFIG_RAETH_HW_LRO)
10941 +               if(rx_ring_no==3) {
10942 +                   sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
10943 +                   ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
10944 +               }
10945 +               else if(rx_ring_no==2) {
10946 +                   sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
10947 +                   ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
10948 +               }
10949 +               else if(rx_ring_no==1) {
10950 +                   sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
10951 +                   ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
10952 +               }
10953 +               else if(rx_ring_no==0) {
10954 +                   sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
10955 +                   ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
10956 +               }
10957 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
10958 +               if(rx_ring_no==0) {
10959 +                   sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
10960 +                   ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
10961 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10962 +                   rx_calc_idx0 = rx_dma_owner_idx;
10963 +#endif
10964 +               }
10965 +#if defined(CONFIG_ARCH_MT7623)
10966 +        else if(rx_ring_no==3) {
10967 +                   sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
10968 +                   ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
10969 +               }
10970 +               else if(rx_ring_no==2) {
10971 +                   sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
10972 +                   ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
10973 +               }
10974 +#endif  /* CONFIG_ARCH_MT7623 */
10975 +        else {
10976 +                   sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
10977 +                   ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
10978 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10979 +                   rx_calc_idx1 = rx_dma_owner_idx;
10980 +#endif
10981 +               }
10982 +#else
10983 +               sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
10984 +               ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
10985 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
10986 +               rx_calc_idx0 = rx_dma_owner_idx;
10987 +#endif
10988 +#endif
10989 +
10990 +               
10991 +               /* Update the pointer to the next packet that was received.
10992 +                */
10993 +#if defined (CONFIG_RAETH_HW_LRO)
10994 +               if(rx_ring_no==3)
10995 +                       rx_dma_owner_lro3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
10996 +               else if(rx_ring_no==2)
10997 +                       rx_dma_owner_lro2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
10998 +               else if(rx_ring_no==1)
10999 +                       rx_dma_owner_lro1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
11000 +               else if(rx_ring_no==0)
11001 +                       rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11002 +               else {
11003 +               }
11004 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11005 +               if(rx_ring_no==0) {
11006 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11007 +                       rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11008 +#else
11009 +                       rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11010 +#endif
11011 +#if defined(CONFIG_ARCH_MT7623)
11012 +        }else if(rx_ring_no==3) {
11013 +            rx_dma_owner_idx3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_RX_DESC;
11014 +        }else if(rx_ring_no==2) {
11015 +            rx_dma_owner_idx2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_RX_DESC;
11016 +#endif  /* CONFIG_ARCH_MT7623 */
11017 +               }else {
11018 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11019 +                       rx_dma_owner_idx1 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11020 +#else
11021 +                       rx_dma_owner_idx1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_RX_DESC;
11022 +#endif
11023 +               }
11024 +#else
11025 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
11026 +               rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
11027 +#else
11028 +               rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
11029 +#endif
11030 +#endif
11031 +       }       /* for */
11032 +
11033 +#if defined (CONFIG_RAETH_LRO)
11034 +       if (lro_flush_needed) {
11035 +               //LroStatsUpdate(&ei_local->lro_mgr,1);
11036 +               lro_flush_all(&ei_local->lro_mgr);
11037 +               lro_flush_needed = 0;
11038 +       }
11039 +#endif
11040 +       return bReschedule;
11041 +}
11042 +
11043 +
11044 +///////////////////////////////////////////////////////////////////
11045 +/////
11046 +///// ra_get_stats - gather packet information for management plane
11047 +/////
11048 +///// Pass net_device_stats to the upper layer.
11049 +/////
11050 +/////
11051 +///// RETURNS: pointer to net_device_stats
11052 +///////////////////////////////////////////////////////////////////
11053 +
11054 +struct net_device_stats *ra_get_stats(struct net_device *dev)
11055 +{
11056 +       END_DEVICE *ei_local = netdev_priv(dev);
11057 +       return &ei_local->stat;
11058 +}
11059 +
11060 +#if defined (CONFIG_RT_3052_ESW)
11061 +void kill_sig_workq(struct work_struct *work)
11062 +{
11063 +       struct file *fp;
11064 +       char pid[8];
11065 +       struct task_struct *p = NULL;
11066 +
11067 +       //read the udhcpc pid from file, then send SIGUSR2 and SIGUSR1 so it requests a new IP
11068 +       fp = filp_open("/var/run/udhcpc.pid", O_RDONLY, 0);
11069 +       if (IS_ERR(fp))
11070 +           return;
11071 +
11072 +       if (fp->f_op && fp->f_op->read) {
11073 +           if (fp->f_op->read(fp, pid, 8, &fp->f_pos) > 0) {
11074 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11075 +               p = pid_task(find_get_pid(simple_strtoul(pid, NULL, 10)),  PIDTYPE_PID);
11076 +#else
11077 +               p = find_task_by_pid(simple_strtoul(pid, NULL, 10));
11078 +#endif
11079 +
11080 +               if (NULL != p) {
11081 +                   send_sig(SIGUSR2, p, 0);
11082 +                   send_sig(SIGUSR1, p, 0);
11083 +               }
11084 +           }
11085 +       }
11086 +       filp_close(fp, NULL);
11087 +
11088 +}
11089 +#endif
11090 +
11091 +
11092 +///////////////////////////////////////////////////////////////////
11093 +/////
11094 +///// ra2880Recv - process the next incoming packet
11095 +/////
11096 +///// Handle one incoming packet.  The packet is checked for errors and sent
11097 +///// to the upper layer.
11098 +/////
11099 +///// RETURNS: OK on success or ERROR.
11100 +///////////////////////////////////////////////////////////////////
11101 +
11102 +#ifndef CONFIG_RAETH_NAPI
11103 +#if defined WORKQUEUE_BH || defined (TASKLET_WORKQUEUE_SW)
11104 +void ei_receive_workq(struct work_struct *work)
11105 +#else
11106 +void ei_receive(unsigned long unused)  // device structure
11107 +#endif // WORKQUEUE_BH //
11108 +{
11109 +       struct net_device *dev = dev_raether;
11110 +       END_DEVICE *ei_local = netdev_priv(dev);
11111 +       unsigned long reg_int_mask=0;
11112 +       int bReschedule=0;
11113 +
11114 +
11115 +       if(tx_ring_full==0){
11116 +               bReschedule = rt2880_eth_recv(dev);
11117 +               if(bReschedule)
11118 +               {
11119 +#ifdef WORKQUEUE_BH
11120 +                       schedule_work(&ei_local->rx_wq);
11121 +#else
11122 +#if defined (TASKLET_WORKQUEUE_SW)
11123 +                       if (working_schedule == 1)
11124 +                               schedule_work(&ei_local->rx_wq);
11125 +                       else
11126 +#endif
11127 +                       tasklet_hi_schedule(&ei_local->rx_tasklet);
11128 +#endif // WORKQUEUE_BH //
11129 +               }else{
11130 +                       reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
11131 +#if defined(DELAY_INT)
11132 +                       sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
11133 +#else
11134 +                       sysRegWrite(RAETH_FE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11135 +#endif
11136 +#ifdef CONFIG_RAETH_QDMA
11137 +                       reg_int_mask=sysRegRead(QFE_INT_ENABLE);
11138 +#if defined(DELAY_INT)
11139 +                       sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
11140 +#else
11141 +                       sysRegWrite(QFE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1));
11142 +#endif
11143 +
11144 +#endif                 
11145 +                       
11146 +               }
11147 +       }else{
11148 +#ifdef WORKQUEUE_BH
11149 +                schedule_work(&ei_local->rx_wq);
11150 +#else
11151 +#if defined (TASKLET_WORKQUEUE_SW)
11152 +               if (working_schedule == 1)
11153 +                       schedule_work(&ei_local->rx_wq);
11154 +               else
11155 +#endif
11156 +                tasklet_schedule(&ei_local->rx_tasklet);
11157 +#endif // WORKQUEUE_BH //
11158 +       }
11159 +}
11160 +#endif
11161 +
11162 +#if defined (CONFIG_RAETH_HW_LRO)
11163 +void ei_hw_lro_auto_adj(unsigned int index, END_DEVICE* ei_local)
11164 +{    
11165 +    unsigned int entry;
11166 +    unsigned int pkt_cnt;
11167 +    unsigned int tick_cnt;
11168 +    unsigned int duration_us;
11169 +    unsigned int byte_cnt;
11170 +
11171 +    /* read packet count statistics of the auto-learn table */
11172 +    entry = index  + 68;
11173 +    sysRegWrite( PDMA_FE_ALT_CF8, entry );
11174 +    pkt_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC) & 0xfff;
11175 +    tick_cnt = (sysRegRead(PDMA_FE_ALT_SGL_CFC) >> 16) & 0xffff;
11176 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11177 +    printk("[HW LRO] ei_hw_lro_auto_adj(): pkt_cnt[%d]=%d, tick_cnt[%d]=%d\n", index, pkt_cnt, index, tick_cnt);
11178 +    printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (ticks/pkt)\n", index, tick_cnt/pkt_cnt);
11179 +#endif    
11180 +
11181 +    /* read byte count statistics of the auto-learn table */
11182 +    entry = index  + 64;
11183 +    sysRegWrite( PDMA_FE_ALT_CF8, entry );
11184 +    byte_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC);
11185 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11186 +    printk("[HW LRO] ei_hw_lro_auto_adj(): byte_cnt[%d]=%d\n", index, byte_cnt);
11187 +#endif
11188 +
11189 +    /* calculate the packet interval of the rx flow */
11190 +    duration_us = tick_cnt * HW_LRO_TIMER_UNIT;
11191 +    ei_local->hw_lro_pkt_interval[index - 1] = (duration_us/pkt_cnt) * ei_local->hw_lro_alpha / 100;
11192 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11193 +    printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (20us)\n", index, duration_us/pkt_cnt);
11194 +#endif    
11195 +
11196 +    if ( !ei_local->hw_lro_fix_setting ){
11197 +    /* adjust age_time, agg_time for the lro ring */
11198 +       if(ei_local->hw_lro_pkt_interval[index - 1] > 0){
11199 +               SET_PDMA_RXRING_AGE_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_MAX_AGG_CNT));
11200 +               SET_PDMA_RXRING_AGG_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_AGG_DELTA));
11201 +       }
11202 +       else{
11203 +               SET_PDMA_RXRING_AGE_TIME(index, HW_LRO_MAX_AGG_CNT);
11204 +               SET_PDMA_RXRING_AGG_TIME(index, HW_LRO_AGG_DELTA);
11205 +       }
11206 +    }
11207 +}
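+/* Worked example for ei_hw_lro_auto_adj() above; the macro and alpha values are
+ * illustrative assumptions only (they are defined elsewhere in the driver, not
+ * in this hunk). Assuming HW_LRO_TIMER_UNIT = 1 (us per tick), hw_lro_alpha = 20,
+ * HW_LRO_MAX_AGG_CNT = 64 and HW_LRO_AGG_DELTA = 1: with pkt_cnt = 100 and
+ * tick_cnt = 1000, duration_us = 1000, the raw per-packet interval is
+ * 1000 / 100 = 10, the stored interval is 10 * 20 / 100 = 2, and the ring is
+ * reprogrammed with AGE_TIME = 2 * 64 = 128 and AGG_TIME = 2 * 1 = 2. */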
11208 +
11209 +void ei_hw_lro_workq(struct work_struct *work)
11210 +{
11211 +    END_DEVICE *ei_local;
11212 +    unsigned int reg_int_val;
11213 +    unsigned int reg_int_mask;
11214 +
11215 +    ei_local = container_of(work, struct end_device, hw_lro_wq);
11216 +
11217 +    reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
11218 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11219 +    printk("[HW LRO] ei_hw_lro_workq(): RAETH_FE_INT_STATUS=0x%x\n", reg_int_val);
11220 +#endif
11221 +    if((reg_int_val & ALT_RPLC_INT3)){
11222 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11223 +        printk("[HW LRO] ALT_RPLC_INT3 occurred!\n");
11224 +#endif
11225 +        sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT3);
11226 +        ei_hw_lro_auto_adj(3, ei_local);
11227 +    }
11228 +    if((reg_int_val & ALT_RPLC_INT2)){
11229 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11230 +        printk("[HW LRO] ALT_RPLC_INT2 occurred!\n");
11231 +#endif
11232 +        sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT2);
11233 +        ei_hw_lro_auto_adj(2, ei_local);
11234 +    }
11235 +    if((reg_int_val & ALT_RPLC_INT1)){
11236 +#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
11237 +        printk("[HW LRO] ALT_RPLC_INT1 occurred!\n");
11238 +#endif
11239 +        sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT1);
11240 +        ei_hw_lro_auto_adj(1, ei_local);
11241 +    }
11242 +
11243 +    /* unmask interrupts of rx flow to hw lro rings */
11244 +    reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);    
11245 +    sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
11246 +}
11247 +#endif  /* CONFIG_RAETH_HW_LRO */
11248 +
11249 +#ifdef CONFIG_RAETH_NAPI
11250 +static int
11251 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11252 +raeth_clean(struct napi_struct *napi, int budget)
11253 +#else
11254 +raeth_clean(struct net_device *netdev, int *budget)
11255 +#endif
11256 +{
11257 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11258 +       struct net_device *netdev=dev_raether;
11259 +        int work_to_do = budget;
11260 +#else
11261 +        int work_to_do = min(*budget, netdev->quota);
11262 +#endif
11263 +       END_DEVICE *ei_local =netdev_priv(netdev);
11264 +        int work_done = 0;
11265 +       unsigned long reg_int_mask=0;
11266 +
11267 +       ei_xmit_housekeeping(0);
11268 +
11269 +       rt2880_eth_recv(netdev, &work_done, work_to_do);
11270 +
11271 +        /* this controls when to re-enable the interrupt; 0 means never re-enable it */
11272 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
11273 +        *budget -= work_done;
11274 +        netdev->quota -= work_done;
11275 +#endif
11276 +        /* if no Tx and not enough Rx work done, exit the polling mode */
11277 +        if(( (work_done < work_to_do)) || !netif_running(netdev)) {
11278 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11279 +               napi_complete(&ei_local->napi);
11280 +#else
11281 +                netif_rx_complete(netdev);
11282 +#endif
11283 +               atomic_dec_and_test(&ei_local->irq_sem);
11284 +
11285 +               sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL);             // ack all fe interrupts
11286 +               reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
11287 +
11288 +#ifdef DELAY_INT
11289 +               sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask |RAETH_FE_INT_DLY_INIT);  // init delay interrupt only
11290 +#else
11291 +               sysRegWrite(RAETH_FE_INT_ENABLE,reg_int_mask | RAETH_FE_INT_SETTING);
11292 +#endif
11293 +
11294 +#ifdef CONFIG_RAETH_QDMA
11295 +               sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
11296 +               reg_int_mask=sysRegRead(QFE_INT_ENABLE);
11297 +#ifdef DELAY_INT
11298 +                sysRegWrite(QFE_INT_ENABLE, reg_int_mask |QFE_INT_DLY_INIT);  // init delay interrupt only
11299 +#else
11300 +                sysRegWrite(QFE_INT_ENABLE,reg_int_mask | (RX_DONE_INT0 | RX_DONE_INT1 | RLS_DONE_INT));
11301 +#endif
11302 +#endif // CONFIG_RAETH_QDMA //
11303 +
11304 +                return 0;
11305 +        }
11306 +
11307 +        return 1;
11308 +}
11309 +
11310 +#endif
11311 +
11312 +
11313 +void gsw_delay_setting(void) 
11314 +{
11315 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN) 
11316 +       END_DEVICE *ei_local = netdev_priv(dev_raether);
11317 +       int reg_int_val = 0;
11318 +       int link_speed = 0;
11319 +
11320 +       reg_int_val = sysRegRead(FE_INT_STATUS2);
11321 +#if defined (CONFIG_RALINK_MT7621)
11322 +       if( reg_int_val & BIT(25))
11323 +       {
11324 +               if(sysRegRead(RALINK_ETH_SW_BASE+0x0208) & 0x1) // link up
11325 +               {
11326 +                       link_speed = (sysRegRead(RALINK_ETH_SW_BASE+0x0208)>>2 & 0x3);
11327 +                       if(link_speed == 1)
11328 +                       {
11329 +                               // delay setting for 100M
11330 +                               if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)     
11331 +                                       mii_mgr_write(31, 0x7b00, 8);
11332 +                               printk("MT7621 GE2 link rate to 100M\n");
11333 +                       } else 
11334 +                       {
11335 +                               //delay setting for 10/1000M
11336 +                               if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
11337 +                                       mii_mgr_write(31, 0x7b00, 0x102);
11338 +                               printk("MT7621 GE2 link rate to 10M/1G\n");
11339 +                       }
11340 +                       schedule_work(&ei_local->kill_sig_wq);
11341 +               }
11342 +       }
11343 +#endif
11344 +       sysRegWrite(FE_INT_STATUS2, reg_int_val);
11345 +#endif
11346 +}
11347 +
11348 +/**
11349 + * ei_interrupt - handle controller interrupt
11350 + *
11351 + * This routine is called at interrupt level in response to an interrupt from
11352 + * the controller.
11353 + *
11354 + * RETURNS: N/A.
11355 + */
11356 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
11357 +static irqreturn_t ei_interrupt(int irq, void *dev_id)
11358 +#else
11359 +static irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
11360 +#endif
11361 +{
11362 +#if !defined(CONFIG_RAETH_NAPI)
11363 +       unsigned long reg_int_val;
11364 +       unsigned long reg_int_mask=0;
11365 +       unsigned int recv = 0;
11366 +       unsigned int transmit __maybe_unused = 0;
11367 +       unsigned long flags;
11368 +#endif
11369 +
11370 +       struct net_device *dev = (struct net_device *) dev_id;
11371 +       END_DEVICE *ei_local = netdev_priv(dev);
11372 +
11373 +       //Qwert
11374 +       /*
11375 +       unsigned long old,cur,dcycle;
11376 +       static int cnt = 0;
11377 +       static unsigned long max_dcycle = 0,tcycle = 0;
11378 +       old = read_c0_count();
11379 +       */
11380 +       if (dev == NULL)
11381 +       {
11382 +               printk (KERN_ERR "net_interrupt(): irq %x for unknown device.\n", IRQ_ENET0);
11383 +               return IRQ_NONE;
11384 +       }
11385 +
11386 +#ifdef CONFIG_RAETH_NAPI
11387 +       gsw_delay_setting();
11388 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11389 +        if(napi_schedule_prep(&ei_local->napi)) {
11390 +#else
11391 +        if(netif_rx_schedule_prep(dev)) {
11392 +#endif
11393 +                atomic_inc(&ei_local->irq_sem);
11394 +               sysRegWrite(RAETH_FE_INT_ENABLE, 0);
11395 +#ifdef CONFIG_RAETH_QDMA               
11396 +               sysRegWrite(QFE_INT_ENABLE, 0);
11397 +#endif
11398 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
11399 +               __napi_schedule(&ei_local->napi);
11400 +#else
11401 +                __netif_rx_schedule(dev);
11402 +#endif
11403 +        }
11404 +#else
11405 +
11406 +       spin_lock_irqsave(&(ei_local->page_lock), flags);
11407 +       reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
11408 +#ifdef CONFIG_RAETH_QDMA       
11409 +       reg_int_val |= sysRegRead(QFE_INT_STATUS);
11410 +#endif
11411 +#if defined (DELAY_INT)
11412 +       if((reg_int_val & RX_DLY_INT))
11413 +               recv = 1;
11414 +       
11415 +       if (reg_int_val & RAETH_TX_DLY_INT)
11416 +               transmit = 1;
11417 +
11418 +#if defined(CONFIG_RAETH_PDMA_DVT)
11419 +    raeth_pdma_lro_dly_int_dvt();
11420 +#endif  /* CONFIG_RAETH_PDMA_DVT */
11421 +
11422 +#else
11423 +       if((reg_int_val & (RX_DONE_INT0 | RX_DONE_INT3 | RX_DONE_INT2 | RX_DONE_INT1)))
11424 +               recv = 1;
11425 +
11426 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
11427 +#if defined(CONFIG_ARCH_MT7623)    
11428 +    if((reg_int_val & RX_DONE_INT3))
11429 +        recv = 3;
11430 +    if((reg_int_val & RX_DONE_INT2))
11431 +        recv = 2;
11432 +#endif  /* CONFIG_ARCH_MT7623 */
11433 +       if((reg_int_val & RX_DONE_INT1))
11434 +               recv = 1;
11435 +#endif
11436 +
11437 +       if (reg_int_val & RAETH_TX_DONE_INT0)
11438 +               transmit |= RAETH_TX_DONE_INT0;
11439 +#if defined (CONFIG_RAETH_QOS)
11440 +       if (reg_int_val & TX_DONE_INT1)
11441 +               transmit |= TX_DONE_INT1;
11442 +       if (reg_int_val & TX_DONE_INT2)
11443 +               transmit |= TX_DONE_INT2;
11444 +       if (reg_int_val & TX_DONE_INT3)
11445 +               transmit |= TX_DONE_INT3;
11446 +#endif //CONFIG_RAETH_QOS
11447 +
11448 +#endif //DELAY_INT
11449 +
11450 +#if defined (DELAY_INT)
11451 +       sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_DLY_INIT);
11452 +#else
11453 +       sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL);
11454 +#endif
11455 +#ifdef CONFIG_RAETH_QDMA
11456 +#if defined (DELAY_INT)
11457 +       sysRegWrite(QFE_INT_STATUS, QFE_INT_DLY_INIT);
11458 +#else
11459 +       sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
11460 +#endif
11461 +#endif 
11462 +
11463 +#if defined (CONFIG_RAETH_HW_LRO)
11464 +    if( reg_int_val & (ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1) ){
11465 +        /* mask interrupts of rx flow to hw lro rings */
11466 +        reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11467 +        sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1));
11468 +        schedule_work(&ei_local->hw_lro_wq);
11469 +    }
11470 +#endif  /* CONFIG_RAETH_HW_LRO */
11471 +
11472 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
11473 +       if(transmit)
11474 +               ei_xmit_housekeeping(0);
11475 +#else
11476 +               ei_xmit_housekeeping(0);
11477 +#endif
11478 +
11479 +       if (((recv == 1) || (pending_recv ==1)) && (tx_ring_full==0))
11480 +       {
11481 +               reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
11482 +#if defined (DELAY_INT)
11483 +               sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
11484 +#else
11485 +               sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11486 +#endif //DELAY_INT
11487 +#ifdef CONFIG_RAETH_QDMA               
11488 +               reg_int_mask = sysRegRead(QFE_INT_ENABLE);
11489 +#if defined (DELAY_INT)
11490 +               sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
11491 +#else
11492 +               sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
11493 +#endif //DELAY_INT
11494 +#endif
11495 +
11496 +               pending_recv=0;
11497 +#ifdef WORKQUEUE_BH
11498 +               schedule_work(&ei_local->rx_wq);
11499 +#else
11500 +#if defined (TASKLET_WORKQUEUE_SW)
11501 +               if (working_schedule == 1)
11502 +                       schedule_work(&ei_local->rx_wq);
11503 +               else
11504 +#endif
11505 +               tasklet_hi_schedule(&ei_local->rx_tasklet);
11506 +#endif // WORKQUEUE_BH //
11507 +       } 
11508 +       else if (recv == 1 && tx_ring_full==1) 
11509 +       {
11510 +               pending_recv=1;
11511 +       }
11512 +       else if((recv == 0) && (transmit == 0))
11513 +       {
11514 +               gsw_delay_setting();
11515 +       }
11516 +       spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11517 +#endif
11518 +
11519 +       return IRQ_HANDLED;
11520 +}
11521 +
11522 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
11523 +    defined (CONFIG_RALINK_MT7620)|| defined (CONFIG_RALINK_MT7621)
11524 +static void esw_link_status_changed(int port_no, void *dev_id)
11525 +{
11526 +    unsigned int reg_val;
11527 +    struct net_device *dev = (struct net_device *) dev_id;
11528 +    END_DEVICE *ei_local = netdev_priv(dev);
11529 +
11530 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11531 +    defined (CONFIG_RALINK_MT7620)
11532 +    reg_val = *((volatile u32 *)(RALINK_ETH_SW_BASE+ 0x3008 + (port_no*0x100)));
11533 +#elif defined (CONFIG_RALINK_MT7621)
11534 +    mii_mgr_read(31, (0x3008 + (port_no*0x100)), &reg_val);
11535 +#endif    
11536 +    if(reg_val & 0x1) {
11537 +       printk("ESW: Link Status Changed - Port%d Link UP\n", port_no);
11538 +#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
11539 +       mii_mgr_write(port_no, 31, 0x52b5);
11540 +       mii_mgr_write(port_no, 16, 0xb780);
11541 +       mii_mgr_write(port_no, 17, 0x00e0);
11542 +       mii_mgr_write(port_no, 16, 0x9780);
11543 +#endif
11544 +
11545 +#if defined (CONFIG_WAN_AT_P0)
11546 +       if(port_no==0) {
11547 +           schedule_work(&ei_local->kill_sig_wq);
11548 +       }
11549 +#elif defined (CONFIG_WAN_AT_P4)
11550 +       if(port_no==4) {
11551 +           schedule_work(&ei_local->kill_sig_wq);
11552 +       }
11553 +#endif
11554 +    } else {       
11555 +       printk("ESW: Link Status Changed - Port%d Link Down\n", port_no);
11556 +#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
11557 +        mii_mgr_write(port_no, 31, 0x52b5);
11558 +        mii_mgr_write(port_no, 16, 0xb780);
11559 +        mii_mgr_write(port_no, 17, 0x0000);
11560 +        mii_mgr_write(port_no, 16, 0x9780);
11561 +#endif
11562 +
11563 +    }
11564 +}
11565 +#endif
11566 +
11567 +#if defined (CONFIG_RT_3052_ESW) && ! defined(CONFIG_RALINK_MT7621) && ! defined(CONFIG_ARCH_MT7623)
11568 +static irqreturn_t esw_interrupt(int irq, void *dev_id)
11569 +{
11570 +       unsigned long flags;
11571 +       unsigned long reg_int_val;
11572 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11573 +    defined(CONFIG_RALINK_MT7620)
11574 +       unsigned long acl_int_val;
11575 +       unsigned long mib_int_val;
11576 +#else
11577 +       static unsigned long stat;
11578 +       unsigned long stat_curr;
11579 +#endif
11580 +       
11581 +       struct net_device *dev = (struct net_device *) dev_id;
11582 +       END_DEVICE *ei_local = netdev_priv(dev);
11583 +
11584 +
11585 +       spin_lock_irqsave(&(ei_local->page_lock), flags);
11586 +       reg_int_val = (*((volatile u32 *)(ESW_ISR))); //Interrupt Status Register
11587 +
11588 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11589 +    defined(CONFIG_RALINK_MT7620)
11590 +       if (reg_int_val & P5_LINK_CH) {
11591 +           esw_link_status_changed(5, dev_id);
11592 +       }
11593 +       if (reg_int_val & P4_LINK_CH) {
11594 +           esw_link_status_changed(4, dev_id);
11595 +       }
11596 +       if (reg_int_val & P3_LINK_CH) {
11597 +           esw_link_status_changed(3, dev_id);
11598 +       }
11599 +       if (reg_int_val & P2_LINK_CH) {
11600 +           esw_link_status_changed(2, dev_id);
11601 +       }
11602 +       if (reg_int_val & P1_LINK_CH) {
11603 +           esw_link_status_changed(1, dev_id);
11604 +       }
11605 +       if (reg_int_val & P0_LINK_CH) {
11606 +           esw_link_status_changed(0, dev_id);
11607 +       }
11608 +       if (reg_int_val & ACL_INT) {
11609 +           acl_int_val = sysRegRead(ESW_AISR);
11610 +           sysRegWrite(ESW_AISR, acl_int_val);
11611 +       }
11612 +       if (reg_int_val & MIB_INT) {
11613 +
11614 +           mib_int_val = sysRegRead(ESW_P0_IntSn);
11615 +           if(mib_int_val){
11616 +               sysRegWrite(ESW_P0_IntSn, mib_int_val);
11617 +               if(mib_int_val & RX_GOOD_CNT)
11618 +                       p0_rx_good_cnt ++;      
11619 +               if(mib_int_val & TX_GOOD_CNT)
11620 +                       p0_tx_good_cnt ++;      
11621 +               if(mib_int_val & RX_GOCT_CNT)
11622 +                       p0_rx_byte_cnt ++;
11623 +               if(mib_int_val & TX_GOCT_CNT)
11624 +                       p0_tx_byte_cnt ++;
11625 +           }
11626 +
11627 +           mib_int_val = sysRegRead(ESW_P1_IntSn);
11628 +           if(mib_int_val){
11629 +               sysRegWrite(ESW_P1_IntSn, mib_int_val);
11630 +               if(mib_int_val & RX_GOOD_CNT)
11631 +                       p1_rx_good_cnt ++;              
11632 +               if(mib_int_val & TX_GOOD_CNT)
11633 +                       p1_tx_good_cnt ++;      
11634 +               if(mib_int_val & RX_GOCT_CNT)
11635 +                       p1_rx_byte_cnt ++;      
11636 +               if(mib_int_val & TX_GOCT_CNT)
11637 +                       p1_tx_byte_cnt ++;      
11638 +           }
11639 +
11640 +           mib_int_val = sysRegRead(ESW_P2_IntSn);
11641 +           if(mib_int_val){
11642 +               sysRegWrite(ESW_P2_IntSn, mib_int_val);
11643 +               if(mib_int_val & RX_GOOD_CNT)
11644 +                       p2_rx_good_cnt ++;              
11645 +               if(mib_int_val & TX_GOOD_CNT)
11646 +                       p2_tx_good_cnt ++;      
11647 +               if(mib_int_val & RX_GOCT_CNT)
11648 +                       p2_rx_byte_cnt ++;      
11649 +               if(mib_int_val & TX_GOCT_CNT)
11650 +                       p2_tx_byte_cnt ++;      
11651 +           }
11652 +
11653 +
11654 +           mib_int_val = sysRegRead(ESW_P3_IntSn);
11655 +           if(mib_int_val){
11656 +               sysRegWrite(ESW_P3_IntSn, mib_int_val);
11657 +               if(mib_int_val & RX_GOOD_CNT)
11658 +                       p3_rx_good_cnt ++;              
11659 +               if(mib_int_val & TX_GOOD_CNT)
11660 +                       p3_tx_good_cnt ++;      
11661 +               if(mib_int_val & RX_GOCT_CNT)
11662 +                       p3_rx_byte_cnt ++;      
11663 +               if(mib_int_val & TX_GOCT_CNT)
11664 +                       p3_tx_byte_cnt ++;      
11665 +           }
11666 +
11667 +           mib_int_val = sysRegRead(ESW_P4_IntSn);
11668 +           if(mib_int_val){
11669 +               sysRegWrite(ESW_P4_IntSn, mib_int_val);
11670 +               if(mib_int_val & RX_GOOD_CNT)
11671 +                       p4_rx_good_cnt ++;      
11672 +               if(mib_int_val & TX_GOOD_CNT)
11673 +                       p4_tx_good_cnt ++;      
11674 +               if(mib_int_val & RX_GOCT_CNT)
11675 +                       p4_rx_byte_cnt ++;      
11676 +               if(mib_int_val & TX_GOCT_CNT)
11677 +                       p4_tx_byte_cnt ++;      
11678 +           }   
11679 +
11680 +           mib_int_val = sysRegRead(ESW_P5_IntSn);
11681 +           if(mib_int_val){
11682 +               sysRegWrite(ESW_P5_IntSn, mib_int_val);
11683 +               if(mib_int_val & RX_GOOD_CNT)
11684 +                       p5_rx_good_cnt ++;              
11685 +               if(mib_int_val & TX_GOOD_CNT)
11686 +                       p5_tx_good_cnt ++;      
11687 +               if(mib_int_val & RX_GOCT_CNT)
11688 +                       p5_rx_byte_cnt ++;      
11689 +               if(mib_int_val & TX_GOCT_CNT)
11690 +                       p5_tx_byte_cnt ++;      
11691 +           }
11692 +
11693 +           mib_int_val = sysRegRead(ESW_P6_IntSn);
11694 +           if(mib_int_val){
11695 +               sysRegWrite(ESW_P6_IntSn, mib_int_val);
11696 +               if(mib_int_val & RX_GOOD_CNT)
11697 +                       p6_rx_good_cnt ++;              
11698 +               if(mib_int_val & TX_GOOD_CNT)
11699 +                       p6_tx_good_cnt ++;      
11700 +               if(mib_int_val & RX_GOCT_CNT)
11701 +                       p6_rx_byte_cnt ++;      
11702 +               if(mib_int_val & TX_GOCT_CNT)
11703 +                       p6_tx_byte_cnt ++;      
11704 +           }
11705 +#if defined (CONFIG_RALINK_MT7620)
11706 +           mib_int_val = sysRegRead(ESW_P7_IntSn);
11707 +           if(mib_int_val){
11708 +               sysRegWrite(ESW_P7_IntSn, mib_int_val);
11709 +               if(mib_int_val & RX_GOOD_CNT)
11710 +                       p7_rx_good_cnt ++;              
11711 +               if(mib_int_val & TX_GOOD_CNT)
11712 +                       p7_tx_good_cnt ++;      
11713 +               if(mib_int_val & RX_GOCT_CNT)
11714 +                       p7_rx_byte_cnt ++;      
11715 +               if(mib_int_val & TX_GOCT_CNT)
11716 +                       p7_tx_byte_cnt ++;      
11717 +  
11718 +           }
11719 +#endif     
11720 +       }
11721 +
11722 +#else // not RT6855
11723 +       if (reg_int_val & PORT_ST_CHG) {
11724 +               printk("RT305x_ESW: Link Status Changed\n");
11725 +
11726 +               stat_curr = *((volatile u32 *)(RALINK_ETH_SW_BASE+0x80));
11727 +#ifdef CONFIG_WAN_AT_P0
11728 +               //link down --> link up : send signal to user application
11729 +               //link up --> link down : ignore
11730 +               if ((stat & (1<<25)) || !(stat_curr & (1<<25)))
11731 +#else
11732 +               if ((stat & (1<<29)) || !(stat_curr & (1<<29)))
11733 +#endif
11734 +                       goto out;
11735 +
11736 +               schedule_work(&ei_local->kill_sig_wq);
11737 +out:
11738 +               stat = stat_curr;
11739 +       }
11740 +
11741 +#endif // defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)//
11742 +
11743 +       sysRegWrite(ESW_ISR, reg_int_val);
11744 +
11745 +       spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11746 +       return IRQ_HANDLED;
11747 +}
11748 +
11749 +
11750 +
11751 +#elif defined (CONFIG_RT_3052_ESW) && defined(CONFIG_RALINK_MT7621)
11752 +
11753 +static irqreturn_t esw_interrupt(int irq, void *dev_id)
11754 +{
11755 +       unsigned long flags;
11756 +       unsigned int reg_int_val;
11757 +       struct net_device *dev = (struct net_device *) dev_id;
11758 +       END_DEVICE *ei_local = netdev_priv(dev);
11759 +
11760 +       spin_lock_irqsave(&(ei_local->page_lock), flags);
11761 +        mii_mgr_read(31, 0x700c, &reg_int_val);
11762 +
11763 +       if (reg_int_val & P4_LINK_CH) {
11764 +           esw_link_status_changed(4, dev_id);
11765 +       }
11766 +
11767 +       if (reg_int_val & P3_LINK_CH) {
11768 +           esw_link_status_changed(3, dev_id);
11769 +       }
11770 +       if (reg_int_val & P2_LINK_CH) {
11771 +           esw_link_status_changed(2, dev_id);
11772 +       }
11773 +       if (reg_int_val & P1_LINK_CH) {
11774 +           esw_link_status_changed(1, dev_id);
11775 +       }
11776 +       if (reg_int_val & P0_LINK_CH) {
11777 +           esw_link_status_changed(0, dev_id);
11778 +       }
11779 +
11780 +        mii_mgr_write(31, 0x700c, 0x1f); //ack switch link change
11781 +       
11782 +       spin_unlock_irqrestore(&(ei_local->page_lock), flags);
11783 +       return IRQ_HANDLED;
11784 +}
11785 +
11786 +#endif
11787 +
11788 +
11789 +static int ei_start_xmit_fake(struct sk_buff* skb, struct net_device *dev)
11790 +{
11791 +       return ei_start_xmit(skb, dev, 1);
11792 +}
11793 +
11794 +
11795 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) 
11796 +void dump_phy_reg(int port_no, int from, int to, int is_local)
11797 +{
11798 +        u32 i=0;
11799 +        u32 temp=0;
11800 +
11801 +        if(is_local==0) {
11802 +            printk("Global Register\n");
11803 +            printk("===============");
11804 +            mii_mgr_write(0, 31, 0); //select global register
11805 +            for(i=from;i<=to;i++) {
11806 +                if(i%8==0) {
11807 +                    printk("\n");
11808 +                }
11809 +                mii_mgr_read(port_no,i, &temp);
11810 +                printk("%02d: %04X ",i, temp);
11811 +            }
11812 +        } else {
11813 +            mii_mgr_write(0, 31, 0x8000); //select local register
11814 +                printk("\n\nLocal Register Port %d\n",port_no);
11815 +                printk("===============");
11816 +                for(i=from;i<=to;i++) {
11817 +                    if(i%8==0) {
11818 +                        printk("\n");
11819 +                    }
11820 +                    mii_mgr_read(port_no,i, &temp);
11821 +                    printk("%02d: %04X ",i, temp);
11822 +                }
11823 +        }
11824 +        printk("\n");
11825 +}
11826 +#else
11827 +void dump_phy_reg(int port_no, int from, int to, int is_local, int page_no)
11828 +{
11829 +
11830 +        u32 i=0;
11831 +        u32 temp=0;
11832 +        u32 r31=0;
11833 +
11834 +
11835 +        if(is_local==0) {
11836 +
11837 +            printk("\n\nGlobal Register Page %d\n",page_no);
11838 +            printk("===============");
11839 +            r31 |= 0 << 15; //global
11840 +            r31 |= ((page_no&0x7) << 12); //page no
11841 +            mii_mgr_write(port_no, 31, r31); //select global page x
11842 +            for(i=16;i<32;i++) {
11843 +                if(i%8==0) {
11844 +                    printk("\n");
11845 +                }
11846 +                mii_mgr_read(port_no,i, &temp);
11847 +                printk("%02d: %04X ",i, temp);
11848 +            }
11849 +        }else {
11850 +            printk("\n\nLocal Register Port %d Page %d\n",port_no, page_no);
11851 +            printk("===============");
11852 +            r31 |= 1 << 15; //local
11853 +            r31 |= ((page_no&0x7) << 12); //page no
11854 +            mii_mgr_write(port_no, 31, r31); //select local page x
11855 +            for(i=16;i<32;i++) {
11856 +                if(i%8==0) {
11857 +                    printk("\n");
11858 +                }
11859 +                mii_mgr_read(port_no,i, &temp);
11860 +                printk("%02d: %04X ",i, temp);
11861 +            }
11862 +        }
11863 +        printk("\n");
11864 +}
11865 +
11866 +#endif
11867 +
11868 +int ei_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11869 +{
11870 +#if defined(CONFIG_RT_3052_ESW) || defined(CONFIG_RAETH_QDMA)
11871 +               esw_reg reg;
11872 +#endif
11873 +#if defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
11874 +    defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
11875 +    defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
11876 +    defined (CONFIG_RALINK_MT7628) || defined (CONFIG_ARCH_MT7623)
11877 +        esw_rate ratelimit;
11878 +#endif
11879 +#if defined(CONFIG_RT_3052_ESW)
11880 +       unsigned int offset = 0;
11881 +       unsigned int value = 0;
11882 +#endif
11883 +
11884 +       int ret = 0;
11885 +       END_DEVICE *ei_local = netdev_priv(dev);
11886 +       ra_mii_ioctl_data mii;
11887 +       spin_lock_irq(&ei_local->page_lock);
11888 +
11889 +       switch (cmd) {
11890 +#if defined(CONFIG_RAETH_QDMA)
11891 +#define _HQOS_REG(x)   (*((volatile u32 *)(RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + x)))
11892 +               case RAETH_QDMA_REG_READ:
11893 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11894 +                       if (reg.off > REG_HQOS_MAX) {
11895 +                               ret = -EINVAL;
11896 +                               break;
11897 +                       }
11898 +                       reg.val = _HQOS_REG(reg.off);
11899 +                       //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11900 +                       copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
11901 +                       break;
11902 +               case RAETH_QDMA_REG_WRITE:
11903 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11904 +                       if (reg.off > REG_HQOS_MAX) {
11905 +                               ret = -EINVAL;
11906 +                               break;
11907 +                       }
11908 +                       _HQOS_REG(reg.off) = reg.val;
11909 +                       //printk("write reg off:%x val:%x\n", reg.off, reg.val);
11910 +                       break;
11911 +#if 0
11912 +                case RAETH_QDMA_READ_CPU_CLK:
11913 +                        copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11914 +                        reg.val = get_surfboard_sysclk();
11915 +                        //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11916 +                       copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
11917 +                       break;
11918 +#endif                 
11919 +               case RAETH_QDMA_QUEUE_MAPPING:
11920 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11921 +                               if((reg.off&0x100) == 0x100){
11922 +                                       lan_wan_separate = 1;
11923 +                                       reg.off &= 0xff;
11924 +                               }else{
11925 +                                       lan_wan_separate = 0;
11926 +                               }
11927 +                       M2Q_table[reg.off] = reg.val;
11928 +               break;
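+                       /* Note on the mapping above: bit 8 (0x100) of reg.off toggles
+                        * LAN/WAN queue separation; the remaining low byte then indexes
+                        * M2Q_table, which appears to map a packet mark value to the
+                        * QDMA TX queue given in reg.val.
+                        */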
11929 +#if defined(CONFIG_HW_SFQ)
11930 +      case RAETH_QDMA_SFQ_WEB_ENABLE:
11931 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11932 +                       if((reg.val) == 0x1){
11933 +                               web_sfq_enable = 1;
11934 +       
11935 +                       }else{
11936 +                               web_sfq_enable = 0;
11937 +                       }
11938 +               break;
11939 +#endif         
11940 +               
11941 +       
11942 +#endif         
11943 +               case RAETH_MII_READ:
11944 +                       copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11945 +                       mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
11946 +                       //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
11947 +                       copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
11948 +                       break;
11949 +
11950 +               case RAETH_MII_WRITE:
11951 +                       copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11952 +                       //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
11953 +                       mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
11954 +                       break;
11955 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)                   
11956 +               case RAETH_MII_READ_CL45:
11957 +                       copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11958 +                       //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
11959 +                       mii_mgr_read_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, &mii.val_out);
11960 +                       copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
11961 +                       break;
11962 +               case RAETH_MII_WRITE_CL45:
11963 +                       copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
11964 +                       //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
11965 +                       mii_mgr_write_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, mii.val_in);
11966 +                       break;
11967 +#endif
11968 +                       
11969 +#if defined(CONFIG_RT_3052_ESW)
11970 +#define _ESW_REG(x)    (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
11971 +               case RAETH_ESW_REG_READ:
11972 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11973 +                       if (reg.off > REG_ESW_MAX) {
11974 +                               ret = -EINVAL;
11975 +                               break;
11976 +                       }
11977 +                       reg.val = _ESW_REG(reg.off);
11978 +                       //printk("read reg off:%x val:%x\n", reg.off, reg.val);
11979 +                       copy_to_user(ifr->ifr_data, &reg, sizeof(reg));
11980 +                       break;
11981 +               case RAETH_ESW_REG_WRITE:
11982 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11983 +                       if (reg.off > REG_ESW_MAX) {
11984 +                               ret = -EINVAL;
11985 +                               break;
11986 +                       }
11987 +                       _ESW_REG(reg.off) = reg.val;
11988 +                       //printk("write reg off:%x val:%x\n", reg.off, reg.val);
11989 +                       break;
11990 +               case RAETH_ESW_PHY_DUMP:
11991 +                       copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
11992 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
11993 +                       if (reg.val ==32 ) {//dump all phy register
11994 +                           /* Global Register 0~31
11995 +                            * Local Register 0~31
11996 +                            */
11997 +                           dump_phy_reg(0, 0, 31, 0); //dump global register
11998 +                           for(offset=0;offset<5;offset++) {
11999 +                               dump_phy_reg(offset, 0, 31, 1); //dump local register
12000 +                           }
12001 +                       } else {
12002 +                           dump_phy_reg(reg.val, 0, 31, 0); //dump global register
12003 +                           dump_phy_reg(reg.val, 0, 31, 1); //dump local register
12004 +                       }
12005 +#else
12006 +                       /* SPEC defined Register 0~15
12007 +                        * Global Register 16~31 for each page
12008 +                        * Local Register 16~31 for each page
12009 +                        */
12010 +                       printk("SPEC defined Register");
12011 +                       if (reg.val ==32 ) {//dump all phy register
12012 +                           int i = 0;
12013 +                           for(i=0; i<5; i++){ 
12014 +                               printk("\n[Port %d]===============",i);
12015 +                               for(offset=0;offset<16;offset++) {
12016 +                                   if(offset%8==0) {
12017 +                                       printk("\n");
12018 +                               }
12019 +                               mii_mgr_read(i,offset, &value);
12020 +                               printk("%02d: %04X ",offset, value);
12021 +                               }
12022 +                           }   
12023 +                       }
12024 +                       else{
12025 +                               printk("\n[Port %d]===============",reg.val);
12026 +                               for(offset=0;offset<16;offset++) {
12027 +                                   if(offset%8==0) {
12028 +                                       printk("\n");
12029 +                               }
12030 +                               mii_mgr_read(reg.val,offset, &value);
12031 +                               printk("%02d: %04X ",offset, value);
12032 +                               }
12033 +                       }
12034 +
12035 +#if defined (CONFIG_RALINK_MT7628)
12036 +                       for(offset=0;offset<7;offset++) { //global register  page 0~6
12037 +#else
12038 +                       for(offset=0;offset<5;offset++) { //global register  page 0~4
12039 +#endif
12040 +                           if(reg.val == 32) //dump all phy register
12041 +                               dump_phy_reg(0, 16, 31, 0, offset);
12042 +                           else
12043 +                               dump_phy_reg(reg.val, 16, 31, 0, offset);
12044 +                       }
12045 +
12046 +                       if (reg.val == 32) {//dump all phy register
12047 +#if !defined (CONFIG_RAETH_HAS_PORT4)
12048 +                               for(offset=0;offset<5;offset++) { //local register port 0-port4
12049 +#else
12050 +                               for(offset=0;offset<4;offset++) { //local register port 0-port3
12051 +#endif     
12052 +                                       dump_phy_reg(offset, 16, 31, 1, 0); //dump local page 0
12053 +                                       dump_phy_reg(offset, 16, 31, 1, 1); //dump local page 1
12054 +                                       dump_phy_reg(offset, 16, 31, 1, 2); //dump local page 2
12055 +                                       dump_phy_reg(offset, 16, 31, 1, 3); //dump local page 3
12056 +                               }
12057 +                       }else {
12058 +                               dump_phy_reg(reg.val, 16, 31, 1, 0); //dump local page 0
12059 +                               dump_phy_reg(reg.val, 16, 31, 1, 1); //dump local page 1
12060 +                               dump_phy_reg(reg.val, 16, 31, 1, 2); //dump local page 2
12061 +                               dump_phy_reg(reg.val, 16, 31, 1, 3); //dump local page 3
12062 +                       }
12063 +#endif
12064 +                       break;
12065 +
12066 +#if defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
12067 +#define _ESW_REG(x)    (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12068 +               case RAETH_ESW_INGRESS_RATE:
12069 +                       copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12070 +                       offset = 0x11c + (4 * (ratelimit.port / 2));
12071 +                        value = _ESW_REG(offset);
12072 +
12073 +                       if((ratelimit.port % 2) == 0)
12074 +                       {
12075 +                               value &= 0xffff0000;
12076 +                               if(ratelimit.on_off == 1)
12077 +                               {
12078 +                                       value |= (ratelimit.on_off << 14);
12079 +                                       value |= (0x07 << 10);
12080 +                                       value |= ratelimit.bw;
12081 +                               }
12082 +                       }
12083 +                       else if((ratelimit.port % 2) == 1)
12084 +                       {
12085 +                               value &= 0x0000ffff;
12086 +                               if(ratelimit.on_off == 1)
12087 +                               {
12088 +                                       value |= (ratelimit.on_off << 30);
12089 +                                       value |= (0x07 << 26);
12090 +                                       value |= (ratelimit.bw << 16);
12091 +                               }
12092 +                       }
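+                       /* Two ports share each ingress-rate register here: even ports use
+                        * bits [15:0], odd ports bits [31:16].  Within each half, bit 14
+                        * (resp. 30) enables the limiter, a fixed field of 0x07 (presumably
+                        * a rate unit) goes to bits [12:10] (resp. [28:26]) and the low
+                        * bits carry the bandwidth value.
+                        */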
12093 +                       printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12094 +
12095 +                       _ESW_REG(offset) = value;
12096 +                       break;
12097 +
12098 +               case RAETH_ESW_EGRESS_RATE:
12099 +                       copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12100 +                       offset = 0x140 + (4 * (ratelimit.port / 2));
12101 +                        value = _ESW_REG(offset);
12102 +
12103 +                       if((ratelimit.port % 2) == 0)
12104 +                       {
12105 +                               value &= 0xffff0000;
12106 +                               if(ratelimit.on_off == 1)
12107 +                               {
12108 +                                       value |= (ratelimit.on_off << 12);
12109 +                                       value |= (0x03 << 10);
12110 +                                       value |= ratelimit.bw;
12111 +                               }
12112 +                       }
12113 +                       else if((ratelimit.port % 2) == 1)
12114 +                       {
12115 +                               value &= 0x0000ffff;
12116 +                               if(ratelimit.on_off == 1)
12117 +                               {
12118 +                                       value |= (ratelimit.on_off << 28);
12119 +                                       value |= (0x03 << 26);
12120 +                                       value |= (ratelimit.bw << 16);
12121 +                               }
12122 +                       }
12123 +                       printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12124 +                       _ESW_REG(offset) = value;
12125 +                       break;
12126 +#elif  defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
12127 +       defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12128 +#define _ESW_REG(x)    (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
12129 +               case RAETH_ESW_INGRESS_RATE:
12130 +                       copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12131 +#if defined(CONFIG_RALINK_RT6855A) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
12132 +                       offset = 0x1800 + (0x100 * ratelimit.port);
12133 +#else
12134 +                       offset = 0x1080 + (0x100 * ratelimit.port);
12135 +#endif
12136 +                        value = _ESW_REG(offset);
12137 +
12138 +                       value &= 0xffff0000;
12139 +                       if(ratelimit.on_off == 1)
12140 +                       {
12141 +                               value |= (ratelimit.on_off << 15);
12142 +                               if (ratelimit.bw < 100)
12143 +                               {
12144 +                                       value |= (0x0 << 8);
12145 +                                       value |= ratelimit.bw;
12146 +                               }else if(ratelimit.bw < 1000)
12147 +                               {
12148 +                                       value |= (0x1 << 8);
12149 +                                       value |= ratelimit.bw/10;
12150 +                               }else if(ratelimit.bw < 10000)
12151 +                               {
12152 +                                       value |= (0x2 << 8);
12153 +                                       value |= ratelimit.bw/100;
12154 +                               }else if(ratelimit.bw < 100000)
12155 +                               {
12156 +                                       value |= (0x3 << 8);
12157 +                                       value |= ratelimit.bw/1000;
12158 +                               }else 
12159 +                               {
12160 +                                       value |= (0x4 << 8);
12161 +                                       value |= ratelimit.bw/10000;
12162 +                               }
12163 +                       }
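+                       /* Rate encoding used above: bit 15 enables the limiter, bits [10:8]
+                        * hold a decimal exponent and the low byte a mantissa, so the
+                        * programmed rate is roughly mantissa * 10^exponent in the switch's
+                        * rate unit.
+                        */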
12164 +                       printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12165 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)     
12166 +                       mii_mgr_write(0x1f, offset, value);
12167 +#else                  
12168 +                       _ESW_REG(offset) = value;
12169 +#endif                 
12170 +                       break;
12171 +
12172 +               case RAETH_ESW_EGRESS_RATE:
12173 +                       copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
12174 +                       offset = 0x1040 + (0x100 * ratelimit.port);
12175 +                        value = _ESW_REG(offset);
12176 +
12177 +                       value &= 0xffff0000;
12178 +                       if(ratelimit.on_off == 1)
12179 +                       {
12180 +                               value |= (ratelimit.on_off << 15);
12181 +                               if (ratelimit.bw < 100)
12182 +                               {
12183 +                                       value |= (0x0 << 8);
12184 +                                       value |= ratelimit.bw;
12185 +                               }else if(ratelimit.bw < 1000)
12186 +                               {
12187 +                                       value |= (0x1 << 8);
12188 +                                       value |= ratelimit.bw/10;
12189 +                               }else if(ratelimit.bw < 10000)
12190 +                               {
12191 +                                       value |= (0x2 << 8);
12192 +                                       value |= ratelimit.bw/100;
12193 +                               }else if(ratelimit.bw < 100000)
12194 +                               {
12195 +                                       value |= (0x3 << 8);
12196 +                                       value |= ratelimit.bw/1000;
12197 +                               }else 
12198 +                               {
12199 +                                       value |= (0x4 << 8);
12200 +                                       value |= ratelimit.bw/10000;
12201 +                               }
12202 +                       }
12203 +                       printk("offset = 0x%4x value=0x%x\n\r", offset, value);
12204 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)     
12205 +                       mii_mgr_write(0x1f, offset, value);
12206 +#else
12207 +                       _ESW_REG(offset) = value;
12208 +#endif
12209 +                       break;
12210 +#endif
12211 +#endif // CONFIG_RT_3052_ESW
12212 +               default:
12213 +                       ret = -EOPNOTSUPP;
12214 +                       break;
12215 +
12216 +       }
12217 +
12218 +       spin_unlock_irq(&ei_local->page_lock);
12219 +       return ret;
12220 +}
12221 +
12222 +/*
12223 + * Set new MTU size
12224 + * Change the mtu of Raeth Ethernet Device
12225 + */
12226 +static int ei_change_mtu(struct net_device *dev, int new_mtu)
12227 +{
12228 +       END_DEVICE *ei_local = netdev_priv(dev);  // get priv ei_local pointer from net_dev structure
12229 +
12230 +       if ( ei_local == NULL ) {
12231 +               printk(KERN_EMERG "%s: ei_change_mtu passed a non-existent private pointer from net_dev!\n", dev->name);
12232 +               return -ENXIO;
12233 +       }
12234 +
12235 +
12236 +       if ( (new_mtu > 4096) || (new_mtu < 64)) {
12237 +               return -EINVAL;
12238 +       }
12239 +
12240 +#ifndef CONFIG_RAETH_JUMBOFRAME
12241 +       if ( new_mtu > 1500 ) {
12242 +               return -EINVAL;
12243 +       }
12244 +#endif
12245 +
12246 +       dev->mtu = new_mtu;
12247 +
12248 +       return 0;
12249 +}
12250 +
12251 +#ifdef CONFIG_RAETH_HW_VLAN_RX
12252 +static void ei_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
12253 +{
12254 +       END_DEVICE *ei_local = netdev_priv(dev);
12255 +       
12256 +       ei_local->vlgrp = grp;
12257 +
12258 +       /* enable HW VLAN RX */
12259 +       sysRegWrite(CDMP_EG_CTRL, 1);
12260 +
12261 +}
12262 +#endif
12263 +
12264 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12265 +static const struct net_device_ops ei_netdev_ops = {
12266 +        .ndo_init               = rather_probe,
12267 +        .ndo_open               = ei_open,
12268 +        .ndo_stop               = ei_close,
12269 +        .ndo_start_xmit         = ei_start_xmit_fake,
12270 +        .ndo_get_stats          = ra_get_stats,
12271 +        .ndo_set_mac_address    = eth_mac_addr,
12272 +        .ndo_change_mtu         = ei_change_mtu,
12273 +        .ndo_do_ioctl           = ei_ioctl,
12274 +        .ndo_validate_addr      = eth_validate_addr,
12275 +#ifdef CONFIG_RAETH_HW_VLAN_RX
12276 +       .ndo_vlan_rx_register   = ei_vlan_rx_register,
12277 +#endif
12278 +#ifdef CONFIG_NET_POLL_CONTROLLER
12279 +        .ndo_poll_controller    = raeth_clean,
12280 +#endif
12281 +//     .ndo_tx_timeout         = ei_tx_timeout,
12282 +};
12283 +#endif
12284 +
12285 +void ra2880_setup_dev_fptable(struct net_device *dev)
12286 +{
12287 +       RAETH_PRINT("%s is called!\n", __FUNCTION__);
12288 +
12289 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12290 +       dev->netdev_ops         = &ei_netdev_ops;
12291 +#else
12292 +       dev->open               = ei_open;
12293 +       dev->stop               = ei_close;
12294 +       dev->hard_start_xmit    = ei_start_xmit_fake;
12295 +       dev->get_stats          = ra_get_stats;
12296 +       dev->set_mac_address    = ei_set_mac_addr;
12297 +       dev->change_mtu         = ei_change_mtu;
12298 +       dev->mtu                = 1500;
12299 +       dev->do_ioctl           = ei_ioctl;
12300 +//     dev->tx_timeout         = ei_tx_timeout;
12301 +
12302 +#ifdef CONFIG_RAETH_NAPI
12303 +        dev->poll = &raeth_clean;
12304 +#if defined (CONFIG_RAETH_ROUTER)
12305 +       dev->weight = 32;
12306 +#elif defined (CONFIG_RT_3052_ESW)
12307 +       dev->weight = 32;
12308 +#else
12309 +       dev->weight = 128;
12310 +#endif
12311 +#endif
12312 +#endif
12313 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
12314 +       dev->ethtool_ops        = &ra_ethtool_ops;
12315 +#endif
12316 +#define TX_TIMEOUT (5*HZ)
12317 +       dev->watchdog_timeo = TX_TIMEOUT;
12318 +
12319 +}
12320 +
12321 +/* reset frame engine */
12322 +void fe_reset(void)
12323 +{
12324 +#if defined (CONFIG_RALINK_RT6855A)
12325 +       /* FIXME */
12326 +#else
12327 +       u32 val;
12328 +
12329 +       //val = *(volatile u32 *)(0x1b000000);
12330 +       //printk("0x1b000000 is 0x%x\n", val);
12331 +       //val = sysRegRead(0xFB110100);
12332 +       //val = 0x8000;
12333 +       //sysRegWrite(0xFB110100, val);
12334 +
12335 +
12336 +
12337 +       val = sysRegRead(RSTCTRL);
12338 +
12339 +// RT5350 needs to reset ESW and FE at the same time to avoid a PDMA panic //
12340 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
12341 +       val = val | RALINK_FE_RST | RALINK_ESW_RST ;
12342 +#else
12343 +       val = val | RALINK_FE_RST;
12344 +#endif
12345 +       sysRegWrite(RSTCTRL, val);
12346 +#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7628)
12347 +       val = val & ~(RALINK_FE_RST | RALINK_ESW_RST);
12348 +#else
12349 +       val = val & ~(RALINK_FE_RST);
12350 +#endif
12351 +
12352 +       sysRegWrite(RSTCTRL, val);
12353 +#endif
12354 +}
12355 +
12356 +/* set TRGMII */
12357 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
12358 +void trgmii_set_7621(void)
12359 +{
12360 +       u32 val = 0;
12361 +       u32 val_0 = 0;
12362 +
12363 +       val = sysRegRead(RSTCTRL);
12364 +// MT7621 needs to reset GMAC and FE first //
12365 +       val = val | RALINK_FE_RST | RALINK_ETH_RST ;
12366 +       sysRegWrite(RSTCTRL, val);
12367 +
12368 +//set TRGMII clock//
12369 +       val_0 = sysRegRead(CLK_CFG_0);
12370 +       val_0 &= 0xffffff9f;
12371 +       val_0 |= (0x1 << 5);
12372 +       sysRegWrite(CLK_CFG_0, val_0);
12373 +       mdelay(1);
12374 +       val_0 = sysRegRead(CLK_CFG_0);
12375 +       printk("set CLK_CFG_0 = 0x%x!!!!!!!!!!!!!!!!!!1\n",val_0);
12376 +       val = val & ~(RALINK_FE_RST | RALINK_ETH_RST);
12377 +       sysRegWrite(RSTCTRL, val);
12378 +}
12379 +
12380 +void trgmii_set_7530(void)
12381 +{
12382 +// set MT7530 //
12383 +#if 0 
12384 +       
12385 +       mii_mgr_write(31, 103, 0x0020);
12386 +
12387 +
12388 +       //disable EEE
12389 +       mii_mgr_write(0, 0x16, 0);
12390 +       mii_mgr_write(1, 0x16, 0);
12391 +       mii_mgr_write(2, 0x16, 0);
12392 +       mii_mgr_write(3, 0x16, 0);
12393 +       mii_mgr_write(4, 0x16, 0);
12394 +
12395 +
12396 +       //PLL reset for E2
12397 +       mii_mgr_write(31, 104, 0x0608);
12398 +       mii_mgr_write(31, 104, 0x2608);
12399 +       
12400 +       mii_mgr_write(31, 0x7808, 0x0);
12401 +       mdelay(1);
12402 +       mii_mgr_write(31, 0x7804, 0x01017e8f);
12403 +       mdelay(1);
12404 +       mii_mgr_write(31, 0x7808, 0x1);
12405 +       mdelay(1);
12406 +
12407 +#endif
12408 +#if 1
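+       /*
+        * Each four-write group below is an IEEE 802.3 Clause 22 indirect (MMD)
+        * access: register 13 selects device address 0x1f in address mode,
+        * register 14 supplies the target register, register 13 is switched to
+        * data mode (0x401f) and register 14 then carries the payload.  The
+        * sequence reprograms the MT7530 PLL for the 150 MHz TRGMII clock.
+        */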
12409 +       //CL45 command
12410 +       //PLL to 150Mhz 
12411 +       mii_mgr_write(0, 13, 0x1f);
12412 +       mii_mgr_write(0, 14, 0x404);
12413 +       mii_mgr_write(0, 13, 0x401f);
12414 +       mii_mgr_read(31, 0x7800, &regValue);
12415 +       regValue = (regValue >> 9) & 0x3;
12416 +       if(regValue == 0x3) { //25Mhz Xtal
12417 +               mii_mgr_write(0, 14, 0x0A00);//25Mhz XTAL for 150Mhz CLK
12418 +       } else if(regValue == 0x2) { //40Mhz
12419 +               mii_mgr_write(0, 14, 0x0780);//40Mhz XTAL for 150Mhz CLK
12420 +       }                
12421 +       //mii_mgr_write(0, 14, 0x0C00);//ori
12422 +       mdelay(1);
12423 +
12424 +       mii_mgr_write(0, 13, 0x1f);
12425 +       mii_mgr_write(0, 14, 0x409);
12426 +       mii_mgr_write(0, 13, 0x401f);
12427 +       mii_mgr_write(0, 14, 0x57);
12428 +       mdelay(1);
12429 +
12430 +       mii_mgr_write(0, 13, 0x1f);
12431 +       mii_mgr_write(0, 14, 0x40a);
12432 +       mii_mgr_write(0, 13, 0x401f);
12433 +       mii_mgr_write(0, 14, 0x57);
12434 +
12435 +//PLL BIAS en
12436 +       mii_mgr_write(0, 13, 0x1f);
12437 +       mii_mgr_write(0, 14, 0x403);
12438 +       mii_mgr_write(0, 13, 0x401f);
12439 +       mii_mgr_write(0, 14, 0x1800);
12440 +       mdelay(1);
12441 +
12442 +//BIAS LPF en
12443 +       mii_mgr_write(0, 13, 0x1f);
12444 +       mii_mgr_write(0, 14, 0x403);
12445 +       mii_mgr_write(0, 13, 0x401f);
12446 +       mii_mgr_write(0, 14, 0x1c00);
12447 +
12448 +//sys PLL en
12449 +       mii_mgr_write(0, 13, 0x1f);
12450 +       mii_mgr_write(0, 14, 0x401);
12451 +       mii_mgr_write(0, 13, 0x401f);
12452 +       mii_mgr_write(0, 14, 0xc020);
12453 +
12454 +//LCDDDS PWDS
12455 +       mii_mgr_write(0, 13, 0x1f);
12456 +       mii_mgr_write(0, 14, 0x406);
12457 +       mii_mgr_write(0, 13, 0x401f);
12458 +       mii_mgr_write(0, 14, 0xa030);
12459 +       mdelay(1);
12460 +
12461 +//GSW_2X_CLK
12462 +       mii_mgr_write(0, 13, 0x1f);
12463 +       mii_mgr_write(0, 14, 0x410);
12464 +       mii_mgr_write(0, 13, 0x401f);
12465 +       mii_mgr_write(0, 14, 0x0003);
12466 +
12467 +//enable P6
12468 +       mii_mgr_write(31, 0x3600, 0x5e33b);
12469 +
12470 +//enable TRGMII
12471 +       mii_mgr_write(31, 0x7830, 0x1);
12472 +#endif 
12473 +
12474 +}
12475 +#endif
12476 +
12477 +void ei_reset_task(struct work_struct *work)
12478 +{
12479 +       struct net_device *dev = dev_raether;
12480 +
12481 +       ei_close(dev);
12482 +       ei_open(dev);
12483 +
12484 +       return;
12485 +}
12486 +
12487 +void ei_tx_timeout(struct net_device *dev)
12488 +{
12489 +        END_DEVICE *ei_local = netdev_priv(dev);
12490 +
12491 +        schedule_work(&ei_local->reset_task);
12492 +}
12493 +
12494 +void setup_statistics(END_DEVICE* ei_local)
12495 +{
12496 +       ei_local->stat.tx_packets       = 0;
12497 +       ei_local->stat.tx_bytes         = 0;
12498 +       ei_local->stat.tx_dropped       = 0;
12499 +       ei_local->stat.tx_errors        = 0;
12500 +       ei_local->stat.tx_aborted_errors= 0;
12501 +       ei_local->stat.tx_carrier_errors= 0;
12502 +       ei_local->stat.tx_fifo_errors   = 0;
12503 +       ei_local->stat.tx_heartbeat_errors = 0;
12504 +       ei_local->stat.tx_window_errors = 0;
12505 +
12506 +       ei_local->stat.rx_packets       = 0;
12507 +       ei_local->stat.rx_bytes         = 0;
12508 +       ei_local->stat.rx_dropped       = 0;
12509 +       ei_local->stat.rx_errors        = 0;
12510 +       ei_local->stat.rx_length_errors = 0;
12511 +       ei_local->stat.rx_over_errors   = 0;
12512 +       ei_local->stat.rx_crc_errors    = 0;
12513 +       ei_local->stat.rx_frame_errors  = 0;
12514 +       ei_local->stat.rx_fifo_errors   = 0;
12515 +       ei_local->stat.rx_missed_errors = 0;
12516 +
12517 +       ei_local->stat.collisions       = 0;
12518 +#if defined (CONFIG_RAETH_QOS)
12519 +       ei_local->tx3_full = 0;
12520 +       ei_local->tx2_full = 0;
12521 +       ei_local->tx1_full = 0;
12522 +       ei_local->tx0_full = 0;
12523 +#else
12524 +       ei_local->tx_full = 0;
12525 +#endif
12526 +#ifdef CONFIG_RAETH_NAPI
12527 +       atomic_set(&ei_local->irq_sem, 1);
12528 +#endif
12529 +
12530 +}
12531 +
12532 +/**
12533 + * rather_probe - pick up ethernet port at boot time
12534 + * @dev: network device to probe
12535 + *
12536 + * This routine probes the ethernet port at boot time.
12537 + *
12538 + *
12539 + */
12540 +
12541 +int __init rather_probe(struct net_device *dev)
12542 +{
12543 +       int i;
12544 +       END_DEVICE *ei_local = netdev_priv(dev);
12545 +       struct sockaddr addr;
12546 +       unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
12547 +       unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
12548 +
12549 +       fe_reset();
12550 +
12551 +       //Get mac0 address from flash
12552 +#ifdef RA_MTD_RW_BY_NUM
12553 +       i = ra_mtd_read(2, GMAC0_OFFSET, 6, addr.sa_data);
12554 +#else
12555 +       i = ra_mtd_read_nm("Factory", GMAC0_OFFSET, 6, addr.sa_data);
12556 +#endif
12557 +       //If reading mtd failed or mac0 is empty, generate a mac address
12558 +       if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) || 
12559 +           (memcmp(addr.sa_data, zero2, 6) == 0)) {
12560 +               unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
12561 +       //      net_srandom(jiffies);
12562 +               memcpy(addr.sa_data, mac_addr01234, 5);
12563 +       //      addr.sa_data[5] = net_random()&0xFF;
12564 +       }
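+       /* Fallback above: if the factory MAC is unreadable, all-ones, all-zeroes,
+        * or has the multicast bit set, the first five octets are forced to
+        * 00:0C:43:28:80; the last octet keeps whatever was read from flash,
+        * since the random assignment is commented out.
+        */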
12565 +
12566 +#ifdef CONFIG_RAETH_NAPI
12567 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12568 +       netif_napi_add(dev, &ei_local->napi, raeth_clean, 128);
12569 +#endif
12570 +#endif
12571 +       ei_set_mac_addr(dev, &addr);
12572 +       spin_lock_init(&ei_local->page_lock);
12573 +       ether_setup(dev);
12574 +
12575 +#ifdef CONFIG_RAETH_LRO
12576 +       ei_local->lro_mgr.dev = dev;
12577 +        memset(&ei_local->lro_mgr.stats, 0, sizeof(ei_local->lro_mgr.stats));
12578 +        ei_local->lro_mgr.features = LRO_F_NAPI;
12579 +        ei_local->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
12580 +        ei_local->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
12581 +        ei_local->lro_mgr.max_desc = ARRAY_SIZE(ei_local->lro_arr);
12582 +        ei_local->lro_mgr.max_aggr = 64;
12583 +        ei_local->lro_mgr.frag_align_pad = 0;
12584 +        ei_local->lro_mgr.lro_arr = ei_local->lro_arr;
12585 +        ei_local->lro_mgr.get_skb_header = rt_get_skb_header;
12586 +#endif
12587 +
12588 +       setup_statistics(ei_local);
12589 +
12590 +       return 0;
12591 +}
12592 +
12593 +#ifdef CONFIG_PSEUDO_SUPPORT
12594 +int VirtualIF_ioctl(struct net_device * net_dev,
12595 +                   struct ifreq * ifr, int cmd)
12596 +{
12597 +       ra_mii_ioctl_data mii;
12598 +
12599 +       switch (cmd) {
12600 +               case RAETH_MII_READ:
12601 +                       copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12602 +                       mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
12603 +                       //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
12604 +                       copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
12605 +                       break;
12606 +
12607 +               case RAETH_MII_WRITE:
12608 +                       copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
12609 +                       //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
12610 +                       mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
12611 +                       break;
12612 +               default:
12613 +                       return -EOPNOTSUPP;
12614 +       }
12615 +
12616 +       return 0;
12617 +}
12618 +
12619 +struct net_device_stats *VirtualIF_get_stats(struct net_device *dev)
12620 +{
12621 +       PSEUDO_ADAPTER *pAd = netdev_priv(dev);
12622 +       return &pAd->stat;
12623 +}
12624 +
12625 +int VirtualIF_open(struct net_device * dev)
12626 +{
12627 +    PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12628 +
12629 +    printk("%s: ===> VirtualIF_open\n", dev->name);
12630 +
12631 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
12632 +    *((volatile u32 *)(FE_INT_ENABLE2)) |= (1<<25); //enable GE2 link change intr for MT7530 delay setting
12633 +#endif
12634 +
12635 +    netif_start_queue(pPesueoAd->PseudoDev);
12636 +
12637 +    return 0;
12638 +}
12639 +
12640 +int VirtualIF_close(struct net_device * dev)
12641 +{
12642 +    PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12643 +
12644 +    printk("%s: ===> VirtualIF_close\n", dev->name);
12645 +
12646 +    netif_stop_queue(pPesueoAd->PseudoDev);
12647 +
12648 +    return 0;
12649 +}
12650 +
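+/* VirtualIFSendPackets - transmit entry point of the pseudo (second GMAC)
+ * interface.  Frames are redirected to the underlying raeth device (and
+ * dropped if it is down); the trailing '2' passed to ei_start_xmit appears
+ * to select GMAC2.
+ */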
12651 +int VirtualIFSendPackets(struct sk_buff * pSkb,
12652 +                        struct net_device * dev)
12653 +{
12654 +    PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
12655 +    END_DEVICE *ei_local __maybe_unused;
12656 +
12657 +
12658 +    //printk("VirtualIFSendPackets --->\n");
12659 +
12660 +    ei_local = netdev_priv(dev);
12661 +    if (!(pPesueoAd->RaethDev->flags & IFF_UP)) {
12662 +       dev_kfree_skb_any(pSkb);
12663 +       return 0;
12664 +    }
12665 +    //pSkb->cb[40]=0x5a;
12666 +    pSkb->dev = pPesueoAd->RaethDev;
12667 +    ei_start_xmit(pSkb, pPesueoAd->RaethDev, 2);
12668 +    return 0;
12669 +}
12670 +
12671 +void virtif_setup_statistics(PSEUDO_ADAPTER* pAd)
12672 +{
12673 +       pAd->stat.tx_packets    = 0;
12674 +       pAd->stat.tx_bytes      = 0;
12675 +       pAd->stat.tx_dropped    = 0;
12676 +       pAd->stat.tx_errors     = 0;
12677 +       pAd->stat.tx_aborted_errors= 0;
12678 +       pAd->stat.tx_carrier_errors= 0;
12679 +       pAd->stat.tx_fifo_errors        = 0;
12680 +       pAd->stat.tx_heartbeat_errors = 0;
12681 +       pAd->stat.tx_window_errors      = 0;
12682 +
12683 +       pAd->stat.rx_packets    = 0;
12684 +       pAd->stat.rx_bytes      = 0;
12685 +       pAd->stat.rx_dropped    = 0;
12686 +       pAd->stat.rx_errors     = 0;
12687 +       pAd->stat.rx_length_errors = 0;
12688 +       pAd->stat.rx_over_errors        = 0;
12689 +       pAd->stat.rx_crc_errors = 0;
12690 +       pAd->stat.rx_frame_errors       = 0;
12691 +       pAd->stat.rx_fifo_errors        = 0;
12692 +       pAd->stat.rx_missed_errors      = 0;
12693 +
12694 +       pAd->stat.collisions    = 0;
12695 +}
12696 +
12697 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12698 +static const struct net_device_ops VirtualIF_netdev_ops = {
12699 +        .ndo_open               = VirtualIF_open,
12700 +        .ndo_stop               = VirtualIF_close,
12701 +        .ndo_start_xmit         = VirtualIFSendPackets,
12702 +        .ndo_get_stats          = VirtualIF_get_stats,
12703 +        .ndo_set_mac_address    = ei_set_mac2_addr,
12704 +        .ndo_change_mtu         = ei_change_mtu,
12705 +        .ndo_do_ioctl           = VirtualIF_ioctl,
12706 +        .ndo_validate_addr      = eth_validate_addr,
12707 +};
12708 +#endif
12709 +// Register pseudo interface
12710 +void RAETH_Init_PSEUDO(pEND_DEVICE pAd, struct net_device *net_dev)
12711 +{
12712 +    int index;
12713 +    struct net_device *dev;
12714 +    PSEUDO_ADAPTER *pPseudoAd;
12715 +    int i = 0;
12716 +    struct sockaddr addr;
12717 +    unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
12718 +    unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
12719 +
12720 +    for (index = 0; index < MAX_PSEUDO_ENTRY; index++) {
12721 +
12722 +       dev = alloc_etherdev(sizeof(PSEUDO_ADAPTER));
12723 +       if (NULL == dev)
12724 +       {
12725 +               printk(" alloc_etherdev for PSEUDO_ADAPTER failed.\n");
12726 +               return;
12727 +       }
12728 +       strcpy(dev->name, DEV2_NAME);
12729 +
12730 +       //Get mac2 address from flash
12731 +#ifdef RA_MTD_RW_BY_NUM
12732 +       i = ra_mtd_read(2, GMAC2_OFFSET, 6, addr.sa_data);
12733 +#else
12734 +       i = ra_mtd_read_nm("Factory", GMAC2_OFFSET, 6, addr.sa_data);
12735 +#endif
12736 +
12737 +       //If reading mtd failed or mac2 is empty, generate a mac address
12738 +       if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) || 
12739 +           (memcmp(addr.sa_data, zero2, 6) == 0)) {
12740 +               unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
12741 +       //      net_srandom(jiffies);
12742 +               memcpy(addr.sa_data, mac_addr01234, 5);
12743 +       //      addr.sa_data[5] = net_random()&0xFF;
12744 +       }
12745 +
12746 +       ei_set_mac2_addr(dev, &addr);
12747 +       ether_setup(dev);
12748 +       pPseudoAd = netdev_priv(dev);
12749 +
12750 +       pPseudoAd->PseudoDev = dev;
12751 +       pPseudoAd->RaethDev = net_dev;
12752 +       virtif_setup_statistics(pPseudoAd);
12753 +       pAd->PseudoDev = dev;
12754 +
12755 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
12756 +       dev->netdev_ops         = &VirtualIF_netdev_ops;
12757 +#else
12758 +       dev->hard_start_xmit = VirtualIFSendPackets;
12759 +       dev->stop = VirtualIF_close;
12760 +       dev->open = VirtualIF_open;
12761 +       dev->do_ioctl = VirtualIF_ioctl;
12762 +       dev->set_mac_address = ei_set_mac2_addr;
12763 +       dev->get_stats = VirtualIF_get_stats;
12764 +       dev->change_mtu = ei_change_mtu;
12765 +       dev->mtu = 1500;
12766 +#endif
12767 +
12768 +#if defined (CONFIG_RAETH_HW_LRO) 
12769 +    dev->features |= NETIF_F_HW_CSUM;
12770 +#else
12771 +       dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
12772 +#endif  /* CONFIG_RAETH_HW_LRO */
12773 +
12774 +#if defined(CONFIG_RALINK_MT7620)
12775 +#if defined (CONFIG_RAETH_TSO)
12776 +       if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
12777 +               dev->features |= NETIF_F_SG;
12778 +               dev->features |= NETIF_F_TSO;
12779 +       }
12780 +#endif // CONFIG_RAETH_TSO //
12781 +
12782 +#if defined (CONFIG_RAETH_TSOV6)
12783 +       if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
12784 +               dev->features |= NETIF_F_TSO6;
12785 +               dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
12786 +       }
12787 +#endif 
12788 +#else
12789 +#if defined (CONFIG_RAETH_TSO)
12790 +        dev->features |= NETIF_F_SG;
12791 +        dev->features |= NETIF_F_TSO;
12792 +#endif // CONFIG_RAETH_TSO //
12793 +
12794 +#if defined (CONFIG_RAETH_TSOV6)
12795 +        dev->features |= NETIF_F_TSO6;
12796 +        dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
12797 +#endif 
12798 +#endif // CONFIG_RALINK_MT7620 //
12799 +
12800 +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
12801 +       dev->vlan_features = dev->features;
12802 +#endif
12803 +
12804 +
12805 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
12806 +       dev->ethtool_ops = &ra_virt_ethtool_ops;
12807 +    // init mii structure
12808 +       pPseudoAd->mii_info.dev = dev;
12809 +       pPseudoAd->mii_info.mdio_read = mdio_virt_read;
12810 +       pPseudoAd->mii_info.mdio_write = mdio_virt_write;
12811 +       pPseudoAd->mii_info.phy_id_mask = 0x1f;
12812 +       pPseudoAd->mii_info.reg_num_mask = 0x1f;
12813 +       pPseudoAd->mii_info.phy_id = 0x1e;
12814 +       pPseudoAd->mii_info.supports_gmii = mii_check_gmii_support(&pPseudoAd->mii_info);
12815 +#endif
12816 +
12817 +       // Register this device
12818 +       register_netdevice(dev);
12819 +    }
12820 +}
12821 +#endif
12822 +
12823 +/**
12824 + * ei_open - Open/Initialize the ethernet port.
12825 + * @dev: network device to initialize
12826 + *
12827 + * This routine goes all-out, setting everything
12828 + * up anew at each open, even though many of these registers should only need to be set once at boot.
12829 + */
12830 +int ei_open(struct net_device *dev)
12831 +{
12832 +       int i, err;
12833 +#if !defined (CONFIG_MT7623_FPGA)
12834 +       unsigned long flags;
12835 +#endif
12836 +       END_DEVICE *ei_local;
12837 +
12838 +#ifdef CONFIG_RAETH_LRO
12839 +       const char *lan_ip_tmp; 
12840 +#ifdef CONFIG_DUAL_IMAGE
12841 +#define RT2860_NVRAM   1
12842 +#else
12843 +#define RT2860_NVRAM   0
12844 +#endif
12845 +#endif // CONFIG_RAETH_LRO //
12846 +
12847 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
12848 +       if (!try_module_get(THIS_MODULE))
12849 +       {
12850 +               printk("%s: Cannot reserve module\n", __FUNCTION__);
12851 +               return -1;
12852 +       }
12853 +#else
12854 +       MOD_INC_USE_COUNT;
12855 +#endif
12856 +
12857 +       printk("Raeth %s (",RAETH_VERSION);
12858 +#if defined (CONFIG_RAETH_NAPI)
12859 +       printk("NAPI\n");
12860 +#elif defined (CONFIG_RA_NETWORK_TASKLET_BH)
12861 +       printk("Tasklet");
12862 +#elif defined (CONFIG_RA_NETWORK_WORKQUEUE_BH)
12863 +       printk("Workqueue");
12864 +#endif
12865 +
12866 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
12867 +       printk(",SkbRecycle");
12868 +#endif
12869 +       printk(")\n");
12870 +
12871 +
12872 +       ei_local = netdev_priv(dev); // get device pointer from System
12873 +       // unsigned int flags;
12874 +
12875 +       if (ei_local == NULL)
12876 +       {
12877 +               printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
12878 +               return -ENXIO;
12879 +       }
12880 +
12881 +        /* receiving packet buffer allocation - NUM_RX_DESC x MAX_RX_LENGTH */
12882 +        for ( i = 0; i < NUM_RX_DESC; i++)
12883 +        {
12884 +#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
12885 +                ei_local->netrx0_skbuf[i] = skbmgr_dev_alloc_skb2k();
12886 +#else
12887 +                ei_local->netrx0_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12888 +#endif
12889 +                if (ei_local->netrx0_skbuf[i] == NULL ) {
12890 +                        printk("rx skbuff buffer allocation failed!");
12891 +               } else {
12892 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12893 +                   skb_reserve(ei_local->netrx0_skbuf[i], NET_IP_ALIGN);
12894 +#endif
12895 +               }
12896 +               
12897 +
12898 +#if defined (CONFIG_RAETH_HW_LRO) 
12899 +               ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12900 +        if (ei_local->netrx3_skbuf[i] == NULL ) {
12901 +            printk("rx3 skbuff buffer allocation failed!");
12902 +               } else {
12903 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12904 +                   skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
12905 +#endif
12906 +               }
12907 +               ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12908 +        if (ei_local->netrx2_skbuf[i] == NULL ) {
12909 +            printk("rx2 skbuff buffer allocation failed!");
12910 +               } else {
12911 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12912 +                   skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
12913 +#endif
12914 +               }
12915 +               ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
12916 +        if (ei_local->netrx1_skbuf[i] == NULL ) {
12917 +            printk("rx1 skbuff buffer allocation failed!");
12918 +               } else {
12919 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12920 +                   skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
12921 +#endif
12922 +               }
12923 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING) 
12924 +#if defined(CONFIG_ARCH_MT7623)
12925 +               ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12926 +        if (ei_local->netrx3_skbuf[i] == NULL ) {
12927 +            printk("rx3 skbuff buffer allocation failed!");
12928 +               } else {
12929 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12930 +                   skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
12931 +#endif
12932 +               }
12933 +               ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12934 +        if (ei_local->netrx2_skbuf[i] == NULL ) {
12935 +            printk("rx2 skbuff buffer allocation failed!");
12936 +               } else {
12937 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12938 +                   skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
12939 +#endif
12940 +               }
12941 +#endif  /* CONFIG_ARCH_MT7623 */
12942 +               ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
12943 +                if (ei_local->netrx1_skbuf[i] == NULL ) {
12944 +                        printk("rx1 skbuff buffer allocation failed!");
12945 +               } else {
12946 +#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
12947 +                   skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
12948 +#endif
12949 +               }
12950 +#endif
12951 +        }
12952 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
12953 +       trgmii_set_7621(); //reset FE/GMAC in this function
12954 +#endif
12955 +       
12956 +        fe_dma_init(dev);
12957 +       
12958 +#if defined (CONFIG_RAETH_HW_LRO)
12959 +    fe_hw_lro_init(dev);
12960 +#endif  /* CONFIG_RAETH_HW_LRO */
12961 +
12962 +       fe_sw_init(); //initialize fe and switch register
12963 +#if defined (CONFIG_MIPS)
12964 +       err = request_irq( dev->irq, ei_interrupt, IRQF_DISABLED, dev->name, dev);      // try to fix irq in open
12965 +#else
12966 +       err = request_irq(dev->irq, ei_interrupt, /*IRQF_TRIGGER_LOW*/ 0, dev->name, dev);      // try to fix irq in open
12967 +#endif 
12968 +       if (err)
12969 +           return err;
12970 +
12971 +       if ( dev->dev_addr != NULL) {
12972 +           ra2880MacAddressSet((void *)(dev->dev_addr));
12973 +       } else {
12974 +           printk("dev->dev_addr is empty !\n");
12975 +       } 
12976 +/*TODO: MT7623 MCM INT */
12977 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
12978 +       err = request_irq(SURFBOARDINT_ESW, esw_interrupt, IRQF_DISABLED, "Ralink_ESW", dev);
12979 +       if (err)
12980 +               return err;
12981 +       INIT_WORK(&ei_local->kill_sig_wq, kill_sig_workq);
12982 +#if defined (CONFIG_RALINK_MT7621)
12983 +        mii_mgr_write(31, 0x7008, 0x1f); //enable switch link change intr
12984 +       
12985 +#else
12986 +       *((volatile u32 *)(RALINK_INTCL_BASE + 0x34)) = (1<<17);
12987 +       *((volatile u32 *)(ESW_IMR)) &= ~(ESW_INT_ALL);
12988 +
12989 +#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
12990 +    defined (CONFIG_RALINK_MT7620)
12991 +       *((volatile u32 *)(ESW_P0_IntMn)) &= ~(MSK_CNT_INT_ALL);
12992 +       *((volatile u32 *)(ESW_P1_IntMn)) &= ~(MSK_CNT_INT_ALL);
12993 +       *((volatile u32 *)(ESW_P2_IntMn)) &= ~(MSK_CNT_INT_ALL);
12994 +       *((volatile u32 *)(ESW_P3_IntMn)) &= ~(MSK_CNT_INT_ALL);
12995 +       *((volatile u32 *)(ESW_P4_IntMn)) &= ~(MSK_CNT_INT_ALL);
12996 +       *((volatile u32 *)(ESW_P5_IntMn)) &= ~(MSK_CNT_INT_ALL);
12997 +       *((volatile u32 *)(ESW_P6_IntMn)) &= ~(MSK_CNT_INT_ALL);
12998 +#endif
12999 +#if defined(CONFIG_RALINK_MT7620)
13000 +       *((volatile u32 *)(ESW_P7_IntMn)) &= ~(MSK_CNT_INT_ALL);
13001 +#endif
13002 +
13003 +#endif 
13004 +#endif // CONFIG_RT_3052_ESW //
13005 +
13006 +/*TODO*/
13007 +#if !defined (CONFIG_MT7623_FPGA)
13008 +        spin_lock_irqsave(&(ei_local->page_lock), flags);
13009 +#endif
13010 +
13011 +
13012 +#ifdef DELAY_INT
13013 +        sysRegWrite(RAETH_DLY_INT_CFG, DELAY_INT_INIT);
13014 +       sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT);
13015 +    #if defined (CONFIG_RAETH_HW_LRO)
13016 +        sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
13017 +    #endif  /* CONFIG_RAETH_HW_LRO */
13018 +#else
13019 +       sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL);
13020 +    #if defined (CONFIG_RAETH_HW_LRO)
13021 +        sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
13022 +    #endif  /* CONFIG_RAETH_HW_LRO */
13023 +#endif
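+       /* With DELAY_INT the frame engine is programmed for delayed (coalesced)
+        * interrupts via RAETH_DLY_INT_CFG; otherwise every FE interrupt source
+        * is enabled immediately.  HW LRO builds additionally unmask the
+        * ALT_RPLC_INT1-3 sources.
+        */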
13024 +
13025 +#ifdef CONFIG_RAETH_QDMA
13026 +#ifdef DELAY_INT
13027 +        sysRegWrite(QDMA_DELAY_INT, DELAY_INT_INIT);
13028 +       sysRegWrite(QFE_INT_ENABLE, QFE_INT_DLY_INIT);
13029 +#else
13030 +       sysRegWrite(QFE_INT_ENABLE, QFE_INT_ALL);
13031 +
13032 +#endif
13033 +#endif
13034 +
13035 +       INIT_WORK(&ei_local->reset_task, ei_reset_task);
13036 +       
13037 +#ifdef WORKQUEUE_BH
13038 +#ifndef CONFIG_RAETH_NAPI
13039 +       INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
13040 +#endif // CONFIG_RAETH_NAPI //
13041 +#else
13042 +#ifndef CONFIG_RAETH_NAPI
13043 +#if defined (TASKLET_WORKQUEUE_SW)
13044 +       working_schedule = init_schedule;
13045 +       INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
13046 +       tasklet_init(&ei_local->rx_tasklet, ei_receive_workq, 0);
13047 +#else
13048 +       tasklet_init(&ei_local->rx_tasklet, ei_receive, 0);
13049 +#endif
13050 +#endif // CONFIG_RAETH_NAPI //
13051 +#endif // WORKQUEUE_BH //
13052 +
13053 +       netif_start_queue(dev);
13054 +
13055 +#ifdef CONFIG_RAETH_NAPI
13056 +       atomic_dec(&ei_local->irq_sem);
13057 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13058 +        napi_enable(&ei_local->napi);
13059 +#else
13060 +        netif_poll_enable(dev);
13061 +#endif
13062 +#endif
13063 +/*TODO*/
13064 +#if !defined (CONFIG_MT7623_FPGA)
13065 +       spin_unlock_irqrestore(&(ei_local->page_lock), flags);
13066 +#endif 
13067 +
13068 +#ifdef CONFIG_PSEUDO_SUPPORT
13069 +       if(ei_local->PseudoDev == NULL) {
13070 +           RAETH_Init_PSEUDO(ei_local, dev);
13071 +       }
13072 +
13073 +       if(ei_local->PseudoDev == NULL) 
13074 +               printk("Open PseudoDev failed.\n");
13075 +       else
13076 +               VirtualIF_open(ei_local->PseudoDev);
13077 +
13078 +#endif
13079 +
13080 +#ifdef CONFIG_RAETH_LRO
13081 +       lan_ip_tmp = nvram_get(RT2860_NVRAM, "lan_ipaddr");
13082 +       str_to_ip(&lan_ip, lan_ip_tmp);
13083 +       lro_para.lan_ip1 = lan_ip = htonl(lan_ip);
13084 +#endif // CONFIG_RAETH_LRO //
13085 +
13086 +#if defined (CONFIG_RAETH_HW_LRO)
13087 +    INIT_WORK(&ei_local->hw_lro_wq, ei_hw_lro_workq);
13088 +#endif  /* CONFIG_RAETH_HW_LRO */
13089 +
13090 +       forward_config(dev);
13091 +       return 0;
13092 +}
13093 +
13094 +/**
13095 + * ei_close - shut down network device
13096 + * @dev: network device to clear
13097 + *
13098 + * This routine shuts down the network device.
13099 + *
13100 + *
13101 + */
13102 +int ei_close(struct net_device *dev)
13103 +{
13104 +       int i;
13105 +       END_DEVICE *ei_local = netdev_priv(dev);        // device pointer
13106 +
13107 +       netif_stop_queue(dev);
13108 +        ra2880stop(ei_local);
13109 +
13110 +       free_irq(dev->irq, dev);
13111 +
13112 +/*TODO: MT7623 MCM INT */
13113 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
13114 +       free_irq(SURFBOARDINT_ESW, dev);
13115 +#endif 
13116 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13117 +       cancel_work_sync(&ei_local->reset_task);
13118 +#endif
13119 +
13120 +#ifdef CONFIG_PSEUDO_SUPPORT
13121 +       VirtualIF_close(ei_local->PseudoDev);
13122 +#endif
13123 +
13124 +
13125 +#ifdef WORKQUEUE_BH
13126 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13127 +       cancel_work_sync(&ei_local->rx_wq);
13128 +#endif
13129 +#else
13130 +#if defined (TASKLET_WORKQUEUE_SW)
13131 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13132 +       cancel_work_sync(&ei_local->rx_wq);
13133 +#endif
13134 +#endif
13135 +       tasklet_kill(&ei_local->tx_tasklet);
13136 +       tasklet_kill(&ei_local->rx_tasklet);
13137 +#endif // WORKQUEUE_BH //
13138 +
13139 +#ifdef CONFIG_RAETH_NAPI
13140 +       atomic_inc(&ei_local->irq_sem);
13141 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
13142 +        napi_disable(&ei_local->napi);
13143 +#else
13144 +        netif_poll_disable(dev);
13145 +#endif
13146 +#endif
13147 +
13148 +
13149 +#if defined (CONFIG_RAETH_HW_LRO)
13150 +    cancel_work_sync(&ei_local->hw_lro_wq);
13151 +#endif  /* CONFIG_RAETH_HW_LRO */   
13152 +
13153 +        for ( i = 0; i < NUM_RX_DESC; i++)
13154 +        {
13155 +                if (ei_local->netrx0_skbuf[i] != NULL) {
13156 +                        dev_kfree_skb(ei_local->netrx0_skbuf[i]);
13157 +                       ei_local->netrx0_skbuf[i] = NULL;
13158 +               }
13159 +#if defined (CONFIG_RAETH_HW_LRO)
13160 +                if (ei_local->netrx3_skbuf[i] != NULL) {
13161 +                        dev_kfree_skb(ei_local->netrx3_skbuf[i]);
13162 +                       ei_local->netrx3_skbuf[i] = NULL;
13163 +               }
13164 +                if (ei_local->netrx2_skbuf[i] != NULL) {
13165 +                        dev_kfree_skb(ei_local->netrx2_skbuf[i]);
13166 +                       ei_local->netrx2_skbuf[i] = NULL;
13167 +               }
13168 +                if (ei_local->netrx1_skbuf[i] != NULL) {
13169 +                        dev_kfree_skb(ei_local->netrx1_skbuf[i]);
13170 +                       ei_local->netrx1_skbuf[i] = NULL;
13171 +               }
13172 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13173 +#if defined(CONFIG_ARCH_MT7623)
13174 +                if (ei_local->netrx3_skbuf[i] != NULL) {
13175 +                        dev_kfree_skb(ei_local->netrx3_skbuf[i]);
13176 +                       ei_local->netrx3_skbuf[i] = NULL;
13177 +               }
13178 +                if (ei_local->netrx2_skbuf[i] != NULL) {
13179 +                        dev_kfree_skb(ei_local->netrx2_skbuf[i]);
13180 +                       ei_local->netrx2_skbuf[i] = NULL;
13181 +               }
13182 +#endif  /* CONFIG_ARCH_MT7623 */
13183 +                if (ei_local->netrx1_skbuf[i] != NULL) {
13184 +                        dev_kfree_skb(ei_local->netrx1_skbuf[i]);
13185 +                       ei_local->netrx1_skbuf[i] = NULL;
13186 +               }
13187 +#endif
13188 +        }
13189 +
13190 +       for ( i = 0; i < NUM_TX_DESC; i++)
13191 +       {
13192 +               if((ei_local->skb_free[i]!=(struct  sk_buff *)0xFFFFFFFF) && (ei_local->skb_free[i]!= 0))
13193 +               {
13194 +                       dev_kfree_skb_any(ei_local->skb_free[i]);
13195 +               }
13196 +       }
13197 +
13198 +       /* TX Ring */
13199 +#ifdef CONFIG_RAETH_QDMA
13200 +       if (ei_local->txd_pool != NULL) {
13201 +          pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct QDMA_txdesc), ei_local->txd_pool, ei_local->phy_txd_pool);
13202 +       }
13203 +       if (ei_local->free_head != NULL){
13204 +              pci_free_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), ei_local->free_head, ei_local->phy_free_head);
13205 +       }
13206 +       if (ei_local->free_page_head != NULL){
13207 +              pci_free_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, ei_local->free_page_head, ei_local->phy_free_page_head);
13208 +       }
13209 +#else  
13210 +       if (ei_local->tx_ring0 != NULL) {
13211 +          pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring0, ei_local->phy_tx_ring0);
13212 +       }
13213 +#endif       
13214 +
13215 +#if defined (CONFIG_RAETH_QOS)
13216 +       if (ei_local->tx_ring1 != NULL) {
13217 +          pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring1, ei_local->phy_tx_ring1);
13218 +       }
13219 +
13220 +#if !defined (CONFIG_RALINK_RT2880)
13221 +       if (ei_local->tx_ring2 != NULL) {
13222 +          pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring2, ei_local->phy_tx_ring2);
13223 +       }
13224 +
13225 +       if (ei_local->tx_ring3 != NULL) {
13226 +          pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring3, ei_local->phy_tx_ring3);
13227 +       }
13228 +#endif
13229 +#endif
13230 +       /* RX Ring */
13231 +#ifdef CONFIG_32B_DESC
13232 +       kfree(ei_local->rx_ring0);
13233 +#else
13234 +        pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring0, ei_local->phy_rx_ring0);
13235 +#endif
13236 +#if defined CONFIG_RAETH_QDMA && !defined(CONFIG_RAETH_QDMATX_QDMARX)  
13237 +#ifdef CONFIG_32B_DESC
13238 +       kfree(ei_local->qrx_ring);
13239 +#else
13240 +       pci_free_consistent(NULL, NUM_QRX_DESC*sizeof(struct PDMA_rxdesc), ei_local->qrx_ring, ei_local->phy_qrx_ring);
13241 +#endif
13242 +#endif 
13243 +#if defined (CONFIG_RAETH_HW_LRO)
13244 +        pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
13245 +        pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
13246 +        pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
13247 +#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
13248 +#ifdef CONFIG_32B_DESC
13249 +       kfree(ei_local->rx_ring1);
13250 +#else
13251 +#if defined(CONFIG_ARCH_MT7623)
13252 +        pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
13253 +        pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
13254 +#endif  /* CONFIG_ARCH_MT7623 */
13255 +        pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
13256 +#endif
13257 +#endif
13258 +
13259 +       printk("Free TX/RX Ring Memory!\n");
13260 +
13261 +       fe_reset();
13262 +
13263 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
13264 +       module_put(THIS_MODULE);
13265 +#else
13266 +       MOD_DEC_USE_COUNT;
13267 +#endif
13268 +       return 0;
13269 +}
13270 +
13271 +#if defined (CONFIG_RT6855A_FPGA)
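+/* Hardware-reset the FPGA board's Ethernet PHY via GPIO 12: the pin is driven
+ * low, held for a while, then released high, with the settle delays noted in
+ * the comments below. */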
13272 +void rt6855A_eth_gpio_reset(void)
13273 +{
13274 +       u8 ether_gpio = 12;
13275 +
13276 +       /* Load the ethernet gpio value to reset Ethernet PHY */
13277 +       *(unsigned long *)(RALINK_PIO_BASE + 0x00) |= 1<<(ether_gpio<<1);
13278 +       *(unsigned long *)(RALINK_PIO_BASE + 0x14) |= 1<<(ether_gpio);
13279 +       *(unsigned long *)(RALINK_PIO_BASE + 0x04) &= ~(1<<ether_gpio);
13280 +
13281 +       udelay(100000);
13282 +
13283 +       *(unsigned long *)(RALINK_PIO_BASE + 0x04) |= (1<<ether_gpio);
13284 +
13285 +       /* must wait for 0.6 seconds after reset*/
13286 +       udelay(600000);
13287 +}
13288 +#endif
13289 +
13290 +#if defined(CONFIG_RALINK_RT6855A)
13291 +void rt6855A_gsw_init(void)
13292 +{
13293 +       u32 phy_val=0;
13294 +       u32 rev=0;
13295 +
13296 +#if defined (CONFIG_RT6855A_FPGA)
+    u32 i=0;   /* loop index; only the FPGA bring-up below uses it */
13297 +    /*keep dumb switch mode */
13298 +    rt6855A_eth_gpio_reset();
13299 +
13300 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e353;//(P0,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
13301 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e353;//(P1,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
13302 +    //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
13303 +    //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
13304 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x8000;//link down
13305 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x8000;//link down
13306 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x8000;//link down
13307 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
13308 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
13309 +
13310 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
13311 +
13312 +    /* In order to use 10M/Full on the FPGA board, we configure the PHY as capable of
13313 +     * 10M Full/Half duplex, so auto-negotiation can be used on the PC side */
13314 +    for(i=6;i<8;i++){
13315 +       mii_mgr_write(i, 4, 0x07e1);   //Capable of 10M&100M Full/Half Duplex, flow control on/off
13316 +       //mii_mgr_write(i, 4, 0x0461);   //Capable of 10M Full/Half Duplex, flow control on/off
13317 +       mii_mgr_write(i, 0, 0xB100);   //reset all digital logic, except phy_reg
13318 +       mii_mgr_read(i, 9, &phy_val);
13319 +       phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement  (9.9=1000Full, 9.8=1000Half)
13320 +       mii_mgr_write(i, 9, phy_val);
13321 +    }
13322 +#elif defined (CONFIG_RT6855A_ASIC)
13323 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
13324 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
13325 +
13326 +    *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1ec) = 0x0fffffff;//Set PSE should pause 4 tx ring as default
13327 +    *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1f0) = 0x0fffffff;//switch IOT more stable
13328 +    
13329 +    *(unsigned long *)(CKGCR) &= ~(0x3 << 4); //keep rx/tx port clock ticking, disable internal clock-gating to avoid switch stuck 
13330 +  
13331 +    /*
13332 +     *Reg 31: Page Control
13333 +     * Bit 15     => PortPageSel, 1=local, 0=global
13334 +     * Bit 14:12  => PageSel, local:0~3, global:0~4
13335 +     *
13336 +     *Reg16~30:Local/Global registers
13337 +     *
13338 +    */
13339 +    /*correct  PHY  setting J8.0*/
13340 +    mii_mgr_read(0, 31, &rev);
13341 +    rev &= (0x0f);
13342 +
13343 +    mii_mgr_write(1, 31, 0x4000); //global, page 4
13344 +  
13345 +    mii_mgr_write(1, 16, 0xd4cc);
13346 +    mii_mgr_write(1, 17, 0x7444);
13347 +    mii_mgr_write(1, 19, 0x0112);
13348 +    mii_mgr_write(1, 21, 0x7160);
13349 +    mii_mgr_write(1, 22, 0x10cf);
13350 +    mii_mgr_write(1, 26, 0x0777);
13351 +    
13352 +    if(rev == 0){
13353 +           mii_mgr_write(1, 25, 0x0102);
13354 +           mii_mgr_write(1, 29, 0x8641);
13355 +    }
13356 +    else{
13357 +            mii_mgr_write(1, 25, 0x0212);
13358 +           mii_mgr_write(1, 29, 0x4640);
13359 +    }
13360 +
13361 +    mii_mgr_write(1, 31, 0x2000); //global, page 2
13362 +    mii_mgr_write(1, 21, 0x0655);
13363 +    mii_mgr_write(1, 22, 0x0fd3);
13364 +    mii_mgr_write(1, 23, 0x003d);
13365 +    mii_mgr_write(1, 24, 0x096e);
13366 +    mii_mgr_write(1, 25, 0x0fed);
13367 +    mii_mgr_write(1, 26, 0x0fc4);
13368 +    
13369 +    mii_mgr_write(1, 31, 0x1000); //global, page 1
13370 +    mii_mgr_write(1, 17, 0xe7f8);
13371 +    
13372 +    
13373 +    mii_mgr_write(1, 31, 0xa000); //local, page 2
13374 +
13375 +    mii_mgr_write(0, 16, 0x0e0e);
13376 +    mii_mgr_write(1, 16, 0x0c0c);
13377 +    mii_mgr_write(2, 16, 0x0f0f);
13378 +    mii_mgr_write(3, 16, 0x1010);
13379 +    mii_mgr_write(4, 16, 0x0909);
13380 +
13381 +    mii_mgr_write(0, 17, 0x0000);
13382 +    mii_mgr_write(1, 17, 0x0000);
13383 +    mii_mgr_write(2, 17, 0x0000);
13384 +    mii_mgr_write(3, 17, 0x0000);
13385 +    mii_mgr_write(4, 17, 0x0000);
13386 +#endif
13387 +
13388 +#if defined (CONFIG_RT6855A_ASIC)
13389 +
13390 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
13391 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
13392 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c;//disable port0-port4 internal phy, set phy base address to 12 
13393 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
13394 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
13395 +
13396 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
13397 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
13398 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
13399 +       //rt6855/6 need to modify TX/RX phase
13400 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0xc;//TX/RX CLOCK Phase select
13401 +       
13402 +       enable_auto_negotiate(1);
13403 +
13404 +       if (isICPlusGigaPHY(1)) {
13405 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
13406 +               phy_val |= 1<<10; //enable pause ability
13407 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
13408 +
13409 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
13410 +               phy_val |= 1<<9; //restart AN
13411 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
13412 +       }
13413 +
13414 +       if (isMarvellGigaPHY(1)) {
13415 +               printk("Reset MARVELL phy1\n");
13416 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
13417 +               phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
13418 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
13419 +
13420 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
13421 +               phy_val |= 1<<15; //PHY Software Reset
13422 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
13423 +        }
13424 +       if (isVtssGigaPHY(1)) {
13425 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
13426 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
13427 +               printk("Vitesse phy skew: %x --> ", phy_val);
13428 +               phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
13429 +               phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
13430 +               printk("%x\n", phy_val);
13431 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
13432 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
13433 +        }
13434 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
13435 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
13436 +#else // Port 5 Disabled //
13437 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
13438 +#endif
13439 +#endif
13440 +}
13441 +#endif
13442 +
13443 +
13444 +
13445 +
13446 +#if defined (CONFIG_MT7623_FPGA)
13447 +void setup_fpga_gsw(void)
13448 +{
13449 +       u32     i;
13450 +       u32     regValue;
13451 +
13452 +       /* reduce RGMII2 PAD driving strength */
13453 +       *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13454 +
13455 +       //RGMII1=Normal mode
13456 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13457 +
13458 +       //GMAC1= RGMII mode
13459 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13460 +
13461 +       //enable MDIO to control MT7530
13462 +       regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13463 +       regValue &= ~(0x3 << 12);
13464 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13465 +
13466 +       for(i=0;i<=4;i++)
13467 +        {
13468 +               //turn off PHY
13469 +               mii_mgr_read(i, 0x0 ,&regValue);
13470 +              regValue |= (0x1<<11);
13471 +              mii_mgr_write(i, 0x0, regValue); 
13472 +       }
13473 +        mii_mgr_write(31, 0x7000, 0x3); //reset switch
13474 +        udelay(10);
13475 +       sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
13476 +       mii_mgr_write(31, 0x3600, 0x5e337);
13477 +
13478 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
13479 +       mii_mgr_write(31, 0x3500, 0x8000);
13480 +
13481 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13482 +       //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable
13483 +       mii_mgr_read(31, 0x7804 ,&regValue);
13484 +       regValue &= ~(1<<8); //Enable Port 6
13485 +       regValue |= (1<<6); //Disable Port 5
13486 +       regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13487 +
13488 +#if defined (CONFIG_RAETH_GMAC2)
13489 +       //RGMII2=Normal mode
13490 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13491 +
13492 +       //GMAC2= RGMII mode
13493 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13494 +
13495 +       mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN; this setting may be redundant
13496 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
13497 +
13498 +       enable_auto_negotiate(0);//set polling address
13499 +       /* set MT7530 Port 5 to PHY 0/4 mode */
13500 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
13501 +       regValue &= ~((1<<13)|(1<<6));
13502 +       regValue |= ((1<<7)|(1<<16)|(1<<20));
13503 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
13504 +       regValue &= ~((1<<13)|(1<<6)|((1<<20)));
13505 +       regValue |= ((1<<7)|(1<<16));
13506 +#endif
13507 +
13508 +       //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
13509 +#endif
13510 +       regValue |= (1<<16);//change HW-TRAP
13511 +       printk("change HW-TRAP to 0x%x\n",regValue);
13512 +       mii_mgr_write(31, 0x7804 ,regValue);
13513 +#endif
13514 +       mii_mgr_write(0, 14, 0x1);  /*RGMII*/
13515 +/* set MT7530 central align */
13516 +        mii_mgr_read(31, 0x7830, &regValue);
13517 +        regValue &= ~1;
13518 +        regValue |= 1<<1;
13519 +        mii_mgr_write(31, 0x7830, regValue);
13520 +
13521 +        mii_mgr_read(31, 0x7a40, &regValue);
13522 +        regValue &= ~(1<<30);
13523 +        mii_mgr_write(31, 0x7a40, regValue);
13524 +
13525 +        regValue = 0x855;
13526 +        mii_mgr_write(31, 0x7a78, regValue);
13527 +
13528 +/*to check!!*/
13529 +       mii_mgr_write(31, 0x7b00, 0x102);  //delay setting for 10/1000M
13530 +       mii_mgr_write(31, 0x7b04, 0x14);  //delay setting for 10/1000M
13531 +
13532 +       for(i=0;i<=4;i++) {     
13533 +               mii_mgr_read(i, 4, &regValue);
13534 +                regValue |= (3<<7); //turn on 100Base-T Advertisement
13535 +                //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
13536 +               mii_mgr_write(i, 4, regValue);
13537 +       
13538 +               mii_mgr_read(i, 9, &regValue);
13539 +                //regValue |= (3<<8); //turn on 1000Base-T Advertisement
13540 +               regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
13541 +                mii_mgr_write(i, 9, regValue);
13542 +
13543 +               //restart AN
13544 +               mii_mgr_read(i, 0, &regValue);
13545 +               regValue |= (1 << 9);
13546 +               mii_mgr_write(i, 0, regValue);
13547 +       }
13548 +
13549 +       /*Tx Driving*/
13550 +       mii_mgr_write(31, 0x7a54, 0x44);  //lower driving
13551 +       mii_mgr_write(31, 0x7a5c, 0x44);  //lower driving
13552 +       mii_mgr_write(31, 0x7a64, 0x44);  //lower driving
13553 +       mii_mgr_write(31, 0x7a6c, 0x44);  //lower driving
13554 +       mii_mgr_write(31, 0x7a74, 0x44);  //lower driving
13555 +       mii_mgr_write(31, 0x7a7c, 0x44);  //lower driving
13556 +
13557 +       for(i=0;i<=4;i++)
13558 +        {
13559 +       //turn on PHY
13560 +                mii_mgr_read(i, 0x0 ,&regValue);
13561 +               regValue &= ~(0x1<<11);
13562 +               mii_mgr_write(i, 0x0, regValue);        
13563 +       }
13564 +}
13565 +#endif
13566 +
13567 +
13568 +#if defined (CONFIG_RALINK_MT7621)
13569 +
13570 +
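+/* Bring both GMACs up in RGMII mode towards an external switch: enable the
+ * MDIO pins, leave GE1 link-down and force GE2 to 1000M full duplex with flow
+ * control, as programmed below. */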
13571 +void setup_external_gsw(void)
13572 +{
13573 +       u32     regValue;
13574 +
13575 +       /* reduce RGMII2 PAD driving strength */
13576 +       *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13577 +       //enable MDIO 
13578 +       regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13579 +       regValue &= ~(0x3 << 12);
13580 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13581 +
13582 +       //RGMII1=Normal mode
13583 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13584 +       //GMAC1= RGMII mode
13585 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13586 +       
13587 +       sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x00008000);//(GE1, Link down)
13588 +
13589 +       //RGMII2=Normal mode
13590 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13591 +       //GMAC2= RGMII mode
13592 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13593 +
13594 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000M/FD, FC ON)
13595 +
13596 +}
13597 +
13598 +
13599 +
13600 +
13601 +
13602 +
13603 +
13604 +
13605 +
13606 +
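+/* Despite the name, this helper simply waits for the MT7530 VLAN table
+ * controller to go idle: it polls bit 31 of switch register 0x90 (VTCR) over
+ * MDIO and gives up with a log message after 20 tries of 70 ms each. */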
13607 +void IsSwitchVlanTableBusy(void)
13608 +{
13609 +       int j = 0;
13610 +       unsigned int value = 0;
13611 +
13612 +       for (j = 0; j < 20; j++) {
13613 +           mii_mgr_read(31, 0x90, &value);
13614 +           if ((value & 0x80000000) == 0 ){ //busy bit cleared, table command done
13615 +               break;
13616 +           }
13617 +           udelay(70000);
13618 +       }
13619 +       if (j == 20)
13620 +           printk("set vlan timeout value=0x%x.\n", value);
13621 +}
13622 +
13623 +void LANWANPartition(void)
13624 +{
13625 +/*Set  MT7530 */
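+/* Registers used below (per the inline comments): 0x2N04 is port N's control
+ * register (put into security mode), 0x2N14 holds port N's PVID, and 0x94/0x90
+ * are the VLAN table data (VAWD1) and control (VTCR) registers used to commit
+ * the member sets for VID 1 (LAN) and VID 2 (WAN). */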
13626 +#ifdef CONFIG_WAN_AT_P0
13627 +       printk("set LAN/WAN WLLLL\n");
13628 +       //WLLLL, wan at P0
13629 +       //LAN/WAN ports as security mode
13630 +       mii_mgr_write(31, 0x2004, 0xff0003);//port0
13631 +       mii_mgr_write(31, 0x2104, 0xff0003);//port1
13632 +       mii_mgr_write(31, 0x2204, 0xff0003);//port2
13633 +       mii_mgr_write(31, 0x2304, 0xff0003);//port3
13634 +       mii_mgr_write(31, 0x2404, 0xff0003);//port4
13635 +
13636 +       //set PVID
13637 +       mii_mgr_write(31, 0x2014, 0x10002);//port0
13638 +       mii_mgr_write(31, 0x2114, 0x10001);//port1
13639 +       mii_mgr_write(31, 0x2214, 0x10001);//port2
13640 +       mii_mgr_write(31, 0x2314, 0x10001);//port3
13641 +       mii_mgr_write(31, 0x2414, 0x10001);//port4
13642 +       /*port6 */
13643 +       //VLAN member
13644 +       IsSwitchVlanTableBusy();
13645 +       mii_mgr_write(31, 0x94, 0x407e0001);//VAWD1
13646 +       mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
13647 +       IsSwitchVlanTableBusy();
13648 +
13649 +       mii_mgr_write(31, 0x94, 0x40610001);//VAWD1
13650 +       mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
13651 +       IsSwitchVlanTableBusy();
13652 +#endif
13653 +#ifdef CONFIG_WAN_AT_P4
13654 +       printk("set LAN/WAN LLLLW\n");
13655 +       //LLLLW, wan at P4
13656 +       //LAN/WAN ports as security mode
13657 +       mii_mgr_write(31, 0x2004, 0xff0003);//port0
13658 +       mii_mgr_write(31, 0x2104, 0xff0003);//port1
13659 +       mii_mgr_write(31, 0x2204, 0xff0003);//port2
13660 +       mii_mgr_write(31, 0x2304, 0xff0003);//port3
13661 +       mii_mgr_write(31, 0x2404, 0xff0003);//port4
13662 +
13663 +       //set PVID
13664 +       mii_mgr_write(31, 0x2014, 0x10001);//port0
13665 +       mii_mgr_write(31, 0x2114, 0x10001);//port1
13666 +       mii_mgr_write(31, 0x2214, 0x10001);//port2
13667 +       mii_mgr_write(31, 0x2314, 0x10001);//port3
13668 +       mii_mgr_write(31, 0x2414, 0x10002);//port4
13669 +
13670 +       //VLAN member
13671 +       IsSwitchVlanTableBusy();
13672 +       mii_mgr_write(31, 0x94, 0x404f0001);//VAWD1
13673 +       mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
13674 +       IsSwitchVlanTableBusy();
13675 +       mii_mgr_write(31, 0x94, 0x40500001);//VAWD1
13676 +       mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
13677 +       IsSwitchVlanTableBusy();
13678 +#endif
13679 +}
13680 +
13681 +#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
13682 +void mt7621_eee_patch(void)
13683 +{
13684 +       u32 i;
13685 +
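+       /* Apply the EEE-related tweaks to each of the five embedded PHYs. The
+        * register 13/14 pairs below are the standard Clause-22 MMD access
+        * control/data registers; register 31 selects the extended page. */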
13686 +       for(i=0;i<5;i++)
13687 +       {
13688 +               /* Enable EEE */
13689 +               mii_mgr_write(i, 13, 0x07);
13690 +               mii_mgr_write(i, 14, 0x3c);
13691 +               mii_mgr_write(i, 13, 0x4007);
13692 +               mii_mgr_write(i, 14, 0x6);
13693 +
13694 +               /* Forced Slave mode */
13695 +               mii_mgr_write(i, 31, 0x0);
13696 +               mii_mgr_write(i, 9, 0x1600);
13697 +               /* Increase SlvDPSready time */
13698 +               mii_mgr_write(i, 31, 0x52b5);
13699 +               mii_mgr_write(i, 16, 0xafae);
13700 +               mii_mgr_write(i, 18, 0x2f);
13701 +               mii_mgr_write(i, 16, 0x8fae);
13702 +               /* Increase post_update_timer */
13703 +               mii_mgr_write(i, 31, 0x3);
13704 +               mii_mgr_write(i, 17, 0x4b);
13705 +               /* Adjust 100_mse_threshold */
13706 +               mii_mgr_write(i, 13, 0x1e);
13707 +               mii_mgr_write(i, 14, 0x123);
13708 +               mii_mgr_write(i, 13, 0x401e);
13709 +               mii_mgr_write(i, 14, 0xffff);
13710 +               /* Disable mcc
13711 +                  mii_mgr_write(i, 13, 0x1e);
13712 +                  mii_mgr_write(i, 14, 0xa6);
13713 +                  mii_mgr_write(i, 13, 0x401e);
13714 +                  mii_mgr_write(i, 14, 0x300);
13715 +               */
13716 +       }
13717 +
13718 +}
13719 +#endif
13720 +
13721 +
13722 +#if defined (CONFIG_RALINK_MT7621) 
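+/* Bring up the MT7621 built-in MT7530 switch: hardware-reset it, select
+ * RGMII/TRGMII towards GE1 according to the kernel config, retune the switch
+ * core PLL when a 40 MHz crystal is detected, program the LAN/WAN VLAN
+ * partition, enable or disable EEE, then re-enable the PHYs and the switch
+ * interrupt outputs. */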
13723 +void setup_internal_gsw(void)
13724 +{
13725 +       u32     i;
13726 +       u32     regValue;
13727 +
13728 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13729 +       /*Hardware reset Switch*/
13730 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
13731 +        udelay(1000);
13732 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
13733 +        udelay(10000);
13734 +
13735 +       /* reduce RGMII2 PAD driving strength */
13736 +       *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
13737 +
13738 +       //RGMII1=Normal mode
13739 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
13740 +
13741 +       //GMAC1= RGMII mode
13742 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
13743 +
13744 +       //enable MDIO to control MT7530
13745 +       regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13746 +       regValue &= ~(0x3 << 12);
13747 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13748 +
13749 +       for(i=0;i<=4;i++)
13750 +        {
13751 +               //turn off PHY
13752 +               mii_mgr_read(i, 0x0 ,&regValue);
13753 +              regValue |= (0x1<<11);
13754 +              mii_mgr_write(i, 0x0, regValue); 
13755 +       }
13756 +        mii_mgr_write(31, 0x7000, 0x3); //reset switch
13757 +        udelay(100);
13758 +                                              
13759 +
13760 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13761 +       trgmii_set_7530();   //reset FE, config MDIO again
13762 +
13763 +       //enable MDIO to control MT7530
13764 +       regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
13765 +       regValue &= ~(0x3 << 12);
13766 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
13767 +
13768 +       // switch to APLL if TRGMII and DDR2
13769 +       if ((sysRegRead(0xBE000010)>>4)&0x1)
13770 +       {
13771 +               apll_xtal_enable();
13772 +       }
13773 +#endif
13774 +
13775 +#if defined (CONFIG_MT7621_ASIC)
13776 +       if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
13777 +               sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
13778 +               mii_mgr_write(31, 0x3600, 0x5e30b);
13779 +       } else {
13780 +               sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
13781 +               mii_mgr_write(31, 0x3600, 0x5e33b);
13782 +       }
13783 +#elif defined (CONFIG_MT7621_FPGA)
13784 +       sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
13785 +       mii_mgr_write(31, 0x3600, 0x5e337);
13786 +#endif
13787 +
13788 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
13789 +#endif
13790 +
13791 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
13792 +       //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable
13793 +       mii_mgr_read(31, 0x7804 ,&regValue);
13794 +       regValue &= ~(1<<8); //Enable Port 6
13795 +       regValue |= (1<<6); //Disable Port 5
13796 +       regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13797 +
13798 +#if defined (CONFIG_RAETH_GMAC2)
13799 +       //RGMII2=Normal mode
13800 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
13801 +
13802 +       //GMAC2= RGMII mode
13803 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
13804 +#if !defined (CONFIG_RAETH_8023AZ_EEE) 
13805 +       mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN; this setting may be redundant
13806 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
13807 +
13808 +       enable_auto_negotiate(0);//set polling address
13809 +#endif
13810 +#if defined (CONFIG_RAETH_8023AZ_EEE)  
13811 +       mii_mgr_write(31, 0x3500, 0x5e33b); //MT7530 P5 Force 1000; this setting may be redundant
13812 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000)
13813 +#endif
13814 +
13815 +
13816 +
13817 +       /* set MT7530 Port 5 to PHY 0/4 mode */
13818 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
13819 +       regValue &= ~((1<<13)|(1<<6));
13820 +       regValue |= ((1<<7)|(1<<16)|(1<<20));
13821 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
13822 +       regValue &= ~((1<<13)|(1<<6)|(1<<20));
13823 +       regValue |= ((1<<7)|(1<<16));
13824 +#endif
13825 +       
13826 +#if defined (CONFIG_RAETH_8023AZ_EEE)  
13827 +       regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
13828 +#endif
13829 +       //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
13830 +#endif
13831 +       regValue |= (1<<16);//change HW-TRAP
13832 +       //printk("change HW-TRAP to 0x%x\n",regValue);
13833 +       mii_mgr_write(31, 0x7804 ,regValue);
13834 +#endif
13835 +       mii_mgr_read(31, 0x7800, &regValue);
13836 +       regValue = (regValue >> 9) & 0x3;
13837 +       if(regValue == 0x3) { //25Mhz Xtal
13838 +               /* do nothing */
13839 +       } else if(regValue == 0x2) { //40Mhz
13840 +
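+           /* 40 MHz crystal: reprogram the MT7530 core PLL through the PHY's
+            * Clause-22 MMD access registers (13 = control, 14 = address/data),
+            * following the disable-clock / retune-PLL / re-enable sequence
+            * described in the comments below. */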
13841 +           mii_mgr_write(0, 13, 0x1f);  // disable MT7530 core clock
13842 +           mii_mgr_write(0, 14, 0x410);
13843 +           mii_mgr_write(0, 13, 0x401f);
13844 +           mii_mgr_write(0, 14, 0x0);
13845 +
13846 +           mii_mgr_write(0, 13, 0x1f);  // disable MT7530 PLL
13847 +           mii_mgr_write(0, 14, 0x40d);
13848 +           mii_mgr_write(0, 13, 0x401f);
13849 +           mii_mgr_write(0, 14, 0x2020);
13850 +
13851 +           mii_mgr_write(0, 13, 0x1f);  // for MT7530 core clock = 500Mhz
13852 +           mii_mgr_write(0, 14, 0x40e);  
13853 +           mii_mgr_write(0, 13, 0x401f);  
13854 +           mii_mgr_write(0, 14, 0x119);   
13855 +
13856 +           mii_mgr_write(0, 13, 0x1f);  // enable MT7530 PLL
13857 +           mii_mgr_write(0, 14, 0x40d);
13858 +           mii_mgr_write(0, 13, 0x401f);
13859 +           mii_mgr_write(0, 14, 0x2820);
13860 +
13861 +           udelay(20); //suggested by CD
13862 +
13863 +           mii_mgr_write(0, 13, 0x1f);  // enable MT7530 core clock
13864 +           mii_mgr_write(0, 14, 0x410);
13865 +           mii_mgr_write(0, 13, 0x401f);
13866 +       }else { //20Mhz Xtal
13867 +
13868 +               /* TODO */
13869 +
13870 +       }
13871 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
13872 +       mii_mgr_write(0, 14, 0x3); /*TRGMII*/
13873 +#else
13874 +       mii_mgr_write(0, 14, 0x1);  /*RGMII*/
13875 +/* set MT7530 central align */
13876 +        mii_mgr_read(31, 0x7830, &regValue);
13877 +        regValue &= ~1;
13878 +        regValue |= 1<<1;
13879 +        mii_mgr_write(31, 0x7830, regValue);
13880 +
13881 +        mii_mgr_read(31, 0x7a40, &regValue);
13882 +        regValue &= ~(1<<30);
13883 +        mii_mgr_write(31, 0x7a40, regValue);
13884 +
13885 +        regValue = 0x855;
13886 +        mii_mgr_write(31, 0x7a78, regValue);
13887 +
13888 +#endif
13889 +#if !defined (CONFIG_RAETH_8023AZ_EEE) 
13890 +       mii_mgr_write(31, 0x7b00, 0x102);  //delay setting for 10/1000M
13891 +       mii_mgr_write(31, 0x7b04, 0x14);  //delay setting for 10/1000M
13892 +#endif
13893 +#if 0 
13894 +       for(i=0;i<=4;i++) {     
13895 +               mii_mgr_read(i, 4, &regValue);
13896 +                regValue |= (3<<7); //turn on 100Base-T Advertisement
13897 +                //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
13898 +               mii_mgr_write(i, 4, regValue);
13899 +       
13900 +               mii_mgr_read(i, 9, &regValue);
13901 +                regValue |= (3<<8); //turn on 1000Base-T Advertisement
13902 +               //regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
13903 +                mii_mgr_write(i, 9, regValue);
13904 +
13905 +               //restart AN
13906 +               mii_mgr_read(i, 0, &regValue);
13907 +               regValue |= (1 << 9);
13908 +               mii_mgr_write(i, 0, regValue);
13909 +       }
13910 +#endif
13911 +
13912 +       /*Tx Driving*/
13913 +       mii_mgr_write(31, 0x7a54, 0x44);  //lower driving
13914 +       mii_mgr_write(31, 0x7a5c, 0x44);  //lower driving
13915 +       mii_mgr_write(31, 0x7a64, 0x44);  //lower driving
13916 +       mii_mgr_write(31, 0x7a6c, 0x44);  //lower driving
13917 +       mii_mgr_write(31, 0x7a74, 0x44);  //lower driving
13918 +       mii_mgr_write(31, 0x7a7c, 0x44);  //lower driving
13919 +
13920 +
13921 +       LANWANPartition();
13922 +
13923 +#if !defined (CONFIG_RAETH_8023AZ_EEE) 
13924 +       //disable EEE
13925 +       for(i=0;i<=4;i++)
13926 +       {
13927 +           mii_mgr_write(i, 13, 0x7);
13928 +           mii_mgr_write(i, 14, 0x3C);
13929 +           mii_mgr_write(i, 13, 0x4007);
13930 +           mii_mgr_write(i, 14, 0x0);
13931 +       }
13932 +
13933 +       //Disable EEE 10Base-Te:
13934 +       for(i=0;i<=4;i++)
13935 +       {
13936 +           mii_mgr_write(i, 13, 0x1f);
13937 +           mii_mgr_write(i, 14, 0x027b);
13938 +           mii_mgr_write(i, 13, 0x401f);
13939 +           mii_mgr_write(i, 14, 0x1177);
13940 +       }
13941 +#endif
13942 +
13943 +       for(i=0;i<=4;i++)
13944 +        {
13945 +       //turn on PHY
13946 +                mii_mgr_read(i, 0x0 ,&regValue);
13947 +               regValue &= ~(0x1<<11);
13948 +               mii_mgr_write(i, 0x0, regValue);        
13949 +       }
13950 +       
13951 +       mii_mgr_read(31, 0x7808 ,&regValue);
13952 +        regValue |= (3<<16); //Enable INTR
13953 +       mii_mgr_write(31, 0x7808 ,regValue);
13954 +#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
13955 +                       mt7621_eee_patch();
13956 +#endif
13957 +}
13958 +#endif
13959 +
13960 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
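+/* Reprogram the analog/PLL control block so the TRGMII TX clock is sourced
+ * from the APLL: reset the control registers to defaults, pick divider values
+ * based on the crystal frequency read from SYSCTL, then flip the TRGMII TX
+ * clock select at the end of the function. */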
13961 +void apll_xtal_enable(void)
13962 +{
13963 +        unsigned long data = 0;
13964 +        unsigned long regValue = 0;
13965 +
13966 +        /* Firstly, reset all required register to default value */
13967 +        sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008000);
13968 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, 0x01401d61);
13969 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, 0x38233d0e);
13970 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, 0x80120004);
13971 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
13972 +
13973 +        /* toggle RG_XPTL_CHG */
13974 +        sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008800);
13975 +        sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008c00);
13976 +
13977 +        data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0014);
13978 +        data &= ~(0x0000ffc0);
13979 +
13980 +       regValue = *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x10);
13981 +       regValue = (regValue >> 6) & 0x7;
13982 +       if(regValue < 6) { //20/40Mhz Xtal
13983 +               data |= REGBIT(0x1d, 8);
13984 +       }else {
13985 +               data |= REGBIT(0x17, 8);
13986 +       }
13987 +       
13988 +       if(regValue < 6) { //20/40Mhz Xtal
13989 +               data |= REGBIT(0x1, 6);
13990 +       }
13991 +        
13992 +       sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, data);
13993 +
13994 +        data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0018);
13995 +        data &= ~(0xf0773f00);
13996 +        data |= REGBIT(0x3, 28);
13997 +        data |= REGBIT(0x2, 20);
13998 +       if(regValue < 6) { //20/40Mhz Xtal
13999 +               data |= REGBIT(0x3, 16);
14000 +       }else {
14001 +               data |= REGBIT(0x2, 16);
14002 +       }
14003 +        data |= REGBIT(0x3, 12);
14004 +
14005 +       if(regValue < 6) { //20/40Mhz Xtal
14006 +               data |= REGBIT(0xd, 8);
14007 +       }else {
14008 +               data |= REGBIT(0x7, 8);
14009 +       }
14010 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, data);
14011 +
14012 +       if(regValue < 6) { //20/40Mhz Xtal
14013 +               sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
14014 +       }else {
14015 +               sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1697cc39);
14016 +       }
14017 +       /* Common setting - Set PLLGP_CTRL_4 */
14018 +       /* 1. Bit 31 */
14019 +        data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14020 +        data &= ~(REGBIT(0x1, 31));
14021 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14022 +
14023 +       /* 2. Bit 0 */
14024 +       data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14025 +        data |= REGBIT(0x1, 0);
14026 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14027 +
14028 +       /* 3. Bit 3 */
14029 +       data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14030 +        data |= REGBIT(0x1, 3);
14031 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14032 +
14033 +       /* 4. Bit 8 */
14034 +       data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14035 +        data |= REGBIT(0x1, 8);
14036 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14037 +
14038 +       /* 5. Bit 6 */
14039 +        data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14040 +        data |= REGBIT(0x1, 6);
14041 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14042 +
14043 +       /* 6. Bits 5 and 7 */
14044 +        data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14045 +        data |= REGBIT(0x1, 5);
14046 +        data |= REGBIT(0x1, 7);
14047 +        sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14048 +
14049 +       /* 7. Bit 17 */
14050 +        data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
14051 +        data &= ~REGBIT(0x1, 17);
14052 +       sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
14053 +        
14054 +       /* 8. TRGMII TX CLK SEL APLL */
14055 +       data = sysRegRead(0xbe00002c);
14056 +       data &= 0xffffff9f;
14057 +       data |= 0x40;
14058 +       sysRegWrite(0xbe00002c, data);
14059 +
14060 +}
14061 +#endif
14062 +
14063 +#endif
14064 +#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
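+/* Initialise the RT6855/MT7620 built-in gigabit switch: on FPGA targets the
+ * ports are forced into a dumb-switch setup; on MT7620 silicon the per-package
+ * PHY corrections (BGA vs. non-BGA parts) are applied and CPU port 6 is
+ * force-linked, then ports 4 and 5 are configured according to the selected
+ * MAC/PHY/RGMII mode. */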
14065 +void rt_gsw_init(void)
14066 +{
14067 +#if defined (CONFIG_P4_MAC_TO_PHY_MODE) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
14068 +       u32 phy_val=0;
14069 +#endif
14070 +#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
14071 +       u32 i=0;
14072 +#elif defined (CONFIG_MT7620_ASIC)
14073 +       u32 is_BGA=0;
14074 +#endif
14075 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14076 +        unsigned int regValue = 0;
14077 +#endif
14078 +#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
14079 +    /*keep dumb switch mode */
14080 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14081 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14082 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x5e333;//(P2, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14083 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x5e333;//(P3, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14084 +#if defined (CONFIG_RAETH_HAS_PORT4)
14085 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14086 +#else
14087 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e333;//(P4, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
14088 +#endif
14089 +    *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14090 +
14091 +    /* In order to use 10M/Full on the FPGA board, we configure the PHY as capable of
14092 +     * 10M Full/Half duplex, so auto-negotiation can be used on the PC side */
14093 +#if defined (CONFIG_RAETH_HAS_PORT4)
14094 +    for(i=0;i<4;i++){
14095 +#else
14096 +    for(i=0;i<5;i++){
14097 +#endif
14098 +       mii_mgr_write(i, 4, 0x0461);   //Capable of 10M Full/Half Duplex, flow control on/off
14099 +       mii_mgr_write(i, 0, 0xB100);   //reset all digital logic, except phy_reg
14100 +    }
14101 +
14102 +#endif
14103 +
14104 +#if defined (CONFIG_PDMA_NEW)
14105 +    *(unsigned long *)(SYSCFG1) |= (0x1 << 8); //PCIE_RC_MODE=1
14106 +#endif
14107 +
14108 +
14109 +#if defined (CONFIG_MT7620_ASIC) && !defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14110 +    is_BGA = (sysRegRead(RALINK_SYSCTL_BASE + 0xc) >> 16) & 0x1;
14111 +    /*
14112 +    * Reg 31: Page Control
14113 +    * Bit 15     => PortPageSel, 1=local, 0=global
14114 +    * Bit 14:12  => PageSel, local:0~3, global:0~4
14115 +    *
14116 +    * Reg16~30:Local/Global registers
14117 +    *
14118 +    */
14119 +    /*correct  PHY  setting L3.0 BGA*/
14120 +    mii_mgr_write(1, 31, 0x4000); //global, page 4
14121 +  
14122 +    mii_mgr_write(1, 17, 0x7444);
14123 +    if(is_BGA){
14124 +       mii_mgr_write(1, 19, 0x0114);
14125 +    }else{
14126 +       mii_mgr_write(1, 19, 0x0117);
14127 +    }
14128 +
14129 +    mii_mgr_write(1, 22, 0x10cf);
14130 +    mii_mgr_write(1, 25, 0x6212);
14131 +    mii_mgr_write(1, 26, 0x0777);
14132 +    mii_mgr_write(1, 29, 0x4000);
14133 +    mii_mgr_write(1, 28, 0xc077);
14134 +    mii_mgr_write(1, 24, 0x0000);
14135 +    
14136 +    mii_mgr_write(1, 31, 0x3000); //global, page 3
14137 +    mii_mgr_write(1, 17, 0x4838);
14138 +
14139 +    mii_mgr_write(1, 31, 0x2000); //global, page 2
14140 +    if(is_BGA){
14141 +       mii_mgr_write(1, 21, 0x0515);
14142 +       mii_mgr_write(1, 22, 0x0053);
14143 +       mii_mgr_write(1, 23, 0x00bf);
14144 +       mii_mgr_write(1, 24, 0x0aaf);
14145 +       mii_mgr_write(1, 25, 0x0fad);
14146 +       mii_mgr_write(1, 26, 0x0fc1);
14147 +    }else{
14148 +       mii_mgr_write(1, 21, 0x0517);
14149 +       mii_mgr_write(1, 22, 0x0fd2);
14150 +       mii_mgr_write(1, 23, 0x00bf);
14151 +       mii_mgr_write(1, 24, 0x0aab);
14152 +       mii_mgr_write(1, 25, 0x00ae);
14153 +       mii_mgr_write(1, 26, 0x0fff);
14154 +    }
14155 +    mii_mgr_write(1, 31, 0x1000); //global, page 1
14156 +    mii_mgr_write(1, 17, 0xe7f8);
14157 +    
14158 +    mii_mgr_write(1, 31, 0x8000); //local, page 0
14159 +    mii_mgr_write(0, 30, 0xa000);
14160 +    mii_mgr_write(1, 30, 0xa000);
14161 +    mii_mgr_write(2, 30, 0xa000);
14162 +    mii_mgr_write(3, 30, 0xa000);
14163 +#if !defined (CONFIG_RAETH_HAS_PORT4)   
14164 +    mii_mgr_write(4, 30, 0xa000);
14165 +#endif
14166 +
14167 +    mii_mgr_write(0, 4, 0x05e1);
14168 +    mii_mgr_write(1, 4, 0x05e1);
14169 +    mii_mgr_write(2, 4, 0x05e1);
14170 +    mii_mgr_write(3, 4, 0x05e1);
14171 +#if !defined (CONFIG_RAETH_HAS_PORT4)   
14172 +    mii_mgr_write(4, 4, 0x05e1);
14173 +#endif
14174 +
14175 +    mii_mgr_write(1, 31, 0xa000); //local, page 2
14176 +    mii_mgr_write(0, 16, 0x1111);
14177 +    mii_mgr_write(1, 16, 0x1010);
14178 +    mii_mgr_write(2, 16, 0x1515);
14179 +    mii_mgr_write(3, 16, 0x0f0f);
14180 +#if !defined (CONFIG_RAETH_HAS_PORT4)   
14181 +    mii_mgr_write(4, 16, 0x1313);
14182 +#endif
14183 +
14184 +#if !defined (CONFIG_RAETH_8023AZ_EEE) 
14185 +    mii_mgr_write(1, 31, 0xb000); //local, page 3
14186 +    mii_mgr_write(0, 17, 0x0);
14187 +    mii_mgr_write(1, 17, 0x0);
14188 +    mii_mgr_write(2, 17, 0x0);
14189 +    mii_mgr_write(3, 17, 0x0);
14190 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14191 +    mii_mgr_write(4, 17, 0x0);
14192 +#endif
14193 +#endif
14194 +
14195 +
14196 +
14197 +#if 0
14198 +    // for ethernet extended mode
14199 +    mii_mgr_write(1, 31, 0x3000);
14200 +    mii_mgr_write(1, 19, 0x122);
14201 +    mii_mgr_write(1, 20, 0x0044);
14202 +    mii_mgr_write(1, 23, 0xa80c);
14203 +    mii_mgr_write(1, 24, 0x129d);
14204 +    mii_mgr_write(1, 31, 9000);
14205 +    mii_mgr_write(0, 18, 0x140c);
14206 +    mii_mgr_write(1, 18, 0x140c);
14207 +    mii_mgr_write(2, 18, 0x140c);
14208 +    mii_mgr_write(3, 18, 0x140c);
14209 +    mii_mgr_write(0, 0, 0x3300);
14210 +    mii_mgr_write(1, 0, 0x3300);
14211 +    mii_mgr_write(2, 0, 0x3300);
14212 +    mii_mgr_write(3, 0, 0x3300);
14213 +#if !defined (CONFIG_RAETH_HAS_PORT4)
14214 +    mii_mgr_write(4, 18, 0x140c);
14215 +    mii_mgr_write(4, 0, 0x3300);
14216 +#endif
14217 +#endif
14218 +
14219 +#endif
14220 +
14221 +#if defined(CONFIG_RALINK_MT7620)
14222 +       if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
14223 +               *(unsigned long *)(RALINK_ETH_SW_BASE+0x701c) = 0x800000c; //enlarge FE2SW_IPG
14224 +       }
14225 +#endif // CONFIG_RALINK_MT7620 //
14226 +
14227 +
14228 +
14229 +#if defined (CONFIG_MT7620_FPGA)|| defined (CONFIG_MT7620_ASIC)
14230 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
14231 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0x7f7f7fe0;//Set Port6 CPU Port
14232 +
14233 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14234 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
14235 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c; //disable port 0 ~ 4 internal phy, set phy base address to 12
14236 +       /*MT7620 need mac learning for PPE*/
14237 +       //*(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
14238 +       //*(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
14239 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14240 +       //rxclk_skew, txclk_skew = 0
14241 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
14242 +#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
14243 +
14244 +       *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14245 +
14246 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;//(P4, AN)
14247 +       *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14248 +       //rxclk_skew, txclk_skew = 0
14249 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14250 +
14251 +
14252 +       /* set MT7530 Port 0 to PHY mode */
14253 +       mii_mgr_read(31, 0x7804 ,&regValue);
14254 +#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
14255 +       regValue &= ~((1<<13)|(1<<6)|(1<<5)|(1<<15));
14256 +       regValue |= ((1<<7)|(1<<16)|(1<<20)|(1<<24));
14257 +       //mii_mgr_write(31, 0x7804 ,0x115c8f);
14258 +#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14259 +       regValue &= ~((1<<13)|(1<<6)|(1<<20)|(1<<5)|(1<<15));
14260 +       regValue |= ((1<<7)|(1<<16)|(1<<24));
14261 +#endif
14262 +       regValue &= ~(1<<8); //Enable Port 6
14263 +       mii_mgr_write(31, 0x7804 ,regValue); //bit 24 standalone switch
14264 +
14265 +/* set MT7530 central align */
14266 +        mii_mgr_read(31, 0x7830, &regValue);
14267 +        regValue &= ~1;
14268 +        regValue |= 1<<1;
14269 +        mii_mgr_write(31, 0x7830, regValue);
14270 +
14271 +        mii_mgr_read(31, 0x7a40, &regValue);
14272 +        regValue &= ~(1<<30);
14273 +        mii_mgr_write(31, 0x7a40, regValue);
14274 +
14275 +        regValue = 0x855;
14276 +        mii_mgr_write(31, 0x7a78, regValue);
14277 +
14278 +       /*AN should be set after MT7530 HWSTRAP*/
14279 +#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
14280 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000100;//(P0, AN polling)
14281 +#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14282 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000504;//(P4, AN polling)
14283 +#endif
14284 +#endif
14285 +
14286 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
14287 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14288 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14289 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
14290 +       *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
14291 +
14292 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
14293 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14294 +       *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14295 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
14296 +       
14297 +       enable_auto_negotiate(1);
14298 +
14299 +       if (isICPlusGigaPHY(1)) {
14300 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
14301 +               phy_val |= 1<<10; //enable pause ability
14302 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
14303 +
14304 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14305 +               phy_val |= 1<<9; //restart AN
14306 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14307 +       }else if (isMarvellGigaPHY(1)) {
14308 +#if defined (CONFIG_MT7620_FPGA)
14309 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
14310 +               phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement  (9.9=1000Full, 9.8=1000Half)
14311 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
14312 +#endif
14313 +               printk("Reset MARVELL phy1\n");
14314 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
14315 +               phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14316 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
14317 +
14318 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14319 +               phy_val |= 1<<15; //PHY Software Reset
14320 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14321 +        }else if (isVtssGigaPHY(1)) {
14322 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
14323 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
14324 +               printk("Vitesse phy skew: %x --> ", phy_val);
14325 +               phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14326 +               phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14327 +               printk("%x\n", phy_val);
14328 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
14329 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
14330 +        }
14331 +
14332 +
14333 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
14334 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14335 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14336 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
14337 +       *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
14338 +
14339 +#else // Port 5 Disabled //
14340 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
14341 +       *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode
14342 +#endif
14343 +#endif
14344 +
14345 +#if defined (CONFIG_P4_RGMII_TO_MAC_MODE)
14346 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e33b;//(P4, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
14347 +       *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14348 +       //rxclk_skew, txclk_skew = 0
14349 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14350 +
14351 +#elif defined (CONFIG_P4_MII_TO_MAC_MODE)
14352 +       *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14353 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=Mii Mode
14354 +       *(unsigned long *)(SYSCFG1) |= (0x1 << 14);
14355 +
14356 +#elif defined (CONFIG_P4_MAC_TO_PHY_MODE)
14357 +       *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14358 +       *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
14359 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
14360 +
14361 +       enable_auto_negotiate(1);
14362 +
14363 +       if (isICPlusGigaPHY(2)) {
14364 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, &phy_val);
14365 +               phy_val |= 1<<10; //enable pause ability
14366 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, phy_val);
14367 +
14368 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
14369 +               phy_val |= 1<<9; //restart AN
14370 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
14371 +       }else if (isMarvellGigaPHY(2)) {
14372 +#if defined (CONFIG_MT7620_FPGA)
14373 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &phy_val);
14374 +               phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement  (9.9=1000Full, 9.8=1000Half)
14375 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, phy_val);
14376 +#endif
14377 +               printk("Reset MARVELL phy2\n");
14378 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &phy_val);
14379 +               phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14380 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, phy_val);
14381 +
14382 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
14383 +               phy_val |= 1<<15; //PHY Software Reset
14384 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
14385 +        }else if (isVtssGigaPHY(2)) {
14386 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0001); //extended page
14387 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &phy_val);
14388 +               printk("Vitesse phy skew: %x --> ", phy_val);
14389 +               phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14390 +               phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14391 +               printk("%x\n", phy_val);
14392 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, phy_val);
14393 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0000); //main registers
14394 +        }
14395 +
14396 +#elif defined (CONFIG_P4_RMII_TO_MAC_MODE)
14397 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
14398 +       *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
14399 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RvMii Mode
14400 +       *(unsigned long *)(SYSCFG1) |= (0x2 << 14);
14401 +#elif defined (CONFIG_GE_RGMII_MT7530_P0_AN) || defined (CONFIG_GE_RGMII_MT7530_P4_AN)
14402 +#else // Port 4 Disabled //
14403 +        *(unsigned long *)(SYSCFG1) |= (0x3 << 14); //GE2_MODE=RJ45 Mode
14404 +       *(unsigned long *)(0xb0000060) |= (1 << 10); //set RGMII2 to GPIO mode
14405 +#endif
14406 +
14407 +}
14408 +#endif
14409 +
14410 +#if defined (CONFIG_RALINK_MT7628)
14411 +
14412 +void mt7628_ephy_init(void)
14413 +{
14414 +       int i;
14415 +       u32 phy_val;
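+       /* Initialise the five embedded 10/100 EPHYs: select the relevant pages
+        * via register 31, enable or disable EEE through the MMD 13/14 access
+        * registers, apply per-PHY analog settings, then load the global
+        * 100Base-T AOI tuning and the idle-state fix on pages G5/G4. */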
14416 +       mii_mgr_write(0, 31, 0x2000); //change G2 page
14417 +       mii_mgr_write(0, 26, 0x0000);
14418 +
14419 +       for(i=0; i<5; i++){
14420 +               mii_mgr_write(i, 31, 0x8000); //change L0 page
14421 +               mii_mgr_write(i,  0, 0x3100);
14422 +
14423 +#if defined (CONFIG_RAETH_8023AZ_EEE)  
14424 +               mii_mgr_read(i, 26, &phy_val);// EEE setting
14425 +               phy_val |= (1 << 5);
14426 +               mii_mgr_write(i, 26, phy_val);
14427 +#else
14428 +               //disable EEE
14429 +               mii_mgr_write(i, 13, 0x7);
14430 +               mii_mgr_write(i, 14, 0x3C);
14431 +               mii_mgr_write(i, 13, 0x4007);
14432 +               mii_mgr_write(i, 14, 0x0);
14433 +#endif
14434 +               mii_mgr_write(i, 30, 0xa000);
14435 +               mii_mgr_write(i, 31, 0xa000); // change L2 page
14436 +               mii_mgr_write(i, 16, 0x0606);
14437 +               mii_mgr_write(i, 23, 0x0f0e);
14438 +               mii_mgr_write(i, 24, 0x1610);
14439 +               mii_mgr_write(i, 30, 0x1f15);
14440 +               mii_mgr_write(i, 28, 0x6111);
14441 +
14442 +               mii_mgr_read(i, 4, &phy_val);
14443 +               phy_val |= (1 << 10);
14444 +               mii_mgr_write(i, 4, phy_val);
14445 +       }
14446 +
14447 +        //100Base AOI setting
14448 +       mii_mgr_write(0, 31, 0x5000);  //change G5 page
14449 +       mii_mgr_write(0, 19, 0x004a);
14450 +       mii_mgr_write(0, 20, 0x015a);
14451 +       mii_mgr_write(0, 21, 0x00ee);
14452 +       mii_mgr_write(0, 22, 0x0033);
14453 +       mii_mgr_write(0, 23, 0x020a);
14454 +       mii_mgr_write(0, 24, 0x0000);
14455 +       mii_mgr_write(0, 25, 0x024a);
14456 +       mii_mgr_write(0, 26, 0x035a);
14457 +       mii_mgr_write(0, 27, 0x02ee);
14458 +       mii_mgr_write(0, 28, 0x0233);
14459 +       mii_mgr_write(0, 29, 0x000a);
14460 +       mii_mgr_write(0, 30, 0x0000);
14461 +       /* Fix EPHY idle state abnormal behavior */
14462 +       mii_mgr_write(0, 31, 0x4000); //change G4 page
14463 +       mii_mgr_write(0, 29, 0x000d);
14464 +       mii_mgr_write(0, 30, 0x0500);
14465 +
14466 +}
14467 +
14468 +#endif
14469 +
14470 +
14471 +#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
14472 +void rt305x_esw_init(void)
14473 +{
14474 +       int i=0;
14475 +       u32 phy_val=0, val=0;
14476 +#if defined (CONFIG_RT3052_ASIC)
14477 +       u32 phy_val2;
14478 +#endif
14479 +
14480 +#if defined (CONFIG_RT5350_ASIC)
14481 +       *(unsigned long *)(RALINK_ETH_SW_BASE+0x0168) = 0x17;
14482 +#endif
14483 +
14484 +       /*
14485 +        * FC_RLS_TH=200, FC_SET_TH=160
14486 +        * DROP_RLS=120, DROP_SET_TH=80
14487 +        */
14488 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0008) = 0xC8A07850;
14489 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00E4) = 0x00000000;
14490 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0014) = 0x00405555;
14491 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0050) = 0x00002001;
14492 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0090) = 0x00007f7f;
14493 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0098) = 0x00007f3f; //disable VLAN
14494 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00CC) = 0x0002500c;
14495 +#ifndef CONFIG_UNH_TEST
14496 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x0008a301; //hashing algorithm=XOR48, aging interval=300sec
14497 +#else
14498 +       /*
14499 +        * bit[30]:1    Backoff Algorithm Option: The latest one to pass UNH test
14500 +        * bit[29]:1    Length of Received Frame Check Enable
14501 +        * bit[8]:0     Enable collision 16 packet abort and late collision abort
14502 +        * bit[7:6]:01  Maximum Packet Length: 1518
14503 +        */
14504 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x6008a241;
14505 +#endif
14506 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x008C) = 0x02404040;
14507 +#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC) || defined (CONFIG_RT5350_ASIC) || defined (CONFIG_MT7628_ASIC)
14508 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x3f502b28; //Change polling Ext PHY Addr=0x1F
14509 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0x00000000;
14510 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x7d000000; //1us cycle number=125 (FE's clock=125Mhz)
14511 +#elif defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA) || defined (CONFIG_RT5350_FPGA) || defined (CONFIG_MT7628_FPGA)
14512 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x00f03ff9; //polling Ext PHY Addr=0x0, force port5 as 100F/D (disable auto-polling)
14513 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0xffdf1f00;
14514 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x0d000000; //1us cycle number=13 (FE's clock=12.5Mhz)
14515 +
14516 +       /* In order to use 10M/Full on the FPGA board, we configure the PHY as capable of
14517 +        * 10M Full/Half duplex, so auto-negotiation can be used on the PC side */
14518 +        for(i=0;i<5;i++){
14519 +           mii_mgr_write(i, 4, 0x0461);   //Capable of 10M Full/Half Duplex, flow control on/off
14520 +           mii_mgr_write(i, 0, 0xB100);   //reset all digital logic, except phy_reg
14521 +       }
14522 +#endif
14523 +       
14524 +       /*
14525 +        * set port 5 force to 1000M/Full when connecting to switch or iNIC
14526 +        */
14527 +#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
14528 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14529 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14530 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3fff; //force 1000M full duplex
14531 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0xf<<20); //rxclk_skew, txclk_skew = 0
14532 +#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
14533 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14534 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14535 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff); 
14536 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
14537 +
14538 +#if defined (CONFIG_RALINK_RT3352)
14539 +        *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
14540 +        *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
14541 +#endif
14542 +
14543 +#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
14544 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14545 +       *(unsigned long *)(0xb0000060) &= ~(1 << 7); //set MDIO to Normal mode
14546 +#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC)
14547 +       enable_auto_negotiate(1);
14548 +#endif
14549 +        if (isMarvellGigaPHY(1)) {
14550 +#if defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA)
14551 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
14552 +               phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement  (9.9=1000Full, 9.8=1000Half)
14553 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
14554 +#endif
14555 +               printk("\n Reset MARVELL phy\n");
14556 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
14557 +               phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
14558 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
14559 +
14560 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
14561 +               phy_val |= 1<<15; //PHY Software Reset
14562 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
14563 +        }
14564 +       if (isVtssGigaPHY(1)) {
14565 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
14566 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
14567 +               printk("Vitesse phy skew: %x --> ", phy_val);
14568 +               phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
14569 +               phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
14570 +               printk("%x\n", phy_val);
14571 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
14572 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
14573 +        }
14574 +       
14575 +#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
14576 +       *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
14577 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
14578 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff); 
14579 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
14580 +        
14581 +#if defined (CONFIG_RALINK_RT3352)
14582 +       *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
14583 +        *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
14584 +#endif
14585 +#else // Port 5 Disabled //
14586 +
14587 +#if defined (CONFIG_RALINK_RT3052)
14588 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
14589 +        *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
14590 +        *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO41-GPIO50)
14591 +        *(unsigned long *)(0xb0000674) = 0xFFF; //GPIO41-GPIO50 output mode
14592 +        *(unsigned long *)(0xb000067C) = 0x0; //GPIO41-GPIO50 output low
14593 +#elif defined (CONFIG_RALINK_RT3352)
14594 +        *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
14595 +       *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
14596 +        *(unsigned long *)(0xb0000624) = 0xC0000000; //GPIO22-GPIO23 output mode
14597 +        *(unsigned long *)(0xb000062C) = 0xC0000000; //GPIO22-GPIO23 output high
14598 +        
14599 +        *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO24-GPIO35)
14600 +       *(unsigned long *)(0xb000064C) = 0xFFF; //GPIO24-GPIO35 output mode
14601 +        *(unsigned long *)(0xb0000654) = 0xFFF; //GPIO24-GPIO35 output high
14602 +#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
14603 +       /* do nothing */
14604 +#endif
14605 +#endif // CONFIG_P5_RGMII_TO_MAC_MODE //
14606 +
14607 +
14608 +#if defined (CONFIG_RT3052_ASIC)
14609 +       rw_rf_reg(0, 0, &phy_val);
14610 +        phy_val = phy_val >> 4;
14611 +
14612 +        if(phy_val > 0x5) {
14613 +
14614 +            rw_rf_reg(0, 26, &phy_val);
14615 +            phy_val2 = (phy_val | (0x3 << 5));
14616 +            rw_rf_reg(1, 26, &phy_val2);
14617 +
14618 +                       // reset EPHY
14619 +                       val = sysRegRead(RSTCTRL);
14620 +                       val = val | RALINK_EPHY_RST;
14621 +                       sysRegWrite(RSTCTRL, val);
14622 +                       val = val & ~(RALINK_EPHY_RST);
14623 +                       sysRegWrite(RSTCTRL, val);
14624 +
14625 +            rw_rf_reg(1, 26, &phy_val);
14626 +
14627 +            //select local register
14628 +            mii_mgr_write(0, 31, 0x8000);
14629 +            for(i=0;i<5;i++){
14630 +                mii_mgr_write(i, 26, 0x1600);   //TX10 waveform coefficient //LSB=0 disable PHY
14631 +                mii_mgr_write(i, 29, 0x7058);   //TX100/TX10 AD/DA current bias
14632 +                mii_mgr_write(i, 30, 0x0018);   //TX100 slew rate control
14633 +            }
14634 +
14635 +            //select global register
14636 +            mii_mgr_write(0, 31, 0x0);
14637 +            mii_mgr_write(0,  1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14638 +            mii_mgr_write(0,  2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14639 +            mii_mgr_write(0,  3, 0xa17f); //enlarge agcsel threshold 6
14640 +//#define ENABLE_LDPS
14641 +#if defined (ENABLE_LDPS)
14642 +            mii_mgr_write(0, 12, 0x7eaa);
14643 +            mii_mgr_write(0, 22, 0x252f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14644 +#else
14645 +            mii_mgr_write(0, 12, 0x0);
14646 +            mii_mgr_write(0, 22, 0x052f);
14647 +#endif
14648 +            mii_mgr_write(0, 14, 0x65);   //longer TP_IDL tail length
14649 +            mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14650 +            mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14651 +            mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14652 +            mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
14653 +            mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14654 +            mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14655 +           mii_mgr_write(0, 31, 0x8000); //select local register
14656 +
14657 +            for(i=0;i<5;i++){
14658 +                //LSB=1 enable PHY
14659 +                mii_mgr_read(i, 26, &phy_val);
14660 +                phy_val |= 0x0001;
14661 +                mii_mgr_write(i, 26, phy_val);
14662 +            }
14663 +       } else {
14664 +           //select local register
14665 +            mii_mgr_write(0, 31, 0x8000);
14666 +            for(i=0;i<5;i++){
14667 +                mii_mgr_write(i, 26, 0x1600);   //TX10 waveform coefficient //LSB=0 disable PHY
14668 +                mii_mgr_write(i, 29, 0x7058);   //TX100/TX10 AD/DA current bias
14669 +                mii_mgr_write(i, 30, 0x0018);   //TX100 slew rate control
14670 +            }
14671 +
14672 +            //select global register
14673 +            mii_mgr_write(0, 31, 0x0);
14674 +            mii_mgr_write(0,  1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14675 +            mii_mgr_write(0,  2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14676 +            mii_mgr_write(0,  3, 0xa17f); //enlarge agcsel threshold 6
14677 +            mii_mgr_write(0, 14, 0x65);   //longer TP_IDL tail length
14678 +            mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14679 +            mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14680 +            mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14681 +            mii_mgr_write(0, 22, 0x052f); //tune TP_IDL tail and head waveform
14682 +            mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
14683 +            mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14684 +           mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14685 +           mii_mgr_write(0, 31, 0x8000); //select local register
14686 +
14687 +            for(i=0;i<5;i++){
14688 +                //LSB=1 enable PHY
14689 +                mii_mgr_read(i, 26, &phy_val);
14690 +                phy_val |= 0x0001;
14691 +                mii_mgr_write(i, 26, phy_val);
14692 +            }
14693 +       }
14694 +#elif defined (CONFIG_RT3352_ASIC)
14695 +       //PHY IOT
14696 +       // reset EPHY
14697 +       val = sysRegRead(RSTCTRL);
14698 +       val = val | RALINK_EPHY_RST;
14699 +       sysRegWrite(RSTCTRL, val);
14700 +       val = val & ~(RALINK_EPHY_RST);
14701 +       sysRegWrite(RSTCTRL, val);
14702 +
14703 +       //select local register
14704 +        mii_mgr_write(0, 31, 0x8000);
14705 +        for(i=0;i<5;i++){
14706 +            mii_mgr_write(i, 26, 0x1600);   //TX10 waveform coefficient //LSB=0 disable PHY
14707 +            mii_mgr_write(i, 29, 0x7016);   //TX100/TX10 AD/DA current bias
14708 +            mii_mgr_write(i, 30, 0x0038);   //TX100 slew rate control
14709 +        }
14710 +
14711 +        //select global register
14712 +        mii_mgr_write(0, 31, 0x0);
14713 +        mii_mgr_write(0,  1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14714 +        mii_mgr_write(0,  2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14715 +        mii_mgr_write(0,  3, 0xa17f); //enlarge agcsel threshold 6
14716 +        mii_mgr_write(0, 12, 0x7eaa);
14717 +        mii_mgr_write(0, 14, 0x65);   //longer TP_IDL tail length
14718 +        mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14719 +        mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14720 +        mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14721 +        mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14722 +        mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
14723 +        mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14724 +        mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14725 +        mii_mgr_write(0, 31, 0x8000); //select local register
14726 +
14727 +        for(i=0;i<5;i++){
14728 +            //LSB=1 enable PHY
14729 +            mii_mgr_read(i, 26, &phy_val);
14730 +            phy_val |= 0x0001;
14731 +            mii_mgr_write(i, 26, phy_val);
14732 +        }
14733 +
14734 +#elif defined (CONFIG_RT5350_ASIC)
14735 +       //PHY IOT
14736 +       // reset EPHY
14737 +       val = sysRegRead(RSTCTRL);
14738 +       val = val | RALINK_EPHY_RST;
14739 +       sysRegWrite(RSTCTRL, val);
14740 +       val = val & ~(RALINK_EPHY_RST);
14741 +       sysRegWrite(RSTCTRL, val);
14742 +
14743 +       //select local register
14744 +        mii_mgr_write(0, 31, 0x8000);
14745 +        for(i=0;i<5;i++){
14746 +            mii_mgr_write(i, 26, 0x1600);   //TX10 waveform coefficient //LSB=0 disable PHY
14747 +            mii_mgr_write(i, 29, 0x7015);   //TX100/TX10 AD/DA current bias
14748 +            mii_mgr_write(i, 30, 0x0038);   //TX100 slew rate control
14749 +        }
14750 +
14751 +        //select global register
14752 +        mii_mgr_write(0, 31, 0x0);
14753 +        mii_mgr_write(0,  1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
14754 +        mii_mgr_write(0,  2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
14755 +        mii_mgr_write(0,  3, 0xa17f); //enlarge agcsel threshold 6
14756 +        mii_mgr_write(0, 12, 0x7eaa);
14757 +        mii_mgr_write(0, 14, 0x65);   //longer TP_IDL tail length
14758 +        mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
14759 +        mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
14760 +        mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
14761 +        mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
14762 +        mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
14763 +        mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
14764 +        mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
14765 +        mii_mgr_write(0, 31, 0x8000); //select local register
14766 +
14767 +        for(i=0;i<5;i++){
14768 +            //LSB=1 enable PHY
14769 +            mii_mgr_read(i, 26, &phy_val);
14770 +            phy_val |= 0x0001;
14771 +            mii_mgr_write(i, 26, phy_val);
14772 +        }
14773 +#elif defined (CONFIG_MT7628_ASIC)
14774 +/*INIT MT7628 PHY HERE*/
14775 +       val = sysRegRead(RT2880_AGPIOCFG_REG);
14776 +#if defined (CONFIG_ETH_ONE_PORT_ONLY)
14777 +       val |= (MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
14778 +       val = val & ~(MT7628_P0_EPHY_AIO_EN);
14779 +#else
14780 +       val = val & ~(MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
14781 +#endif
14782 +       if ((*((volatile u32 *)(RALINK_SYSCTL_BASE + 0x8))) & 0x10000)
14783 +               val &= ~0x1f0000;
14784 +       sysRegWrite(RT2880_AGPIOCFG_REG, val);
14785 +
14786 +       val = sysRegRead(RSTCTRL);
14787 +       val = val | RALINK_EPHY_RST;
14788 +       sysRegWrite(RSTCTRL, val);
14789 +       val = val & ~(RALINK_EPHY_RST);
14790 +       sysRegWrite(RSTCTRL, val);
14791 +
14792 +
14793 +       val = sysRegRead(RALINK_SYSCTL_BASE + 0x64);
14794 +#if defined (CONFIG_ETH_ONE_PORT_ONLY)
14795 +       val &= 0xf003f003;
14796 +       val |= 0x05540554;
14797 +       sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0 EPHY LED mode
14798 +#else
14799 +       val &= 0xf003f003;
14800 +       sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0~P4 EPHY LED mode
14801 +#endif
14802 +
14803 +       udelay(5000);
14804 +       mt7628_ephy_init();
14805 +
14806 +#endif
14807 +}
14808 +#endif
14809 +
14810 +#if defined (CONFIG_ARCH_MT7623)       /* TODO: just for bring up, should be removed!!! */
14811 +void mt7623_pinmux_set(void)
14812 +{
14813 +       unsigned long regValue;
14814 +       
14815 +       //printk("[mt7623_pinmux_set]start\n");
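+       /* Each pin below has a 3-bit mode field in its GPIO mode register;
+        * clearing BITS(n,n+2) and setting BIT(n) selects function 1, which is
+        * the "(1)" noted in each pin comment. */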
14816 +       /* Pin277: ESW_RST (1) */
14817 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14818 +       regValue &= ~(BITS(6,8));
14819 +       regValue |= BIT(6);
14820 +       *(volatile u_long *)(0xf0005ad0) = regValue;
14821 +
14822 +       /* Pin262: G2_TXEN (1) */
14823 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14824 +       regValue &= ~(BITS(6,8));
14825 +       regValue |= BIT(6);
14826 +       *(volatile u_long *)(0xf0005aa0) = regValue;
14827 +       /* Pin263: G2_TXD3 (1) */
14828 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14829 +       regValue &= ~(BITS(9,11));
14830 +       regValue |= BIT(9);
14831 +       *(volatile u_long *)(0xf0005aa0) = regValue;
14832 +       /* Pin264: G2_TXD2 (1) */
14833 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
14834 +       regValue &= ~(BITS(12,14));
14835 +       regValue |= BIT(12);
14836 +       *(volatile u_long *)(0xf0005aa0) = regValue;
14837 +       /* Pin265: G2_TXD1 (1) */
14838 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14839 +       regValue &= ~(BITS(0,2));
14840 +       regValue |= BIT(0);
14841 +       *(volatile u_long *)(0xf0005ab0) = regValue;
14842 +       /* Pin266: G2_TXD0 (1) */
14843 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14844 +       regValue &= ~(BITS(3,5));
14845 +       regValue |= BIT(3);
14846 +       *(volatile u_long *)(0xf0005ab0) = regValue;
14847 +       /* Pin267: G2_TXC (1) */
14848 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14849 +       regValue &= ~(BITS(6,8));
14850 +       regValue |= BIT(6);
14851 +       *(volatile u_long *)(0xf0005ab0) = regValue;
14852 +       /* Pin268: G2_RXC (1) */
14853 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14854 +       regValue &= ~(BITS(9,11));
14855 +       regValue |= BIT(9);
14856 +       *(volatile u_long *)(0xf0005ab0) = regValue;
14857 +       /* Pin269: G2_RXD0 (1) */
14858 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
14859 +       regValue &= ~(BITS(12,14));
14860 +       regValue |= BIT(12);
14861 +       *(volatile u_long *)(0xf0005ab0) = regValue;
14862 +       /* Pin270: G2_RXD1 (1) */
14863 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14864 +       regValue &= ~(BITS(0,2));
14865 +       regValue |= BIT(0);
14866 +       *(volatile u_long *)(0xf0005ac0) = regValue;
14867 +       /* Pin271: G2_RXD2 (1) */
14868 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14869 +       regValue &= ~(BITS(3,5));
14870 +       regValue |= BIT(3);
14871 +       *(volatile u_long *)(0xf0005ac0) = regValue;
14872 +       /* Pin272: G2_RXD3 (1) */
14873 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14874 +       regValue &= ~(BITS(6,8));
14875 +       regValue |= BIT(6);
14876 +       *(volatile u_long *)(0xf0005ac0) = regValue;
14877 +       /* Pin274: G2_RXDV (1) */
14878 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
14879 +       regValue &= ~(BITS(12,14));
14880 +       regValue |= BIT(12);
14881 +       *(volatile u_long *)(0xf0005ac0) = regValue;
14882 +
14883 +       /* Pin275: MDC (1) */
14884 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14885 +       regValue &= ~(BITS(0,2));
14886 +       regValue |= BIT(0);
14887 +       *(volatile u_long *)(0xf0005ad0) = regValue;
14888 +       /* Pin276: MDIO (1) */
14889 +       regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
14890 +       regValue &= ~(BITS(3,5));
14891 +       regValue |= BIT(3);
14892 +       *(volatile u_long *)(0xf0005ad0) = regValue;
14893 +       //printk("[mt7623_pinmux_set]end\n");
14894 +}
14895 +
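+/* Crude settling delay: repeatedly read a hardware register (0xFB110610)
+ * instead of using a timer, so the real delay depends on bus and CPU speed. */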
14896 +void wait_loop(void) {
14897 +       int i,j;
14898 +       int read_data;
14899 +       j =0;
14900 +       while (j< 10) {
14901 +               for(i = 0; i<32; i = i+1){
14902 +                       read_data = *(volatile u_long *)(0xFB110610);
14903 +               }
14904 +               j++;
14905 +       }
14906 +}
14907 +
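+/* Calibrate the MT7623 side of the TRGMII receive path. With MT7530 put in
+ * training mode (sending the 0x55 pattern), the RXC delay is stepped until
+ * all five data lanes receive without errors; each lane's RXD delay is then
+ * swept to find the first (tap_a) and last (tap_b) error-free taps, and the
+ * midpoint is programmed as the final delay. */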
14908 +void trgmii_calibration_7623(void) {
14909 +
14910 +       unsigned int  tap_a[5]; // minimum delay at which all bits are correct
14911 +       unsigned int  tap_b[5]; // maximum delay at which all bits are correct
14912 +       unsigned int  final_tap[5];
14913 +       unsigned int  bslip_en;
14914 +       unsigned int  rxc_step_size;
14915 +       unsigned int  rxd_step_size;
14916 +       unsigned int  read_data;
14917 +       unsigned int  tmp;
14918 +       unsigned int  rd_wd;
14919 +       int  i;
14920 +       unsigned int err_cnt[5];
14921 +       unsigned int init_toggle_data;
14922 +       unsigned int err_flag[5];
14923 +       unsigned int err_total_flag;
14924 +       unsigned int training_word;
14925 +       unsigned int rd_tap;
14926 +
14927 +       u32  TRGMII_7623_base;
14928 +       u32  TRGMII_7623_RD_0;
14929 +       u32  TRGMII_RD_1;
14930 +       u32  TRGMII_RD_2;
14931 +       u32  TRGMII_RD_3;
14932 +       u32  TRGMII_RXCTL;
14933 +       u32  TRGMII_RCK_CTRL;
14934 +       u32 TRGMII_7530_base;
14935 +       TRGMII_7623_base = 0xFB110300;
14936 +       TRGMII_7623_RD_0 = TRGMII_7623_base + 0x10;
14937 +       TRGMII_RCK_CTRL = TRGMII_7623_base;
14938 +       rxd_step_size =0x1;
14939 +       rxc_step_size =0x4;
14940 +       init_toggle_data = 0x00000055;
14941 +       training_word    = 0x000000AC;
14942 +
14943 +       //printk("Calibration begin ........");
14944 +       *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff;   // RX clock gating in MT7623
14945 +       *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000;   // Assert RX  reset in MT7623
14946 +       *(volatile u_long *)(TRGMII_7623_base +0x78) |= 0x00002000;   // Set TX OE edge in  MT7623
14947 +       *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000;   // Disable RX clock gating in MT7623
14948 +       *(volatile u_long *)(TRGMII_7623_base )      &= 0x7fffffff;   // Release RX reset in MT7623
14949 +       //printk("Check Point 1 .....\n");
14950 +       for (i = 0 ; i<5 ; i++) {
14951 +               *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) |= 0x80000000;   // Set bslip_en = 1
14952 +       }
14953 +
14954 +       //printk("Enable Training Mode in MT7530\n");
14955 +       mii_mgr_read(0x1F,0x7A40,&read_data);
14956 +       read_data |= 0xc0000000;
14957 +       mii_mgr_write(0x1F,0x7A40,read_data);  //Enable Training Mode in MT7530
14958 +       err_total_flag = 0;
14959 +       //printk("Adjust RXC delay in MT7623\n");
14960 +       read_data =0x0;
14961 +       while (err_total_flag == 0 && read_data != 0x68) {
14962 +               //printk("2nd Enable EDGE CHK in MT7623\n");
14963 +               /* Enable EDGE CHK in MT7623*/
14964 +               for (i = 0 ; i<5 ; i++) {
14965 +                       tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
14966 +                       tmp |= 0x40000000;
14967 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
14968 +               }
14969 +               wait_loop();
14970 +               err_total_flag = 1;
14971 +               for  (i = 0 ; i<5 ; i++) {
14972 +                       err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8)  & 0x0000000f;
14973 +                       rd_wd = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 16)  & 0x000000ff;
14974 +                       //printk("ERR_CNT = %d, RD_WD =%x\n",err_cnt[i],rd_wd);
14975 +                       if ( err_cnt[i] !=0 ) {
14976 +                               err_flag[i] = 1;
14977 +                       }
14978 +                       else if (rd_wd != 0x55) {
14979 +                               err_flag[i] = 1;
14980 +                       }       
14981 +                       else {
14982 +                               err_flag[i] = 0;
14983 +                       }
14984 +                       err_total_flag = err_flag[i] &  err_total_flag;
14985 +               }
14986 +
14987 +               //printk("2nd Disable EDGE CHK in MT7623\n");
14988 +               /* Disable EDGE CHK in MT7623*/
14989 +               for (i = 0 ; i<5 ; i++) {
14990 +                       tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
14991 +                       tmp |= 0x40000000;
14992 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
14993 +               }
14994 +               wait_loop();
14995 +               //printk("2nd Disable EDGE CHK in MT7623\n");
14996 +               /* Adjust RXC delay */
14997 +               *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000;   // Assert RX  reset in MT7623
14998 +               *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff;   // RX clock gating in MT7623
14999 +               read_data = *(volatile u_long *)(TRGMII_7623_base);
15000 +               if (err_total_flag == 0) {
15001 +                 tmp = (read_data & 0x0000007f) + rxc_step_size;
15002 +                 //printk(" RXC delay = %d\n", tmp);
15003 +                 read_data >>= 8;
15004 +                 read_data &= 0xffffff80;
15005 +                 read_data |= tmp;
15006 +                 read_data <<=8;
15007 +                 read_data &= 0xffffff80;
15008 +                 read_data |=tmp;
15009 +                 *(volatile u_long *)(TRGMII_7623_base)  =   read_data;
15010 +               }
15011 +                 read_data &=0x000000ff;
15012 +                 *(volatile u_long *)(TRGMII_7623_base )      &= 0x7fffffff;   // Release RX reset in MT7623
15013 +                 *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000;   // Disable RX clock gating in MT7623
15014 +                 for (i = 0 ; i<5 ; i++) {
15015 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) =  (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) | 0x80000000;  // Set bslip_en = ~bit_slip_en
15016 +                 }
15017 +       }
15018 +       //printk("Finish RXC Adjustment while loop\n");
15019 +       //printk("Read RD_WD MT7623\n");
15020 +       /* Read RD_WD MT7623*/
15021 +       for  (i = 0 ; i<5 ; i++) {
15022 +               rd_tap=0;
15023 +               while (err_flag[i] != 0) {
15024 +                       /* Enable EDGE CHK in MT7623*/
15025 +                       tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15026 +                       tmp |= 0x40000000;
15027 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15028 +                       wait_loop();
15029 +                       read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15030 +                       err_cnt[i] = (read_data >> 8)  & 0x0000000f;     // Read MT7623 Errcnt
15031 +                       rd_wd = (read_data >> 16)  & 0x000000ff;
15032 +                       if (err_cnt[i] != 0 || rd_wd !=0x55){
15033 +                          err_flag [i] =  1;
15034 +                       }   
15035 +                       else {
15036 +                          err_flag[i] =0;
15037 +                       }       
15038 +                       /* Disable EDGE CHK in MT7623*/
15039 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) &= 0x4fffffff;
15040 +                       tmp |= 0x40000000;
15041 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15042 +                       wait_loop();
15043 +                       //err_cnt[i] = ((read_data) >> 8)  & 0x0000000f;     // Read MT7623 Errcnt
15044 +                       if (err_flag[i] !=0) {
15045 +                           rd_tap    = (read_data & 0x0000007f) + rxd_step_size;                     // Add RXD delay in MT7623
15046 +                           read_data = (read_data & 0xffffff80) | rd_tap;
15047 +                           *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15048 +                           tap_a[i] = rd_tap;
15049 +                       } else {
15050 +                            rd_tap    = (read_data & 0x0000007f) + 4;
15051 +                           read_data = (read_data & 0xffffff80) | rd_tap;
15052 +                           *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15053 +                       }       
15054 +                       //err_cnt[i] = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8) >> 8)  & 0x0000000f;     // Read MT7623 Errcnt
15055 +
15056 +               }
15057 +               //printk("%dth bit  Tap_a = %d\n", i, tap_a[i]);
15058 +       }
15059 +       //printk("Last While Loop\n");
15060 +       for  (i = 0 ; i<5 ; i++) {
15061 +               //printk(" Bit%d\n", i);
15062 +               rd_tap =0;
15063 +               while ((err_cnt[i] == 0) && (rd_tap !=128)) {
15064 +                       read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15065 +                       rd_tap    = (read_data & 0x0000007f) + rxd_step_size;                     // Add RXD delay in MT7623
15066 +                       read_data = (read_data & 0xffffff80) | rd_tap;
15067 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15068 +                       /* Enable EDGE CHK in MT7623*/
15069 +                       tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15070 +                       tmp |= 0x40000000;
15071 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15072 +                       wait_loop();
15073 +                       err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8)  & 0x0000000f;     // Read MT7623 Errcnt
15074 +                       /* Disable EDGE CHK in MT7623*/
15075 +                       tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15076 +                       tmp |= 0x40000000;
15077 +                       *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15078 +                       wait_loop();
15079 +                       //err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8)  & 0x0000000f;     // Read MT7623 Errcnt
15080 +
15081 +               }
15082 +               tap_b[i] =   rd_tap;// -rxd_step_size;                                        // Record the max delay TAP_B
15083 +               //printk("tap_b[%d] is %d \n", i,tap_b[i]);
15084 +               final_tap[i] = (tap_a[i]+tap_b[i])/2;                                              //  Calculate RXD delay = (TAP_A + TAP_B)/2
15085 +               //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
15086 +               read_data = (read_data & 0xffffff80) | final_tap[i];
15087 +               *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
15088 +       }
15089 +//     /*word alignment*/
15090 +//     mii_mgr_read(0x1F,0x7A50,&read_data);
15091 +//     read_data &= ~(0xff);
15092 +//     read_data |= 0xac;
15093 +//     mii_mgr_write(0x1F,0x7A50,read_data);
15094 +//     while (i <10) {
15095 +//             i++;
15096 +//             wait_loop();
15097 +//     }
15098 +//     /* Enable EDGE CHK in MT7623*/
15099 +//     for (i=0; i<5; i++) {
15100 +//             tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15101 +//             tmp |= 0x40000000;
15102 +//             *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15103 +//             wait_loop();
15104 +//             /* Disable EDGE CHK in MT7623*/
15105 +//             tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
15106 +//             tmp |= 0x40000000;
15107 +//             *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
15108 +//             wait_loop();
15109 +//             read_data = *(volatile u_long *)(TRGMII_7623_RD_0+i*8);
15110 +//             printk(" MT7623 training word = %x\n", read_data);
15111 +//     }
15112 +
15113 +
15114 +       mii_mgr_read(0x1F,0x7A40,&read_data);
15115 +       //printk(" MT7530 0x7A40 = %x\n", read_data);
15116 +       read_data &=0x3fffffff;
15117 +       mii_mgr_write(0x1F,0x7A40,read_data);
15118 +}
15119 +
15120 +
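+/* Same calibration as above, but for the MT7530 side of the TRGMII link:
+ * training mode is enabled on the MT7623 transmitter and the MT7530 RXC/RXD
+ * delay taps are adjusted through indirect mii_mgr accesses at PHY address
+ * 0x1F. */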
15121 +void trgmii_calibration_7530(void){ 
15122 +
15123 +       unsigned int  tap_a[5];
15124 +       unsigned int  tap_b[5];
15125 +       unsigned int  final_tap[5];
15126 +       unsigned int  bslip_en;
15127 +       unsigned int  rxc_step_size;
15128 +       unsigned int  rxd_step_size;
15129 +       unsigned int  read_data;
15130 +       unsigned int  tmp;
15131 +       int  i,j;
15132 +       unsigned int err_cnt[5];
15133 +       unsigned int rd_wd;
15134 +       unsigned int init_toggle_data;
15135 +       unsigned int err_flag[5];
15136 +       unsigned int err_total_flag;
15137 +       unsigned int training_word;
15138 +       unsigned int rd_tap;
15139 +
15140 +       u32  TRGMII_7623_base;
15141 +       u32  TRGMII_7530_RD_0;
15142 +       u32  TRGMII_RD_1;
15143 +       u32  TRGMII_RD_2;
15144 +       u32  TRGMII_RD_3;
15145 +       u32  TRGMII_RXCTL;
15146 +       u32  TRGMII_RCK_CTRL;
15147 +       u32 TRGMII_7530_base;
15148 +       u32 TRGMII_7530_TX_base;
15149 +       TRGMII_7623_base = 0xFB110300;
15150 +       TRGMII_7530_base = 0x7A00;
15151 +       TRGMII_7530_RD_0 = TRGMII_7530_base + 0x10;
15152 +       TRGMII_RCK_CTRL = TRGMII_7623_base;
15153 +       rxd_step_size = 0x1;
15154 +       rxc_step_size = 0x8;
15155 +       init_toggle_data = 0x00000055;
15156 +       training_word = 0x000000AC;
15157 +
15158 +       TRGMII_7530_TX_base = TRGMII_7530_base + 0x50;
15159 +
15160 +       //printk("Calibration begin ........\n");
15161 +       *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
15162 +       mii_mgr_read(0x1F, 0x7a10, &read_data);
15163 +       //printk("TRGMII_7530_RD_0 is %x\n", read_data);
15164 +
15165 +       mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15166 +       read_data &= 0x3fffffff;
15167 +       mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data);     // RX clock gating in MT7530
15168 +
15169 +       mii_mgr_read(0x1F,TRGMII_7530_base+0x78,&read_data);
15170 +       read_data |= 0x00002000;
15171 +       mii_mgr_write(0x1F,TRGMII_7530_base+0x78,read_data);     // Set TX OE edge in  MT7530
15172 +
15173 +       mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15174 +       read_data |= 0x80000000;
15175 +       mii_mgr_write(0x1F,TRGMII_7530_base,read_data);          // Assert RX  reset in MT7530
15176 +
15177 +
15178 +       mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15179 +       read_data &= 0x7fffffff;
15180 +       mii_mgr_write(0x1F,TRGMII_7530_base,read_data);          // Release RX reset in MT7530
15181 +
15182 +       mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15183 +       read_data |= 0xC0000000;
15184 +       mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data);     // Disable RX clock gating in MT7530
15185 +
15186 +       //printk("Enable Training Mode in MT7623\n");
15187 +       /*Enable Training Mode in MT7623*/
15188 +       *(volatile u_long *)(TRGMII_7623_base + 0x40) &= 0xbfffffff;
15189 +       *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
15190 +       *(volatile u_long *)(TRGMII_7623_base + 0x78) &= 0xfffff0ff;
15191 +       *(volatile u_long *)(TRGMII_7623_base + 0x78) |= 0x00000400;
15192 +
15193 +       err_total_flag =0;
15194 +       //printk("Adjust RXC delay in MT7530\n");
15195 +       read_data =0x0;
15196 +       while (err_total_flag == 0 && (read_data != 0x68)) {
15197 +               //printk("2nd Enable EDGE CHK in MT7530\n");
15198 +               /* Enable EDGE CHK in MT7530*/
15199 +               for (i = 0 ; i<5 ; i++) {
15200 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15201 +                       read_data |= 0x40000000;
15202 +                       read_data &= 0x4fffffff;
15203 +                       mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15204 +                       wait_loop();
15205 +                       //printk("2nd Disable EDGE CHK in MT7530\n");
15206 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&err_cnt[i]);
15207 +                       //printk("***** MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
15208 +                       //printk("MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
15209 +                       err_cnt[i] >>= 8;
15210 +                       err_cnt[i] &= 0x0000ff0f;
15211 +                       rd_wd  = err_cnt[i] >> 8;
15212 +                       rd_wd &= 0x000000ff;    
15213 +                       err_cnt[i] &= 0x0000000f;
15214 +                       //mii_mgr_read(0x1F,0x7a10,&read_data);
15215 +                       if ( err_cnt[i] !=0 ) {
15216 +                               err_flag[i] = 1;
15217 +                       }
15218 +                       else if (rd_wd != 0x55) {
15219 +                                err_flag[i] = 1;
15220 +                       } else {        
15221 +                               err_flag[i] = 0;
15222 +                       }
15223 +                       if (i==0) {
15224 +                          err_total_flag = err_flag[i];
15225 +                       } else {
15226 +                          err_total_flag = err_flag[i] & err_total_flag;
15227 +                       }       
15228 +               /* Disable EDGE CHK in MT7530*/
15229 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15230 +                       read_data |= 0x40000000;
15231 +                       read_data &= 0x4fffffff;
15232 +                       mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15233 +                         wait_loop();
15234 +               }
15235 +               /*Adjust RXC delay*/
15236 +               if (err_total_flag ==0) {
15237 +                  mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15238 +                  read_data |= 0x80000000;
15239 +                  mii_mgr_write(0x1F,TRGMII_7530_base,read_data);          // Assert RX  reset in MT7530
15240 +
15241 +                  mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15242 +                  read_data &= 0x3fffffff;
15243 +                  mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data);       // RX clock gating in MT7530
15244 +
15245 +                  mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15246 +                  tmp = read_data;
15247 +                  tmp &= 0x0000007f;
15248 +                  tmp += rxc_step_size;
15249 +                  //printk("Current rxc delay = %d\n", tmp);
15250 +                  read_data &= 0xffffff80;
15251 +                  read_data |= tmp;
15252 +                  mii_mgr_write (0x1F,TRGMII_7530_base,read_data);
15253 +                  mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15254 +                  //printk("Current RXC delay = %x\n", read_data); 
15255 +
15256 +                  mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
15257 +                  read_data &= 0x7fffffff;
15258 +                  mii_mgr_write(0x1F,TRGMII_7530_base,read_data);          // Release RX reset in MT7530
15259 +
15260 +                  mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
15261 +                  read_data |= 0xc0000000;
15262 +                  mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data);       // Disable RX clock gating in MT7530
15263 +                }
15264 +               read_data = tmp;
15265 +       }
15266 +       //printk("RXC delay is %d\n", tmp);
15267 +       //printk("Finish RXC Adjustment while loop\n");
15268 +
15269 +       //printk("Read RD_WD MT7530\n");
15270 +       /* Read RD_WD MT7530*/
15271 +       for  (i = 0 ; i<5 ; i++) {
15272 +               rd_tap = 0;
15273 +               while (err_flag[i] != 0) {
15274 +                       /* Enable EDGE CHK in MT7530*/
15275 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15276 +                       read_data |= 0x40000000;
15277 +                       read_data &= 0x4fffffff;
15278 +                       mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15279 +                       wait_loop();
15280 +                       err_cnt[i] = (read_data >> 8) & 0x0000000f; 
15281 +                       rd_wd = (read_data >> 16) & 0x000000ff;
15282 +                       //printk("##### %dth bit  ERR_CNT = %x RD_WD =%x ######\n", i, err_cnt[i],rd_wd);
15283 +                       if (err_cnt[i] != 0 || rd_wd !=0x55){
15284 +                          err_flag [i] =  1;
15285 +                       }   
15286 +                       else {
15287 +                          err_flag[i] =0;
15288 +                       }       
15289 +                       if (err_flag[i] !=0 ) { 
15290 +                          rd_tap = (read_data & 0x0000007f) + rxd_step_size;                        // Add RXD delay in MT7530
15291 +                          read_data = (read_data & 0xffffff80) | rd_tap;
15292 +                          mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15293 +                          tap_a[i] = rd_tap;
15294 +                       } else {
15295 +                          tap_a[i] = (read_data & 0x0000007f);                                     // Record the min delay TAP_A
15296 +                          rd_tap   =  tap_a[i] + 0x4;                     
15297 +                          read_data = (read_data & 0xffffff80) | rd_tap  ;
15298 +                          mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15299 +                       }       
15300 +
15301 +                       /* Disable EDGE CHK in MT7530*/
15302 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15303 +                       read_data |= 0x40000000;
15304 +                       read_data &= 0x4fffffff;
15305 +                       mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15306 +                       wait_loop();
15307 +
15308 +               }
15309 +               //printk("%dth bit  Tap_a = %d\n", i, tap_a[i]);
15310 +       }
15311 +       //printk("Last While Loop\n");
15312 +       for  (i = 0 ; i<5 ; i++) {
15313 +       rd_tap =0;
15314 +               while (err_cnt[i] == 0 && (rd_tap!=128)) {
15315 +                       /* Enable EDGE CHK in MT7530*/
15316 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15317 +                       read_data |= 0x40000000;
15318 +                       read_data &= 0x4fffffff;
15319 +                       mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15320 +                       wait_loop();
15321 +                       err_cnt[i] = (read_data >> 8) & 0x0000000f;
15322 +                       //rd_tap = (read_data & 0x0000007f) + 0x4;                                    // Add RXD delay in MT7530
15323 +                       if (err_cnt[i] == 0 && (rd_tap!=128)) {
15324 +                           rd_tap = (read_data & 0x0000007f) + rxd_step_size;                        // Add RXD delay in MT7530
15325 +                           read_data = (read_data & 0xffffff80) | rd_tap;
15326 +                           mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15327 +                       }    
15328 +                       /* Disable EDGE CHK in MT7530*/
15329 +                       mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
15330 +                       read_data |= 0x40000000;
15331 +                       read_data &= 0x4fffffff;
15332 +                       mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15333 +                       wait_loop();
15334 +               }
15335 +               tap_b[i] = rd_tap;// - rxd_step_size;                                     // Record the max delay TAP_B
15336 +               //printk("%dth bit  Tap_b = %d, ERR_CNT=%d\n", i, tap_b[i],err_cnt[i]);
15337 +               final_tap[i] = (tap_a[i]+tap_b[i])/2;                                     //  Calculate RXD delay = (TAP_A + TAP_B)/2
15338 +               //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
15339 +
15340 +               read_data = ( read_data & 0xffffff80) | final_tap[i];
15341 +               mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
15342 +       }
15343 +       *(volatile u_long *)(TRGMII_7623_base + 0x40) &= 0x3fffffff;
15344 +       
15345 +}
15346 +
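+/* Fixed delay values as an alternative to the runtime calibration above,
+ * presumably tuned for the 325MHz TRGMII clock implied by the function
+ * name. */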
15347 +void set_trgmii_325_delay_setting(void)
15348 +{
15349 +       /* MT7623 side */
15350 +       *(volatile u_long *)(0xfb110300) = 0x80020050;
15351 +       *(volatile u_long *)(0xfb110304) = 0x00980000;
15352 +       *(volatile u_long *)(0xfb110300) = 0x40020050;
15353 +       *(volatile u_long *)(0xfb110304) = 0xc0980000;
15354 +       *(volatile u_long *)(0xfb110310) = 0x00000028;
15355 +       *(volatile u_long *)(0xfb110318) = 0x0000002e;
15356 +       *(volatile u_long *)(0xfb110320) = 0x0000002d;
15357 +       *(volatile u_long *)(0xfb110328) = 0x0000002b;
15358 +       *(volatile u_long *)(0xfb110330) = 0x0000002a;
15359 +       *(volatile u_long *)(0xfb110340) = 0x00020000;
15360 +       /* MT7530 side */
15361 +       mii_mgr_write(31, 0x7a00, 0x10);              
15362 +       mii_mgr_write(31, 0x7a10, 0x23);              
15363 +       mii_mgr_write(31, 0x7a18, 0x27);              
15364 +       mii_mgr_write(31, 0x7a20, 0x24);              
15365 +       mii_mgr_write(31, 0x7a28, 0x29);              
15366 +       mii_mgr_write(31, 0x7a30, 0x24);              
15367 +
15368 +}
15369 +
15370 +
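+/* Bring up the internal MT7530 gigabit switch behind GE1: set the MT7623
+ * pinmux, select TRGMII mode on GE1, reset the switch, force the GE1 MAC to
+ * 1000M/FD (under the GE1 force configs), set up the MT7530 core PLL for the
+ * detected crystal, program delay taps and TX driving, disable EEE and
+ * restart auto-negotiation on the embedded PHYs. */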
15371 +void setup_internal_gsw(void)
15372 +{
15373 +       u32     i;
15374 +       u32     regValue;
15375 +       u32     xtal_mode;
15376 +
15377 +       mt7623_pinmux_set();    /* TODO: just for bring up, should be removed!!! */
15378 +
15379 +#if 0
15380 +       /* GE1: RGMII mode setting */   
15381 +       *(volatile u_long *)(0xfb110300) = 0x80020000;
15382 +       *(volatile u_long *)(0xfb110304) = 0x00980000;
15383 +       *(volatile u_long *)(0xfb110300) = 0x40020000;
15384 +       *(volatile u_long *)(0xfb110304) = 0xc0980000;
15385 +       *(volatile u_long *)(0xfb110310) = 0x00000041;
15386 +       *(volatile u_long *)(0xfb110318) = 0x00000044;
15387 +       *(volatile u_long *)(0xfb110320) = 0x00000043;
15388 +       *(volatile u_long *)(0xfb110328) = 0x00000042;
15389 +       *(volatile u_long *)(0xfb110330) = 0x00000042;
15390 +       *(volatile u_long *)(0xfb110340) = 0x00020000;
15391 +       *(volatile u_long *)(0xfb110390) &= 0xfffffff8; //RGMII mode
15392 +#else
15393 +       /* GE1: TRGMII mode setting */  
15394 +       *(volatile u_long *)(0xfb110390) |= 0x00000002; //TRGMII mode
15395 +#endif
15396 +
15397 +       /*Todo: Hardware reset Switch*/
15398 +       /*Hardware reset Switch*/
15399 +#if defined(CONFIG_ARCH_MT7623)
15400 +       regValue = *(volatile u_long *)(0xfb00000c);
15401 +       /* MT7530 reset. Flows for MT7623 and MT7683 are both executed. */
15402 +       /* Should modify this section once EFUSE is ready. */
15403 +       /* For MT7683: reset MT7530 */
15404 +       if(!(regValue & (1<<16)))
15405 +       {
15406 +               *(volatile u_long *)(0xf0005520) &= ~(1<<1);
15407 +               udelay(1000);
15408 +               *(volatile u_long *)(0xf0005520) |= (1<<1);
15409 +               mdelay(100);
15410 +       }
15411 +       //printk("Assert MT7623 RXC reset\n");
15412 +       *(volatile u_long *)(0xfb110300) |= 0x80000000;   // Assert MT7623 RXC reset
15413 +        /*For MT7623 reset MT7530*/
15414 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
15415 +       udelay(1000);
15416 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
15417 +       mdelay(100);
15418 +#endif
15419 +
15420 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
15421 +       for(i=0;i<=4;i++)
15422 +        {
15423 +               //turn off PHY
15424 +               mii_mgr_read(i, 0x0 ,&regValue);
15425 +              regValue |= (0x1<<11);
15426 +              mii_mgr_write(i, 0x0, regValue); 
15427 +       }
15428 +        mii_mgr_write(31, 0x7000, 0x3); //reset switch
15429 +        udelay(100);
15430 +
15431 +#if defined (CONFIG_MT7621_ASIC) || defined (CONFIG_ARCH_MT7623)
15432 +#if 0
15433 +       if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
15434 +               sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
15435 +               mii_mgr_write(31, 0x3600, 0x5e30b);
15436 +       } else 
15437 +#endif
15438 +       {
15439 +               sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
15440 +               mii_mgr_write(31, 0x3600, 0x5e33b);
15441 +               mii_mgr_read(31, 0x3600 ,&regValue);
15442 +       }
15443 +#endif
15444 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
15445 +#endif
15446 +
15447 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
15448 +       //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable
15449 +       mii_mgr_read(31, 0x7804 ,&regValue);
15450 +       regValue &= ~(1<<8); //Enable Port 6
15451 +       regValue |= (1<<6); //Disable Port 5
15452 +       regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
15453 +
15454 +#if defined (CONFIG_RAETH_GMAC2)
15455 +       //RGMII2=Normal mode
15456 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
15457 +
15458 +       //GMAC2= RGMII mode
15459 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
15460 +       mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN (possibly unnecessary)
15461 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
15462 +       enable_auto_negotiate(0);//set polling address
15463 +       
15464 +       /* set MT7530 Port 5 to PHY 0/4 mode */
15465 +#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
15466 +       regValue &= ~((1<<13)|(1<<6));
15467 +       regValue |= ((1<<7)|(1<<16)|(1<<20));
15468 +#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
15469 +       regValue &= ~((1<<13)|(1<<6)|(1<<20));
15470 +       regValue |= ((1<<7)|(1<<16));
15471 +#endif
15472 +       /* Set MT7530 PHY direct access mode */
15473 +       regValue &= ~(1<<5);
15474 +
15475 +       //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
15476 +#endif
15477 +       regValue |= (1<<16);//change HW-TRAP
15478 +       printk("change HW-TRAP to 0x%x\n",regValue);
15479 +       mii_mgr_write(31, 0x7804 ,regValue);
15480 +#endif
15481 +       mii_mgr_read(31, 0x7800, &regValue);
15482 +       regValue = (regValue >> 9) & 0x3;
15483 +       if(regValue == 0x3) //25MHz Xtal
15484 +               xtal_mode = 1;
15485 +       else if(regValue == 0x2) //40MHz
15486 +               xtal_mode = 2;
15487 +       else
15488 +               xtal_mode = 3;
15489 +
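+       /* The mii_mgr_write(0, 13, ...)/(0, 14, ...) pairs below appear to be
+        * the standard Clause 22 MMD access registers (13 = access control,
+        * 14 = address/data) used to reach the MT7530 core clock / PLL
+        * registers; the register values are vendor-provided magic numbers. */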
15490 +       if(xtal_mode == 1) { //25MHz Xtal
15491 +               /* do nothing */
15492 +       } else if(xtal_mode == 2) { //40MHz
15493 +               mii_mgr_write(0, 13, 0x1f);  // disable MT7530 core clock
15494 +               mii_mgr_write(0, 14, 0x410);
15495 +               mii_mgr_write(0, 13, 0x401f);
15496 +               mii_mgr_write(0, 14, 0x0);
15497 +
15498 +               mii_mgr_write(0, 13, 0x1f);  // disable MT7530 PLL
15499 +               mii_mgr_write(0, 14, 0x40d);
15500 +               mii_mgr_write(0, 13, 0x401f);
15501 +               mii_mgr_write(0, 14, 0x2020);
15502 +
15503 +               mii_mgr_write(0, 13, 0x1f);  // for MT7530 core clock = 500Mhz
15504 +               mii_mgr_write(0, 14, 0x40e);
15505 +               mii_mgr_write(0, 13, 0x401f);
15506 +               mii_mgr_write(0, 14, 0x119);
15507 +
15508 +               mii_mgr_write(0, 13, 0x1f);  // enable MT7530 PLL
15509 +               mii_mgr_write(0, 14, 0x40d);
15510 +               mii_mgr_write(0, 13, 0x401f);
15511 +               mii_mgr_write(0, 14, 0x2820);
15512 +
15513 +               udelay(20); //suggest by CD
15514 +
15515 +               mii_mgr_write(0, 13, 0x1f);  // enable MT7530 core clock
15516 +               mii_mgr_write(0, 14, 0x410);
15517 +               mii_mgr_write(0, 13, 0x401f);
15518 +       } else { //20MHz
15519 +               /*TODO*/
15520 +       }
15521 +
15522 +#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
15523 +       mii_mgr_write(0, 14, 0x3); /*TRGMII*/
15524 +#else
15525 +       mii_mgr_write(0, 14, 0x1);  /*RGMII*/
15526 +/* set MT7530 central align */
15527 +        mii_mgr_read(31, 0x7830, &regValue);
15528 +        regValue &= ~1;
15529 +        regValue |= 1<<1;
15530 +        mii_mgr_write(31, 0x7830, regValue);
15531 +
15532 +        mii_mgr_read(31, 0x7a40, &regValue);
15533 +        regValue &= ~(1<<30);
15534 +        mii_mgr_write(31, 0x7a40, regValue);
15535 +
15536 +        regValue = 0x855;
15537 +        mii_mgr_write(31, 0x7a78, regValue);
15538 +
15539 +#endif
15540 +       mii_mgr_write(31, 0x7b00, 0x104);  //delay setting for 10/1000M
15541 +       mii_mgr_write(31, 0x7b04, 0x10);  //delay setting for 10/1000M
15542 +
15543 +       /*Tx Driving*/
15544 +       mii_mgr_write(31, 0x7a54, 0x88);  //lower GE1 driving
15545 +       mii_mgr_write(31, 0x7a5c, 0x88);  //lower GE1 driving
15546 +       mii_mgr_write(31, 0x7a64, 0x88);  //lower GE1 driving
15547 +       mii_mgr_write(31, 0x7a6c, 0x88);  //lower GE1 driving
15548 +       mii_mgr_write(31, 0x7a74, 0x88);  //lower GE1 driving
15549 +       mii_mgr_write(31, 0x7a7c, 0x88);  //lower GE1 driving
15550 +       mii_mgr_write(31, 0x7810, 0x11);  //lower GE2 driving
15551 +       /*Set MT7623/MT7683 TX Driving*/
15552 +       *(volatile u_long *)(0xfb110354) = 0x88;
15553 +       *(volatile u_long *)(0xfb11035c) = 0x88;
15554 +       *(volatile u_long *)(0xfb110364) = 0x88;
15555 +       *(volatile u_long *)(0xfb11036c) = 0x88;
15556 +       *(volatile u_long *)(0xfb110374) = 0x88;
15557 +       *(volatile u_long *)(0xfb11037c) = 0x88;
15558 +#if defined (CONFIG_GE2_RGMII_AN)      
15559 +       *(volatile u_long *)(0xf0005f00) = 0xe00; //Set GE2 driving and slew rate
15560 +#else
15561 +       *(volatile u_long *)(0xf0005f00) = 0xa00; //Set GE2 driving and slew rate
15562 +#endif
15563 +       *(volatile u_long *)(0xf00054c0) = 0x5;   //set GE2 TDSEL
15564 +       *(volatile u_long *)(0xf0005ed0) = 0;     //set GE2 TUNE
15565 +
15566 +       /* TRGMII Clock */
15567 +//     printk("Set TRGMII mode clock stage 1\n");
15568 +       mii_mgr_write(0, 13, 0x1f);
15569 +       mii_mgr_write(0, 14, 0x404);
15570 +       mii_mgr_write(0, 13, 0x401f);
15571 +       if (xtal_mode == 1){ //25MHz
15572 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15573 +               mii_mgr_write(0, 14, 0x1d00); // 362.5MHz
15574 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15575 +               mii_mgr_write(0, 14, 0x1a00); // 325MHz
15576 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15577 +               mii_mgr_write(0, 14, 0x1400); //250MHz
15578 +#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
15579 +               mii_mgr_write(0, 14, 0x00a0); //125MHz
15580 +#endif
15581 +       }else if(xtal_mode == 2){//40MHz
15582 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15583 +               mii_mgr_write(0, 14, 0x1220); // 362.5MHz
15584 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
15585 +               mii_mgr_write(0, 14, 0x1040); // 325MHz
15586 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15587 +               mii_mgr_write(0, 14, 0x0c80); //250MHz
15588 +#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
15589 +               mii_mgr_write(0, 14, 0x0640); //125MHz
15590 +#endif 
15591 +       }
15592 +//     printk("Set TRGMII mode clock stage 2\n");
15593 +       mii_mgr_write(0, 13, 0x1f);
15594 +       mii_mgr_write(0, 14, 0x405);
15595 +       mii_mgr_write(0, 13, 0x401f);
15596 +       mii_mgr_write(0, 14, 0x0);
15597 +
15598 +//     printk("Set TRGMII mode clock stage 3\n");
15599 +       mii_mgr_write(0, 13, 0x1f);
15600 +       mii_mgr_write(0, 14, 0x409);
15601 +       mii_mgr_write(0, 13, 0x401f);
15602 +       mii_mgr_write(0, 14, 0x0087);             
15603 +
15604 +//     printk("Set TRGMII mode clock stage 4\n");
15605 +       mii_mgr_write(0, 13, 0x1f);
15606 +       mii_mgr_write(0, 14, 0x40a);
15607 +       mii_mgr_write(0, 13, 0x401f);
15608 +       mii_mgr_write(0, 14, 0x0087);
15609 +
15610 +//     printk("Set TRGMII mode clock stage 5\n");
15611 +       mii_mgr_write(0, 13, 0x1f);
15612 +       mii_mgr_write(0, 14, 0x403);
15613 +       mii_mgr_write(0, 13, 0x401f);
15614 +       mii_mgr_write(0, 14, 0x1800);
15615 +
15616 +//     printk("Set TRGMII mode clock stage 6\n");
15617 +       mii_mgr_write(0, 13, 0x1f);
15618 +       mii_mgr_write(0, 14, 0x403);
15619 +       mii_mgr_write(0, 13, 0x401f);
15620 +       mii_mgr_write(0, 14, 0x1c00);
15621 +
15622 +//     printk("Set TRGMII mode clock stage 7\n");
15623 +       mii_mgr_write(0, 13, 0x1f);
15624 +       mii_mgr_write(0, 14, 0x401);
15625 +       mii_mgr_write(0, 13, 0x401f);
15626 +       mii_mgr_write(0, 14, 0xc020);
15627 +
15628 +//     printk("Set TRGMII mode clock stage 8\n");
15629 +       mii_mgr_write(0, 13, 0x1f);
15630 +       mii_mgr_write(0, 14, 0x406);
15631 +       mii_mgr_write(0, 13, 0x401f);
15632 +       mii_mgr_write(0, 14, 0xa030);
15633 +
15634 +//     printk("Set TRGMII mode clock stage 9\n");
15635 +       mii_mgr_write(0, 13, 0x1f);
15636 +       mii_mgr_write(0, 14, 0x406);
15637 +       mii_mgr_write(0, 13, 0x401f);
15638 +       mii_mgr_write(0, 14, 0xa038);
15639 +
15640 +       udelay(120); // for MT7623 bring up test
15641 +
15642 +//     printk("Set TRGMII mode clock stage 10\n");
15643 +       mii_mgr_write(0, 13, 0x1f);
15644 +       mii_mgr_write(0, 14, 0x410);
15645 +       mii_mgr_write(0, 13, 0x401f);
15646 +       mii_mgr_write(0, 14, 0x3);
15647 +
15648 +//     printk("Set TRGMII mode clock stage 11\n");
15649 +
15650 +       mii_mgr_read(31, 0x7830 ,&regValue);
15651 +       regValue &=0xFFFFFFFC;
15652 +       regValue |=0x00000001;
15653 +       mii_mgr_write(31, 0x7830, regValue);
15654 +
15655 +//     printk("Set TRGMII mode clock stage 12\n");
15656 +       mii_mgr_read(31, 0x7a40 ,&regValue);
15657 +       regValue &= ~(0x1<<30);
15658 +       regValue &= ~(0x1<<28);
15659 +       mii_mgr_write(31, 0x7a40, regValue);
15660 +
15661 +       //mii_mgr_write(31, 0x7a78, 0x855);            
15662 +       mii_mgr_write(31, 0x7a78, 0x55);            
15663 +//     printk(" Adjust MT7530 TXC delay\n");
15664 +       udelay(100); // for mt7623 bring up test
15665 +
15666 +//     printk(" Release MT7623 RXC Reset\n");
15667 +       *(volatile u_long *)(0xfb110300) &= 0x7fffffff;   // Release MT7623 RXC reset
15668 +       //disable EEE
15669 +       for(i=0;i<=4;i++)
15670 +       {
15671 +           mii_mgr_write(i, 13, 0x7);
15672 +           mii_mgr_write(i, 14, 0x3C);
15673 +           mii_mgr_write(i, 13, 0x4007);
15674 +           mii_mgr_write(i, 14, 0x0);
15675 +       }
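+	/*
+	 * The loop above writes 0 to MMD device 7, register 0x3C on PHY
+	 * ports 0-4; register 7.60 (0x3C) is the IEEE 802.3 EEE
+	 * advertisement register, so clearing it stops the PHYs from
+	 * advertising Energy-Efficient Ethernet.
+	 */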
15676 +
15677 +       //Disable EEE 10Base-Te:
15678 +       for(i=0;i<=4;i++)
15679 +       {
15680 +           mii_mgr_write(i, 13, 0x1f);
15681 +           mii_mgr_write(i, 14, 0x027b);
15682 +           mii_mgr_write(i, 13, 0x401f);
15683 +           mii_mgr_write(i, 14, 0x1177);
15684 +       }
15685 +
15686 +       for(i=0;i<=4;i++)
15687 +        {
15688 +       //turn on PHY
15689 +                mii_mgr_read(i, 0x0 ,&regValue);
15690 +               regValue &= ~(0x1<<11);
15691 +               mii_mgr_write(i, 0x0, regValue);        
15692 +       }
15693 +
15694 +       for(i=0;i<=4;i++) {
15695 +               mii_mgr_read(i, 4, &regValue);
15696 +                regValue |= (3<<7); //turn on 100Base-T Advertisement
15697 +               mii_mgr_write(i, 4, regValue);
15698 +       
15699 +               mii_mgr_read(i, 9, &regValue);
15700 +                regValue |= (3<<8); //turn on 1000Base-T Advertisement
15701 +                mii_mgr_write(i, 9, regValue);
15702 +
15703 +               //restart AN
15704 +               mii_mgr_read(i, 0, &regValue);
15705 +               regValue |= (1 << 9);
15706 +               mii_mgr_write(i, 0, regValue);
15707 +       }
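+	/*
+	 * The PHY bring-up loops above use standard Clause-22 registers:
+	 * BMCR (reg 0) bit 11 clears power-down and bit 9 restarts
+	 * auto-negotiation, ANAR (reg 4) bits 8:7 advertise 100Base-TX
+	 * full/half duplex, and the 1000Base-T control register (reg 9)
+	 * bits 9:8 advertise 1000Base-T full/half duplex.
+	 */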
15708 +
15709 +       mii_mgr_read(31, 0x7808 ,&regValue);
15710 +        regValue |= (3<<16); //Enable INTR
15711 +       mii_mgr_write(31, 0x7808 ,regValue);
15712 +}
15713 +
15714 +void mt7623_ethifsys_init(void)
15715 +{
15716 +#define TRGPLL_CON0             (0xF0209280)
15717 +#define TRGPLL_CON1             (0xF0209284)
15718 +#define TRGPLL_CON2             (0xF0209288)
15719 +#define TRGPLL_PWR_CON0         (0xF020928C)
15720 +#define ETHPLL_CON0             (0xF0209290)
15721 +#define ETHPLL_CON1             (0xF0209294)
15722 +#define ETHPLL_CON2             (0xF0209298)
15723 +#define ETHPLL_PWR_CON0         (0xF020929C)
15724 +#define ETH_PWR_CON             (0xF00062A0)
15725 +#define HIF_PWR_CON             (0xF00062A4)
15726 +
15727 +        u32 temp, pwr_ack_status;
15728 +        /*=========================================================================*/
15729 +        /* Enable ETHPLL & TRGPLL*/
15730 +        /*=========================================================================*/
15731 +        /* xPLL PWR ON*/
15732 +        temp = sysRegRead(ETHPLL_PWR_CON0);
15733 +        sysRegWrite(ETHPLL_PWR_CON0, temp | 0x1);
15734 +
15735 +        temp = sysRegRead(TRGPLL_PWR_CON0);
15736 +        sysRegWrite(TRGPLL_PWR_CON0, temp | 0x1);
15737 +
15738 +        udelay(5); /* wait for xPLL_PWR_ON ready (min delay is 1us)*/
15739 +
15740 +        /* xPLL ISO Disable*/
15741 +        temp = sysRegRead(ETHPLL_PWR_CON0);
15742 +        sysRegWrite(ETHPLL_PWR_CON0, temp & ~0x2);
15743 +
15744 +        temp = sysRegRead(TRGPLL_PWR_CON0);
15745 +        sysRegWrite(TRGPLL_PWR_CON0, temp & ~0x2);
15746 +
15747 +        /* xPLL Frequency Set*/
15748 +        temp = sysRegRead(ETHPLL_CON0);
15749 +        sysRegWrite(ETHPLL_CON0, temp | 0x1);
15750 +#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
15751 +       temp = sysRegRead(TRGPLL_CON0);
15752 +       sysRegWrite(TRGPLL_CON0,  temp | 0x1);
15753 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)   
15754 +        sysRegWrite(TRGPLL_CON1,  0xB2000000);
15755 +        temp = sysRegRead(TRGPLL_CON0);
15756 +        sysRegWrite(TRGPLL_CON0, temp | 0x1);
15757 +#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
15758 +        sysRegWrite(TRGPLL_CON1, 0xCCEC4EC5);
15759 +        sysRegWrite(TRGPLL_CON0,  0x121);
15760 +#endif
15761 +        udelay(40); /* wait for PLL stable (min delay is 20us)*/
15762 +
15763 +       
15764 +       /*=========================================================================*/
15765 +       /* Power on ETHDMASYS and HIFSYS*/
15766 +       /*=========================================================================*/
15767 +       /* Power on ETHDMASYS*/
15768 +       sysRegWrite(0xF0006000, 0x0b160001);
15769 +       pwr_ack_status = (sysRegRead(ETH_PWR_CON) & 0x0000f000) >> 12;
15770 +
15771 +       if(pwr_ack_status == 0x0) {
15772 +               printk("ETH is already powered on; power-on flow will be skipped...\n");
15773 +       }else {
15774 +               temp = sysRegRead(ETH_PWR_CON)  ;
15775 +               sysRegWrite(ETH_PWR_CON, temp | 0x4);          /* PWR_ON*/
15776 +               temp = sysRegRead(ETH_PWR_CON)  ;
15777 +               sysRegWrite(ETH_PWR_CON, temp | 0x8);          /* PWR_ON_S*/
15778 +
15779 +               udelay(5); /* wait power settle time (min delay is 1us)*/
15780 +
15781 +               temp = sysRegRead(ETH_PWR_CON)  ;
15782 +               sysRegWrite(ETH_PWR_CON, temp & ~0x10);      /* PWR_CLK_DIS*/
15783 +               temp = sysRegRead(ETH_PWR_CON)  ;
15784 +               sysRegWrite(ETH_PWR_CON, temp & ~0x2);        /* PWR_ISO*/
15785 +               temp = sysRegRead(ETH_PWR_CON)  ;
15786 +               sysRegWrite(ETH_PWR_CON, temp & ~0x100);   /* SRAM_PDN 0*/
15787 +               temp = sysRegRead(ETH_PWR_CON)  ;
15788 +               sysRegWrite(ETH_PWR_CON, temp & ~0x200);   /* SRAM_PDN 1*/
15789 +               temp = sysRegRead(ETH_PWR_CON)  ;
15790 +               sysRegWrite(ETH_PWR_CON, temp & ~0x400);   /* SRAM_PDN 2*/
15791 +               temp = sysRegRead(ETH_PWR_CON)  ;
15792 +               sysRegWrite(ETH_PWR_CON, temp & ~0x800);   /* SRAM_PDN 3*/
15793 +
15794 +               udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
15795 +
15796 +               temp = sysRegRead(ETH_PWR_CON)  ;
15797 +               sysRegWrite(ETH_PWR_CON, temp | 0x1);          /* PWR_RST_B*/
15798 +       }
15799 +
15800 +       /* Power on HIFSYS*/
15801 +       pwr_ack_status = (sysRegRead(HIF_PWR_CON) & 0x0000f000) >> 12;
15802 +       if(pwr_ack_status == 0x0) {
15803 +               printk("HIF is already powered on; power-on flow will be skipped...\n");
15804 +       }
15805 +       else {
15806 +               temp = sysRegRead(HIF_PWR_CON)  ;
15807 +               sysRegWrite(HIF_PWR_CON, temp | 0x4);          /* PWR_ON*/
15808 +               temp = sysRegRead(HIF_PWR_CON)  ;
15809 +               sysRegWrite(HIF_PWR_CON, temp | 0x8);          /* PWR_ON_S*/
15810 +
15811 +               udelay(5); /* wait power settle time (min delay is 1us)*/
15812 +
15813 +               temp = sysRegRead(HIF_PWR_CON)  ;
15814 +               sysRegWrite(HIF_PWR_CON, temp & ~0x10);      /* PWR_CLK_DIS*/
15815 +               temp = sysRegRead(HIF_PWR_CON)  ;
15816 +               sysRegWrite(HIF_PWR_CON, temp & ~0x2);        /* PWR_ISO*/
15817 +               temp = sysRegRead(HIF_PWR_CON)  ;
15818 +               sysRegWrite(HIF_PWR_CON, temp & ~0x100);   /* SRAM_PDN 0*/
15819 +               temp = sysRegRead(HIF_PWR_CON)  ;
15820 +               sysRegWrite(HIF_PWR_CON, temp & ~0x200);   /* SRAM_PDN 1*/
15821 +               temp = sysRegRead(HIF_PWR_CON)  ;
15822 +               sysRegWrite(HIF_PWR_CON, temp & ~0x400);   /* SRAM_PDN 2*/
15823 +               temp = sysRegRead(HIF_PWR_CON)  ;
15824 +               sysRegWrite(HIF_PWR_CON, temp & ~0x800);   /* SRAM_PDN 3*/
15825 +
15826 +               udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
15827 +
15828 +               temp = sysRegRead(HIF_PWR_CON)  ;
15829 +               sysRegWrite(HIF_PWR_CON, temp | 0x1);          /* PWR_RST_B*/
15830 +       }
15831 +
15832 +       /* Release mt7530 reset */
15833 +       temp = le32_to_cpu(*(volatile u_long *)(0xfb000034));
15834 +       temp &= ~(BIT(2));
15835 +       *(volatile u_long *)(0xfb000034) = temp;
15836 +}
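+/*
+ * Both power domains above (ETHDMASYS and HIFSYS) go through the same
+ * power-up handshake: assert PWR_ON and PWR_ON_S, ungate the domain
+ * clock (clear PWR_CLK_DIS), drop isolation (clear PWR_ISO), release the
+ * four SRAM power-down bits, and finally deassert the domain reset
+ * (set PWR_RST_B), with short settle delays in between.
+ */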
15837 +#endif
15838 +
15839 +/**
15840 + * ra2882eth_init - Module Init code
15841 + *
15842 + * Called by kernel to register net_device
15843 + *
15844 + */
15845 +
15846 +static int fe_probe(struct platform_device *pdev)
15847 +{
15848 +       int ret;
15849 +       struct net_device *dev = alloc_etherdev(sizeof(END_DEVICE));
15850 +
15851 +        fe_irq = platform_get_irq(pdev, 0);
15852 +
15853 +#ifdef CONFIG_RALINK_VISTA_BASIC
15854 +       int sw_id=0;
15855 +       mii_mgr_read(29, 31, &sw_id);
15856 +       is_switch_175c = (sw_id == 0x175c) ? 1:0;
15857 +#endif 
15858 +
15859 +       if (!dev)
15860 +               return -ENOMEM;
15861 +
15862 +       strcpy(dev->name, DEV_NAME);
15863 +       printk("%s:%s[%d]%d\n", __FILE__, __func__, __LINE__, fe_irq);
15864 +       dev->irq  = fe_irq;
15865 +       dev->addr_len = 6;
15866 +       dev->base_addr = RALINK_FRAME_ENGINE_BASE;
15867 +
15868 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
15869 +       rather_probe(dev);
15870 +#else
15871 +       dev->init =  rather_probe;
15872 +#endif
15873 +       ra2880_setup_dev_fptable(dev);
15874 +
15875 +       /* net_device structure Init */
15876 +       ethtool_init(dev);
15877 +       printk("Ralink APSoC Ethernet Driver Initialization. %s  %d rx/tx descriptors allocated, mtu = %d!\n", RAETH_VERSION, NUM_RX_DESC, dev->mtu);
15878 +#ifdef CONFIG_RAETH_NAPI
15879 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
15880 +       printk("NAPI enabled, Tx Ring = %d, Rx Ring = %d\n", NUM_TX_DESC, NUM_RX_DESC);
15881 +#else
15882 +       printk("NAPI enabled, weight = %d, Tx Ring = %d, Rx Ring = %d\n", dev->weight, NUM_TX_DESC, NUM_RX_DESC);
15883 +#endif
15884 +#endif
15885 +
15886 +       /* Register net device for the driver */
15887 +       if ( register_netdev(dev) != 0) {
15888 +               printk(KERN_WARNING " " __FILE__ ": No ethernet port found.\n");
15889 +               return -ENXIO;
15890 +       }
15891 +
15892 +
15893 +#ifdef CONFIG_RAETH_NETLINK
15894 +       csr_netlink_init();
15895 +#endif
15896 +       ret = debug_proc_init();
15897 +
15898 +       dev_raether = dev;
15899 +#ifdef CONFIG_ARCH_MT7623
15900 +       mt7623_ethifsys_init();
15901 +#endif
15902 +       return ret;
15903 +}
15904 +
15905 +
15906 +
15907 +
15908 +
15909 +
15910 +
15911 +void fe_sw_init(void)
15912 +{
15913 +#if defined (CONFIG_GIGAPHY) || defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
15914 +        unsigned int regValue = 0;
15915 +#endif
15916 +
15917 +       // Case1: RT288x/RT3883/MT7621 GE1 + GigaPhy
15918 +#if defined (CONFIG_GE1_RGMII_AN)
15919 +       enable_auto_negotiate(1);
15920 +       if (isMarvellGigaPHY(1)) {
15921 +#if defined (CONFIG_RT3883_FPGA)
15922 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &regValue);
15923 +               regValue &= ~(3<<8); //turn off 1000Base-T Advertisement  (9.9=1000Full, 9.8=1000Half)
15924 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
15925 +               
15926 +               printk("\n Reset MARVELL phy\n");
15927 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &regValue);
15928 +               regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
15929 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, regValue);
15930 +
15931 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &regValue);
15932 +               regValue |= 1<<15; //PHY Software Reset
15933 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
15934 +#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
15935 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &regValue);
15936 +               regValue &= ~(3<<8); //turn off 1000Base-T Advertisement  (9.9=1000Full, 9.8=1000Half)
15937 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
15938 +       
15939 +               /*10Mbps, debug*/
15940 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, 0x461);
15941 +
15942 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &regValue);
15943 +               regValue |= 1<<9; //restart AN
15944 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
15945 +#endif
15946 +
15947 +       }
15948 +       if (isVtssGigaPHY(1)) {
15949 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 1);
15950 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &regValue);
15951 +               printk("Vitesse phy skew: %x --> ", regValue);
15952 +               regValue |= (0x3<<12);
15953 +               regValue &= ~(0x3<<14);
15954 +               printk("%x\n", regValue);
15955 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, regValue);
15956 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0);
15957 +        }
15958 +#if defined (CONFIG_RALINK_MT7621)
15959 +       sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
15960 +#endif
15961 +#endif // CONFIG_GE1_RGMII_AN //
15962 +
15963 +       // Case2: RT3883/MT7621 GE2 + GigaPhy
15964 +#if defined (CONFIG_GE2_RGMII_AN)
15965 +       enable_auto_negotiate(2);
15966 +       if (isMarvellGigaPHY(2)) {
15967 +#if defined (CONFIG_RT3883_FPGA)
15968 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &regValue);
15969 +               regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
15970 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
15971 +               
15972 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &regValue);
15973 +               regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
15974 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, regValue);
15975 +
15976 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &regValue);
15977 +               regValue |= 1<<15; //PHY Software Reset
15978 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
15979 +#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
15980 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &regValue);
15981 +               regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
15982 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
15983 +               
15984 +               /*10Mbps, debug*/
15985 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, 0x461);
15986 +
15987 +
15988 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &regValue);
15989 +               regValue |= 1<<9; //restart AN
15990 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
15991 +#endif
15992 +
15993 +       }
15994 +       if (isVtssGigaPHY(2)) {
15995 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 1);
15996 +               mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &regValue);
15997 +               printk("Vitesse phy skew: %x --> ", regValue);
15998 +               regValue |= (0x3<<12);
15999 +               regValue &= ~(0x3<<14);
16000 +               printk("%x\n", regValue);
16001 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, regValue);
16002 +               mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0);
16003 +       }
16004 +#if defined (CONFIG_RALINK_MT7621)
16005 +       //RGMII2=Normal mode
16006 +       *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
16007 +       //GMAC2= RGMII mode
16008 +       *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
16009 +
16010 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
16011 +#endif
16012 +#endif // CONFIG_GE2_RGMII_AN //
16013 +
16014 +       // Case3: RT305x/RT335x/RT6855/RT6855A/MT7620 + EmbeddedSW
16015 +#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_RALINK_MT7621) && !defined(CONFIG_ARCH_MT7623)
16016 +#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
16017 +       rt_gsw_init();
16018 +#elif defined(CONFIG_RALINK_RT6855A)
16019 +       rt6855A_gsw_init();
16020 +#else
16021 +       rt305x_esw_init();
16022 +#endif
16023 +#endif 
16024 +       // Case4:  RT288x/RT388x/MT7621 GE1 + Internal GigaSW
16025 +#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200)  || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
16026 +#if defined (CONFIG_RALINK_MT7621)
16027 +       setup_internal_gsw();
16028 +       /*MT7530 Init*/
16029 +#elif defined (CONFIG_ARCH_MT7623)
16030 +#if defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)        
16031 +       *(volatile u_long *)(0xfb00002c) |=  (1<<11);
16032 +#else
16033 +       *(volatile u_long *)(0xfb00002c) &= ~(1<<11);
16034 +#endif
16035 +       setup_internal_gsw();
16036 +       trgmii_calibration_7623();
16037 +       trgmii_calibration_7530();
16038 +       //*(volatile u_long *)(0xfb110300) |= (0x1f << 24);     //Just only for 312.5/325MHz
16039 +       *(volatile u_long *)(0xfb110340) = 0x00020000;
16040 +       *(volatile u_long *)(0xfb110304) &= 0x3fffffff;         // RX clock gating in MT7623
16041 +       *(volatile u_long *)(0xfb110300) |= 0x80000000;         // Assert RX  reset in MT7623
16042 +       *(volatile u_long *)(0xfb110300 )      &= 0x7fffffff;   // Release RX reset in MT7623
16043 +       *(volatile u_long *)(0xfb110300 +0x04) |= 0xC0000000;   // Disable RX clock gating in MT7623
16044 +/*GE1@125MHz(RGMII mode) TX delay adjustment*/
16045 +#if defined (CONFIG_GE1_RGMII_FORCE_1000)
16046 +        *(volatile u_long *)(0xfb110350) = 0x55;
16047 +        *(volatile u_long *)(0xfb110358) = 0x55;
16048 +        *(volatile u_long *)(0xfb110360) = 0x55;
16049 +        *(volatile u_long *)(0xfb110368) = 0x55;
16050 +        *(volatile u_long *)(0xfb110370) = 0x55;
16051 +        *(volatile u_long *)(0xfb110378) = 0x855;
16052 +#endif
16053 +
16054 +       
16055 +#elif defined (CONFIG_MT7623_FPGA)     /* Nelson: remove for bring up, should be added!!! */
16056 +       setup_fpga_gsw();
16057 +#else
16058 +       sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_1000_FD);
16059 +#endif
16060 +#endif 
16061 +
16062 +       // Case5: RT388x/MT7621 GE2 + GigaSW
16063 +#if defined (CONFIG_GE2_RGMII_FORCE_1000)
16064 +#if defined (CONFIG_RALINK_MT7621)
16065 +       setup_external_gsw();
16066 +#else
16067 +       sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_1000_FD);
16068 +#endif
16069 +#endif 
16070 +
16071 +       // Case6: RT288x GE1 /RT388x,MT7621 GE1/GE2 + (10/100 Switch or 100PHY)
16072 +#if defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
16073 +
16074 +       //set GMAC to MII or RvMII mode
16075 +#if defined (CONFIG_RALINK_RT3883)
16076 +       regValue = sysRegRead(SYSCFG1);
16077 +#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE1_MII_AN)
16078 +       regValue &= ~(0x3 << 12);
16079 +       regValue |= 0x1 << 12; // GE1 MII Mode
16080 +#elif defined (CONFIG_GE1_RVMII_FORCE_100)
16081 +       regValue &= ~(0x3 << 12);
16082 +       regValue |= 0x2 << 12; // GE1 RvMII Mode
16083 +#endif 
16084 +
16085 +#if defined (CONFIG_GE2_MII_FORCE_100) || defined (CONFIG_GE2_MII_AN) 
16086 +       regValue &= ~(0x3 << 14);
16087 +       regValue |= 0x1 << 14; // GE2 MII Mode
16088 +#elif defined (CONFIG_GE2_RVMII_FORCE_100)
16089 +       regValue &= ~(0x3 << 14);
16090 +       regValue |= 0x2 << 14; // GE2 RvMII Mode
16091 +#endif 
16092 +       sysRegWrite(SYSCFG1, regValue);
16093 +#endif // CONFIG_RALINK_RT3883 //
16094 +
16095 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
16096 +
16097 +#if defined (CONFIG_GE1_MII_FORCE_100)
16098 +       sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x5e337);//(P0, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
16099 +#endif
16100 +#if defined (CONFIG_GE2_MII_FORCE_100)
16101 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x5e337);//(P1, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
16102 +#endif
16103 +#if defined (CONFIG_GE1_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
16104 +       enable_auto_negotiate(1);
16105 +#if defined (CONFIG_RALINK_MT7621)
16106 +       sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
16107 +#endif
16108 +#endif
16109 +#if defined (CONFIG_GE2_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
16110 +       enable_auto_negotiate(2);
16111 +#if defined (CONFIG_RALINK_MT7621)
16112 +       sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
16113 +#endif
16114 +#endif
16115 +
16116 +#else
16117 +#if defined (CONFIG_GE1_MII_FORCE_100)
16118 +#if defined (CONFIG_RALINK_MT7621)
16119 +#else
16120 +       sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_100_FD);
16121 +#endif
16122 +#endif
16123 +#if defined (CONFIG_GE2_MII_FORCE_100)
16124 +#if defined (CONFIG_RALINK_MT7621)
16125 +#else
16126 +       sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_100_FD);
16127 +#endif
16128 +#endif
16129 +       //add switch configuration here for other switch chips.
16130 +#if defined (CONFIG_GE1_MII_FORCE_100) ||  defined (CONFIG_GE2_MII_FORCE_100)
16131 +       // IC+ 175x: force the IC+ switch CPU port to 100/FD
16132 +       mii_mgr_write(29, 22, 0x8420);
16133 +#endif
16134 +
16135 +
16136 +#endif // defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY) //
16137 +
16138 +}
16139 +
16140 +
16141 +/**
16142 + * ra2882eth_cleanup_module - Module Exit code
16143 + *
16144 + * Cmd 'rmmod' will invoke this routine to exit the module
16145 + *
16146 + */
16147 +#if 0
16148 + void ra2882eth_cleanup_module(void)
16149 +{
16150 +       struct net_device *dev = dev_raether;
16151 +       END_DEVICE *ei_local;
16152 +
16153 +       ei_local = netdev_priv(dev);
16154 +
16155 +#ifdef CONFIG_PSEUDO_SUPPORT
16156 +       unregister_netdev(ei_local->PseudoDev);
16157 +       free_netdev(ei_local->PseudoDev);
16158 +#endif
16159 +       unregister_netdev(dev);
16160 +       RAETH_PRINT("Free ei_local and unregister netdev...\n");
16161 +
16162 +       free_netdev(dev);
16163 +       debug_proc_exit();
16164 +#ifdef CONFIG_RAETH_NETLINK
16165 +       csr_netlink_end();
16166 +#endif
16167 +}
16168 +#endif
16169 +EXPORT_SYMBOL(set_fe_dma_glo_cfg);
16170 +//module_init(ra2882eth_init);
16171 +//module_exit(ra2882eth_cleanup_module);
16172 +
16173 +const struct of_device_id of_fe_match[] = {
16174 +       { .compatible = "mediatek,mt7623-net", },
16175 +       {},
16176 +};
16177 +
16178 +MODULE_DEVICE_TABLE(of, of_fe_match);
16179 +
16180 +static struct platform_driver fe_driver = {
16181 +       .probe = fe_probe,
16182 +//     .remove = ra2882eth_cleanup_module,
16183 +       .driver = {
16184 +               .name = "ralink_soc_eth",
16185 +               .owner = THIS_MODULE,
16186 +               .of_match_table = of_fe_match,
16187 +       },
16188 +};
16189 +
16190 +static int __init init_rtfe(void)
16191 +{
16192 +       int ret;
16193 +       ret = platform_driver_register(&fe_driver);
16194 +       return ret;
16195 +}
16196 +
16197 +static void __exit exit_rtfe(void)
16198 +{
16199 +       platform_driver_unregister(&fe_driver);
16200 +}
16201 +
16202 +module_init(init_rtfe);
16203 +module_exit(exit_rtfe);
16204 +
16205 +
16206 +MODULE_LICENSE("GPL");
16207 --- /dev/null
16208 +++ b/drivers/net/ethernet/raeth/raether.h
16209 @@ -0,0 +1,126 @@
16210 +#ifndef RA2882ETHEND_H
16211 +#define RA2882ETHEND_H
16212 +
16213 +#ifdef DSP_VIA_NONCACHEABLE
16214 +#define ESRAM_BASE     0xa0800000      /* 0x0080-0000  ~ 0x00807FFF */
16215 +#else
16216 +#define ESRAM_BASE     0x80800000      /* 0x0080-0000  ~ 0x00807FFF */
16217 +#endif
16218 +
16219 +#define RX_RING_BASE   ((int)(ESRAM_BASE + 0x7000))
16220 +#define TX_RING_BASE   ((int)(ESRAM_BASE + 0x7800))
16221 +
16222 +#if defined(CONFIG_RALINK_RT2880)
16223 +#define NUM_TX_RINGS   1
16224 +#else
16225 +#define NUM_TX_RINGS   4
16226 +#endif
16227 +#ifdef MEMORY_OPTIMIZATION
16228 +#ifdef CONFIG_RAETH_ROUTER
16229 +#define NUM_RX_DESC     32 //128
16230 +#define NUM_TX_DESC            32 //128
16231 +#elif defined (CONFIG_RT_3052_ESW)
16232 +#define NUM_RX_DESC     16 //64
16233 +#define NUM_TX_DESC     16 //64
16234 +#else
16235 +#define NUM_RX_DESC     32 //128
16236 +#define NUM_TX_DESC     32 //128
16237 +#endif
16238 +//#define NUM_RX_MAX_PROCESS 32
16239 +#define NUM_RX_MAX_PROCESS 32
16240 +#else
16241 +#if defined (CONFIG_RAETH_ROUTER)
16242 +#define NUM_RX_DESC     256
16243 +#define NUM_TX_DESC            256
16244 +#elif defined (CONFIG_RT_3052_ESW)
16245 +#if defined (CONFIG_RALINK_MT7621)
16246 +#define NUM_RX_DESC     512
16247 +#define NUM_QRX_DESC     16
16248 +#define NUM_TX_DESC     512 
16249 +#else
16250 +#define NUM_RX_DESC     256
16251 +#define NUM_QRX_DESC NUM_RX_DESC
16252 +#define NUM_TX_DESC     256
16253 +#endif
16254 +#else
16255 +#define NUM_RX_DESC     256
16256 +#define NUM_QRX_DESC NUM_RX_DESC
16257 +#define NUM_TX_DESC     256
16258 +#endif
16259 +#if defined(CONFIG_RALINK_RT3883) || defined(CONFIG_RALINK_MT7620) 
16260 +#define NUM_RX_MAX_PROCESS 2
16261 +#else
16262 +#define NUM_RX_MAX_PROCESS 16
16263 +#endif
16264 +#endif
16265 +#define NUM_LRO_RX_DESC        16
16266 +
16267 +#if defined (CONFIG_SUPPORT_OPENWRT)
16268 +#define DEV_NAME        "eth0"
16269 +#define DEV2_NAME       "eth1"
16270 +#else
16271 +#define DEV_NAME        "eth2"
16272 +#define DEV2_NAME       "eth3"
16273 +#endif
16274 +
16275 +#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7621)
16276 +#define GMAC0_OFFSET    0xE000
16277 +#define GMAC2_OFFSET    0xE006
16278 +#else
16279 +#define GMAC0_OFFSET    0x28 
16280 +#define GMAC2_OFFSET    0x22
16281 +#endif
16282 +
16283 +#if defined(CONFIG_RALINK_RT6855A)
16284 +#define IRQ_ENET0      22
16285 +#elif defined(CONFIG_ARCH_MT7623)
16286 +#define IRQ_ENET0      232
16287 +#else
16288 +#define IRQ_ENET0      3       /* hardware interrupt #3, defined in RT2880 Soc Design Spec Rev 0.03, pp43 */
16289 +#endif
16290 +
16291 +#if defined (CONFIG_RAETH_HW_LRO)
16292 +#define        HW_LRO_TIMER_UNIT   1
16293 +#define        HW_LRO_REFRESH_TIME 50000
16294 +#define        HW_LRO_MAX_AGG_CNT      64
16295 +#define        HW_LRO_AGG_DELTA        1
16296 +#if defined(CONFIG_RAETH_PDMA_DVT)
16297 +#define        MAX_LRO_RX_LENGTH       10240
16298 +#else
16299 +#define        MAX_LRO_RX_LENGTH       (PAGE_SIZE - SKB_DATA_ALIGN(NET_SKB_PAD + sizeof(struct skb_shared_info)))
16300 +#endif
16301 +#define        HW_LRO_AGG_TIME         10      /* 200us */
16302 +#define        HW_LRO_AGE_TIME         50
16303 +#define        HW_LRO_BW_THRE          3000
16304 +#define        HW_LRO_PKT_INT_ALPHA    100
16305 +#endif  /* CONFIG_RAETH_HW_LRO */
16306 +#define FE_INT_STATUS_REG (*(volatile unsigned long *)(FE_INT_STATUS))
16307 +#define FE_INT_STATUS_CLEAN(reg) (*(volatile unsigned long *)(FE_INT_STATUS)) = reg
16308 +
16309 +//#define RAETH_DEBUG
16310 +#ifdef RAETH_DEBUG
16311 +#define RAETH_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
16312 +#else
16313 +#define RAETH_PRINT(fmt, args...) { }
16314 +#endif
16315 +
16316 +struct net_device_stats *ra_get_stats(struct net_device *dev);
16317 +
16318 +void ei_tx_timeout(struct net_device *dev);
16319 +int rather_probe(struct net_device *dev);
16320 +int ei_open(struct net_device *dev);
16321 +int ei_close(struct net_device *dev);
16322 +
16323 +int ra2882eth_init(void);
16324 +void ra2882eth_cleanup_module(void);
16325 +
16326 +void ei_xmit_housekeeping(unsigned long data);
16327 +
16328 +u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
16329 +u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
16330 +u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
16331 +u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data);
16332 +u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data);
16333 +void fe_sw_init(void);
16334 +
16335 +#endif
16336 --- /dev/null
16337 +++ b/drivers/net/ethernet/raeth/raether_hwlro.c
16338 @@ -0,0 +1,347 @@
16339 +#include <linux/module.h>
16340 +#include <linux/version.h>
16341 +#include <linux/kernel.h>
16342 +#include <linux/types.h>
16343 +#include <linux/pci.h>
16344 +#include <linux/init.h>
16345 +#include <linux/skbuff.h>
16346 +#include <linux/if_vlan.h>
16347 +#include <linux/if_ether.h>
16348 +#include <linux/fs.h>
16349 +#include <asm/uaccess.h>
16350 +#include <asm/rt2880/surfboardint.h>
16351 +#include <linux/delay.h>
16352 +#include <linux/sched.h>
16353 +#include <asm/rt2880/rt_mmap.h>
16354 +#include "ra2882ethreg.h"
16355 +#include "raether.h"
16356 +#include "ra_mac.h"
16357 +#include "ra_ioctl.h"
16358 +#include "ra_rfrw.h"
16359 +
16360 +#if defined(CONFIG_RAETH_HW_LRO_FORCE)
16361 +int set_fe_lro_ring1_cfg(struct net_device *dev)
16362 +{
16363 +       unsigned int ip;
16364 +
16365 +       netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring1_cfg()\n");
16366 +
16367 +       /* 1. Set RX ring mode to force port */
16368 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
16369 +
16370 +       /* 2. Configure lro ring */
16371 +       /* 2.1 set src/destination TCP ports */
16372 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 1122);
16373 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 3344);
16374 +       /* 2.2 set src/destination IPs */
16375 +       str_to_ip(&ip, "10.10.10.3");
16376 +       sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
16377 +       str_to_ip(&ip, "10.10.10.254");
16378 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
16379 +       /* 2.3 IPv4 force port mode */
16380 +       SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
16381 +       /* 2.4 IPv6 force port mode */
16382 +       SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING1, 1);
16383 +
16384 +       /* 3. Set Age timer: 10 msec. */
16385 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
16386 +
16387 +       /* 4. Valid LRO ring */
16388 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
16389 +
16390 +       return 0;
16391 +}
16392 +
16393 +int set_fe_lro_ring2_cfg(struct net_device *dev)
16394 +{
16395 +       unsigned int ip;
16396 +
16397 +       netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring2_cfg()\n");
16398 +
16399 +       /* 1. Set RX ring mode to force port */
16400 +       SET_PDMA_RXRING2_MODE(PDMA_RX_FORCE_PORT);
16401 +
16402 +       /* 2. Configure lro ring */
16403 +       /* 2.1 set src/destination TCP ports */
16404 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 5566);
16405 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 7788);
16406 +       /* 2.2 set src/destination IPs */
16407 +       str_to_ip(&ip, "10.10.10.3");
16408 +       sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
16409 +       str_to_ip(&ip, "10.10.10.254");
16410 +       sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
16411 +       /* 2.3 IPv4 force port mode */
16412 +       SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
16413 +       /* 2.4 IPv6 force port mode */
16414 +       SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING2, 1);
16415 +
16416 +       /* 3. Set Age timer: 10 msec. */
16417 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
16418 +
16419 +       /* 4. Valid LRO ring */
16420 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
16421 +
16422 +       return 0;
16423 +}
16424 +
16425 +int set_fe_lro_ring3_cfg(struct net_device *dev)
16426 +{
16427 +       unsigned int ip;
16428 +
16429 +       netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring3_cfg()\n");
16430 +
16431 +       /* 1. Set RX ring mode to force port */
16432 +       SET_PDMA_RXRING3_MODE(PDMA_RX_FORCE_PORT);
16433 +
16434 +       /* 2. Configure lro ring */
16435 +       /* 2.1 set src/destination TCP ports */
16436 +       SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 9900);
16437 +       SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 99);
16438 +       /* 2.2 set src/destination IPs */
16439 +       str_to_ip(&ip, "10.10.10.3");
16440 +       sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
16441 +       str_to_ip(&ip, "10.10.10.254");
16442 +       sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
16443 +       /* 2.3 IPv4 force port mode */
16444 +       SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
16445 +       /* 2.4 IPv6 force port mode */
16446 +       SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING3, 1);
16447 +
16448 +       /* 3. Set Age timer: 10 msec. */
16449 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
16450 +
16451 +       /* 4. Valid LRO ring */
16452 +       SET_PDMA_RXRING3_VALID(1);
16453 +
16454 +       return 0;
16455 +}
16456 +
16457 +int set_fe_lro_glo_cfg(struct net_device *dev)
16458 +{
16459 +       unsigned int regVal = 0;
16460 +
16461 +       netdev_printk(KERN_CRIT, dev, "set_fe_lro_glo_cfg()\n");
16462 +
16463 +       /* 1 Set max AGG timer: 10 msec. */
16464 +       SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
16465 +
16466 +       /* 2. Set max LRO agg count */
16467 +       SET_PDMA_LRO_MAX_AGG_CNT(HW_LRO_MAX_AGG_CNT);
16468 +
16469 +       /* PDMA prefetch enable setting */
16470 +       SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
16471 +
16472 +       /* 2.1 IPv4 checksum update enable */
16473 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
16474 +
16475 +       /* 3. Poll until the LRO relinquish bit clears */
16476 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
16477 +               ;
16478 +
16479 +       /* 4. Enable LRO */
16480 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
16481 +       regVal |= PDMA_LRO_EN;
16482 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
16483 +
16484 +       return 0;
16485 +}
16486 +#else
16487 +int set_fe_lro_auto_cfg(struct net_device *dev)
16488 +{
16489 +       unsigned int regVal = 0;
16490 +       unsigned int ip;
16491 +
16492 +       netdev_printk(KERN_CRIT, dev, "set_fe_lro_auto_cfg()\n");
16493 +
16494 +       /* 1.1 Set my IP_1 */
16495 +       str_to_ip(&ip, "10.10.10.254");
16496 +       sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
16497 +       sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
16498 +       sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
16499 +       sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
16500 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
16501 +
16502 +       /* 1.2 Set my IP_2 */
16503 +       str_to_ip(&ip, "10.10.20.254");
16504 +       sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
16505 +       sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
16506 +       sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
16507 +       sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
16508 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
16509 +
16510 +       /* 1.3 Set my IP_3 */
16511 +       sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
16512 +       sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
16513 +       sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
16514 +       sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
16515 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
16516 +
16517 +       /* 1.4 Set my IP_4 */
16518 +       sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
16519 +       sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
16520 +       sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
16521 +       sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
16522 +       SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
16523 +
16524 +       /* 2.1 Set RX ring1~3 to auto-learn modes */
16525 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
16526 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
16527 +       SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
16528 +
16529 +       /* 2.2 Valid LRO ring */
16530 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
16531 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
16532 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
16533 +       SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
16534 +
16535 +       /* 2.3 Set AGE timer */
16536 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
16537 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
16538 +       SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
16539 +
16540 +       /* 2.4 Set max AGG timer */
16541 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, HW_LRO_AGG_TIME);
16542 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, HW_LRO_AGG_TIME);
16543 +       SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, HW_LRO_AGG_TIME);
16544 +
16545 +       /* 2.5 Set max LRO agg count */
16546 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
16547 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
16548 +       SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
16549 +
16550 +       /* 3.0 IPv6 LRO enable */
16551 +       SET_PDMA_LRO_IPV6_EN(1);
16552 +
16553 +       /* 3.1 IPv4 checksum update enable */
16554 +       SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
16555 +
16556 +       /* 3.2 TCP push option check disable */
16557 +       //SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(0);
16558 +
16559 +       /* PDMA prefetch enable setting */
16560 +       SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
16561 +
16562 +       /* 3.2 switch priority comparison to byte count mode */
16563 +/* SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE); */
16564 +       SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_PKT_CNT_MODE);
16565 +
16566 +       /* 3.3 bandwidth threshold setting */
16567 +       SET_PDMA_LRO_BW_THRESHOLD(HW_LRO_BW_THRE);
16568 +
16569 +       /* 3.4 auto-learn score delta setting */
16570 +       sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
16571 +
16572 +       /* 3.5 Set ALT timer to 20us: (unit: 20us) */
16573 +       SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
16574 +       /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
16575 +       SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
16576 +
16577 +       /* 3.7 Set the minimum SDL0 room that must remain in an RXD for LRO aggregation */
16578 +       SET_PDMA_LRO_MIN_RXD_SDL(1522);
16579 +
16580 +       /* 4. Poll until the LRO relinquish bit clears */
16581 +       while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
16582 +               ;
16583 +
16584 +       /* 5. Enable LRO */
16585 +       regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
16586 +       regVal |= PDMA_LRO_EN;
16587 +       sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
16588 +
16589 +       return 0;
16590 +}
16591 +#endif /* CONFIG_RAETH_HW_LRO_FORCE */
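+/*
+ * Summary of the two configuration paths above: with
+ * CONFIG_RAETH_HW_LRO_FORCE, RX rings 1-3 are pinned to fixed
+ * source/destination IP and TCP port tuples (force-port mode); otherwise
+ * four local "my IP" addresses are programmed and rings 1-3 are left in
+ * auto-learn mode so the hardware picks the heaviest TCP flows itself.
+ * In both cases aggregation is bounded by HW_LRO_AGG_TIME,
+ * HW_LRO_AGE_TIME and HW_LRO_MAX_AGG_CNT, and LRO is only enabled after
+ * the relinquish bit in ADMA_LRO_CTRL_DW0 has cleared.
+ */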
16592 +
16593 +int fe_hw_lro_init(struct net_device *dev)
16594 +{
16595 +       int i;
16596 +       END_DEVICE *ei_local = netdev_priv(dev);
16597 +
16598 +       /* Initial RX Ring 3 */
16599 +       ei_local->rx_ring3 =
16600 +           pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16601 +                                &ei_local->phy_rx_ring3);
16602 +       for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16603 +               memset(&ei_local->rx_ring3[i], 0, sizeof(struct PDMA_rxdesc));
16604 +               ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
16605 +               ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
16606 +               ei_local->rx_ring3[i].rxd_info2.PLEN0 =
16607 +                   SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16608 +               ei_local->rx_ring3[i].rxd_info2.PLEN1 =
16609 +                   SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16610 +               ei_local->rx_ring3[i].rxd_info1.PDP0 =
16611 +                   dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data,
16612 +                                  MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16613 +       }
16614 +       netdev_printk(KERN_CRIT, dev,
16615 +                     "\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",
16616 +                     ei_local->phy_rx_ring3, ei_local->rx_ring3);
16617 +       /* Initial RX Ring 2 */
16618 +       ei_local->rx_ring2 =
16619 +           pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16620 +                                &ei_local->phy_rx_ring2);
16621 +       for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16622 +               memset(&ei_local->rx_ring2[i], 0, sizeof(struct PDMA_rxdesc));
16623 +               ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
16624 +               ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
16625 +               ei_local->rx_ring2[i].rxd_info2.PLEN0 =
16626 +                   SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16627 +               ei_local->rx_ring2[i].rxd_info2.PLEN1 =
16628 +                   SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16629 +               ei_local->rx_ring2[i].rxd_info1.PDP0 =
16630 +                   dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data,
16631 +                                  MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16632 +       }
16633 +       netdev_printk(KERN_CRIT, dev,
16634 +                     "\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",
16635 +                     ei_local->phy_rx_ring2, ei_local->rx_ring2);
16636 +       /* Initial RX Ring 1 */
16637 +       ei_local->rx_ring1 =
16638 +           pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
16639 +                                &ei_local->phy_rx_ring1);
16640 +       for (i = 0; i < NUM_LRO_RX_DESC; i++) {
16641 +               memset(&ei_local->rx_ring1[i], 0, sizeof(struct PDMA_rxdesc));
16642 +               ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
16643 +               ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
16644 +               ei_local->rx_ring1[i].rxd_info2.PLEN0 =
16645 +                   SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
16646 +               ei_local->rx_ring1[i].rxd_info2.PLEN1 =
16647 +                   SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
16648 +               ei_local->rx_ring1[i].rxd_info1.PDP0 =
16649 +                   dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data,
16650 +                                  MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
16651 +       }
16652 +       netdev_printk(KERN_CRIT, dev,
16653 +                     "\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",
16654 +                     ei_local->phy_rx_ring1, ei_local->rx_ring1);
16655 +
16656 +       sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
16657 +       sysRegWrite(RX_MAX_CNT3, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16658 +       sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16659 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
16660 +       sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
16661 +       sysRegWrite(RX_MAX_CNT2, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16662 +       sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16663 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
16664 +       sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
16665 +       sysRegWrite(RX_MAX_CNT1, cpu_to_le32((u32) NUM_LRO_RX_DESC));
16666 +       sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
16667 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
16668 +
16669 +#if defined(CONFIG_RAETH_HW_LRO_FORCE)
16670 +       set_fe_lro_ring1_cfg(dev);
16671 +       set_fe_lro_ring2_cfg(dev);
16672 +       set_fe_lro_ring3_cfg(dev);
16673 +       set_fe_lro_glo_cfg(dev);
16674 +#else
16675 +       set_fe_lro_auto_cfg(dev);
16676 +#endif /* CONFIG_RAETH_HW_LRO_FORCE */
16677 +
16678 +       /* HW LRO parameter settings */
16679 +       ei_local->hw_lro_alpha = HW_LRO_PKT_INT_ALPHA;
16680 +       ei_local->hw_lro_fix_setting = 1;
16681 +
16682 +       return 1;
16683 +}
16684 +EXPORT_SYMBOL(fe_hw_lro_init);
16685 +
16686 --- /dev/null
16687 +++ b/drivers/net/ethernet/raeth/raether_pdma.c
16688 @@ -0,0 +1,1121 @@
16689 +#include <linux/module.h>
16690 +#include <linux/version.h>
16691 +#include <linux/kernel.h>
16692 +#include <linux/types.h>
16693 +#include <linux/pci.h>
16694 +#include <linux/init.h>
16695 +#include <linux/skbuff.h>
16696 +#include <linux/if_vlan.h>
16697 +#include <linux/if_ether.h>
16698 +#include <linux/fs.h>
16699 +#include <asm/uaccess.h>
16700 +#include <asm/rt2880/surfboardint.h>
16701 +#if defined (CONFIG_RAETH_TSO)
16702 +#include <linux/tcp.h>
16703 +#include <net/ipv6.h>
16704 +#include <linux/ip.h>
16705 +#include <net/ip.h>
16706 +#include <net/tcp.h>
16707 +#include <linux/in.h>
16708 +#include <linux/ppp_defs.h>
16709 +#include <linux/if_pppox.h>
16710 +#endif
16711 +#if defined (CONFIG_RAETH_LRO)
16712 +#include <linux/inet_lro.h>
16713 +#endif
16714 +#include <linux/delay.h>
16715 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
16716 +#include <linux/sched.h>
16717 +#endif
16718 +
16719 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
16720 +#include <asm/rt2880/rt_mmap.h>
16721 +#else
16722 +#include <linux/libata-compat.h>
16723 +#endif
16724 +
16725 +#include "ra2882ethreg.h"
16726 +#include "raether.h"
16727 +#include "ra_mac.h"
16728 +#include "ra_ioctl.h"
16729 +#include "ra_rfrw.h"
16730 +#ifdef CONFIG_RAETH_NETLINK
16731 +#include "ra_netlink.h"
16732 +#endif
16733 +#if defined (CONFIG_RAETH_QOS)
16734 +#include "ra_qos.h"
16735 +#endif
16736 +
16737 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
16738 +#include "../../../net/nat/hw_nat/ra_nat.h"
16739 +#endif
16740 +#if defined(CONFIG_RAETH_PDMA_DVT)
16741 +#include "dvt/raether_pdma_dvt.h"
16742 +#endif  /* CONFIG_RAETH_PDMA_DVT */
16743 +
16744 +#if !defined(CONFIG_RA_NAT_NONE)
16745 +/* bruce+
16746 + */
16747 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
16748 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
16749 +#endif
16750 +
16751 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
16752 +/* Qwert+
16753 + */
16754 +#include <asm/mipsregs.h>
16755 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
16756 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
16757 +#endif /* CONFIG_RA_CLASSIFIER */
16758 +
16759 +#if defined (CONFIG_RALINK_RT3052_MP2)
16760 +int32_t mcast_rx(struct sk_buff * skb);
16761 +int32_t mcast_tx(struct sk_buff * skb);
16762 +#endif
16763 +
16764 +#if 0
16765 +#ifdef RA_MTD_RW_BY_NUM
16766 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
16767 +#else
16768 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
16769 +#endif
16770 +#endif
16771 +/* gmac driver feature set config */
16772 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
16773 +#undef DELAY_INT
16774 +#else
16775 +#if defined     (CONFIG_ARCH_MT7623)
16776 +#undef DELAY_INT
16777 +#else
16778 +#define DELAY_INT       1
16779 +#endif
16780 +#endif
16781 +
16782 +//#define CONFIG_UNH_TEST
16783 +/* end of config */
16784 +
16785 +#if defined (CONFIG_RAETH_JUMBOFRAME)
16786 +#define        MAX_RX_LENGTH   4096
16787 +#else
16788 +#define        MAX_RX_LENGTH   1536
16789 +#endif
16790 +
16791 +extern struct net_device               *dev_raether;
16792 +
16793 +
16794 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
16795 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
16796 +extern int rx_calc_idx1;
16797 +#endif
16798 +#endif
16799 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
16800 +extern int rx_calc_idx0;
16801 +static unsigned long tx_cpu_owner_idx0=0;
16802 +#endif
16803 +extern unsigned long tx_ring_full;
16804 +
16805 +#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
16806 +#include "ra_ethtool.h"
16807 +extern struct ethtool_ops      ra_ethtool_ops;
16808 +#ifdef CONFIG_PSEUDO_SUPPORT
16809 +extern struct ethtool_ops      ra_virt_ethtool_ops;
16810 +#endif // CONFIG_PSEUDO_SUPPORT //
16811 +#endif // (CONFIG_ETHTOOL //
16812 +
16813 +#ifdef CONFIG_RALINK_VISTA_BASIC
16814 +int is_switch_175c = 1;
16815 +#endif
16816 +
16817 +#ifdef CONFIG_RAETH_PDMATX_QDMARX      /* QDMA RX */
16818 +struct QDMA_txdesc *free_head = NULL;
16819 +#endif
16820 +
16821 +//#if defined (CONFIG_RAETH_LRO)
16822 +#if 0
16823 +unsigned int lan_ip;
16824 +struct lro_para_struct lro_para; 
16825 +int lro_flush_needed;
16826 +extern char const *nvram_get(int index, char *name);
16827 +#endif
16828 +
16829 +#define KSEG1                   0xa0000000
16830 +#define PHYS_TO_VIRT(x)         ((void *)((x) | KSEG1))
16831 +#define VIRT_TO_PHYS(x)         ((unsigned long)(x) & ~KSEG1)
16832 +
16833 +extern void set_fe_dma_glo_cfg(void);
16834 +
16835 +/*
16836 + *  @brief calculate the number of TX descriptors needed for a fragment
16837 + *
16838 + *  @param size   fragment length in bytes
16839 + *
16840 + *  @return frag_txd_num
16841 + */
16842 +
16843 +unsigned int cal_frag_txd_num(unsigned int size)
16844 +{
16845 +       unsigned int frag_txd_num = 0;
16846 +       if(size == 0)
16847 +               return 0;
16848 +       while(size > 0){
16849 +               if(size > MAX_TXD_LEN){
16850 +                       frag_txd_num++;
16851 +                       size -= MAX_TXD_LEN;
16852 +               }else{
16853 +                       frag_txd_num++;
16854 +                       size = 0;
16855 +               }
16856 +       }
16857 +       return frag_txd_num;
16858 +
16859 +}
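+/*
+ * For size > 0 the loop above is equivalent to a ceiling division, so a
+ * hypothetical simplification (not applied here) would be:
+ *
+ *     return DIV_ROUND_UP(size, MAX_TXD_LEN);
+ *
+ * using DIV_ROUND_UP() from <linux/kernel.h>.
+ */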
16860 +
16861 +#ifdef CONFIG_RAETH_PDMATX_QDMARX      /* QDMA RX */
16862 +bool fq_qdma_init(struct net_device *dev)
16863 +{
16864 +       END_DEVICE* ei_local = netdev_priv(dev);
16865 +       unsigned int phy_free_head;
16866 +       unsigned int phy_free_tail;
16867 +       unsigned int *free_page_head = NULL;
16868 +       unsigned int phy_free_page_head;
16869 +       int i;
16870 +    
16871 +       free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
16872 +       if (unlikely(free_head == NULL)){
16873 +               printk(KERN_ERR "QDMA FQ descriptor not available...\n");
16874 +               return 0;
16875 +       }
16876 +       memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
16877 +
16878 +       free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
16879 +       if (unlikely(free_page_head == NULL)){
16880 +               printk(KERN_ERR "QDMA FQ page not available...\n");
16881 +               return 0;
16882 +       }       
16883 +       for (i=0; i < NUM_QDMA_PAGE; i++) {
16884 +               free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
16885 +               if(i < (NUM_QDMA_PAGE-1)){
16886 +                       free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
16887 +
16888 +
16889 +#if 0
16890 +                       printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
16891 +                       printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
16892 +                       printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
16893 +                       printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
16894 +#endif
16895 +               }
16896 +               free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
16897 +
16898 +       }
16899 +       phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
16900 +
16901 +       printk("phy_free_head is 0x%x!!!\n", phy_free_head);
16902 +       printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
16903 +       sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
16904 +       sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
16905 +       sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
16906 +       sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
16907 +
16908 +       ei_local->free_head = free_head;
16909 +       ei_local->phy_free_head = phy_free_head;
16910 +       ei_local->free_page_head = free_page_head;
16911 +       ei_local->phy_free_page_head = phy_free_page_head;
16912 +    return 1;
16913 +}
16914 +#endif
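+/*
+ * Layout created by fq_qdma_init(): NUM_QDMA_PAGE QDMA_txdesc entries are
+ * allocated contiguously and chained through txd_info2.NDP, each SDP
+ * pointing at its own QDMA_PAGE_SIZE slice of a single buffer allocation.
+ * The physical head/tail addresses, the page count and the buffer length
+ * are then programmed into QDMA_FQ_HEAD/TAIL/CNT/BLEN so the hardware can
+ * use the chain as its free-buffer queue.
+ */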
16915 +
16916 +int fe_dma_init(struct net_device *dev)
16917 +{
16918 +
16919 +       int             i;
16920 +       unsigned int    regVal;
16921 +       END_DEVICE* ei_local = netdev_priv(dev);
16922 +#if defined (CONFIG_RAETH_QOS)
16923 +       int             j;
16924 +#endif
16925 +
16926 +       while(1)
16927 +       {
16928 +               regVal = sysRegRead(PDMA_GLO_CFG);
16929 +               if((regVal & RX_DMA_BUSY))
16930 +               {
16931 +                       printk("\n  RX_DMA_BUSY !!! ");
16932 +                       continue;
16933 +               }
16934 +               if((regVal & TX_DMA_BUSY))
16935 +               {
16936 +                       printk("\n  TX_DMA_BUSY !!! ");
16937 +                       continue;
16938 +               }
16939 +               break;
16940 +       }
16941 +
16942 +#if defined(CONFIG_RAETH_PDMA_DVT)
16943 +       pdma_dvt_set_dma_mode();
16944 +#endif  /* CONFIG_RAETH_PDMA_DVT */
16945 +
16946 +#if defined (CONFIG_RAETH_QOS)
16947 +       for (i=0;i<NUM_TX_RINGS;i++){
16948 +               for (j=0;j<NUM_TX_DESC;j++){
16949 +                       ei_local->skb_free[i][j]=0;
16950 +               }
16951 +                ei_local->free_idx[i]=0;
16952 +       }
16953 +       /*
16954 +        * RT2880: 2 x TX_Ring, 1 x Rx_Ring
16955 +        * RT2883: 4 x TX_Ring, 1 x Rx_Ring
16956 +        * RT3883: 4 x TX_Ring, 1 x Rx_Ring
16957 +        * RT3052: 4 x TX_Ring, 1 x Rx_Ring
16958 +        */
16959 +       fe_tx_desc_init(dev, 0, 3, 1);
16960 +       if (ei_local->tx_ring0 == NULL) {
16961 +               printk("RAETH: tx ring0 allocation failed\n");
16962 +               return 0;
16963 +       }
16964 +
16965 +       fe_tx_desc_init(dev, 1, 3, 1);
16966 +       if (ei_local->tx_ring1 == NULL) {
16967 +               printk("RAETH: tx ring1 allocation failed\n");
16968 +               return 0;
16969 +       }
16970 +
16971 +       printk("\nphy_tx_ring0 = %08x, tx_ring0 = %p, size: %d bytes\n", ei_local->phy_tx_ring0, ei_local->tx_ring0, sizeof(struct PDMA_txdesc));
16972 +
16973 +       printk("\nphy_tx_ring1 = %08x, tx_ring1 = %p, size: %d bytes\n", ei_local->phy_tx_ring1, ei_local->tx_ring1, sizeof(struct PDMA_txdesc));
16974 +
16975 +#if ! defined (CONFIG_RALINK_RT2880)
16976 +       fe_tx_desc_init(dev, 2, 3, 1);
16977 +       if (ei_local->tx_ring2 == NULL) {
16978 +               printk("RAETH: tx ring2 allocation failed\n");
16979 +               return 0;
16980 +       }
16981 +
16982 +       fe_tx_desc_init(dev, 3, 3, 1);
16983 +       if (ei_local->tx_ring3 == NULL) {
16984 +               printk("RAETH: tx ring3 allocation failed\n");
16985 +               return 0;
16986 +       }
16987 +
16988 +       printk("\nphy_tx_ring2 = %08x, tx_ring2 = %p, size: %d bytes\n", ei_local->phy_tx_ring2, ei_local->tx_ring2, sizeof(struct PDMA_txdesc));
16989 +
16990 +       printk("\nphy_tx_ring3 = %08x, tx_ring3 = %p, size: %d bytes\n", ei_local->phy_tx_ring3, ei_local->tx_ring3, sizeof(struct PDMA_txdesc));
16991 +
16992 +#endif // CONFIG_RALINK_RT2880 //
16993 +#else
16994 +       for (i=0;i<NUM_TX_DESC;i++){
16995 +               ei_local->skb_free[i]=0;
16996 +       }
16997 +       ei_local->free_idx =0;
16998 +#if defined (CONFIG_MIPS)
16999 +       ei_local->tx_ring0 = pci_alloc_consistent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0);
17000 +#else
17001 +       ei_local->tx_ring0 = dma_alloc_coherent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0, GFP_KERNEL);
17002 +#endif
17003 +       printk("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n", ei_local->phy_tx_ring0, ei_local->tx_ring0);
17004 +
17005 +       for (i=0; i < NUM_TX_DESC; i++) {
17006 +               memset(&ei_local->tx_ring0[i],0,sizeof(struct PDMA_txdesc));
17007 +               ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
17008 +               ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1;
17009 +
17010 +       }
17011 +#endif // CONFIG_RAETH_QOS
17012 +
17013 +#ifdef CONFIG_RAETH_PDMATX_QDMARX      /* QDMA RX */
17014 +
17015 +       fq_qdma_init(dev);
17016 +
17017 +       while(1)
17018 +       {
17019 +               regVal = sysRegRead(QDMA_GLO_CFG);
17020 +               if((regVal & RX_DMA_BUSY))
17021 +               {
17022 +                       printk("\n  RX_DMA_BUSY !!! ");
17023 +                       continue;
17024 +               }
17025 +               if((regVal & TX_DMA_BUSY))
17026 +               {
17027 +                       printk("\n  TX_DMA_BUSY !!! ");
17028 +                       continue;
17029 +               }
17030 +               break;
17031 +       }
17032 +
17033 +       /* Initial RX Ring 0*/
17034 +       
17035 +#ifdef CONFIG_32B_DESC
17036 +       ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17037 +       ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
17038 +#else
17039 +       ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
17040 +#endif
17041 +       for (i = 0; i < NUM_QRX_DESC; i++) {
17042 +               memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
17043 +               ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
17044 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17045 +               ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
17046 +               ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17047 +#else
17048 +               ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
17049 +#endif
17050 +               ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17051 +       }
17052 +       printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
17053 +
17054 +       regVal = sysRegRead(QDMA_GLO_CFG);
17055 +       regVal &= 0x000000FF;
17056 +
17057 +       sysRegWrite(QDMA_GLO_CFG, regVal);
17058 +       regVal=sysRegRead(QDMA_GLO_CFG);
17059 +
17060 +       /* Tell the adapter where the TX/RX rings are located. */
17061 +
17062 +       sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
17063 +       sysRegWrite(QRX_MAX_CNT_0,  cpu_to_le32((u32) NUM_QRX_DESC));
17064 +       sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
17065 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17066 +       rx_calc_idx0 = rx_dma_owner_idx0 =  sysRegRead(QRX_CRX_IDX_0);
17067 +#endif
17068 +       sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
17069 +
17070 +        ei_local->rx_ring0 = ei_local->qrx_ring;
17071 +
17072 +#else  /* PDMA RX */
17073 +
17074 +       /* Initial RX Ring 0*/
17075 +#ifdef CONFIG_32B_DESC
17076 +       ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17077 +       ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
17078 +#else
17079 +#if defined (CONFIG_MIPS)
17080 +       ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
17081 +#else  
17082 +       ei_local->rx_ring0 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0, GFP_KERNEL);
17083 +#endif
17084 +#endif
17085 +       for (i = 0; i < NUM_RX_DESC; i++) {
17086 +               memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
17087 +               ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
17088 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17089 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
17090 +               ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17091 +#else
17092 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
17093 +#endif
17094 +               ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17095 +       }
17096 +       printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
17097 +
17098 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17099 +       /* Initial RX Ring 1*/
17100 +#ifdef CONFIG_32B_DESC
17101 +       ei_local->rx_ring1 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
17102 +       ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
17103 +#else
17104 +#if defined (CONFIG_MIPS)
17105 +       ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
17106 +#else
17107 +       ei_local->rx_ring1 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1, GFP_KERNEL);
17108 +
17109 +#endif
17110 +#endif
17111 +       for (i = 0; i < NUM_RX_DESC; i++) {
17112 +               memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
17113 +               ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
17114 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
17115 +               ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
17116 +               ei_local->rx_ring1[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
17117 +#else
17118 +               ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
17119 +#endif
17120 +               ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17121 +       }
17122 +       printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
17123 +#if defined(CONFIG_ARCH_MT7623)
17124 +    /* Initial RX Ring 2*/
17125 +    ei_local->rx_ring2 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring2);
17126 +    for (i = 0; i < NUM_RX_DESC; i++) {
17127 +        memset(&ei_local->rx_ring2[i],0,sizeof(struct PDMA_rxdesc));
17128 +        ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
17129 +        ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
17130 +        ei_local->rx_ring2[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
17131 +        ei_local->rx_ring2[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
17132 +        ei_local->rx_ring2[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17133 +    }
17134 +    printk("\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",ei_local->phy_rx_ring2,ei_local->rx_ring2);
17135 +    /* Initial RX Ring 3*/
17136 +       ei_local->rx_ring3 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring3);
17137 +       for (i = 0; i < NUM_RX_DESC; i++) {
17138 +               memset(&ei_local->rx_ring3[i],0,sizeof(struct PDMA_rxdesc));
17139 +               ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
17140 +               ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
17141 +               ei_local->rx_ring3[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
17142 +               ei_local->rx_ring3[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
17143 +               ei_local->rx_ring3[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
17144 +       }
17145 +       printk("\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",ei_local->phy_rx_ring3,ei_local->rx_ring3); 
17146 +#endif  /* CONFIG_ARCH_MT7623 */
17147 +#endif
17148 +
17149 +#endif
17150 +
17151 +       regVal = sysRegRead(PDMA_GLO_CFG);
17152 +       regVal &= 0x000000FF;
17153 +       sysRegWrite(PDMA_GLO_CFG, regVal);
17154 +       regVal=sysRegRead(PDMA_GLO_CFG);
17155 +
17156 +       /* Tell the adapter where the TX/RX rings are located. */
17157 +#if !defined (CONFIG_RAETH_QOS)
17158 +        sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_tx_ring0));
17159 +       sysRegWrite(TX_MAX_CNT0, cpu_to_le32((u32) NUM_TX_DESC));
17160 +       sysRegWrite(TX_CTX_IDX0, 0);
17161 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17162 +       tx_cpu_owner_idx0 = 0;
17163 +#endif
17164 +       sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
17165 +#endif
17166 +
17167 +#ifdef CONFIG_RAETH_PDMATX_QDMARX      /* QDMA RX */
17168 +       sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
17169 +       sysRegWrite(QRX_MAX_CNT_0,  cpu_to_le32((u32) NUM_QRX_DESC));
17170 +       sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
17171 +#else  /* PDMA RX */
17172 +       sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
17173 +       sysRegWrite(RX_MAX_CNT0,  cpu_to_le32((u32) NUM_RX_DESC));
17174 +       sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17175 +#endif
17176 +
17177 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17178 +       rx_calc_idx0 =  sysRegRead(RX_CALC_IDX0);
17179 +#endif
17180 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
17181 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17182 +       sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
17183 +       sysRegWrite(RX_MAX_CNT1,  cpu_to_le32((u32) NUM_RX_DESC));
17184 +       sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17185 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17186 +       rx_calc_idx1 =  sysRegRead(RX_CALC_IDX1);
17187 +#endif
17188 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
17189 +#if defined(CONFIG_ARCH_MT7623)
17190 +       sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
17191 +       sysRegWrite(RX_MAX_CNT2,  cpu_to_le32((u32) NUM_RX_DESC));
17192 +       sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17193 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
17194 +    sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
17195 +       sysRegWrite(RX_MAX_CNT3,  cpu_to_le32((u32) NUM_RX_DESC));
17196 +       sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
17197 +       sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
17198 +#endif  /* CONFIG_ARCH_MT7623 */
17199 +#endif
17200 +#if defined (CONFIG_RALINK_RT6855A)
17201 +       regVal = sysRegRead(RX_DRX_IDX0);
17202 +       regVal = (regVal == 0)? (NUM_RX_DESC - 1) : (regVal - 1);
17203 +       sysRegWrite(RX_CALC_IDX0, cpu_to_le32(regVal));
17204 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17205 +       rx_calc_idx0 =  sysRegRead(RX_CALC_IDX0);
17206 +#endif
17207 +       regVal = sysRegRead(TX_DTX_IDX0);
17208 +       sysRegWrite(TX_CTX_IDX0, cpu_to_le32(regVal));
17209 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17210 +       tx_cpu_owner_idx0 = regVal;
17211 +#endif
17212 +       ei_local->free_idx = regVal;
17213 +#endif
17214 +
17215 +#if defined (CONFIG_RAETH_QOS)
17216 +       set_scheduler_weight();
17217 +       set_schedule_pause_condition();
17218 +       set_output_shaper();
17219 +#endif
17220 +
17221 +       set_fe_dma_glo_cfg();
17222 +
17223 +       return 1;
17224 +}
17225 +
17226 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
17227 +{
17228 +       unsigned int    length=skb->len;
17229 +       END_DEVICE*     ei_local = netdev_priv(dev);
17230 +#ifndef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17231 +       unsigned long   tx_cpu_owner_idx0 = sysRegRead(TX_CTX_IDX0);
17232 +#endif
17233 +#if defined (CONFIG_RAETH_TSO)
17234 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17235 +       unsigned long   ctx_idx_start_addr = tx_cpu_owner_idx0;
17236 +#endif
17237 +        struct iphdr *iph = NULL;
17238 +        struct tcphdr *th = NULL;
17239 +       struct skb_frag_struct *frag;
17240 +       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
17241 +       int i=0;
17242 +       unsigned int len, size, offset, frag_txd_num, skb_txd_num ;
17243 +#endif // CONFIG_RAETH_TSO //
17244 +
17245 +#if defined (CONFIG_RAETH_TSOV6)
17246 +       struct ipv6hdr *ip6h = NULL;
17247 +#endif
17248 +
17249 +#ifdef CONFIG_PSEUDO_SUPPORT
17250 +       PSEUDO_ADAPTER *pAd;
17251 +#endif
17252 +
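+       /* Busy-wait until the DMA engine hands back the descriptor at the current CPU index (DDONE set); every polling pass is counted as a TX error. */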
17253 +       while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17254 +       {
17255 +#ifdef CONFIG_PSEUDO_SUPPORT
17256 +               if (gmac_no == 2) {
17257 +                       if (ei_local->PseudoDev != NULL) {
17258 +                               pAd = netdev_priv(ei_local->PseudoDev);
17259 +                               pAd->stat.tx_errors++;
17260 +                       }
17261 +               } else
17262 +#endif
17263 +                       ei_local->stat.tx_errors++;
17264 +       }
17265 +
17266 +#if !defined (CONFIG_RAETH_TSO)
17267 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
17268 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = length;
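+       /* Pick the forwarding destination for this frame: FP_BMAP on MT7620, FPORT (GMAC1/GMAC2) on MT7621/MT7623, or the PN/QN fields on older SoCs. */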
17269 +#if defined (CONFIG_RALINK_MT7620)
17270 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
17271 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17272 +       if (gmac_no == 1) {
17273 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
17274 +       }else {
17275 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
17276 +       }
17277 +#else
17278 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
17279 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
17280 +#endif
17281 +
17282 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
17283 +       if (skb->ip_summed == CHECKSUM_PARTIAL){
17284 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
17285 +       }else {
17286 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
17287 +       }
17288 +#endif
17289 +
17290 +#ifdef CONFIG_RAETH_HW_VLAN_TX
17291 +       if(vlan_tx_tag_present(skb)) {
17292 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17293 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
17294 +#else
17295 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
17296 +#endif
17297 +       }else {
17298 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17299 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
17300 +#else
17301 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
17302 +#endif
17303 +       }
17304 +#endif
17305 +
17306 +#if defined(CONFIG_RAETH_PDMA_DVT)
17307 +    raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
17308 +#endif  /* CONFIG_RAETH_PDMA_DVT */
17309 +
17310 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17311 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
17312 +           if(ra_sw_nat_hook_rx!= NULL){
17313 +#if defined (CONFIG_RALINK_MT7620)
17314 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
17315 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17316 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
17317 +#else
17318 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
17319 +#endif
17320 +               FOE_MAGIC_TAG(skb) = 0;
17321 +           }
17322 +       }
17323 +#endif
17324 +       
17325 +#if defined(CONFIG_RAETH_PDMA_DVT)
17326 +    raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
17327 +#endif  /* CONFIG_RAETH_PDMA_DVT */
17328 +
17329 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
17330 +
17331 +#if 0  
17332 +       printk("---------------\n");
17333 +       printk("tx_info1=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1);
17334 +       printk("tx_info2=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2);
17335 +       printk("tx_info3=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3);
17336 +       printk("tx_info4=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4);
17337 +#endif
17338 +
17339 +#else
17340 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
17341 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = (length - skb->data_len);
17342 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = nr_frags ? 0:1;
17343 +#if defined (CONFIG_RALINK_MT7620)
17344 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
17345 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17346 +       if (gmac_no == 1) {
17347 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
17348 +       }else {
17349 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
17350 +       }
17351 +#else
17352 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
17353 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
17354 +#endif
17355 +       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO = 0;
17356 +
17357 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
17358 +       if (skb->ip_summed == CHECKSUM_PARTIAL){
17359 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
17360 +       }else {
17361 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
17362 +       }
17363 +#endif
17364 +
17365 +#ifdef CONFIG_RAETH_HW_VLAN_TX
17366 +       if(vlan_tx_tag_present(skb)) {
17367 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17368 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
17369 +#else
17370 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
17371 +#endif
17372 +       }else {
17373 +#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17374 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
17375 +#else
17376 +           ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
17377 +#endif
17378 +       }
17379 +#endif
17380 +   
17381 +#if defined(CONFIG_RAETH_PDMA_DVT)
17382 +    raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
17383 +#endif  /* CONFIG_RAETH_PDMA_DVT */
17384 +
17385 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17386 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
17387 +           if(ra_sw_nat_hook_rx!= NULL){
17388 +#if defined (CONFIG_RALINK_MT7620)
17389 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
17390 +#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
17391 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
17392 +#else
17393 +               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
17394 +#endif
17395 +               FOE_MAGIC_TAG(skb) = 0;
17396 +           }
17397 +       }
17398 +#endif
17399 +
17400 +       skb_txd_num = 1;
17401 +
17402 +       if(nr_frags > 0) {
17403 +
17404 +               for(i=0;i<nr_frags;i++) {
17405 +                       frag = &skb_shinfo(skb)->frags[i];
17406 +                       offset = frag->page_offset;
17407 +                       len = frag->size;
17408 +                       frag_txd_num = cal_frag_txd_num(len);
17409 +
17410 +                       while(frag_txd_num > 0){
17411 +                               if(len < MAX_TXD_LEN)
17412 +                                       size = len;
17413 +                               else
17414 +                                       size = MAX_TXD_LEN;
17415 +                               if(skb_txd_num%2 == 0) { 
17416 +                                       tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC; 
17417 +
17418 +                                       while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17419 +                                       {
17420 +#ifdef CONFIG_PSEUDO_SUPPORT
17421 +                                               if (gmac_no == 2) {
17422 +                                                       if (ei_local->PseudoDev != NULL) {
17423 +                                                               pAd = netdev_priv(ei_local->PseudoDev);
17424 +                                                               pAd->stat.tx_errors++;
17425 +                                                       }
17426 +                                               } else
17427 +#endif
17428 +                                                       ei_local->stat.tx_errors++;
17429 +                                       }
17430 +
17431 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)                                 
17432 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
17433 +#else
17434 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
17435 +#endif
17436 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = size;
17437 +
17438 +                                       if( (i==(nr_frags-1)) && (frag_txd_num == 1))
17439 +                                               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 1;
17440 +                                       else
17441 +                                               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 0;
17442 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
17443 +                               }else { 
17444 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)                                 
17445 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
17446 +#else
17447 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
17448 +
17449 +#endif
17450 +                                       ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1 = size;
17451 +                                       if( (i==(nr_frags-1)) && (frag_txd_num == 1))
17452 +                                               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 1;
17453 +                                       else
17454 +                                               ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 0;
17455 +                               }
17456 +                               offset += size;
17457 +                               len -= size;
17458 +                               frag_txd_num--;
17459 +                               skb_txd_num++;
17460 +                       }
17461 +               }
17462 +       }
17463 +
17464 +#if defined(CONFIG_RAETH_PDMA_DVT)
17465 +    if( (pdma_dvt_get_debug_test_config() & PDMA_TEST_TSO_DEBUG) ){
17466 +        printk("skb_shinfo(skb)->gso_segs = %d\n", skb_shinfo(skb)->gso_segs);
17467 +    }
17468 +#endif  /* CONFIG_RAETH_PDMA_DVT */
17469 +       /* fill in MSS info in tcp checksum field */
17470 +       if(skb_shinfo(skb)->gso_segs > 1) {
17471 +
17472 +//             TsoLenUpdate(skb->len);
17473 +
17474 +               /* TCP over IPv4 */
17475 +               iph = (struct iphdr *)skb_network_header(skb);
17476 +#if defined (CONFIG_RAETH_TSOV6)
17477 +               /* TCP over IPv6 */
17478 +               ip6h = (struct ipv6hdr *)skb_network_header(skb);
17479 +#endif                         
17480 +               if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
17481 +                       th = (struct tcphdr *)skb_transport_header(skb);
17482 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17483 +                       ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
17484 +#else
17485 +                       ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
17486 +#endif
17487 +                       th->check = htons(skb_shinfo(skb)->gso_size);
17488 +#if defined (CONFIG_MIPS)
17489 +                       dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
17490 +#else
17491 +                       dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
17492 +#endif
17493 +               } 
17494 +           
17495 +#if defined (CONFIG_RAETH_TSOV6)
17496 +               /* TCP over IPv6 */
17497 +               else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
17498 +                       th = (struct tcphdr *)skb_transport_header(skb);
17499 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17500 +                       ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
17501 +#else
17502 +                       ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
17503 +#endif
17504 +                       th->check = htons(skb_shinfo(skb)->gso_size);
17505 +#if defined (CONFIG_MIPS)
17506 +                       dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
17507 +#else
17508 +                       dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
17509 +#endif
17510 +               }
17511 +#endif // CONFIG_RAETH_TSOV6 //
17512 +       }
17513 +
17514 +#if defined(CONFIG_RAETH_PDMA_DVT)
17515 +    raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
17516 +#endif  /* CONFIG_RAETH_PDMA_DVT */
17517 +
17518 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17519 +       ei_local->tx_ring0[ctx_idx_start_addr].txd_info2.DDONE_bit = 0;
17520 +#else
17521 +       ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info2.DDONE_bit = 0;
17522 +#endif
17523 +#endif // CONFIG_RAETH_TSO //
17524 +
17525 +       tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
17526 +       while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
17527 +       {
17528 +//             printk(KERN_ERR "%s: TXD=%lu TX DMA is Busy !!\n", dev->name, tx_cpu_owner_idx0);
17529 +#ifdef CONFIG_PSEUDO_SUPPORT
17530 +               if (gmac_no == 2) {
17531 +                       if (ei_local->PseudoDev != NULL) {
17532 +                               pAd = netdev_priv(ei_local->PseudoDev);
17533 +                               pAd->stat.tx_errors++;
17534 +                       }
17535 +               } else
17536 +#endif
17537 +                       ei_local->stat.tx_errors++;
17538 +       }
17539 +       sysRegWrite(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0));
17540 +
17541 +#ifdef CONFIG_PSEUDO_SUPPORT
17542 +       if (gmac_no == 2) {
17543 +               if (ei_local->PseudoDev != NULL) {
17544 +                       pAd = netdev_priv(ei_local->PseudoDev);
17545 +                       pAd->stat.tx_packets++;
17546 +                       pAd->stat.tx_bytes += length;
17547 +               }
17548 +       } else
17549 +#endif
17550 +       {
17551 +               ei_local->stat.tx_packets++;
17552 +               ei_local->stat.tx_bytes += length;
17553 +       }
17554 +#ifdef CONFIG_RAETH_NAPI
17555 +       if ( ei_local->tx_full == 1) {
17556 +               ei_local->tx_full = 0;
17557 +               netif_wake_queue(dev);
17558 +       }
17559 +#endif
17560 +
17561 +       return length;
17562 +}
17563 +
17564 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
17565 +{
17566 +       END_DEVICE *ei_local = netdev_priv(dev);
17567 +       unsigned long flags;
17568 +       unsigned long tx_cpu_owner_idx;
17569 +       unsigned int tx_cpu_owner_idx_next;
17570 +       unsigned int num_of_txd = 0;
17571 +#if defined (CONFIG_RAETH_TSO)
17572 +       unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
17573 +       struct skb_frag_struct *frag;
17574 +#endif
17575 +#if    !defined(CONFIG_RAETH_QOS)
17576 +       unsigned int tx_cpu_owner_idx_next2;
17577 +#else
17578 +       int ring_no, queue_no, port_no;
17579 +#endif
17580 +#ifdef CONFIG_RALINK_VISTA_BASIC
17581 +       struct vlan_ethhdr *veth;
17582 +#endif
17583 +#ifdef CONFIG_PSEUDO_SUPPORT
17584 +       PSEUDO_ADAPTER *pAd;
17585 +#endif
17586 +
17587 +#if !defined(CONFIG_RA_NAT_NONE)
17588 +       if(ra_sw_nat_hook_tx!= NULL)
17589 +       {
17590 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17591 +           if(FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE)
17592 +#endif
17593 +           {
17594 +               //spin_lock_irqsave(&ei_local->page_lock, flags);
17595 +               if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
17596 +                   //spin_unlock_irqrestore(&ei_local->page_lock, flags);
17597 +               }else{
17598 +                   kfree_skb(skb);
17599 +                   //spin_unlock_irqrestore(&ei_local->page_lock, flags);
17600 +                   return 0;
17601 +               }
17602 +           }
17603 +       }
17604 +#endif
17605 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
17606 +               /* Qwert+
17607 +                */
17608 +               if(ra_classifier_hook_tx!= NULL)
17609 +               {
17610 +#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
17611 +                       ra_classifier_hook_tx(skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
17612 +#else                  
17613 +                       ra_classifier_hook_tx(skb, read_c0_count());
17614 +#endif                 
17615 +               }
17616 +#endif /* CONFIG_RA_CLASSIFIER */
17617 +
17618 +#if defined (CONFIG_RALINK_RT3052_MP2)
17619 +       mcast_tx(skb);
17620 +#endif
17621 +
17622 +#if !defined (CONFIG_RALINK_RT6855) && !defined (CONFIG_RALINK_RT6855A) && \
17623 +    !defined(CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
17624 +
17625 +#define MIN_PKT_LEN  60
17626 +        if (skb->len < MIN_PKT_LEN) {
17627 +            if (skb_padto(skb, MIN_PKT_LEN)) {
17628 +                printk("raeth: skb_padto failed\n");
17629 +                return 0;
17630 +            }
17631 +            skb_put(skb, MIN_PKT_LEN - skb->len);
17632 +        }
17633 +#endif
17634 +
17635 +       dev->trans_start = jiffies;     /* save the timestamp */
17636 +       spin_lock_irqsave(&ei_local->page_lock, flags);
17637 +#if defined (CONFIG_MIPS)
17638 +       dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
17639 +#else
17640 +       dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
17641 +
17642 +#endif
17643 +
17644 +#ifdef CONFIG_RALINK_VISTA_BASIC
17645 +       veth = (struct vlan_ethhdr *)(skb->data);
17646 +       if (is_switch_175c && veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
17647 +               if ((veth->h_vlan_TCI & __constant_htons(VLAN_VID_MASK)) == 0) {
17648 +                       veth->h_vlan_TCI |= htons(VLAN_DEV_INFO(dev)->vlan_id);
17649 +               }
17650 +       }
17651 +#endif
17652 +
17653 +#if defined (CONFIG_RAETH_QOS)
17654 +       if(pkt_classifier(skb, gmac_no, &ring_no, &queue_no, &port_no)) {
17655 +               get_tx_ctx_idx(ring_no, &tx_cpu_owner_idx);
17656 +               tx_cpu_owner_idx_next = (tx_cpu_owner_idx + 1) % NUM_TX_DESC;
17657 +         if(((ei_local->skb_free[ring_no][tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[ring_no][tx_cpu_owner_idx_next]==0)){
17658 +           fe_qos_packet_send(dev, skb, ring_no, queue_no, port_no);
17659 +         }else{
17660 +           ei_local->stat.tx_dropped++;
17661 +           kfree_skb(skb);
17662 +           spin_unlock_irqrestore(&ei_local->page_lock, flags);
17663 +           return 0;
17664 +         }
17665 +       }
17666 +#else
17667 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17668 +       tx_cpu_owner_idx = tx_cpu_owner_idx0;
17669 +#else
17670 +       tx_cpu_owner_idx = sysRegRead(TX_CTX_IDX0);
17671 +#endif
17672 +#if defined (CONFIG_RAETH_TSO)
17673 +//     num_of_txd = (nr_frags==0) ? 1 : ((nr_frags>>1) + 1);
17674 +//     NumOfTxdUpdate(num_of_txd);
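+       /* Each PDMA TX descriptor carries two segment pointers (SDP0/SDP1), so the fragments are packed two per descriptor, plus one descriptor for the linear part of the skb. */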
17675 +       if(nr_frags != 0){
17676 +               for(i=0;i<nr_frags;i++) {
17677 +                       frag = &skb_shinfo(skb)->frags[i];
17678 +                       num_of_txd  += cal_frag_txd_num(frag->size);
17679 +               }
17680 +               num_of_txd = (num_of_txd >> 1) + 1;
17681 +       }else
17682 +               num_of_txd = 1;
17683 +
17684 +#else
17685 +       num_of_txd = 1;
17686 +#endif
17687 +       tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % NUM_TX_DESC;
17688 +
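+       /* Send only if both the first slot and the slot just past the last descriptor this packet needs are free in the software skb ring; otherwise the packet is dropped below. */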
17689 +       if(((ei_local->skb_free[tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[tx_cpu_owner_idx_next]==0)){
17690 +               rt2880_eth_send(dev, skb, gmac_no);
17691 +
17692 +               tx_cpu_owner_idx_next2 = (tx_cpu_owner_idx_next + 1) % NUM_TX_DESC;
17693 +
17694 +               if(ei_local->skb_free[tx_cpu_owner_idx_next2]!=0){
17695 +#if defined (CONFIG_RAETH_SW_FC)                   
17696 +                               netif_stop_queue(dev);
17697 +#ifdef CONFIG_PSEUDO_SUPPORT
17698 +                               netif_stop_queue(ei_local->PseudoDev);
17699 +#endif
17700 +                               tx_ring_full=1;
17701 +#endif
17702 +               }
17703 +       }else {
17704 +#ifdef CONFIG_PSEUDO_SUPPORT
17705 +               if (gmac_no == 2) {
17706 +                       if (ei_local->PseudoDev != NULL) {
17707 +                               pAd = netdev_priv(ei_local->PseudoDev);
17708 +                               pAd->stat.tx_dropped++;
17709 +                       }
17710 +               } else
17711 +#endif
17712 +                       ei_local->stat.tx_dropped++;
17713 +#if defined (CONFIG_RAETH_SW_FC)                   
17714 +               printk("tx_ring_full, drop packet\n");
17715 +#endif
17716 +               kfree_skb(skb);
17717 +               spin_unlock_irqrestore(&ei_local->page_lock, flags);
17718 +               return 0;
17719 +       }
17720 +
17721 +#if defined (CONFIG_RAETH_TSO)
17722 +       /* SG: use multiple TXD to send the packet (only have one skb) */
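+       /* Store the skb pointer only in the slot of the last descriptor used; the earlier slots get a magic marker so housekeeping frees the skb exactly once. */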
17723 +       ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd - 1) % NUM_TX_DESC] = skb;
17724 +       while(--num_of_txd) {
17725 +               ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd -1) % NUM_TX_DESC] = (struct  sk_buff *)0xFFFFFFFF; //MAGIC ID
17726 +       }
17727 +#else
17728 +       ei_local->skb_free[tx_cpu_owner_idx] = skb;
17729 +#endif
17730 +#endif
17731 +       spin_unlock_irqrestore(&ei_local->page_lock, flags);
17732 +       return 0;
17733 +}
17734 +
17735 +void ei_xmit_housekeeping(unsigned long unused)
17736 +{
17737 +    struct net_device *dev = dev_raether;
17738 +    END_DEVICE *ei_local = netdev_priv(dev);
17739 +    struct PDMA_txdesc *tx_desc;
17740 +    unsigned long skb_free_idx;
17741 +    unsigned long tx_dtx_idx __maybe_unused;
17742 +#ifndef CONFIG_RAETH_NAPI
17743 +    unsigned long reg_int_mask=0;
17744 +#endif
17745 +
17746 +#ifdef CONFIG_RAETH_QOS
17747 +    int i;
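+    /* Walk every TX ring and free each skb whose descriptor the DMA engine has completed (DDONE set), advancing that ring's software free index. */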
17748 +    for (i=0;i<NUM_TX_RINGS;i++){
17749 +        skb_free_idx = ei_local->free_idx[i];
17750 +       if((ei_local->skb_free[i][skb_free_idx])==0){
17751 +               continue;
17752 +       }
17753 +
17754 +       get_tx_desc_and_dtx_idx(ei_local, i, &tx_dtx_idx, &tx_desc);
17755 +
17756 +       while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[i][skb_free_idx])!=0 ){
17757 +               dev_kfree_skb_any((ei_local->skb_free[i][skb_free_idx]));
17758 +
17759 +           ei_local->skb_free[i][skb_free_idx]=0;
17760 +           skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
17761 +       }
17762 +       ei_local->free_idx[i] = skb_free_idx;
17763 +    }
17764 +#else
17765 +       tx_dtx_idx = sysRegRead(TX_DTX_IDX0);
17766 +       tx_desc = ei_local->tx_ring0;
17767 +       skb_free_idx = ei_local->free_idx;
17768 +       if ((ei_local->skb_free[skb_free_idx]) != 0 && tx_desc[skb_free_idx].txd_info2.DDONE_bit==1) {
17769 +               while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[skb_free_idx])!=0 ){
17770 +#if defined (CONFIG_RAETH_TSO)
17771 +                       if(ei_local->skb_free[skb_free_idx]!=(struct sk_buff *)0xFFFFFFFF) {
17772 +                               dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
17773 +                       }
17774 +#else
17775 +                       dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
17776 +#endif
17777 +                       ei_local->skb_free[skb_free_idx]=0;
17778 +                       skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
17779 +               }
17780 +
17781 +               netif_wake_queue(dev);
17782 +#ifdef CONFIG_PSEUDO_SUPPORT
17783 +               netif_wake_queue(ei_local->PseudoDev);
17784 +#endif
17785 +               tx_ring_full=0;
17786 +               ei_local->free_idx = skb_free_idx;
17787 +       }  /* if skb_free != 0 */
17788 +#endif
17789 +
17790 +#ifndef CONFIG_RAETH_NAPI
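+    /* Re-enable the TX completion interrupts (presumably masked by the interrupt handler before this housekeeping tasklet was scheduled). */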
17791 +    reg_int_mask=sysRegRead(FE_INT_ENABLE);
17792 +#if defined (DELAY_INT)
17793 +    sysRegWrite(FE_INT_ENABLE, reg_int_mask| TX_DLY_INT);
17794 +#else
17795 +
17796 +    sysRegWrite(FE_INT_ENABLE, reg_int_mask | TX_DONE_INT0 \
17797 +                                           | TX_DONE_INT1 \
17798 +                                           | TX_DONE_INT2 \
17799 +                                           | TX_DONE_INT3);
17800 +#endif
17801 +#endif //CONFIG_RAETH_NAPI//
17802 +}
17803 +
17804 +
17805 +
17806 +EXPORT_SYMBOL(ei_start_xmit);
17807 +EXPORT_SYMBOL(ei_xmit_housekeeping);
17808 +EXPORT_SYMBOL(fe_dma_init);
17809 +EXPORT_SYMBOL(rt2880_eth_send);
17810 --- /dev/null
17811 +++ b/drivers/net/ethernet/raeth/raether_qdma.c
17812 @@ -0,0 +1,1407 @@
17813 +#include <linux/module.h>
17814 +#include <linux/version.h>
17815 +#include <linux/kernel.h>
17816 +#include <linux/types.h>
17817 +#include <linux/pci.h>
17818 +#include <linux/init.h>
17819 +#include <linux/skbuff.h>
17820 +#include <linux/if_vlan.h>
17821 +#include <linux/if_ether.h>
17822 +#include <linux/fs.h>
17823 +#include <asm/uaccess.h>
17824 +#include <asm/rt2880/surfboardint.h>
17825 +#if defined (CONFIG_RAETH_TSO)
17826 +#include <linux/tcp.h>
17827 +#include <net/ipv6.h>
17828 +#include <linux/ip.h>
17829 +#include <net/ip.h>
17830 +#include <net/tcp.h>
17831 +#include <linux/in.h>
17832 +#include <linux/ppp_defs.h>
17833 +#include <linux/if_pppox.h>
17834 +#endif
17835 +#include <linux/delay.h>
17836 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
17837 +#include <linux/sched.h>
17838 +#endif
17839 +#if defined (CONFIG_HW_SFQ)
17840 +#include <linux/if_vlan.h>
17841 +#include <net/ipv6.h>
17842 +#include <net/ip.h>
17843 +#include <linux/if_pppox.h>
17844 +#include <linux/ppp_defs.h>
17845 +#endif
17846 +
17847 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
17848 +#include <asm/rt2880/rt_mmap.h>
17849 +#else
17850 +#include <linux/libata-compat.h>
17851 +#endif
17852 +
17853 +#include "ra2882ethreg.h"
17854 +#include "raether.h"
17855 +#include "ra_mac.h"
17856 +#include "ra_ioctl.h"
17857 +#include "ra_rfrw.h"
17858 +#ifdef CONFIG_RAETH_NETLINK
17859 +#include "ra_netlink.h"
17860 +#endif
17861 +#if defined (CONFIG_RAETH_QOS)
17862 +#include "ra_qos.h"
17863 +#endif
17864 +
17865 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
17866 +#include "../../../net/nat/hw_nat/ra_nat.h"
17867 +#endif
17868 +
17869 +
17870 +#if !defined(CONFIG_RA_NAT_NONE)
17871 +/* bruce+
17872 + */
17873 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
17874 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
17875 +#endif
17876 +
17877 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
17878 +/* Qwert+
17879 + */
17880 +#include <asm/mipsregs.h>
17881 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
17882 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
17883 +#endif /* CONFIG_RA_CLASSIFIER */
17884 +
17885 +#if defined (CONFIG_RALINK_RT3052_MP2)
17886 +int32_t mcast_rx(struct sk_buff * skb);
17887 +int32_t mcast_tx(struct sk_buff * skb);
17888 +#endif
17889 +
17890 +#ifdef RA_MTD_RW_BY_NUM
17891 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
17892 +#else
17893 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
17894 +#endif
17895 +
17896 +/* gmac driver feature set config */
17897 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
17898 +#undef DELAY_INT
17899 +#else
17900 +#if defined     (CONFIG_ARCH_MT7623)
17901 +#undef DELAY_INT
17902 +#else
17903 +#define DELAY_INT       1
17904 +#endif
17905 +#endif
17906 +
17907 +//#define CONFIG_UNH_TEST
17908 +/* end of config */
17909 +
17910 +#if defined (CONFIG_RAETH_JUMBOFRAME)
17911 +#define        MAX_RX_LENGTH   4096
17912 +#else
17913 +#define        MAX_RX_LENGTH   1536
17914 +#endif
17915 +
17916 +extern struct net_device               *dev_raether;
17917 +
17918 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
17919 +static int rx_dma_owner_idx1;
17920 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17921 +static int rx_calc_idx1;
17922 +#endif
17923 +#endif
17924 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
17925 +static int rx_calc_idx0;
17926 +static unsigned long tx_cpu_owner_idx0=0;
17927 +#endif
17928 +extern unsigned long tx_ring_full;
17929 +
17930 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
17931 +#include "ra_ethtool.h"
17932 +extern struct ethtool_ops      ra_ethtool_ops;
17933 +#ifdef CONFIG_PSEUDO_SUPPORT
17934 +extern struct ethtool_ops      ra_virt_ethtool_ops;
17935 +#endif // CONFIG_PSEUDO_SUPPORT //
17936 +#endif // (CONFIG_ETHTOOL //
17937 +
17938 +#ifdef CONFIG_RALINK_VISTA_BASIC
17939 +int is_switch_175c = 1;
17940 +#endif
17941 +
17942 +//skb->mark to queue mapping table
17943 +extern unsigned int M2Q_table[64];
17944 +struct QDMA_txdesc *free_head = NULL;
17945 +extern unsigned int lan_wan_separate;
17946 +#if defined (CONFIG_HW_SFQ)
17947 +extern unsigned int web_sfq_enable;
17948 +#define HwSfqQUp 3
17949 +#define HwSfqQDl 1
17950 +#endif
17951 +int dbg =0;//debug used
17952 +#if defined (CONFIG_HW_SFQ)
17953 +struct SFQ_table *sfq0;
17954 +struct SFQ_table *sfq1;
17955 +struct SFQ_table *sfq2;
17956 +struct SFQ_table *sfq3;
17957 +#endif
17958 +
17959 +#define KSEG1                   0xa0000000
17960 +#if defined (CONFIG_MIPS)
17961 +#define PHYS_TO_VIRT(x)         ((void *)((x) | KSEG1))
17962 +#define VIRT_TO_PHYS(x)         ((unsigned long)(x) & ~KSEG1)
17963 +#else
17964 +#define PHYS_TO_VIRT(x)         phys_to_virt(x)
17965 +#define VIRT_TO_PHYS(x)         virt_to_phys(x)
17966 +#endif
17967 +
17968 +extern void set_fe_dma_glo_cfg(void);
17969 +
17970 +#if defined (CONFIG_HW_SFQ)
17971 +ParseResult            SfqParseResult;
17972 +#endif
17973 +
17974 +/**
17975 + *
17976 + * @brief: get the TXD index from its address
17977 + *
17978 + * @param: cpu_ptr
17979 + *
17980 + * @return: TXD index
17981 +*/
17982 +
17983 +static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
17984 +{
17985 +       struct net_device *dev = dev_raether;
17986 +       END_DEVICE *ei_local = netdev_priv(dev);
17987 +       int ctx_offset;
17988 +       //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
17989 +       //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
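+       /* *cpu_ptr holds a physical descriptor address; mask off the top byte of it and of the pool base, then divide by the descriptor size to get the pool index. */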
17990 +       ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
17991 +
17992 +       return ctx_offset;
17993 +} 
17994 +
17995 +
17996 +
17997 +
17998 +/**
17999 + * @brief calculate the number of TXDs needed for a buffer of the given size
18000 + *
18001 + * @param size
18002 + *
18003 + * @return frag_txd_num
18004 + */
18005 +
18006 +unsigned int cal_frag_txd_num(unsigned int size)
18007 +{
18008 +       unsigned int frag_txd_num = 0;
18009 +       if(size == 0)
18010 +               return 0;
18011 +       while(size > 0){
18012 +               if(size > MAX_TXD_LEN){
18013 +                       frag_txd_num++;
18014 +                       size -= MAX_TXD_LEN;
18015 +               }else{
18016 +                       frag_txd_num++;
18017 +                       size = 0;
18018 +               }
18019 +       }
18020 +        return frag_txd_num;
18021 +
18022 +}
18023 +
18024 +/**
18025 + * @brief get free TXD from TXD queue
18026 + *
18027 + * @param free_txd
18028 + *
18029 + * @return 
18030 + */
18031 +static int get_free_txd(struct QDMA_txdesc **free_txd)
18032 +{
18033 +       struct net_device *dev = dev_raether;
18034 +       END_DEVICE *ei_local = netdev_priv(dev);
18035 +       unsigned int tmp_idx;
18036 +
18037 +       if(ei_local->free_txd_num > 0){
18038 +               tmp_idx = ei_local->free_txd_head;
18039 +               ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
18040 +               ei_local->free_txd_num -= 1;
18041 +               //*free_txd = &ei_local->txd_pool[tmp_idx];
18042 +               *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
18043 +               return tmp_idx;
18044 +       }else
18045 +               return NUM_TX_DESC;     
18046 +}
18047 +
18048 +
18049 +/**
18050 + * @brief add free TXD into TXD queue
18051 + *
18052 + * @param free_txd
18053 + *
18054 + * @return 
18055 + */
18056 +int put_free_txd(int free_txd_idx)
18057 +{
18058 +       struct net_device *dev = dev_raether;
18059 +       END_DEVICE *ei_local = netdev_priv(dev);
18060 +       ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
18061 +       ei_local->free_txd_tail = free_txd_idx;
18062 +       ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
18063 +        ei_local->free_txd_num += 1;
18064 +       return 1;
18065 +}
18066 +
18067 +/*define qdma initial alloc*/
18068 +/**
18069 + * @brief 
18070 + *
18071 + * @param net_dev
18072 + *
18073 + * @return  0: fail
18074 + *         1: success
18075 + */
18076 +bool qdma_tx_desc_alloc(void)
18077 +{
18078 +       struct net_device *dev = dev_raether;
18079 +       END_DEVICE *ei_local = netdev_priv(dev);
18080 +       struct QDMA_txdesc *free_txd = NULL;
18081 +       unsigned int txd_idx;
18082 +       int i = 0;
18083 +
18084 +
18085 +       ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
18086 +       printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
18087 +
18088 +       if (ei_local->txd_pool == NULL) {
18089 +               printk("adapter->txd_pool allocation failed!\n");
18090 +               return 0;
18091 +       }
18092 +       printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
18093 +       /* build the free-TXD chain: entry i points to the next free index; mark every TXD as last-segment and HW-owned */
18094 +       for ( i = 0; i < NUM_TX_DESC; i++)
18095 +       {
18096 +               ei_local->skb_free[i]= 0;
18097 +               ei_local->txd_pool_info[i] = i + 1;
18098 +               ei_local->txd_pool[i].txd_info3.LS_bit = 1;
18099 +               ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
18100 +       }
18101 +
18102 +       ei_local->free_txd_head = 0;
18103 +       ei_local->free_txd_tail = NUM_TX_DESC - 1;
18104 +       ei_local->free_txd_num = NUM_TX_DESC;
18105 +       
18106 +
18107 +       //get free txd from txd pool
18108 +       txd_idx = get_free_txd(&free_txd);
18109 +       if( txd_idx == NUM_TX_DESC) {
18110 +               printk("get_free_txd fail\n");
18111 +               return 0;
18112 +       }
18113 +       
18114 +       //add null TXD for transmit
18115 +       //ei_local->tx_dma_ptr = VIRT_TO_PHYS(free_txd);
18116 +       //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18117 +       ei_local->tx_dma_ptr = free_txd;
18118 +       ei_local->tx_cpu_ptr = free_txd;
18119 +       sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
18120 +       sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
18121 +       
18122 +       //get free txd from txd pool
18123 +
18124 +       txd_idx = get_free_txd(&free_txd);
18125 +       if( txd_idx == NUM_TX_DESC) {
18126 +               printk("get_free_txd fail\n");
18127 +               return 0;
18128 +       }
18129 +       // add null TXD for release
18130 +       //sysRegWrite(QTX_CRX_PTR, VIRT_TO_PHYS(free_txd));
18131 +       //sysRegWrite(QTX_DRX_PTR, VIRT_TO_PHYS(free_txd));
18132 +       sysRegWrite(QTX_CRX_PTR, free_txd);
18133 +       sysRegWrite(QTX_DRX_PTR, free_txd);
18134 +       printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
18135 +       
18136 +       printk(" POOL  HEAD_PTR | DMA_PTR | CPU_PTR \n");
18137 +       printk("----------------+---------+--------\n");
18138 +       printk("     0x%p 0x%08X 0x%08X\n",ei_local->txd_pool, ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
18139 +       return 1;
18140 +}
18141 +#if defined (CONFIG_HW_SFQ)
18142 +bool sfq_init(void)
18143 +{
18144 +       unsigned int regVal;
18145 +       
18146 +       unsigned int sfq_phy0;
18147 +       unsigned int sfq_phy1;
18148 +       unsigned int sfq_phy2;
18149 +       unsigned int sfq_phy3;  
18150 +  struct SFQ_table *sfq0;
18151 +       struct SFQ_table *sfq1;
18152 +       struct SFQ_table *sfq2;
18153 +       struct SFQ_table *sfq3;
18154 +       int i = 0;
18155 +       regVal = sysRegRead(VQTX_GLO);
18156 +       regVal = regVal | VQTX_MIB_EN |(1<<16) ;
18157 +       sysRegWrite(VQTX_GLO, regVal);// Virtual table extends to 32bytes
18158 +       regVal = sysRegRead(VQTX_GLO);
18159 +       sysRegWrite(VQTX_NUM, (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) | (VQTX_NUM_3));
18160 +       sysRegWrite(VQTX_HASH_CFG, 0xF002710); /* change the hash algorithm every 10 s */
18161 +  sysRegWrite(VQTX_VLD_CFG, 0x00);
18162 +       sysRegWrite(VQTX_HASH_SD, 0x0D);
18163 +       sysRegWrite(QDMA_FC_THRES, 0x9b9b4444);
18164 +       sysRegWrite(QDMA_HRED1, 0);
18165 +       sysRegWrite(QDMA_HRED2, 0);
18166 +       sysRegWrite(QDMA_SRED1, 0);
18167 +       sysRegWrite(QDMA_SRED2, 0);
18168 +       sfq0 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy0);
18169 +       memset(sfq0, 0x0, 256*sizeof(struct SFQ_table) );
18170 +       for (i=0; i < 256; i++) {
18171 +                       sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
18172 +      sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
18173 +       }
18174 +#if(1)
18175 +       sfq1 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy1);
18176 +
18177 +       memset(sfq1, 0x0, 256*sizeof(struct SFQ_table) );
18178 +       for (i=0; i < 256; i++) {
18179 +                       sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
18180 +      sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
18181 +       }
18182 +       
18183 +       sfq2 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy2);
18184 +       memset(sfq2, 0x0, 256*sizeof(struct SFQ_table) );
18185 +       for (i=0; i < 256; i++) {
18186 +                       sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
18187 +      sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
18188 +       }
18189 +
18190 +       sfq3 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy3);
18191 +       memset(sfq3, 0x0, 256*sizeof(struct SFQ_table) );
18192 +       for (i=0; i < 256; i++) {
18193 +                       sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
18194 +      sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
18195 +       }
18196 +
18197 +#endif
18198 +               printk("*****sfq_phy0 is 0x%x!!!*******\n", sfq_phy0);
18199 +               printk("*****sfq_phy1 is 0x%x!!!*******\n", sfq_phy1);
18200 +               printk("*****sfq_phy2 is 0x%x!!!*******\n", sfq_phy2);
18201 +               printk("*****sfq_phy3 is 0x%x!!!*******\n", sfq_phy3);
18202 +               printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
18203 +               printk("*****sfq_virt1 is 0x%x!!!*******\n", sfq1);
18204 +               printk("*****sfq_virt2 is 0x%x!!!*******\n", sfq2);
18205 +               printk("*****sfq_virt3 is 0x%x!!!*******\n", sfq3);
18206 +               printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
18207 +               sysRegWrite(VQTX_TB_BASE0, (u32)sfq_phy0);
18208 +               sysRegWrite(VQTX_TB_BASE1, (u32)sfq_phy1);
18209 +               sysRegWrite(VQTX_TB_BASE2, (u32)sfq_phy2);
18210 +               sysRegWrite(VQTX_TB_BASE3, (u32)sfq_phy3);
18211 +
18212 +        return 1;
18213 +}
18214 +#endif
18215 +bool fq_qdma_init(struct net_device *dev)
18216 +{
18217 +       END_DEVICE* ei_local = netdev_priv(dev);
18218 +       //struct QDMA_txdesc *free_head = NULL;
18219 +       unsigned int phy_free_head;
18220 +       unsigned int phy_free_tail;
18221 +       unsigned int *free_page_head = NULL;
18222 +       unsigned int phy_free_page_head;
18223 +       int i;
18224 +    
18225 +       free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
18226 +       if (unlikely(free_head == NULL)){
18227 +               printk(KERN_ERR "QDMA FQ descriptor not available...\n");
18228 +               return 0;
18229 +       }
18230 +       memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
18231 +
18232 +       free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
18233 +       if (unlikely(free_page_head == NULL)){
18234 +               printk(KERN_ERR "QDMA FQ page not available...\n");
18235 +               return 0;
18236 +       }       
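+       /* Chain the forward-queue descriptors into a linked list: each TXD points at one QDMA_PAGE_SIZE buffer via SDP and (except for the last) at the next TXD via NDP. */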
18237 +       for (i=0; i < NUM_QDMA_PAGE; i++) {
18238 +               free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
18239 +               if(i < (NUM_QDMA_PAGE-1)){
18240 +                       free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
18241 +
18242 +
18243 +#if 0
18244 +                       printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
18245 +                       printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
18246 +                       printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
18247 +                       printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
18248 +#endif
18249 +               }
18250 +               free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
18251 +
18252 +       }
18253 +       phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
18254 +
18255 +       printk("phy_free_head is 0x%x!!!\n", phy_free_head);
18256 +       printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
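+       /* Program the free queue into the QDMA engine: head/tail descriptor addresses, descriptor/page counts and the per-page buffer length. */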
18257 +       sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
18258 +       sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
18259 +       sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
18260 +       sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
18261 +
18262 +       ei_local->free_head = free_head;
18263 +       ei_local->phy_free_head = phy_free_head;
18264 +       ei_local->free_page_head = free_page_head;
18265 +       ei_local->phy_free_page_head = phy_free_page_head;
18266 +    return 1;
18267 +}
18268 +
18269 +int fe_dma_init(struct net_device *dev)
18270 +{
18271 +
18272 +       int i;
18273 +       unsigned int    regVal;
18274 +       END_DEVICE* ei_local = netdev_priv(dev);
18275 +       
18276 +       
18277 +#if defined (CONFIG_HW_SFQ)
18278 +       sfq_init();
18279 +#endif
18280 +       fq_qdma_init(dev);
18281 +
18282 +       while(1)
18283 +       {
18284 +               regVal = sysRegRead(QDMA_GLO_CFG);
18285 +               if((regVal & RX_DMA_BUSY))
18286 +               {
18287 +                       printk("\n  RX_DMA_BUSY !!! ");
18288 +                       continue;
18289 +               }
18290 +               if((regVal & TX_DMA_BUSY))
18291 +               {
18292 +                       printk("\n  TX_DMA_BUSY !!! ");
18293 +                       continue;
18294 +               }
18295 +               break;
18296 +       }
18297 +       /*tx desc alloc, add a NULL TXD to HW*/
18298 +
18299 +       qdma_tx_desc_alloc();
18300 +
18301 +       /* Initial RX Ring 0*/
18302 +       
18303 +#ifdef CONFIG_32B_DESC
18304 +       ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
18305 +       ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
18306 +#else
18307 +       ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
18308 +#endif
18309 +       for (i = 0; i < NUM_QRX_DESC; i++) {
18310 +               memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
18311 +               ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
18312 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
18313 +               ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
18314 +               ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
18315 +#else
18316 +               ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
18317 +#endif
18318 +               ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
18319 +       }
18320 +       printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
18321 +
18322 +       regVal = sysRegRead(QDMA_GLO_CFG);
18323 +       regVal &= 0x000000FF;
18324 +
18325 +       sysRegWrite(QDMA_GLO_CFG, regVal);
18326 +       regVal=sysRegRead(QDMA_GLO_CFG);
18327 +
18328 +       /* Tell the adapter where the TX/RX rings are located. */
18329 +
18330 +       sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
18331 +       sysRegWrite(QRX_MAX_CNT_0,  cpu_to_le32((u32) NUM_QRX_DESC));
18332 +       sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
18333 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18334 +       rx_calc_idx0 = rx_dma_owner_idx0 =  sysRegRead(QRX_CRX_IDX_0);
18335 +#endif
18336 +       sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
18337 +
18338 +        ei_local->rx_ring0 = ei_local->qrx_ring;
18339 +#if !defined (CONFIG_RAETH_QDMATX_QDMARX)      
18340 +       /* Initial PDMA RX Ring 0*/
18341 +#ifdef CONFIG_32B_DESC
18342 +        ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
18343 +        ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
18344 +#else
18345 +        ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
18346 +#endif
18347 +        for (i = 0; i < NUM_RX_DESC; i++) {
18348 +               memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
18349 +               ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
18350 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
18351 +       ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
18352 +       ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
18353 +#else
18354 +       ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
18355 +#endif
18356 +       ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
18357 +                                                               }
18358 +        printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
18359 +
18360 +        regVal = sysRegRead(PDMA_GLO_CFG);
18361 +        regVal &= 0x000000FF;
18362 +        sysRegWrite(PDMA_GLO_CFG, regVal);
18363 +        regVal=sysRegRead(PDMA_GLO_CFG);
18364 +
18365 +        sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
18366 +        sysRegWrite(RX_MAX_CNT0,  cpu_to_le32((u32) NUM_RX_DESC));
18367 +        sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
18368 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
18369 +        rx_calc_idx0 =  sysRegRead(RX_CALC_IDX0);
18370 +#endif
18371 +        sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
18372 +#endif 
18373 +#if !defined (CONFIG_HW_SFQ)
18374 +        /* Enable random early drop and set drop threshold automatically */
18375 +       sysRegWrite(QDMA_FC_THRES, 0x174444);
18376 +#endif
18377 +       sysRegWrite(QDMA_HRED2, 0x0);
18378 +       set_fe_dma_glo_cfg();
18379 +#if defined    (CONFIG_ARCH_MT7623)
18380 +       printk("Enable QDMA TX NDP coherence check and re-read mechanism\n");
18381 +       regVal=sysRegRead(QDMA_GLO_CFG);
18382 +       regVal = regVal | 0x400;
18383 +       sysRegWrite(QDMA_GLO_CFG, regVal);
18384 +       printk("***********QDMA_GLO_CFG=%x\n", sysRegRead(QDMA_GLO_CFG));
18385 +#endif 
18386 +
18387 +       return 1;
18388 +}
18389 +
18390 +#if defined (CONFIG_HW_SFQ)
18391 +
18392 +int sfq_prot = 0;
18393 +int proto_id=0;
18394 +int udp_source_port=0;
18395 +int tcp_source_port=0;
18396 +int ack_packt =0;
18397 +int SfqParseLayerInfo(struct sk_buff * skb)
18398 +{
18399 +
18400 +       struct vlan_hdr *vh_sfq = NULL;
18401 +       struct ethhdr *eth_sfq = NULL;
18402 +       struct iphdr *iph_sfq = NULL;
18403 +       struct ipv6hdr *ip6h_sfq = NULL;
18404 +       struct tcphdr *th_sfq = NULL;
18405 +       struct udphdr *uh_sfq = NULL;
18406 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18407 +       struct vlan_hdr pseudo_vhdr_sfq;
18408 +#endif
18409 +       
18410 +       memset(&SfqParseResult, 0, sizeof(SfqParseResult));
18411 +
18412 +       eth_sfq = (struct ethhdr *)skb->data;
18413 +       memcpy(SfqParseResult.dmac, eth_sfq->h_dest, ETH_ALEN);
18414 +       memcpy(SfqParseResult.smac, eth_sfq->h_source, ETH_ALEN);
18415 +       SfqParseResult.eth_type = eth_sfq->h_proto;
18416 +       
18417 +       
18418 +       if (SfqParseResult.eth_type == htons(ETH_P_8021Q)){
18419 +               SfqParseResult.vlan1_gap = VLAN_HLEN;
18420 +               vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18421 +               SfqParseResult.eth_type = vh_sfq->h_vlan_encapsulated_proto;
18422 +       }else{
18423 +               SfqParseResult.vlan1_gap = 0;
18424 +       }
18425 +               
18426 +       
18427 +       
18428 +       LAYER2_HEADER(skb) = skb->data;
18429 +  LAYER3_HEADER(skb) = (skb->data + ETH_HLEN + (SfqParseResult.vlan1_gap));
18430 +  
18431 +
18432 +  
18433 +       /* set layer4 start addr */
18434 +       if ((SfqParseResult.eth_type == htons(ETH_P_IP)) || (SfqParseResult.eth_type == htons(ETH_P_PPP_SES) 
18435 +               && SfqParseResult.ppp_tag == htons(PPP_IP))) {
18436 +               iph_sfq = (struct iphdr *)LAYER3_HEADER(skb);
18437 +
18438 +               //prepare layer3/layer4 info
18439 +               memcpy(&SfqParseResult.iph, iph_sfq, sizeof(struct iphdr));
18440 +               if (iph_sfq->protocol == IPPROTO_TCP) {
18441 +
18442 +                       LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + (iph_sfq->ihl * 4));
18443 +                       th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
18444 +                       memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
18445 +                       SfqParseResult.pkt_type = IPV4_HNAPT;
18446 +                       //printk("tcp parsing\n");
18447 +                       tcp_source_port = ntohs(SfqParseResult.th.source);
18448 +                       udp_source_port = 0;
18449 +                       #if(0) //for TCP ack, test use  
18450 +                               if(ntohl(SfqParseResult.iph.saddr) == 0xa0a0a04){ // tcp ack packet 
18451 +                                       ack_packt = 1;
18452 +                               }else { 
18453 +                                       ack_packt = 0;
18454 +                               }
18455 +                       #endif
18456 +      sfq_prot = 2;//IPV4_HNAPT
18457 +      proto_id = 1;//TCP
18458 +                       if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
18459 +                               //return 1;
18460 +                       }
18461 +               } else if (iph_sfq->protocol == IPPROTO_UDP) {
18462 +                       LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + iph_sfq->ihl * 4);
18463 +                       uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
18464 +                       memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
18465 +                       SfqParseResult.pkt_type = IPV4_HNAPT;
18466 +                       udp_source_port = ntohs(SfqParseResult.uh.source);
18467 +                       tcp_source_port = 0;
18468 +                       ack_packt = 0;
18469 +                       sfq_prot = 2;//IPV4_HNAPT
18470 +                       proto_id =2;//UDP
18471 +                       if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
18472 +                               return 1;
18473 +                       }
18474 +               }else{
18475 +                       sfq_prot = 1;
18476 +               }
18477 +       }else if (SfqParseResult.eth_type == htons(ETH_P_IPV6) || 
18478 +                       (SfqParseResult.eth_type == htons(ETH_P_PPP_SES) &&
18479 +                       SfqParseResult.ppp_tag == htons(PPP_IPV6))) {
18480 +                       ip6h_sfq = (struct ipv6hdr *)LAYER3_HEADER(skb);
18481 +                       memcpy(&SfqParseResult.ip6h, ip6h_sfq, sizeof(struct ipv6hdr));
18482 +
18483 +                       if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
18484 +                               LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
18485 +                               th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
18486 +                               memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
18487 +                               SfqParseResult.pkt_type = IPV6_5T_ROUTE;
18488 +                               sfq_prot = 4;//IPV6_5T
18489 +                       #if(0) //for TCP ack, test use  
18490 +            if(ntohl(SfqParseResult.ip6h.saddr.s6_addr32[3]) == 8){
18491 +                               ack_packt = 1;
18492 +                       }else { 
18493 +                               ack_packt = 0;
18494 +                       }
18495 +                       #endif
18496 +                       } else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
18497 +                               LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
18498 +                               uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
18499 +                               memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
18500 +                               SfqParseResult.pkt_type = IPV6_5T_ROUTE;
18501 +                               ack_packt = 0;
18502 +                               sfq_prot = 4;//IPV6_5T
18503 +       
18504 +                       }else{
18505 +                               sfq_prot = 3;//IPV6_3T
18506 +                       }
18507 +       }
18508 +       
18509 +       return 0;
18510 +}
18511 +#endif
18512 +
18513 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
18514 +{
18515 +       unsigned int    length=skb->len;
18516 +       END_DEVICE*     ei_local = netdev_priv(dev);
18517 +       
18518 +       struct QDMA_txdesc *cpu_ptr;
18519 +
18520 +       struct QDMA_txdesc *dma_ptr __maybe_unused;
18521 +       struct QDMA_txdesc *free_txd;
18522 +       int  ctx_offset;
18523 +#if defined (CONFIG_RAETH_TSO)
18524 +       struct iphdr *iph = NULL;
18525 +        struct QDMA_txdesc *init_cpu_ptr;
18526 +        struct tcphdr *th = NULL;
18527 +       struct skb_frag_struct *frag;
18528 +       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
18529 +       unsigned int len, size, offset, frag_txd_num;
18530 +       int init_txd_idx, i;
18531 +#endif // CONFIG_RAETH_TSO //
18532 +
18533 +#if defined (CONFIG_RAETH_TSOV6)
18534 +       struct ipv6hdr *ip6h = NULL;
18535 +#endif
18536 +
18537 +#ifdef CONFIG_PSEUDO_SUPPORT
18538 +       PSEUDO_ADAPTER *pAd;
18539 +#endif
18540 +       //cpu_ptr = PHYS_TO_VIRT(ei_local->tx_cpu_ptr);
18541 +       //dma_ptr = PHYS_TO_VIRT(ei_local->tx_dma_ptr);
18542 +       //ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
18543 +       cpu_ptr = (ei_local->tx_cpu_ptr);
18544 +       ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
18545 +       cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
18546 +       dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
18547 +       cpu_ptr = (ei_local->txd_pool + (ctx_offset));
18548 +       ei_local->skb_free[ctx_offset] = skb;
18549 +#if defined (CONFIG_RAETH_TSO)
18550 +        init_cpu_ptr = cpu_ptr;
18551 +        init_txd_idx = ctx_offset;
18552 +#endif
18553 +
18554 +#if !defined (CONFIG_RAETH_TSO)
18555 +
18556 +       //2. prepare data
18557 +       //cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data);
18558 +       cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
18559 +       cpu_ptr->txd_info3.SDL = skb->len;
18560 +#if defined (CONFIG_HW_SFQ)
18561 +       SfqParseLayerInfo(skb);
18562 +  cpu_ptr->txd_info4.VQID0 = 1;//1:HW hash 0:CPU
18563 +
18564 +
18565 +#if(0)// for tcp ack use, test use  
18566 +  if (ack_packt==1){
18567 +       cpu_ptr->txd_info3.QID = 0x0a;
18568 +       //cpu_ptr->txd_info3.VQID = 0;
18569 +  }else{
18570 +                       cpu_ptr->txd_info3.QID = 0;
18571 +  }
18572 +#endif  
18573 +  cpu_ptr->txd_info3.PROT = sfq_prot;
18574 +  cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
18575 +  
18576 +#endif
18577 +       if (gmac_no == 1) {
18578 +               cpu_ptr->txd_info4.FPORT = 1;
18579 +       }else {
18580 +               cpu_ptr->txd_info4.FPORT = 2;
18581 +       }
18582 +       
18583 +       cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
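+       /* QID selects the QDMA egress queue: skb->mark (set e.g. by netfilter
+        * MARK rules) is mapped through M2Q_table; the lan_wan_separate and
+        * CONFIG_HW_SFQ branches below may override it (WAN traffic moves to
+        * queues 8+, marked web traffic to HwSfqQUp/HwSfqQDl). */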
18584 +#ifdef CONFIG_PSEUDO_SUPPORT
18585 +       if((lan_wan_separate==1) && (gmac_no==2)){
18586 +               cpu_ptr->txd_info3.QID += 8;
18587 +#if defined (CONFIG_HW_SFQ)
18588 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18589 +                       cpu_ptr->txd_info3.QID = HwSfqQUp;
18590 +               }
18591 +#endif                 
18592 +       }
18593 +#if defined (CONFIG_HW_SFQ)
18594 +       if((lan_wan_separate==1) && (gmac_no==1)){
18595 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18596 +                       cpu_ptr->txd_info3.QID = HwSfqQDl;      
18597 +               }
18598 +       }
18599 +#endif
18600 +#endif //end CONFIG_PSEUDO_SUPPORT
18601 +
18602 +       if(dbg==1){
18603 +               printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18604 +               printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18605 +       }
18606 +#if 0 
18607 +       iph = (struct iphdr *)skb_network_header(skb);
18608 +        if (iph->tos == 0xe0)
18609 +               cpu_ptr->txd_info3.QID = 3;
18610 +       else if (iph->tos == 0xa0) 
18611 +               cpu_ptr->txd_info3.QID = 2;     
18612 +        else if (iph->tos == 0x20)
18613 +               cpu_ptr->txd_info3.QID = 1;
18614 +        else 
18615 +               cpu_ptr->txd_info3.QID = 0;
18616 +#endif
18617 +
18618 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
18619 +       if (skb->ip_summed == CHECKSUM_PARTIAL){
18620 +           cpu_ptr->txd_info4.TUI_CO = 7;
18621 +       }else {
18622 +           cpu_ptr->txd_info4.TUI_CO = 0;
18623 +       }
18624 +#endif
18625 +
18626 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18627 +       if(vlan_tx_tag_present(skb)) {
18628 +           cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
18629 +       }else {
18630 +           cpu_ptr->txd_info4.VLAN_TAG = 0;
18631 +       }
18632 +#endif
18633 +
18634 +#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used
18635 +
18636 +       if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18637 +               cpu_ptr->txd_info3.QID += 8;
18638 +#if defined (CONFIG_HW_SFQ)
18639 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18640 +                       cpu_ptr->txd_info3.QID = HwSfqQUp;
18641 +               }
18642 +#endif                 
18643 +       }
18644 +#if defined (CONFIG_HW_SFQ)
18645 +       if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18646 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18647 +                       cpu_ptr->txd_info3.QID = HwSfqQDl;      
18648 +               }
18649 +       }
18650 +#endif
18651 +#endif // CONFIG_RAETH_HW_VLAN_TX
18652 +
18653 +
18654 +//no hw vlan, no GE2, web UI used
18655 +#ifndef CONFIG_PSEUDO_SUPPORT
18656 +#ifndef CONFIG_RAETH_HW_VLAN_TX 
18657 +       if(lan_wan_separate==1){
18658 +               struct vlan_hdr *vh = NULL;
18659 +    unsigned short vlanid = 0;
18660 +    unsigned short vlan_TCI;
18661 +               vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18662 +               vlan_TCI = vh->h_vlan_TCI;
18663 +    vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18664 +               if(vlanid == 2)//to wan
18665 +               {
18666 +                               cpu_ptr->txd_info3.QID += 8;
18667 +#if defined (CONFIG_HW_SFQ)
18668 +                               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18669 +                                       cpu_ptr->txd_info3.QID = HwSfqQUp;
18670 +                               }
18671 +#endif                 
18672 +               }else if(vlanid == 1){ //to lan
18673 +#if defined (CONFIG_HW_SFQ)
18674 +                               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18675 +                                       cpu_ptr->txd_info3.QID = HwSfqQDl;      
18676 +                               }
18677 +#endif
18678 +               }
18679 +       }
18680 +#endif
18681 +#endif
18682 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
18683 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
18684 +               if(ra_sw_nat_hook_rx!= NULL){
18685 +                   cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18686 +                   FOE_MAGIC_TAG(skb) = 0;
18687 +           }
18688 +  }
18689 +#endif
18690 +#if 0
18691 +       cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18692 +       cpu_ptr->txd_info4.UDF = 0x2F;
18693 +#endif
18694 +       
18695 +#if defined (CONFIG_MIPS)      
18696 +       dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
18697 +#else
18698 +       dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
18699 +#endif
18700 +       cpu_ptr->txd_info3.SWC_bit = 1;
18701 +
18702 +       //3. get NULL TXD and decrease free_tx_num by 1.
18703 +       ctx_offset = get_free_txd(&free_txd);
18704 +       if(ctx_offset == NUM_TX_DESC) {
18705 +           printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
18706 +           return 0;
18707 +       }
18708 +
18709 +       //4. hook new TXD in the end of queue
18710 +       //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18711 +       cpu_ptr->txd_info2.NDP = (free_txd);
18712 +
18713 +
18714 +       //5. move CPU_PTR to new TXD
18715 +       //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18716 +       ei_local->tx_cpu_ptr = (free_txd);
18717 +       cpu_ptr->txd_info3.OWN_bit = 0;
18718 +       sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
18719 +       
18720 +#if 0 
18721 +       printk("----------------------------------------------\n");
18722 +       printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
18723 +       printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
18724 +       printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
18725 +       printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
18726 +#endif                 
18727 +
18728 +#else //#if !defined (CONFIG_RAETH_TSO)        
18729 +       cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
18730 +       cpu_ptr->txd_info3.SDL = (length - skb->data_len);
18731 +       cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
18732 +#if defined (CONFIG_HW_SFQ)            
18733 +               SfqParseLayerInfo(skb);
18734 +                // printk("tcp_source_port=%d\n", tcp_source_port);
18735 +#if(0)
18736 +   cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
18737 +  if (tcp_source_port==1000)  cpu_ptr->txd_info3.VQID = 0;
18738 +  else if (tcp_source_port==1100)  cpu_ptr->txd_info3.VQID = 1;
18739 +  else if (tcp_source_port==1200)  cpu_ptr->txd_info3.VQID = 2;
18740 +  else cpu_ptr->txd_info3.VQID = 0;
18741 + #else 
18742 +       cpu_ptr->txd_info4.VQID0 = 1;
18743 +  cpu_ptr->txd_info3.PROT = sfq_prot;
18744 +  cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
18745 +#endif
18746 +#endif
18747 +       if (gmac_no == 1) {
18748 +               cpu_ptr->txd_info4.FPORT = 1;
18749 +       }else {
18750 +               cpu_ptr->txd_info4.FPORT = 2;
18751 +       }
18752 +       
18753 +       cpu_ptr->txd_info4.TSO = 0;
18754 +        cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];         
18755 +#ifdef CONFIG_PSEUDO_SUPPORT //web UI used tso
18756 +       if((lan_wan_separate==1) && (gmac_no==2)){
18757 +               cpu_ptr->txd_info3.QID += 8;
18758 +#if defined (CONFIG_HW_SFQ)
18759 +               if(web_sfq_enable == 1 &&(skb->mark == 2)){ 
18760 +                       cpu_ptr->txd_info3.QID = HwSfqQUp;      
18761 +               }
18762 +#endif         
18763 +       }
18764 +#if defined (CONFIG_HW_SFQ)
18765 +       if((lan_wan_separate==1) && (gmac_no==1)){
18766 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18767 +                               cpu_ptr->txd_info3.QID = HwSfqQDl;      
18768 +               }
18769 +       }
18770 +#endif
18771 +#endif //CONFIG_PSEUDO_SUPPORT
18772 +       if(dbg==1){
18773 +               printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18774 +               printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18775 +       }
18776 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
18777 +       if (skb->ip_summed == CHECKSUM_PARTIAL){
18778 +           cpu_ptr->txd_info4.TUI_CO = 7;
18779 +       }else {
18780 +           cpu_ptr->txd_info4.TUI_CO = 0;
18781 +       }
18782 +#endif
18783 +
18784 +#ifdef CONFIG_RAETH_HW_VLAN_TX
18785 +       if(vlan_tx_tag_present(skb)) {
18786 +           cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
18787 +       }else {
18788 +           cpu_ptr->txd_info4.VLAN_TAG = 0;
18789 +       }
18790 +#endif
18791 +#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used tso
18792 +
18793 +       if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18794 +       //cpu_ptr->txd_info3.QID += 8;
18795 +               cpu_ptr->txd_info3.QID += 8;
18796 +#if defined (CONFIG_HW_SFQ)
18797 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18798 +                       cpu_ptr->txd_info3.QID = HwSfqQUp;
18799 +               }
18800 +#endif                 
18801 +       }
18802 +#if defined (CONFIG_HW_SFQ)
18803 +       if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18804 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18805 +                       cpu_ptr->txd_info3.QID = HwSfqQDl;      
18806 +               }
18807 +       }
18808 +#endif
18809 +#endif // CONFIG_RAETH_HW_VLAN_TX
18810 +
18811 +
18812 +//no hw vlan, no GE2, web UI used
18813 +#ifndef CONFIG_PSEUDO_SUPPORT
18814 +#ifndef CONFIG_RAETH_HW_VLAN_TX 
18815 +       if(lan_wan_separate==1){
18816 +               struct vlan_hdr *vh = NULL;
18817 +    unsigned short vlanid = 0;
18818 +    unsigned short vlan_TCI;
18819 +               vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18820 +               vlan_TCI = vh->h_vlan_TCI;
18821 +    vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18822 +               if(vlanid == 2)//eth2.2 to wan
18823 +               {
18824 +                       cpu_ptr->txd_info3.QID += 8;
18825 +#if defined (CONFIG_HW_SFQ)
18826 +                               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18827 +                                       cpu_ptr->txd_info3.QID = HwSfqQUp;
18828 +                               }
18829 +#endif                 
18830 +               }else if(vlanid == 1){ // eth2.1 to lan
18831 +#if defined (CONFIG_HW_SFQ)
18832 +                       if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18833 +                               cpu_ptr->txd_info3.QID = HwSfqQDl;      
18834 +                       }
18835 +#endif
18836 +               }
18837 +}
18838 +#endif
18839 +#endif
18840 +
18841 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
18842 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
18843 +           if(ra_sw_nat_hook_rx!= NULL){
18844 +                   cpu_ptr->txd_info4.FPORT = 4; /* PPE */
18845 +                   FOE_MAGIC_TAG(skb) = 0;
18846 +           }
18847 +       }
18848 +#endif
18849 +
18850 +        cpu_ptr->txd_info3.SWC_bit = 1;
18851 +
18852 +        ctx_offset = get_free_txd(&free_txd);
18853 +        if(ctx_offset == NUM_TX_DESC) {
18854 +            printk("get_free_txd fail\n"); 
18855 +        return 0;
18856 +       }
18857 +        //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18858 +        //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18859 +       cpu_ptr->txd_info2.NDP = free_txd;
18860 +       ei_local->tx_cpu_ptr = free_txd;
18861 +
18862 +
18863 +       if(nr_frags > 0) {
18864 +               for(i=0;i<nr_frags;i++) {
18865 +                       // 1. set or get init value for current fragment
18866 +                       offset = 0;  
18867 +                       frag = &skb_shinfo(skb)->frags[i];
18868 +                       len = frag->size; 
18869 +                       frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
18870 +                       for(frag_txd_num = frag_txd_num;frag_txd_num > 0; frag_txd_num --){
18871 +                               // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
18872 +                               if(len < MAX_TXD_LEN)
18873 +                                       size = len;
18874 +                               else
18875 +                                       size = MAX_TXD_LEN;                     
18876 +
18877 +                               //3. Update TXD info
18878 +                               cpu_ptr = (ei_local->txd_pool + (ctx_offset));
18879 +                               cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
18880 +#ifdef CONFIG_PSEUDO_SUPPORT //QoS Web UI used , nr_frags
18881 +                               if((lan_wan_separate==1) && (gmac_no==2)){
18882 +                                       //cpu_ptr->txd_info3.QID += 8;
18883 +                                       cpu_ptr->txd_info3.QID += 8;
18884 +#if defined (CONFIG_HW_SFQ)
18885 +                                       if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18886 +                                               cpu_ptr->txd_info3.QID = HwSfqQUp;      
18887 +                                       }
18888 +#endif
18889 +                               }
18890 +#if defined (CONFIG_HW_SFQ)                            
18891 +                               if((lan_wan_separate==1) && (gmac_no==1)){
18892 +                                       if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18893 +                                               cpu_ptr->txd_info3.QID = HwSfqQDl;      
18894 +                                       }
18895 +                               }
18896 +#endif
18897 +#endif //CONFIG_PSEUDO_SUPPORT
18898 +
18899 +//QoS web used, nr_frags
18900 +#ifdef CONFIG_RAETH_HW_VLAN_TX 
18901 +       if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
18902 +               cpu_ptr->txd_info3.QID += 8;
18903 +#if defined (CONFIG_HW_SFQ)
18904 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18905 +                       cpu_ptr->txd_info3.QID = HwSfqQUp;
18906 +               }
18907 +#endif                 
18908 +       }
18909 +#if defined (CONFIG_HW_SFQ)
18910 +       if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
18911 +               if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18912 +                       cpu_ptr->txd_info3.QID = HwSfqQDl;      
18913 +               }
18914 +       }
18915 +#endif
18916 +#endif // CONFIG_RAETH_HW_VLAN_TX
18917 +//no hw vlan, no GE2, web UI used
18918 +#ifndef CONFIG_PSEUDO_SUPPORT
18919 +#ifndef CONFIG_RAETH_HW_VLAN_TX 
18920 +       if(lan_wan_separate==1){
18921 +               struct vlan_hdr *vh = NULL;
18922 +    unsigned short vlanid = 0;
18923 +    unsigned short vlan_TCI;
18924 +               vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
18925 +               vlan_TCI = vh->h_vlan_TCI;
18926 +    vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
18927 +               if(vlanid == 2)//eth2.2 to wan
18928 +               {
18929 +                       cpu_ptr->txd_info3.QID += 8;
18930 +#if defined (CONFIG_HW_SFQ)
18931 +                       if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18932 +                               cpu_ptr->txd_info3.QID = HwSfqQUp;
18933 +                       }
18934 +#endif                 
18935 +
18936 +               }else if(vlanid == 1){ // eth2.1 to lan
18937 +#if defined (CONFIG_HW_SFQ)    
18938 +                       if(web_sfq_enable==1 &&(skb->mark == 2)){ 
18939 +                               cpu_ptr->txd_info3.QID = HwSfqQDl;      
18940 +                       }
18941 +#endif
18942 +               }
18943 +       }
18944 +#endif
18945 +#endif
18946 +       if(dbg==1){
18947 +               printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
18948 +               printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
18949 +       }
18950 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
18951 +                               cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
18952 +#else
18953 +                               cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
18954 +//                             printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
18955 +#endif
18956 +                               cpu_ptr->txd_info3.SDL = size;
18957 +                               if( (i==(nr_frags-1)) && (frag_txd_num == 1))
18958 +                                       cpu_ptr->txd_info3.LS_bit = 1;
18959 +                               else
18960 +                                       cpu_ptr->txd_info3.LS_bit = 0;
18961 +                               cpu_ptr->txd_info3.OWN_bit = 0;
18962 +                               cpu_ptr->txd_info3.SWC_bit = 1;
18963 +                               //4. Update skb_free for housekeeping
18964 +                               ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct  sk_buff *)0xFFFFFFFF; //MAGIC ID
18965 +
18966 +                               //5. Get next TXD
18967 +                               ctx_offset = get_free_txd(&free_txd);
18968 +                               //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
18969 +                               //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
18970 +                               cpu_ptr->txd_info2.NDP = free_txd;
18971 +                               ei_local->tx_cpu_ptr = free_txd;
18972 +                               //6. Update offset and len.
18973 +                               offset += size;
18974 +                               len -= size;
18975 +                       }
18976 +               }
18977 +               ei_local->skb_free[init_txd_idx]= (struct  sk_buff *)0xFFFFFFFF; //MAGIC ID
18978 +       }
18979 +
18980 +       if(skb_shinfo(skb)->gso_segs > 1) {
18981 +
18982 +//             TsoLenUpdate(skb->len);
18983 +
18984 +               /* TCP over IPv4 */
18985 +               iph = (struct iphdr *)skb_network_header(skb);
18986 +#if defined (CONFIG_RAETH_TSOV6)
18987 +               /* TCP over IPv6 */
18988 +               ip6h = (struct ipv6hdr *)skb_network_header(skb);
18989 +#endif                         
18990 +               if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
18991 +                       th = (struct tcphdr *)skb_transport_header(skb);
18992 +#if defined (CONFIG_HW_SFQ)
18993 +#if(0)
18994 +  init_cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
18995 +  if (tcp_source_port==1000)  init_cpu_ptr->txd_info3.VQID = 0;
18996 +  else if (tcp_source_port==1100)  init_cpu_ptr->txd_info3.VQID = 1;
18997 +  else if (tcp_source_port==1200)  init_cpu_ptr->txd_info3.VQID = 2;
18998 +  else cpu_ptr->txd_info3.VQID = 0;
18999 + #else 
19000 + init_cpu_ptr->txd_info4.VQID0 = 1;
19001 +  init_cpu_ptr->txd_info3.PROT = sfq_prot;
19002 +  init_cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
19003 +#endif
19004 +#endif
19005 +                       init_cpu_ptr->txd_info4.TSO = 1;
19006 +
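+                       /* For HW TSO the TCP checksum field is apparently reused
+                        * to pass the MSS (gso_size) down to the segmentation engine. */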
19007 +                       th->check = htons(skb_shinfo(skb)->gso_size);
19008 +#if defined (CONFIG_MIPS)      
19009 +                       dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19010 +#else
19011 +                       dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19012 +#endif
19013 +               } 
19014 +           
19015 +#if defined (CONFIG_RAETH_TSOV6)
19016 +               /* TCP over IPv6 */
19017 +               //ip6h = (struct ipv6hdr *)skb_network_header(skb);
19018 +               else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
19019 +                       th = (struct tcphdr *)skb_transport_header(skb);
19020 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19021 +                       init_cpu_ptr->txd_info4.TSO = 1;
19022 +#else
19023 +                       init_cpu_ptr->txd_info4.TSO = 1;
19024 +#endif
19025 +                       th->check = htons(skb_shinfo(skb)->gso_size);
19026 +#if defined (CONFIG_MIPS)      
19027 +                       dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19028 +#else
19029 +                       dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19030 +#endif
19031 +               }
19032 +#endif
19033 +       }
19034 +
19035 +               
19036 +//     dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);  
19037 +
19038 +       init_cpu_ptr->txd_info3.OWN_bit = 0;
19039 +#endif // CONFIG_RAETH_TSO //
19040 +
19041 +       sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19042 +
19043 +#ifdef CONFIG_PSEUDO_SUPPORT
19044 +       if (gmac_no == 2) {
19045 +               if (ei_local->PseudoDev != NULL) {
19046 +                               pAd = netdev_priv(ei_local->PseudoDev);
19047 +                               pAd->stat.tx_packets++;
19048 +                               pAd->stat.tx_bytes += length;
19049 +                       }
19050 +               } else
19051 +               
19052 +#endif
19053 +        {
19054 +       ei_local->stat.tx_packets++;
19055 +       ei_local->stat.tx_bytes += skb->len;
19056 +       }
19057 +#ifdef CONFIG_RAETH_NAPI
19058 +       if ( ei_local->tx_full == 1) {
19059 +               ei_local->tx_full = 0;
19060 +               netif_wake_queue(dev);
19061 +       }
19062 +#endif
19063 +
19064 +       return length;
19065 +}
19066 +
19067 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
19068 +{
19069 +       END_DEVICE *ei_local = netdev_priv(dev);
19070 +       unsigned long flags;
19071 +       unsigned int num_of_txd = 0;
19072 +#if defined (CONFIG_RAETH_TSO)
19073 +       unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
19074 +       struct skb_frag_struct *frag;
19075 +#endif
19076 +#ifdef CONFIG_PSEUDO_SUPPORT
19077 +       PSEUDO_ADAPTER *pAd;
19078 +#endif
19079 +
19080 +#if !defined(CONFIG_RA_NAT_NONE)
19081 +         if(ra_sw_nat_hook_tx!= NULL)
19082 +         {
19083 +//        spin_lock_irqsave(&ei_local->page_lock, flags);
19084 +           if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
19085 +//             spin_unlock_irqrestore(&ei_local->page_lock, flags);
19086 +          }else{
19087 +               kfree_skb(skb);
19088 +//             spin_unlock_irqrestore(&ei_local->page_lock, flags);
19089 +               return 0;
19090 +          }
19091 +         }
19092 +#endif
19093 +
19094 +
19095 +
19096 +       dev->trans_start = jiffies;     /* save the timestamp */
19097 +       spin_lock_irqsave(&ei_local->page_lock, flags);
19098 +#if defined (CONFIG_MIPS)      
19099 +       dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19100 +#else
19101 +       dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
19102 +#endif
19103 +
19104 +
19105 +//check free_txd_num before calling rt288_eth_send()
19106 +
19107 +#if defined (CONFIG_RAETH_TSO)
19108 +       //      num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
19109 +       if(nr_frags != 0){
19110 +               for(i=0;i<nr_frags;i++) {
19111 +                       frag = &skb_shinfo(skb)->frags[i];
19112 +                       num_of_txd  += cal_frag_txd_num(frag->size);
19113 +               }
19114 +       }else
19115 +               num_of_txd = 1;
19116 +#else
19117 +       num_of_txd = 1;
19118 +#endif
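+       /* num_of_txd estimates how many descriptors this skb will consume
+        * (one per MAX_TXD_LEN chunk of each fragment), so the queue-full
+        * check below can be made before any TXD is taken. */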
19119 +   
19120 +#if defined(CONFIG_RALINK_MT7621)
19121 +    if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
19122 +           ei_xmit_housekeeping(0);
19123 +    }
19124 +#endif
19125 +       
19126 +
19127 +    if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
19128 +    {
19129 +        rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
19130 +               if (ei_local->free_txd_num < 3)
19131 +               {
19132 +#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)                    
19133 +                   netif_stop_queue(dev);
19134 +#ifdef CONFIG_PSEUDO_SUPPORT
19135 +                   netif_stop_queue(ei_local->PseudoDev);
19136 +#endif
19137 +                   tx_ring_full = 1;
19138 +#endif
19139 +               }
19140 +    } else {  
19141 +#ifdef CONFIG_PSEUDO_SUPPORT
19142 +               if (gmac_no == 2) 
19143 +               {
19144 +                       if (ei_local->PseudoDev != NULL) 
19145 +                       {
19146 +                           pAd = netdev_priv(ei_local->PseudoDev);
19147 +                           pAd->stat.tx_dropped++;
19148 +                   }
19149 +               } else
19150 +#endif
19151 +               ei_local->stat.tx_dropped++;
19152 +               kfree_skb(skb);
19153 +                spin_unlock_irqrestore(&ei_local->page_lock, flags);
19154 +               return 0;
19155 +     } 
19156 +       spin_unlock_irqrestore(&ei_local->page_lock, flags);
19157 +       return 0;
19158 +}
19159 +
19160 +void ei_xmit_housekeeping(unsigned long unused)
19161 +{
19162 +    struct net_device *dev = dev_raether;
19163 +    END_DEVICE *ei_local = netdev_priv(dev);
19164 +#ifndef CONFIG_RAETH_NAPI
19165 +    unsigned long reg_int_mask=0;
19166 +#endif
19167 +    struct QDMA_txdesc *dma_ptr = NULL;
19168 +    struct QDMA_txdesc *cpu_ptr = NULL;
19169 +    struct QDMA_txdesc *tmp_ptr = NULL;
19170 +    unsigned int ctx_offset = 0;
19171 +    unsigned int dtx_offset = 0;
19172 +
19173 +    cpu_ptr = sysRegRead(QTX_CRX_PTR);
19174 +    dma_ptr = sysRegRead(QTX_DRX_PTR);
19175 +    ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
19176 +    dtx_offset = GET_TXD_OFFSET(&dma_ptr);
19177 +    cpu_ptr     = (ei_local->txd_pool + (ctx_offset));
19178 +    dma_ptr     = (ei_local->txd_pool + (dtx_offset));
19179 +
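+       /* Walk the release ring from the CPU pointer towards the DMA pointer:
+        * every TXD already handed back by HW (OWN_bit set) is returned to the
+        * free pool and its skb freed, unless the slot holds the 0xFFFFFFFF
+        * placeholder used for the non-final TXDs of a scatter-gather packet. */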
19180 +       while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
19181 +                //1. keep cpu next TXD
19182 +               tmp_ptr = cpu_ptr->txd_info2.NDP;
19183 +                //2. release TXD
19184 +               put_free_txd(ctx_offset);
19185 +                //3. update ctx_offset and free skb memory
19186 +               ctx_offset = GET_TXD_OFFSET(&tmp_ptr);
19187 +#if defined (CONFIG_RAETH_TSO)
19188 +               if(ei_local->skb_free[ctx_offset]!=(struct  sk_buff *)0xFFFFFFFF) {
19189 +                       dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
19190 +               }
19191 +#else
19192 +               dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
19193 +#endif
19194 +               ei_local->skb_free[ctx_offset] = 0;
19195 +
19196 +               netif_wake_queue(dev);
19197 +#ifdef CONFIG_PSEUDO_SUPPORT
19198 +               netif_wake_queue(ei_local->PseudoDev);
19199 +#endif
19200 +               tx_ring_full=0;
19201 +                //4. update cpu_ptr
19202 +               cpu_ptr = (ei_local->txd_pool + ctx_offset);
19203 +       }
19204 +       sysRegWrite(QTX_CRX_PTR, (ei_local->phy_txd_pool + (ctx_offset << 4)));
19205 +#ifndef CONFIG_RAETH_NAPI
19206 +    reg_int_mask=sysRegRead(QFE_INT_ENABLE);
19207 +#if defined (DELAY_INT)
19208 +    sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
19209 +#else
19210 +
19211 +    sysRegWrite(QFE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
19212 +#endif
19213 +#endif //CONFIG_RAETH_NAPI//
19214 +}
19215 +
19216 +EXPORT_SYMBOL(ei_start_xmit);
19217 +EXPORT_SYMBOL(ei_xmit_housekeeping);
19218 +EXPORT_SYMBOL(fe_dma_init);
19219 +EXPORT_SYMBOL(rt2880_eth_send);
19220 --- /dev/null
19221 +++ b/drivers/net/ethernet/raeth/raether_qdma_mt7623.c
19222 @@ -0,0 +1,1020 @@
19223 +#include <linux/module.h>
19224 +#include <linux/version.h>
19225 +#include <linux/kernel.h>
19226 +#include <linux/types.h>
19227 +#include <linux/pci.h>
19228 +#include <linux/init.h>
19229 +#include <linux/skbuff.h>
19230 +#include <linux/if_vlan.h>
19231 +#include <linux/if_ether.h>
19232 +#include <linux/fs.h>
19233 +#include <asm/uaccess.h>
19234 +#include <asm/rt2880/surfboardint.h>
19235 +#if defined (CONFIG_RAETH_TSO)
19236 +#include <linux/tcp.h>
19237 +#include <net/ipv6.h>
19238 +#include <linux/ip.h>
19239 +#include <net/ip.h>
19240 +#include <net/tcp.h>
19241 +#include <linux/in.h>
19242 +#include <linux/ppp_defs.h>
19243 +#include <linux/if_pppox.h>
19244 +#endif
19245 +#include <linux/delay.h>
19246 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
19247 +#include <linux/sched.h>
19248 +#endif
19249 +
19250 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
19251 +#include <asm/rt2880/rt_mmap.h>
19252 +#else
19253 +#include <linux/libata-compat.h>
19254 +#endif
19255 +
19256 +#include "ra2882ethreg.h"
19257 +#include "raether.h"
19258 +#include "ra_mac.h"
19259 +#include "ra_ioctl.h"
19260 +#include "ra_rfrw.h"
19261 +#ifdef CONFIG_RAETH_NETLINK
19262 +#include "ra_netlink.h"
19263 +#endif
19264 +#if defined (CONFIG_RAETH_QOS)
19265 +#include "ra_qos.h"
19266 +#endif
19267 +
19268 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19269 +#include "../../../net/nat/hw_nat/ra_nat.h"
19270 +#endif
19271 +
19272 +
19273 +#if !defined(CONFIG_RA_NAT_NONE)
19274 +/* bruce+
19275 + */
19276 +extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
19277 +extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
19278 +#endif
19279 +
19280 +#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
19281 +/* Qwert+
19282 + */
19283 +#include <asm/mipsregs.h>
19284 +extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
19285 +extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
19286 +#endif /* CONFIG_RA_CLASSIFIER */
19287 +
19288 +#if defined (CONFIG_RALINK_RT3052_MP2)
19289 +int32_t mcast_rx(struct sk_buff * skb);
19290 +int32_t mcast_tx(struct sk_buff * skb);
19291 +#endif
19292 +
19293 +#ifdef RA_MTD_RW_BY_NUM
19294 +int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
19295 +#else
19296 +int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
19297 +#endif
19298 +
19299 +/* gmac driver feature set config */
19300 +#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
19301 +#undef DELAY_INT
19302 +#else
19303 +#define DELAY_INT      1
19304 +#endif
19305 +
19306 +//#define CONFIG_UNH_TEST
19307 +/* end of config */
19308 +
19309 +#if defined (CONFIG_RAETH_JUMBOFRAME)
19310 +#define        MAX_RX_LENGTH   4096
19311 +#else
19312 +#define        MAX_RX_LENGTH   1536
19313 +#endif
19314 +
19315 +extern struct net_device               *dev_raether;
19316 +
19317 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19318 +static int rx_dma_owner_idx1;
19319 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19320 +static int rx_calc_idx1;
19321 +#endif
19322 +#endif
19323 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19324 +static int rx_calc_idx0;
19325 +static unsigned long tx_cpu_owner_idx0=0;
19326 +#endif
19327 +static unsigned long tx_ring_full=0;
19328 +
19329 +#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
19330 +#include "ra_ethtool.h"
19331 +extern struct ethtool_ops      ra_ethtool_ops;
19332 +#ifdef CONFIG_PSEUDO_SUPPORT
19333 +extern struct ethtool_ops      ra_virt_ethtool_ops;
19334 +#endif // CONFIG_PSEUDO_SUPPORT //
19335 +#endif // (CONFIG_ETHTOOL //
19336 +
19337 +#ifdef CONFIG_RALINK_VISTA_BASIC
19338 +int is_switch_175c = 1;
19339 +#endif
19340 +
19341 +//skb->mark to queue mapping table
19342 +extern unsigned int M2Q_table[64];
19343 +
19344 +
19345 +#define KSEG1                   0xa0000000
19346 +#if defined (CONFIG_MIPS)
19347 +#define PHYS_TO_VIRT(x)         ((void *)((x) | KSEG1))
19348 +#define VIRT_TO_PHYS(x)         ((unsigned long)(x) & ~KSEG1)
19349 +#else
19350 +#define PHYS_TO_VIRT(x)         phys_to_virt(x)
19351 +#define VIRT_TO_PHYS(x)         virt_to_phys(x)
19352 +#endif
19353 +
19354 +
19355 +extern void set_fe_dma_glo_cfg(void);
19356 +
19357 +
19358 +/**
19359 + *
19360 + * @brief: get the TXD index from its address
19361 + *
19362 + * @param: cpu_ptr
19363 + *
19364 + * @return: TXD index
19365 +*/
19366 +
19367 +static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
19368 +{
19369 +       struct net_device *dev = dev_raether;
19370 +       END_DEVICE *ei_local = netdev_priv(dev);
19371 +       int ctx_offset;
19372 +       //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
19373 +       //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
19374 +       /*kurtis*/
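+       /* The pointer registers hold physical TXD addresses: clear the top 8
+        * address bits on both the pointer and the pool base (the <<8 >>8
+        * pair), then divide the byte offset by sizeof(struct QDMA_txdesc)
+        * to recover the descriptor index within txd_pool. */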
19375 +       ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
19376 +       return ctx_offset;
19377 +} 
19378 +
19379 +
19380 +
19381 +
19382 +/**
19383 + * @brief calculate the number of TXDs needed for a fragment
19384 + *
19385 + * @param size
19386 + *
19387 + * @return frag_txd_num
19388 + */
19389 +
19390 +unsigned int cal_frag_txd_num(unsigned int size)
19391 +{
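+       /* Each TXD carries at most MAX_TXD_LEN bytes, so a fragment of
+        * 'size' bytes needs ceil(size / MAX_TXD_LEN) descriptors. */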
19392 +       unsigned int frag_txd_num = 0;
19393 +       if(size == 0)
19394 +               return 0;
19395 +       while(size > 0){
19396 +               if(size > MAX_TXD_LEN){
19397 +                       frag_txd_num++;
19398 +                       size -= MAX_TXD_LEN;
19399 +               }else{
19400 +                       frag_txd_num++;
19401 +                       size = 0;
19402 +               }
19403 +       }
19404 +        return frag_txd_num;
19405 +
19406 +}
19407 +
19408 +/**
19409 + * @brief get free TXD from TXD queue
19410 + *
19411 + * @param free_txd
19412 + *
19413 + * @return index of the allocated TXD, or NUM_TX_DESC if none is free
19414 + */
19415 +static int get_free_txd(struct QDMA_txdesc **free_txd)
19416 +{
19417 +       struct net_device *dev = dev_raether;
19418 +       END_DEVICE *ei_local = netdev_priv(dev);
19419 +       unsigned int tmp_idx;
19420 +
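+       /* txd_pool_info[] is a singly linked free list: each entry stores the
+        * index of the next free TXD. Pop the head, return its index, and hand
+        * back the descriptor's physical address for programming into the HW
+        * ring; NUM_TX_DESC signals an empty pool. */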
19421 +       if(ei_local->free_txd_num > 0){
19422 +               tmp_idx = ei_local->free_txd_head;
19423 +               ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
19424 +               ei_local->free_txd_num -= 1;
19425 +               //*free_txd = &ei_local->txd_pool[tmp_idx];
19426 +               *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
19427 +               return tmp_idx;
19428 +       }else
19429 +               return NUM_TX_DESC;     
19430 +}
19431 +
19432 +
19433 +/**
19434 + * @brief add free TXD into TXD queue
19435 + *
19436 + * @param free_txd
19437 + *
19438 + * @return 1 on success
19439 + */
19440 +int put_free_txd(int free_txd_idx)
19441 +{
19442 +       struct net_device *dev = dev_raether;
19443 +       END_DEVICE *ei_local = netdev_priv(dev);
19444 +       ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
19445 +       ei_local->free_txd_tail = free_txd_idx;
19446 +       ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
19447 +        ei_local->free_txd_num += 1;
19448 +       return 1;
19449 +}
19450 +
19451 +/*define qdma initial alloc*/
19452 +/**
19453 + * @brief allocate the QDMA TX descriptor pool and install the initial null TXDs
19454 + *
19455 + * @param net_dev
19456 + *
19457 + * @return  0: fail
19458 + *         1: success
19459 + */
19460 +bool qdma_tx_desc_alloc(void)
19461 +{
19462 +       struct net_device *dev = dev_raether;
19463 +       END_DEVICE *ei_local = netdev_priv(dev);
19464 +       struct QDMA_txdesc *free_txd = NULL;
19465 +       unsigned int txd_idx;
19466 +       int i = 0;
19467 +
19468 +
19469 +       ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
19470 +       printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
19471 +
19472 +       if (ei_local->txd_pool == NULL) {
19473 +               printk("adapter->txd_pool allocation failed!\n");
19474 +               return 0;
19475 +       }
19476 +       printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
19477 +       //chain the TXD free list: txd_pool_info[i] holds the index of the next free TXD
19478 +       for ( i = 0; i < NUM_TX_DESC; i++)
19479 +       {
19480 +               ei_local->skb_free[i]= 0;
19481 +               ei_local->txd_pool_info[i] = i + 1;
19482 +               ei_local->txd_pool[i].txd_info3.LS_bit = 1;
19483 +               ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
19484 +       }
19485 +
19486 +       ei_local->free_txd_head = 0;
19487 +       ei_local->free_txd_tail = NUM_TX_DESC - 1;
19488 +       ei_local->free_txd_num = NUM_TX_DESC;
19489 +       
19490 +
19491 +       //get free txd from txd pool
19492 +       txd_idx = get_free_txd(&free_txd);
19493 +       if( txd_idx == NUM_TX_DESC) {
19494 +               printk("get_free_txd fail\n");
19495 +               return 0;
19496 +       }
19497 +       
19498 +       //add null TXD for transmit
19499 +
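+       /* The TX queue always keeps one placeholder ("null") TXD: QTX_CTX/DTX
+        * start out pointing at it, and rt2880_eth_send() links in a fresh one
+        * for every packet queued. A second null TXD is installed below on the
+        * release side (QTX_CRX/DRX) consumed by ei_xmit_housekeeping(). */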
19500 +       /*kurtis test*/
19501 +       ei_local->tx_dma_ptr = free_txd;
19502 +       ei_local->tx_cpu_ptr = free_txd;
19503 +       //ei_local->tx_dma_ptr = virt_to_phys(free_txd);
19504 +       //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19505 +       sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19506 +       sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
19507 +       
19508 +       printk("kurtis: free_txd = 0x%x!!!\n", free_txd);
19509 +       printk("kurtis: ei_local->tx_dma_ptr = 0x%x!!!\n", ei_local->tx_dma_ptr);
19510 +       
19511 +       //get free txd from txd pool
19512 +
19513 +       txd_idx = get_free_txd(&free_txd);
19514 +       if( txd_idx == NUM_TX_DESC) {
19515 +               printk("get_free_txd fail\n");
19516 +               return 0;
19517 +       }
19518 +       // add null TXD for release
19519 +       //sysRegWrite(QTX_CRX_PTR, virt_to_phys(free_txd));
19520 +       //sysRegWrite(QTX_DRX_PTR, virt_to_phys(free_txd));
19521 +       sysRegWrite(QTX_CRX_PTR, free_txd);
19522 +       sysRegWrite(QTX_DRX_PTR, free_txd);
19523 +       
19524 +       printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
19525 +       
19526 +       printk(" POOL  HEAD_PTR | DMA_PTR | CPU_PTR \n");
19527 +       printk("----------------+---------+--------\n");
19528 +#if 1
19529 +       printk("     0x%p 0x%08X 0x%08X\n",ei_local->txd_pool,
19530 +                       ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
19531 +#endif
19532 +       return 1;
19533 +}
19534 +
19535 +bool fq_qdma_init(void)
19536 +{
19537 +       struct QDMA_txdesc *free_head = NULL;
19538 +       unsigned int free_head_phy;
19539 +       unsigned int free_tail_phy;
19540 +       unsigned int *free_page_head = NULL;
19541 +       unsigned int free_page_head_phy;
19542 +       int i;
19543 +    
19544 +       free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &free_head_phy);
19545 +       if (unlikely(free_head == NULL)){
19546 +               printk(KERN_ERR "QDMA FQ descriptor not available...\n");
19547 +               return 0;
19548 +       }
19549 +       memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
19550 +
19551 +       free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &free_page_head_phy);
19552 +       if (unlikely(free_page_head == NULL)){
19553 +               printk(KERN_ERR "QDMA FQ page not available...\n");
19554 +               return 0;
19555 +       }       
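+       /* Build the HW free queue: every descriptor points at its own
+        * QDMA_PAGE_SIZE buffer (SDP) and is chained to the next one via NDP;
+        * the last entry is left unchained and becomes the queue tail. */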
19556 +       for (i=0; i < NUM_QDMA_PAGE; i++) {
19557 +               free_head[i].txd_info1.SDP = (free_page_head_phy + (i * QDMA_PAGE_SIZE));
19558 +               if(i < (NUM_QDMA_PAGE-1)){
19559 +                       free_head[i].txd_info2.NDP = (free_head_phy + ((i+1) * sizeof(struct QDMA_txdesc)));
19560 +
19561 +
19562 +#if 0
19563 +                       printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
19564 +                       printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
19565 +                       printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
19566 +                       printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
19567 +#endif
19568 +               }
19569 +               free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
19570 +
19571 +       }
19572 +       free_tail_phy = (free_head_phy + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
19573 +
19574 +       printk("free_head_phy is 0x%x!!!\n", free_head_phy);
19575 +       printk("free_tail_phy is 0x%x!!!\n", free_tail_phy);
19576 +       sysRegWrite(QDMA_FQ_HEAD, (u32)free_head_phy);
19577 +       sysRegWrite(QDMA_FQ_TAIL, (u32)free_tail_phy);
19578 +       sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
19579 +       sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
19580 +    return 1;
19581 +}
19582 +
19583 +int fe_dma_init(struct net_device *dev)
19584 +{
19585 +
19586 +       int i;
19587 +       unsigned int    regVal;
19588 +       END_DEVICE* ei_local = netdev_priv(dev);
19589 +
19590 +       fq_qdma_init();
19591 +
19592 +       while(1)
19593 +       {
19594 +               regVal = sysRegRead(QDMA_GLO_CFG);
19595 +               if((regVal & RX_DMA_BUSY))
19596 +               {
19597 +                       printk("\n  RX_DMA_BUSY !!! ");
19598 +                       continue;
19599 +               }
19600 +               if((regVal & TX_DMA_BUSY))
19601 +               {
19602 +                       printk("\n  TX_DMA_BUSY !!! ");
19603 +                       continue;
19604 +               }
19605 +               break;
19606 +       }
19607 +       /*tx desc alloc, add a NULL TXD to HW*/
19608 +
19609 +       qdma_tx_desc_alloc();
19610 +
19611 +
19612 +       /* Initial RX Ring 0*/
19613 +#ifdef CONFIG_32B_DESC
19614 +       ei_local->rx_ring0 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19615 +       ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
19616 +#else
19617 +       ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
19618 +#endif
19619 +       for (i = 0; i < NUM_QRX_DESC; i++) {
19620 +               memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
19621 +               ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
19622 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19623 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19624 +               ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19625 +#else
19626 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
19627 +#endif
19628 +               ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19629 +       }
19630 +       printk("QDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
19631 +
19632 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19633 +       /* Initial RX Ring 1*/
19634 +#ifdef CONFIG_32B_DESC
19635 +       ei_local->rx_ring1 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19636 +       ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
19637 +#else
19638 +       ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
19639 +#endif
19640 +       for (i = 0; i < NUM_QRX_DESC; i++) {
19641 +               memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
19642 +               ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
19643 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19644 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19645 +               ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19646 +#else
19647 +               ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
19648 +#endif
19649 +               ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19650 +       }
19651 +       printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
19652 +#endif
19653 +
19654 +       regVal = sysRegRead(QDMA_GLO_CFG);
19655 +       regVal &= 0x000000FF;
19656 +       sysRegWrite(QDMA_GLO_CFG, regVal);
19657 +       regVal=sysRegRead(QDMA_GLO_CFG);
19658 +
19659 +       /* Tell the adapter where the TX/RX rings are located. */
19660 +       
19661 +       sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_rx_ring0));
19662 +       sysRegWrite(QRX_MAX_CNT_0,  cpu_to_le32((u32) NUM_QRX_DESC));
19663 +       sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
19664 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19665 +       rx_calc_idx0 = rx_dma_owner_idx0 =  sysRegRead(QRX_CRX_IDX_0);
19666 +#endif
19667 +       sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
19668 +#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
19669 +       sysRegWrite(QRX_BASE_PTR_1, phys_to_bus((u32) ei_local->phy_rx_ring1));
19670 +       sysRegWrite(QRX_MAX_CNT_1,  cpu_to_le32((u32) NUM_QRX_DESC));
19671 +       sysRegWrite(QRX_CRX_IDX_1, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
19672 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19673 +       rx_calc_idx1 = rx_dma_owner_idx1 =  sysRegRead(QRX_CRX_IDX_1);
19674 +#endif
19675 +       sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX1);
19676 +#endif
19677 +
19678 +#if !defined (CONFIG_RAETH_QDMATX_QDMARX) 
19679 +       /* Initialize PDMA RX Ring 0 */
19680 +#ifdef CONFIG_32B_DESC
19681 +        ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
19682 +        ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
19683 +#else
19684 +        ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
19685 +#endif
19686 +        for (i = 0; i < NUM_RX_DESC; i++) {
19687 +               memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
19688 +               ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
19689 +#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
19690 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
19691 +               ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
19692 +#else
19693 +               ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
19694 +#endif
19695 +               ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
19696 +        }
19697 +        printk("PDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
19698 +
19699 +        regVal = sysRegRead(PDMA_GLO_CFG);
19700 +        regVal &= 0x000000FF;
19701 +        sysRegWrite(PDMA_GLO_CFG, regVal);
19702 +        regVal=sysRegRead(PDMA_GLO_CFG);
19703 +
19704 +        sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
19705 +        sysRegWrite(RX_MAX_CNT0,  cpu_to_le32((u32) NUM_RX_DESC));
19706 +        sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
19707 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19708 +        rx_calc_idx0 =  sysRegRead(RX_CALC_IDX0);
19709 +#endif
19710 +        sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
19711 +               
19712 +#endif/*kurtis*/
19713 +        /* Enable random early drop and set drop threshold automatically */
19714 +       sysRegWrite(QDMA_FC_THRES, 0x174444);
19715 +       sysRegWrite(QDMA_HRED2, 0x0);
19716 +       set_fe_dma_glo_cfg();
19717 +       
19718 +       return 1;
19719 +}
19720 +
19721 +inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
19722 +{
19723 +       unsigned int    length=skb->len;
19724 +       END_DEVICE*     ei_local = netdev_priv(dev);
19725 +       
19726 +       struct QDMA_txdesc *cpu_ptr;
19727 +
19728 +       struct QDMA_txdesc *dma_ptr __maybe_unused;
19729 +       struct QDMA_txdesc *free_txd;
19730 +       unsigned int  ctx_offset = 0;
19731 +       unsigned int  dtx_offset = 0;
19732 +#if defined (CONFIG_RAETH_TSO)
19733 +       struct iphdr *iph = NULL;
19734 +        struct QDMA_txdesc *init_cpu_ptr;
19735 +        struct tcphdr *th = NULL;
19736 +       struct skb_frag_struct *frag;
19737 +       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
19738 +       unsigned int len, size, offset, frag_txd_num;
19739 +       int init_txd_idx, i;
19740 +#endif // CONFIG_RAETH_TSO //
19741 +
19742 +#if defined (CONFIG_RAETH_TSOV6)
19743 +       struct ipv6hdr *ip6h = NULL;
19744 +#endif
19745 +
19746 +#ifdef CONFIG_PSEUDO_SUPPORT
19747 +       PSEUDO_ADAPTER *pAd;
19748 +#endif
19749 +       cpu_ptr = (ei_local->tx_cpu_ptr);
19750 +       ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
19751 +       cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
19752 +       dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
19753 +/*kurtis test*/
19754 +       //dma_ptr = (ei_local->tx_dma_ptr);
19755 +       
19756 +       
19757 +       /*only modify virtual address*/
19758 +       //cpu_ptr = (ei_local->txd_pool) + (ctx_offset * sizeof(struct QDMA_txdesc));
19759 +       cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19760 +       
19761 +       //dtx_offset = GET_TXD_OFFSET(&dma_ptr);
19762 +       //dma_ptr = (ei_local->txd_pool) + (dtx_offset * sizeof(struct QDMA_txdesc));
19763 +       
19764 +       //printk("eth_send  ctx_offset = 0x%x!!!\n", ctx_offset);
19765 +       //printk("eth_send  dtx_offset = 0x%x!!!\n", dtx_offset);
19766 +       //printk("eth_send  ei_local->txd_pool = 0x%x!!!\n", ei_local->txd_pool);
19767 +       //printk("eth_send  cpu_ptr = 0x%x!!!\n", cpu_ptr);
19768 +       //printk("eth_send  ctx_offset = 0x%x!!!\n", ctx_offset);
19769 +       //printk("eth_send  ei_local->skb_free[ctx_offset] = 0x%x!!!\n", skb);
19770 +       
19771 +       
19772 +       ei_local->skb_free[ctx_offset] = skb;
19773 +#if defined (CONFIG_RAETH_TSO)
19774 +        init_cpu_ptr = cpu_ptr;
19775 +        init_txd_idx = ctx_offset;
19776 +#endif
19777 +
19778 +#if !defined (CONFIG_RAETH_TSO)
19779 +
19780 +       //2. prepare data
19781 +       cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
19782 +       cpu_ptr->txd_info3.SDL = skb->len;
19783 +
19784 +       if (gmac_no == 1) {
19785 +               cpu_ptr->txd_info4.FPORT = 1;
19786 +       }else {
19787 +               cpu_ptr->txd_info4.FPORT = 2;
19788 +       }
19789 +
19790 +
19791 +       cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19792 +#if 0 
19793 +       iph = (struct iphdr *)skb_network_header(skb);
19794 +        if (iph->tos == 0xe0)
19795 +               cpu_ptr->txd_info3.QID = 3;
19796 +       else if (iph->tos == 0xa0) 
19797 +               cpu_ptr->txd_info3.QID = 2;     
19798 +        else if (iph->tos == 0x20)
19799 +               cpu_ptr->txd_info3.QID = 1;
19800 +        else 
19801 +               cpu_ptr->txd_info3.QID = 0;
19802 +#endif
19803 +
19804 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
19805 +       if (skb->ip_summed == CHECKSUM_PARTIAL){
19806 +           cpu_ptr->txd_info4.TUI_CO = 7;
19807 +       }else {
19808 +           cpu_ptr->txd_info4.TUI_CO = 0;
19809 +       }
19810 +#endif
19811 +
19812 +#ifdef CONFIG_RAETH_HW_VLAN_TX
19813 +       if(vlan_tx_tag_present(skb)) {
19814 +           cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
19815 +       }else {
19816 +           cpu_ptr->txd_info4.VLAN_TAG = 0;
19817 +       }
19818 +#endif
19819 +
19820 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19821 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
19822 +               if(ra_sw_nat_hook_rx!= NULL){
19823 +                   cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19824 +                   FOE_MAGIC_TAG(skb) = 0;
19825 +           }
19826 +       }
19827 +#endif
19828 +#if 0
19829 +       cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19830 +       cpu_ptr->txd_info4.UDF = 0x2F;
19831 +#endif
19832 +       
19833 +#if defined (CONFIG_MIPS)      
19834 +       dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
19835 +#else
19836 +       dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
19837 +#endif
19838 +       cpu_ptr->txd_info3.SWC_bit = 1;
19839 +
19840 +       //3. get NULL TXD and decrease free_tx_num by 1.
19841 +       ctx_offset = get_free_txd(&free_txd);
19842 +       if(ctx_offset == NUM_TX_DESC) {
19843 +           printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
19844 +           return 0;
19845 +       }
19846 +
19847 +       //4. hook new TXD in the end of queue
19848 +       //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19849 +       cpu_ptr->txd_info2.NDP = (free_txd);
19850 +
19851 +
19852 +       //5. move CPU_PTR to new TXD
19853 +       //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);        
19854 +       ei_local->tx_cpu_ptr = (free_txd);      
19855 +       cpu_ptr->txd_info3.OWN_bit = 0;
19856 +       sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
19857 +       
19858 +#if 0 
19859 +       printk("----------------------------------------------\n");
19860 +       printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
19861 +       printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
19862 +       printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
19863 +       printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
19864 +#endif                 
19865 +
19866 +#else //#if !defined (CONFIG_RAETH_TSO)        
19867 +        cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
19868 +       cpu_ptr->txd_info3.SDL = (length - skb->data_len);
19869 +       cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
19870 +       if (gmac_no == 1) {
19871 +               cpu_ptr->txd_info4.FPORT = 1;
19872 +       }else {
19873 +               cpu_ptr->txd_info4.FPORT = 2;
19874 +       }
19875 +       
19876 +       cpu_ptr->txd_info4.TSO = 0;
19877 +        cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];         
19878 +#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
19879 +       if (skb->ip_summed == CHECKSUM_PARTIAL){
19880 +           cpu_ptr->txd_info4.TUI_CO = 7;
19881 +       }else {
19882 +           cpu_ptr->txd_info4.TUI_CO = 0;
19883 +       }
19884 +#endif
19885 +
19886 +#ifdef CONFIG_RAETH_HW_VLAN_TX
19887 +       if(vlan_tx_tag_present(skb)) {
19888 +           cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
19889 +       }else {
19890 +           cpu_ptr->txd_info4.VLAN_TAG = 0;
19891 +       }
19892 +#endif
19893 +
19894 +#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
19895 +       if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
19896 +           if(ra_sw_nat_hook_rx!= NULL){
19897 +                   cpu_ptr->txd_info4.FPORT = 4; /* PPE */
19898 +                   FOE_MAGIC_TAG(skb) = 0;
19899 +           }
19900 +       }
19901 +#endif
19902 +
19903 +        cpu_ptr->txd_info3.SWC_bit = 1;
19904 +
19905 +        ctx_offset = get_free_txd(&free_txd);
19906 +        if(ctx_offset == NUM_TX_DESC) {
19907 +            printk("get_free_txd fail\n"); 
19908 +        return 0;
19909 +       }
19910 +        //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19911 +        //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19912 +        cpu_ptr->txd_info2.NDP = free_txd;
19913 +        ei_local->tx_cpu_ptr = free_txd;
19914 +
19915 +       if(nr_frags > 0) {
19916 +               for(i=0;i<nr_frags;i++) {
19917 +                       // 1. set or get init value for current fragment
19918 +                       offset = 0;  
19919 +                       frag = &skb_shinfo(skb)->frags[i];
19920 +                       len = frag->size; 
19921 +                       frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
19922 +                       for( ; frag_txd_num > 0; frag_txd_num--){
19923 +                               // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
19924 +                               if(len < MAX_TXD_LEN)
19925 +                                       size = len;
19926 +                               else
19927 +                                       size = MAX_TXD_LEN;                     
19928 +
19929 +                               //3. Update TXD info
19930 +                               cpu_ptr = (ei_local->txd_pool + (ctx_offset));
19931 +                               cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
19932 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
19933 +                               cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
19934 +#else
19935 +                               cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
19936 +//                             printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
19937 +#endif
19938 +                               cpu_ptr->txd_info3.SDL = size;
19939 +                               if( (i==(nr_frags-1)) && (frag_txd_num == 1))
19940 +                                       cpu_ptr->txd_info3.LS_bit = 1;
19941 +                               else
19942 +                                       cpu_ptr->txd_info3.LS_bit = 0;
19943 +                               cpu_ptr->txd_info3.OWN_bit = 0;
19944 +                               cpu_ptr->txd_info3.SWC_bit = 1;
19945 +                               //4. Update skb_free for housekeeping
19946 +                               ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct  sk_buff *)0xFFFFFFFF; //MAGIC ID
19947 +
19948 +                               //5. Get next TXD
19949 +                               ctx_offset = get_free_txd(&free_txd);
19950 +                               //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
19951 +                               //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
19952 +                               cpu_ptr->txd_info2.NDP = free_txd;
19953 +                               ei_local->tx_cpu_ptr = free_txd;
19954 +                               //6. Update offset and len.
19955 +                               offset += size;
19956 +                               len -= size;
19957 +                       }
19958 +               }
19959 +               ei_local->skb_free[init_txd_idx]= (struct  sk_buff *)0xFFFFFFFF; //MAGIC ID
19960 +       }
19961 +
19962 +       if(skb_shinfo(skb)->gso_segs > 1) {
19963 +
19964 +//             TsoLenUpdate(skb->len);
19965 +
19966 +               /* TCP over IPv4 */
19967 +               iph = (struct iphdr *)skb_network_header(skb);
19968 +#if defined (CONFIG_RAETH_TSOV6)
19969 +               /* TCP over IPv6 */
19970 +               ip6h = (struct ipv6hdr *)skb_network_header(skb);
19971 +#endif                         
19972 +               if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
19973 +                       th = (struct tcphdr *)skb_transport_header(skb);
19974 +
19975 +                       init_cpu_ptr->txd_info4.TSO = 1;
19976 +
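+                       /* store the MSS in the TCP checksum field for the HW TSO engine */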
19977 +                       th->check = htons(skb_shinfo(skb)->gso_size);
19978 +#if defined (CONFIG_MIPS)      
19979 +                       dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19980 +#else
19981 +                       dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
19982 +#endif
19983 +               } 
19984 +           
19985 +#if defined (CONFIG_RAETH_TSOV6)
19986 +               /* TCP over IPv6 */
19987 +               //ip6h = (struct ipv6hdr *)skb_network_header(skb);
19988 +               else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
19989 +                       th = (struct tcphdr *)skb_transport_header(skb);
19990 +#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
19991 +                       init_cpu_ptr->txd_info4.TSO = 1;
19992 +#else
19993 +                       init_cpu_ptr->txd_info4.TSO = 1;
19994 +#endif
19995 +                       th->check = htons(skb_shinfo(skb)->gso_size);
19996 +#if defined (CONFIG_MIPS)      
19997 +                       dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
19998 +#else
19999 +                       dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
20000 +#endif
20001 +               }
20002 +#endif
20003 +       }
20004 +
20005 +               
20006 +//     dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);  
20007 +
20008 +       init_cpu_ptr->txd_info3.OWN_bit = 0;
20009 +#endif // CONFIG_RAETH_TSO //
20010 +
20011 +       sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
20012 +
20013 +#ifdef CONFIG_PSEUDO_SUPPORT
20014 +       if (gmac_no == 2) {
20015 +               if (ei_local->PseudoDev != NULL) {
20016 +                               pAd = netdev_priv(ei_local->PseudoDev);
20017 +                               pAd->stat.tx_packets++;
20018 +                               pAd->stat.tx_bytes += length;
20019 +                       }
20020 +               } else
20021 +               
20022 +#endif
20023 +        {
20024 +       ei_local->stat.tx_packets++;
20025 +       ei_local->stat.tx_bytes += skb->len;
20026 +       }
20027 +#ifdef CONFIG_RAETH_NAPI
20028 +       if ( ei_local->tx_full == 1) {
20029 +               ei_local->tx_full = 0;
20030 +               netif_wake_queue(dev);
20031 +       }
20032 +#endif
20033 +
20034 +       return length;
20035 +}
20036 +
20037 +int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
20038 +{
20039 +       END_DEVICE *ei_local = netdev_priv(dev);
20040 +       unsigned long flags;
20041 +       unsigned int num_of_txd = 0;
20042 +#if defined (CONFIG_RAETH_TSO)
20043 +       unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
20044 +       struct skb_frag_struct *frag;
20045 +#endif
20046 +#ifdef CONFIG_PSEUDO_SUPPORT
20047 +       PSEUDO_ADAPTER *pAd;
20048 +#endif
20049 +
20050 +#if !defined(CONFIG_RA_NAT_NONE)
20051 +         if(ra_sw_nat_hook_tx!= NULL)
20052 +         {
20053 +//        spin_lock_irqsave(&ei_local->page_lock, flags);
20054 +           if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
20055 +//             spin_unlock_irqrestore(&ei_local->page_lock, flags);
20056 +          }else{
20057 +               kfree_skb(skb);
20058 +//             spin_unlock_irqrestore(&ei_local->page_lock, flags);
20059 +               return 0;
20060 +          }
20061 +         }
20062 +#endif
20063 +
20064 +#if defined(CONFIG_RALINK_MT7621) || defined(CONFIG_ARCH_MT7623) 
20065 +#define MIN_PKT_LEN  64
20066 +        if (skb->len < MIN_PKT_LEN) {
20067 +                if (skb_padto(skb, MIN_PKT_LEN)) {
20068 +                        printk("raeth: skb_padto failed\n");
20069 +                        return 0;
20070 +                }
20071 +                skb_put(skb, MIN_PKT_LEN - skb->len);
20072 +        }
20073 +#endif
20074 +
20075 +
20076 +       dev->trans_start = jiffies;     /* save the timestamp */
20077 +       spin_lock_irqsave(&ei_local->page_lock, flags);
20078 +#if defined (CONFIG_MIPS)      
20079 +       dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
20080 +#else
20081 +       dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
20082 +#endif
20083 +
20084 +
20085 +//check free_txd_num before calling rt2880_eth_send()
20086 +
20087 +#if defined (CONFIG_RAETH_TSO)
20088 +       //      num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
20089 +       if(nr_frags != 0){
20090 +               for(i=0;i<nr_frags;i++) {
20091 +                       frag = &skb_shinfo(skb)->frags[i];
20092 +                       num_of_txd  += cal_frag_txd_num(frag->size);
20093 +               }
20094 +       }else
20095 +               num_of_txd = 1;
20096 +#else
20097 +       num_of_txd = 1;
20098 +#endif
20099 +   
20100 +#if defined(CONFIG_RALINK_MT7621)
20101 +    if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
20102 +           ei_xmit_housekeeping(0);
20103 +    }
20104 +#endif
20105 +       
20106 +    ei_xmit_housekeeping(0);
20107 +
20108 +    //if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
20109 +    if ((ei_local->free_txd_num > num_of_txd + 5) && (ei_local->free_txd_num != NUM_TX_DESC))
20110 +    {
20111 +        rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
20112 +               if (ei_local->free_txd_num < 3)
20113 +               {
20114 +#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)                    
20115 +                   netif_stop_queue(dev);
20116 +#ifdef CONFIG_PSEUDO_SUPPORT
20117 +                   netif_stop_queue(ei_local->PseudoDev);
20118 +#endif
20119 +                   tx_ring_full = 1;
20120 +#endif
20121 +               }
20122 +    } else {  
20123 +#ifdef CONFIG_PSEUDO_SUPPORT
20124 +               if (gmac_no == 2) 
20125 +               {
20126 +                       if (ei_local->PseudoDev != NULL) 
20127 +                       {
20128 +                           pAd = netdev_priv(ei_local->PseudoDev);
20129 +                           pAd->stat.tx_dropped++;
20130 +                   }
20131 +               } else
20132 +#endif
20133 +               ei_local->stat.tx_dropped++;
20134 +               kfree_skb(skb);
20135 +                spin_unlock_irqrestore(&ei_local->page_lock, flags);
20136 +               return 0;
20137 +     } 
20138 +       spin_unlock_irqrestore(&ei_local->page_lock, flags);
20139 +       return 0;
20140 +}
20141 +
20142 +void ei_xmit_housekeeping(unsigned long unused)
20143 +{
20144 +    struct net_device *dev = dev_raether;
20145 +    END_DEVICE *ei_local = netdev_priv(dev);
20146 +#ifndef CONFIG_RAETH_NAPI
20147 +    unsigned long reg_int_mask=0;
20148 +#endif
20149 +    struct QDMA_txdesc *dma_ptr = NULL;
20150 +    struct QDMA_txdesc *cpu_ptr = NULL;
20151 +    struct QDMA_txdesc *tmp_ptr = NULL;
20152 +    unsigned int htx_offset = 0;
20153 +    unsigned int ctx_offset = 0;
20154 +    unsigned int dtx_offset = 0;
20155 +
20156 +    //dma_ptr = phys_to_virt(sysRegRead(QTX_DRX_PTR));
20157 +    //cpu_ptr = phys_to_virt(sysRegRead(QTX_CRX_PTR));
20158 +    //printk("kurtis:housekeeping  QTX_DRX_PTR = 0x%x!!!\n", sysRegRead(QTX_DRX_PTR));
20159 +    //printk("kurtis:housekeeping  DMA_PTR = 0x%x!!!\n", dma_ptr);
20160 +
20161 +    cpu_ptr = sysRegRead(QTX_CRX_PTR);    
20162 +    dma_ptr = sysRegRead(QTX_DRX_PTR);
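+    /* QTX_CRX_PTR/QTX_DRX_PTR hold physical TXD addresses; GET_TXD_OFFSET() maps them to indices into txd_pool */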
20163 +
20164 +    //printk("kurtis:housekeeping  QTX_CRX_PTR = 0x%x!!!\n", cpu_ptr);
20165 +    //printk("kurtis:housekeeping  QTX_DRX_PTR = 0x%x!!!\n", dma_ptr);
20166 +    ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
20167 +    dtx_offset = GET_TXD_OFFSET(&dma_ptr);
20168 +    htx_offset = ctx_offset;
20169 +    cpu_ptr    = (ei_local->txd_pool + (ctx_offset));
20170 +    dma_ptr    = (ei_local->txd_pool + (dtx_offset));
20171 +    
20172 +
20173 +    //printk("kurtis:housekeeping  CPU_PTR = 0x%x!!!\n", cpu_ptr);
20174 +    //printk("kurtis:housekeeping  DMA_PTR = 0x%x!!!\n", dma_ptr);
20175 +
20176 +/*temp mark*/
20177 +#if 1 
20178 +
20179 +
20180 +    if(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
20181 +       while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
20182 +
20183 +           //1. keep cpu next TXD                      
20184 +           //tmp_ptr = phys_to_virt(cpu_ptr->txd_info2.NDP);
20185 +           tmp_ptr = cpu_ptr->txd_info2.NDP;
20186 +            htx_offset = GET_TXD_OFFSET(&tmp_ptr);
20187 +       //printk("kurtis:housekeeping  cpu_ptr->txd_info2.NDP = 0x%x!!!\n", cpu_ptr->txd_info2.NDP);
20188 +       //printk("kurtis:housekeeping  tmp_ptr = 0x%x!!!\n", tmp_ptr);
20189 +       //printk("kurtis:housekeeping  htx_offset = 0x%x!!!\n", htx_offset);
20190 +            //2. free skb memory
20191 +#if defined (CONFIG_RAETH_TSO)
20192 +           if(ei_local->skb_free[htx_offset]!=(struct  sk_buff *)0xFFFFFFFF) {
20193 +                   dev_kfree_skb_any(ei_local->skb_free[htx_offset]); 
20194 +           }
20195 +#else
20196 +           dev_kfree_skb_any(ei_local->skb_free[htx_offset]); 
20197 +#endif                 
20198 +                
20199 +           //3. release TXD
20200 +           //htx_offset = GET_TXD_OFFSET(&cpu_ptr);                    
20201 +           //put_free_txd(htx_offset);
20202 +           put_free_txd(ctx_offset);
20203 +
20204 +
20205 +
20206 +            netif_wake_queue(dev);
20207 +#ifdef CONFIG_PSEUDO_SUPPORT
20208 +           netif_wake_queue(ei_local->PseudoDev);
20209 +#endif                 
20210 +           tx_ring_full=0;
20211 +                
20212 +           //4. update cpu_ptr to next ptr
20213 +           //cpu_ptr = tmp_ptr;
20214 +           cpu_ptr = (ei_local->txd_pool + htx_offset);
20215 +           ctx_offset = htx_offset;
20216 +           //cpu_ptr = (cpu_ptr + (htx_offset));
20217 +           //printk("kurtis:housekeeping 4. update  cpu_ptr = 0x%x!!!\n", cpu_ptr);
20218 +       }
20219 +    }
20220 +    //sysRegWrite(QTX_CRX_PTR, virt_to_phys(cpu_ptr));
20221 +    //sysRegWrite(QTX_CRX_PTR, cpu_ptr);
20222 +    tmp_ptr = (ei_local->phy_txd_pool + (htx_offset << 4));
20223 +    //printk("kurtis:housekeeping 5. update  QTX_CRX_PTR = 0x%x!!!\n", tmp_ptr);
20224 +    sysRegWrite(QTX_CRX_PTR, tmp_ptr);
20225 +
20226 +#endif
20227 +
20228 +#ifndef CONFIG_RAETH_NAPI
20229 +    reg_int_mask=sysRegRead(QFE_INT_ENABLE);
20230 +#if defined (DELAY_INT)
20231 +    sysRegWrite(FE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
20232 +#else
20233 +
20234 +    sysRegWrite(FE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
20235 +#endif
20236 +#endif //CONFIG_RAETH_NAPI//
20237 +}
20238 +
20239 +EXPORT_SYMBOL(ei_start_xmit);
20240 +EXPORT_SYMBOL(ei_xmit_housekeeping);
20241 +EXPORT_SYMBOL(fe_dma_init);
20242 +EXPORT_SYMBOL(rt2880_eth_send);
20243 --- /dev/null
20244 +++ b/drivers/net/ethernet/raeth/smb_hook.c
20245 @@ -0,0 +1,17 @@
20246 +#include <linux/version.h>
20247 +#include <linux/module.h>
20248 +#include <linux/kernel.h>
20249 +#include <linux/types.h>
20250 +#include <linux/skbuff.h>
20251 +
20252 +
20253 +int (*smb_nf_local_in_hook)(struct sk_buff *skb) = NULL;
20254 +int (*smb_nf_pre_routing_hook)(struct sk_buff *skb) = NULL;
20255 +int (*smb_nf_local_out_hook)(struct sk_buff *skb) = NULL;
20256 +int (*smb_nf_post_routing_hook)(struct sk_buff *skb) = NULL;
20257 +EXPORT_SYMBOL(smb_nf_local_in_hook);
20258 +EXPORT_SYMBOL(smb_nf_pre_routing_hook);
20259 +EXPORT_SYMBOL(smb_nf_local_out_hook);
20260 +EXPORT_SYMBOL(smb_nf_post_routing_hook);
20261 +
20262 +
20263 --- /dev/null
20264 +++ b/drivers/net/ethernet/raeth/smb_nf.c
20265 @@ -0,0 +1,177 @@
20266 +#include <linux/module.h>
20267 +#include <linux/version.h>
20268 +#include <linux/kernel.h>
20269 +#include <linux/types.h>
20270 +
20271 +#include <linux/inetdevice.h>
20272 +#include <linux/tcp.h>
20273 +#include <linux/ip.h>
20274 +#include <net/tcp.h>
20275 +#include <net/ip.h>
20276 +
20277 +extern int (*smb_nf_local_in_hook)(struct sk_buff *skb);
20278 +extern int (*smb_nf_pre_routing_hook)(struct sk_buff *skb);
20279 +extern int (*smb_nf_local_out_hook)(struct sk_buff *skb);
20280 +extern int (*smb_nf_post_routing_hook)(struct sk_buff *skb);
20281 +
20282 +struct net_device *lan_int = NULL;
20283 +struct in_ifaddr *lan_ifa = NULL;
20284 +
20285 +
20286 +int mtk_smb_nf_local_in_hook(struct sk_buff *skb)
20287 +{
20288 +       struct iphdr *iph = ip_hdr(skb);
20289 +
20290 +       if (skb->protocol == htons(ETH_P_IP)) {
20291 +               struct iphdr *iph = ip_hdr(skb);
20292 +                       
20293 +               if (iph->protocol == IPPROTO_TCP) {
20294 +                       struct tcphdr *th = tcp_hdr(skb);
20295 +                       unsigned short sport, dport;
20296 +
20297 +                       th = tcp_hdr(skb);
20298 +                       th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20299 +
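+                       /* TCP ports 445, 137, 138, 139 (SMB / NetBIOS), constants kept in network byte order */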
20300 +                       if ((iph->daddr == lan_ifa->ifa_local) 
20301 +                               && ((th->dest == 0xbd01) || (th->dest == 0x8900) 
20302 +                               || (th->dest == 0x8a00) || (th->dest == 0x8b00)))
20303 +                               return 1;
20304 +                       else
20305 +                               return 0;
20306 +               }
20307 +
20308 +       }
20309 +       
20310 +       return 0;
20311 +}
20312 +
20313 +int mtk_smb_nf_pre_routing_hook(struct sk_buff *skb)
20314 +{
20315 +       struct iphdr *iph = ip_hdr(skb);
20316 +
20317 +       if (skb->protocol == htons(ETH_P_IP)) {
20318 +               struct iphdr *iph = ip_hdr(skb);
20319 +                       
20320 +               if (iph->protocol == IPPROTO_TCP) {
20321 +                       struct tcphdr *th = tcp_hdr(skb);
20322 +                       unsigned short sport, dport;
20323 +
20324 +                       th = tcp_hdr(skb);
20325 +                       th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20326 +                       if ((iph->daddr == lan_ifa->ifa_local) 
20327 +                               && ((th->dest == 0xbd01) || (th->dest == 0x8900) 
20328 +                               || (th->dest == 0x8a00) || (th->dest == 0x8b00)))
20329 +                               return 1;
20330 +                       else
20331 +                               return 0;
20332 +               }
20333 +
20334 +       }       
20335 +
20336 +       return 0;
20337 +}
20338 +
20339 +int mtk_smb_nf_local_out_hook(struct sk_buff *skb)
20340 +{
20341 +       struct iphdr *iph = ip_hdr(skb);
20342 +
20343 +       if (iph->protocol == IPPROTO_TCP) {
20344 +               struct tcphdr *th = tcp_hdr(skb);
20345 +
20346 +               th = tcp_hdr(skb);
20347 +               th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20348 +
20349 +               if ((iph->saddr == lan_ifa->ifa_local)
20350 +                       && ((th->source == 0xbd01) || (th->source == 0x8900) 
20351 +                       || (th->source == 0x8a00) || (th->source == 0x8b00)))
20352 +                       return 1;
20353 +               else
20354 +                       return 0;
20355 +       }
20356 +
20357 +       return 0;
20358 +}
20359 +
20360 +int mtk_smb_nf_post_routing_hook(struct sk_buff *skb)
20361 +{
20362 +       struct iphdr *iph = ip_hdr(skb);
20363 +
20364 +       if (skb->protocol == htons(ETH_P_IP)) {
20365 +               struct iphdr *iph = ip_hdr(skb);
20366 +                       
20367 +               if (iph->protocol == IPPROTO_TCP) {
20368 +                       struct tcphdr *th = tcp_hdr(skb);
20369 +
20370 +                       th = tcp_hdr(skb);
20371 +                       th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
20372 +
20373 +                       if ((iph->saddr == lan_ifa->ifa_local)
20374 +                               && ((th->source == 0xbd01) || (th->source == 0x8900) 
20375 +                               || (th->source == 0x8a00) || (th->source == 0x8b00)))
20376 +                               return 1;
20377 +                       else
20378 +                               return 0;
20379 +               }
20380 +
20381 +       }       
20382 +
20383 +       return 0;
20384 +}
20385 +
20386 +int __init mtk_smb_hook_init(void)
20387 +{
20388 +       struct in_device *in_dev;
20389 +       struct in_ifaddr **ifap = NULL;
20390 +       struct in_ifaddr *ifa = NULL;
20391 +
20392 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
20393 +       lan_int = dev_get_by_name(&init_net, "br0");
20394 +#else
20395 +       lan_int = dev_get_by_name("br0");
20396 +#endif
20397 +       if (lan_int)
20398 +               in_dev = __in_dev_get_rtnl(lan_int);
20399 +       else
20400 +               return 0;
20401 +
20402 +       if (in_dev) {
20403 +               for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
20404 +                    ifap = &ifa->ifa_next) {
20405 +                       if (!strcmp("br0", ifa->ifa_label))
20406 +                       {
20407 +                               lan_ifa = ifa;
20408 +                               break; /* found */
20409 +                       }
20410 +               }
20411 +       }
20412 +       else
20413 +               return 0;
20414 +
20415 +       if (lan_ifa) {
20416 +               smb_nf_local_in_hook = mtk_smb_nf_local_in_hook;
20417 +               smb_nf_pre_routing_hook = mtk_smb_nf_pre_routing_hook;
20418 +               smb_nf_local_out_hook = mtk_smb_nf_local_out_hook;
20419 +               smb_nf_post_routing_hook = mtk_smb_nf_post_routing_hook;
20420 +       }
20421 +
20422 +       printk("Samba Netfilter Hook Enabled\n");
20423 +
20424 +       return 0;
20425 +}
20426 +
20427 +void mtk_smb_hook_cleanup(void)
20428 +{
20429 +       lan_int = NULL;
20430 +       lan_ifa = NULL;
20431 +       smb_nf_local_in_hook = NULL;
20432 +       smb_nf_pre_routing_hook = NULL;
20433 +       smb_nf_local_out_hook = NULL;
20434 +       smb_nf_post_routing_hook = NULL;
20435 +
20436 +       return;
20437 +}
20438 +
20439 +module_init(mtk_smb_hook_init);
20440 +module_exit(mtk_smb_hook_cleanup);
20441 +
20442 +MODULE_LICENSE("GPL");
20443 --- /dev/null
20444 +++ b/drivers/net/ethernet/raeth/sync_write.h
20445 @@ -0,0 +1,103 @@
20446 +#ifndef _MT_SYNC_WRITE_H
20447 +#define _MT_SYNC_WRITE_H
20448 +
20449 +#if defined(__KERNEL__)
20450 +
20451 +#include <linux/io.h>
20452 +#include <asm/cacheflush.h>
20453 +//#include <asm/system.h>
20454 +
20455 +/*
20456 + * Define macros.
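+ * Each register write is followed by a dsb() so the access completes before execution continues.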
20457 + */
20458 +
20459 +#define mt65xx_reg_sync_writel(v, a) \
20460 +        do {    \
20461 +            __raw_writel((v), IOMEM((a)));   \
20462 +            dsb();  \
20463 +        } while (0)
20464 +
20465 +#define mt65xx_reg_sync_writew(v, a) \
20466 +        do {    \
20467 +            __raw_writew((v), IOMEM((a)));   \
20468 +            dsb();  \
20469 +        } while (0)
20470 +
20471 +#define mt65xx_reg_sync_writeb(v, a) \
20472 +        do {    \
20473 +            __raw_writeb((v), IOMEM((a)));   \
20474 +            dsb();  \
20475 +        } while (0)
20476 +
20477 +#define mt_reg_sync_writel(v, a) \
20478 +        do {    \
20479 +            __raw_writel((v), IOMEM((a)));   \
20480 +            dsb();  \
20481 +        } while (0)
20482 +
20483 +#define mt_reg_sync_writew(v, a) \
20484 +        do {    \
20485 +            __raw_writew((v), IOMEM((a)));   \
20486 +            dsb();  \
20487 +        } while (0)
20488 +
20489 +#define mt_reg_sync_writeb(v, a) \
20490 +        do {    \
20491 +            __raw_writeb((v), IOMEM((a)));   \
20492 +            dsb();  \
20493 +        } while (0)
20494 +
20495 +
20496 +#else   /* __KERNEL__ */
20497 +
20498 +#include <sys/types.h>
20499 +#include <sys/stat.h>
20500 +#include <fcntl.h>
20501 +#include <unistd.h>
20502 +#include <string.h>
20503 +
20504 +#define dsb()   \
20505 +        do {    \
20506 +            __asm__ __volatile__ ("dsb" : : : "memory"); \
20507 +        } while (0)
20508 +
20509 +#define mt65xx_reg_sync_writel(v, a) \
20510 +        do {    \
20511 +            *(volatile unsigned int *)(a) = (v);    \
20512 +            dsb(); \
20513 +        } while (0)
20514 +
20515 +#define mt65xx_reg_sync_writew(v, a) \
20516 +        do {    \
20517 +            *(volatile unsigned short *)(a) = (v);    \
20518 +            dsb(); \
20519 +        } while (0)
20520 +
20521 +#define mt65xx_reg_sync_writeb(v, a) \
20522 +        do {    \
20523 +            *(volatile unsigned char *)(a) = (v);    \
20524 +            dsb(); \
20525 +        } while (0)
20526 +
20527 +#define mt_reg_sync_writel(v, a) \
20528 +        do {    \
20529 +            *(volatile unsigned int *)(a) = (v);    \
20530 +            dsb(); \
20531 +        } while (0)
20532 +
20533 +#define mt_reg_sync_writew(v, a) \
20534 +        do {    \
20535 +            *(volatile unsigned short *)(a) = (v);    \
20536 +            dsb(); \
20537 +        } while (0)
20538 +
20539 +#define mt_reg_sync_writeb(v, a) \
20540 +        do {    \
20541 +            *(volatile unsigned char *)(a) = (v);    \
20542 +            dsb(); \
20543 +        } while (0)
20544 +
20545 +
20546 +#endif  /* __KERNEL__ */
20547 +
20548 +#endif  /* !_MT_SYNC_WRITE_H */