[openwrt.git] / target / linux / lantiq / patches-3.2 / 0043-NET-adds-driver-for-lantiq-vr9-ethernet.patch
1 From 7591c5702cfe842f415e42f387532fe71ea3640f Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Fri, 9 Mar 2012 19:03:40 +0100
4 Subject: [PATCH 43/70] NET: adds driver for lantiq vr9 ethernet
5
6 ---
7  .../mips/include/asm/mach-lantiq/xway/lantiq_soc.h |    2 +-
8  arch/mips/lantiq/xway/devices.c                    |   20 +
9  arch/mips/lantiq/xway/devices.h                    |    1 +
10  drivers/net/ethernet/Kconfig                       |    6 +
11  drivers/net/ethernet/Makefile                      |    1 +
12  drivers/net/ethernet/lantiq_vrx200.c               | 1358 ++++++++++++++++++++
13  6 files changed, 1387 insertions(+), 1 deletions(-)
14  create mode 100644 drivers/net/ethernet/lantiq_vrx200.c
15
16 --- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
17 +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
18 @@ -102,7 +102,7 @@
19  
20  /* GBIT - gigabit switch */
21  #define LTQ_GBIT_BASE_ADDR     0x1E108000
22 -#define LTQ_GBIT_SIZE          0x200
23 +#define LTQ_GBIT_SIZE          0x4000
24  
25  /* DMA */
26  #define LTQ_DMA_BASE_ADDR      0x1E104100
27 --- a/arch/mips/lantiq/xway/devices.c
28 +++ b/arch/mips/lantiq/xway/devices.c
29 @@ -83,6 +83,7 @@ static struct platform_device ltq_etop =
30         .name           = "ltq_etop",
31         .resource       = ltq_etop_resources,
32         .num_resources  = 1,
33 +       .id             = -1,
34  };
35  
36  void __init
37 @@ -96,3 +97,22 @@ ltq_register_etop(struct ltq_eth_data *e
38                 platform_device_register(&ltq_etop);
39         }
40  }
41 +
42 +/* ethernet */
43 +static struct resource ltq_vrx200_resources[] = {
44 +       MEM_RES("gbit", LTQ_GBIT_BASE_ADDR, LTQ_GBIT_SIZE),
45 +};
46 +
47 +static struct platform_device ltq_vrx200 = {
48 +       .name           = "ltq_vrx200",
49 +       .resource       = ltq_vrx200_resources,
50 +       .num_resources  = 1,
51 +       .id = -1,
52 +};
53 +
54 +void __init
55 +ltq_register_vrx200(struct ltq_eth_data *eth)
56 +{
57 +       ltq_vrx200.dev.platform_data = eth;
58 +       platform_device_register(&ltq_vrx200);
59 +}
60 --- a/arch/mips/lantiq/xway/devices.h
61 +++ b/arch/mips/lantiq/xway/devices.h
62 @@ -17,5 +17,6 @@ extern void ltq_register_gpio_stp(void);
63  extern void ltq_register_ase_asc(void);
64  extern void ltq_register_etop(struct ltq_eth_data *eth);
65  extern void xway_register_nand(struct mtd_partition *parts, int count);
66 +extern void ltq_register_vrx200(struct ltq_eth_data *eth);
67  
68  #endif
69 --- a/drivers/net/ethernet/Kconfig
70 +++ b/drivers/net/ethernet/Kconfig
71 @@ -84,6 +84,12 @@ config LANTIQ_ETOP
72         ---help---
73           Support for the MII0 inside the Lantiq SoC
74  
75 +config LANTIQ_VRX200
76 +       tristate "Lantiq SoC vrx200 driver"
77 +       depends on SOC_TYPE_XWAY
78 +       ---help---
79 +         Support for the ethernet MAC inside the Lantiq VRX200 (VR9) SoC
80 +
81  source "drivers/net/ethernet/marvell/Kconfig"
82  source "drivers/net/ethernet/mellanox/Kconfig"
83  source "drivers/net/ethernet/micrel/Kconfig"
84 --- a/drivers/net/ethernet/Makefile
85 +++ b/drivers/net/ethernet/Makefile
86 @@ -35,6 +35,7 @@ obj-$(CONFIG_IP1000) += icplus/
87  obj-$(CONFIG_JME) += jme.o
88  obj-$(CONFIG_KORINA) += korina.o
89  obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
90 +obj-$(CONFIG_LANTIQ_VRX200) += lantiq_vrx200.o
91  obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
92  obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
93  obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
94 --- /dev/null
95 +++ b/drivers/net/ethernet/lantiq_vrx200.c
96 @@ -0,0 +1,1358 @@
97 +/*
98 + *   This program is free software; you can redistribute it and/or modify it
99 + *   under the terms of the GNU General Public License version 2 as published
100 + *   by the Free Software Foundation.
101 + *
102 + *   This program is distributed in the hope that it will be useful,
103 + *   but WITHOUT ANY WARRANTY; without even the implied warranty of
104 + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
105 + *   GNU General Public License for more details.
106 + *
107 + *   You should have received a copy of the GNU General Public License
108 + *   along with this program; if not, write to the Free Software
109 + *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
110 + *
111 + *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
112 + */
113 +
114 +#include <linux/kernel.h>
115 +#include <linux/slab.h>
116 +#include <linux/errno.h>
117 +#include <linux/types.h>
118 +#include <linux/interrupt.h>
119 +#include <linux/uaccess.h>
120 +#include <linux/in.h>
121 +#include <linux/netdevice.h>
122 +#include <linux/etherdevice.h>
123 +#include <linux/phy.h>
124 +#include <linux/ip.h>
125 +#include <linux/tcp.h>
126 +#include <linux/skbuff.h>
127 +#include <linux/mm.h>
128 +#include <linux/platform_device.h>
129 +#include <linux/ethtool.h>
130 +#include <linux/init.h>
131 +#include <linux/delay.h>
132 +#include <linux/io.h>
133 +#include <linux/dma-mapping.h>
134 +#include <linux/module.h>
135 +#include <linux/clk.h>
136 +
137 +#include <asm/checksum.h>
138 +
139 +#include <lantiq_soc.h>
140 +#include <xway_dma.h>
141 +#include <lantiq_platform.h>
142 +
143 +#define LTQ_SWITCH_BASE                 0x1E108000
144 +#define LTQ_SWITCH_CORE_BASE            LTQ_SWITCH_BASE
145 +#define LTQ_SWITCH_TOP_PDI_BASE         LTQ_SWITCH_CORE_BASE
146 +#define LTQ_SWITCH_BM_PDI_BASE          (LTQ_SWITCH_CORE_BASE + 4 * 0x40)
147 +#define LTQ_SWITCH_MAC_PDI_0_BASE       (LTQ_SWITCH_CORE_BASE + 4 * 0x900)
148 +#define LTQ_SWITCH_MAC_PDI_X_BASE(x)    (LTQ_SWITCH_MAC_PDI_0_BASE + x * 0x30)
149 +#define LTQ_SWITCH_TOPLEVEL_BASE        (LTQ_SWITCH_BASE + 4 * 0xC40)
150 +#define LTQ_SWITCH_MDIO_PDI_BASE        (LTQ_SWITCH_TOPLEVEL_BASE)
151 +#define LTQ_SWITCH_MII_PDI_BASE         (LTQ_SWITCH_TOPLEVEL_BASE + 4 * 0x36)
152 +#define LTQ_SWITCH_PMAC_PDI_BASE        (LTQ_SWITCH_TOPLEVEL_BASE + 4 * 0x82)
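+/* offsets such as 4 * 0x40 and 4 * 0x900 appear to be 32-bit word indices from
+   the switch documentation; the factor of 4 converts them to byte offsets */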
153 +
154 +#define LTQ_ETHSW_MAC_CTRL0_PADEN               (1 << 8)
155 +#define LTQ_ETHSW_MAC_CTRL0_FCS                 (1 << 7)
156 +#define LTQ_ETHSW_MAC_CTRL1_SHORTPRE            (1 << 8)
157 +#define LTQ_ETHSW_MAC_CTRL2_MLEN                (1 << 3)
158 +#define LTQ_ETHSW_MAC_CTRL2_LCHKL               (1 << 2)
159 +#define LTQ_ETHSW_MAC_CTRL2_LCHKS_DIS           0
160 +#define LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG         1
161 +#define LTQ_ETHSW_MAC_CTRL2_LCHKS_TAG           2
162 +#define LTQ_ETHSW_MAC_CTRL6_RBUF_DLY_WP_SHIFT   9
163 +#define LTQ_ETHSW_MAC_CTRL6_RXBUF_BYPASS        (1 << 6)
164 +#define LTQ_ETHSW_GLOB_CTRL_SE                  (1 << 15)
165 +#define LTQ_ETHSW_MDC_CFG1_MCEN                 (1 << 8)
166 +#define LTQ_ETHSW_PMAC_HD_CTL_FC                (1 << 10)
167 +#define LTQ_ETHSW_PMAC_HD_CTL_RC                (1 << 4)
168 +#define LTQ_ETHSW_PMAC_HD_CTL_AC                (1 << 2)
169 +#define ADVERTIZE_MPD          (1 << 10)
170 +
171 +#define MDIO_DEVAD_NONE                    (-1)
172 +
173 +#define LTQ_ETH_RX_BUFFER_CNT           PKTBUFSRX
174 +
175 +#define LTQ_MDIO_DRV_NAME               "ltq-mdio"
176 +#define LTQ_ETH_DRV_NAME                "ltq-eth"
177 +
178 +#define LTQ_ETHSW_MAX_GMAC              1
179 +#define LTQ_ETHSW_PMAC                  1
180 +
181 +#define ltq_setbits(a, set) \
182 +        ltq_w32(ltq_r32(a) | (set), a)
183 +
184 +enum ltq_reset_modules {
185 +       LTQ_RESET_CORE,
186 +       LTQ_RESET_DMA,
187 +       LTQ_RESET_ETH,
188 +       LTQ_RESET_PHY,
189 +       LTQ_RESET_HARD,
190 +       LTQ_RESET_SOFT,
191 +};
192 +
193 +static inline void
194 +dbg_ltq_writel(void *a, unsigned int b)
195 +{
196 +       ltq_w32(b, a);
197 +}
198 +
199 +int ltq_reset_once(enum ltq_reset_modules module, ulong usec);
200 +
201 +struct ltq_ethsw_mac_pdi_x_regs {
202 +       u32     pstat;          /* Port status */
203 +       u32     pisr;           /* Interrupt status */
204 +       u32     pier;           /* Interrupt enable */
205 +       u32     ctrl_0;         /* Control 0 */
206 +       u32     ctrl_1;         /* Control 1 */
207 +       u32     ctrl_2;         /* Control 2 */
208 +       u32     ctrl_3;         /* Control 3 */
209 +       u32     ctrl_4;         /* Control 4 */
210 +       u32     ctrl_5;         /* Control 5 */
211 +       u32     ctrl_6;         /* Control 6 */
212 +       u32     bufst;          /* TX/RX buffer control */
213 +       u32     testen;         /* Test enable */
214 +};
215 +
216 +struct ltq_ethsw_mac_pdi_regs {
217 +       struct ltq_ethsw_mac_pdi_x_regs mac[12];
218 +};
219 +
220 +struct ltq_ethsw_mdio_pdi_regs {
221 +       u32     glob_ctrl;      /* Global control 0 */
222 +       u32     rsvd0[7];
223 +       u32     mdio_ctrl;      /* MDIO control */
224 +       u32     mdio_read;      /* MDIO read data */
225 +       u32     mdio_write;     /* MDIO write data */
226 +       u32     mdc_cfg_0;      /* MDC clock configuration 0 */
227 +       u32     mdc_cfg_1;      /* MDC clock configuration 1 */
228 +       u32     rsvd[3];
229 +       u32     phy_addr_5;     /* PHY address port 5 */
230 +       u32     phy_addr_4;     /* PHY address port 4 */
231 +       u32     phy_addr_3;     /* PHY address port 3 */
232 +       u32     phy_addr_2;     /* PHY address port 2 */
233 +       u32     phy_addr_1;     /* PHY address port 1 */
234 +       u32     phy_addr_0;     /* PHY address port 0 */
235 +       u32     mdio_stat_0;    /* MDIO PHY polling status port 0 */
236 +       u32     mdio_stat_1;    /* MDIO PHY polling status port 1 */
237 +       u32     mdio_stat_2;    /* MDIO PHY polling status port 2 */
238 +       u32     mdio_stat_3;    /* MDIO PHY polling status port 3 */
239 +       u32     mdio_stat_4;    /* MDIO PHY polling status port 4 */
240 +       u32     mdio_stat_5;    /* MDIO PHY polling status port 5 */
241 +};
242 +
243 +struct ltq_ethsw_mii_pdi_regs {
244 +       u32     mii_cfg0;       /* xMII port 0 configuration */
245 +       u32     pcdu0;          /* Port 0 clock delay configuration */
246 +       u32     mii_cfg1;       /* xMII port 1 configuration */
247 +       u32     pcdu1;          /* Port 1 clock delay configuration */
248 +       u32     mii_cfg2;       /* xMII port 2 configuration */
249 +       u32     rsvd0;
250 +       u32     mii_cfg3;       /* xMII port 3 configuration */
251 +       u32     rsvd1;
252 +       u32     mii_cfg4;       /* xMII port 4 configuration */
253 +       u32     rsvd2;
254 +       u32     mii_cfg5;       /* xMII port 5 configuration */
255 +       u32     pcdu5;          /* Port 5 clock delay configuration */
256 +};
257 +
258 +struct ltq_ethsw_pmac_pdi_regs {
259 +       u32     hd_ctl;         /* PMAC header control */
260 +       u32     tl;             /* PMAC type/length */
261 +       u32     sa1;            /* PMAC source address 1 */
262 +       u32     sa2;            /* PMAC source address 2 */
263 +       u32     sa3;            /* PMAC source address 3 */
264 +       u32     da1;            /* PMAC destination address 1 */
265 +       u32     da2;            /* PMAC destination address 2 */
266 +       u32     da3;            /* PMAC destination address 3 */
267 +       u32     vlan;           /* PMAC VLAN */
268 +       u32     rx_ipg;         /* PMAC interpacket gap in RX direction */
269 +       u32     st_etype;       /* PMAC special tag ethertype */
270 +       u32     ewan;           /* PMAC ethernet WAN group */
271 +};
272 +
273 +struct ltq_mdio_phy_addr_reg {
274 +       union {
275 +               struct {
276 +                       unsigned rsvd:1;
277 +                       unsigned lnkst:2;       /* Link status control */
278 +                       unsigned speed:2;       /* Speed control */
279 +                       unsigned fdup:2;        /* Full duplex control */
280 +                       unsigned fcontx:2;      /* Flow control mode TX */
281 +                       unsigned fconrx:2;      /* Flow control mode RX */
282 +                       unsigned addr:5;        /* PHY address */
283 +               } bits;
284 +               u16 val;
285 +       };
286 +};
287 +
288 +enum ltq_mdio_phy_addr_lnkst {
289 +       LTQ_MDIO_PHY_ADDR_LNKST_AUTO = 0,
290 +       LTQ_MDIO_PHY_ADDR_LNKST_UP = 1,
291 +       LTQ_MDIO_PHY_ADDR_LNKST_DOWN = 2,
292 +};
293 +
294 +enum ltq_mdio_phy_addr_speed {
295 +       LTQ_MDIO_PHY_ADDR_SPEED_M10 = 0,
296 +       LTQ_MDIO_PHY_ADDR_SPEED_M100 = 1,
297 +       LTQ_MDIO_PHY_ADDR_SPEED_G1 = 2,
298 +       LTQ_MDIO_PHY_ADDR_SPEED_AUTO = 3,
299 +};
300 +
301 +enum ltq_mdio_phy_addr_fdup {
302 +       LTQ_MDIO_PHY_ADDR_FDUP_AUTO = 0,
303 +       LTQ_MDIO_PHY_ADDR_FDUP_ENABLE = 1,
304 +       LTQ_MDIO_PHY_ADDR_FDUP_DISABLE = 3,
305 +};
306 +
307 +enum ltq_mdio_phy_addr_fcon {
308 +       LTQ_MDIO_PHY_ADDR_FCON_AUTO = 0,
309 +       LTQ_MDIO_PHY_ADDR_FCON_ENABLE = 1,
310 +       LTQ_MDIO_PHY_ADDR_FCON_DISABLE = 3,
311 +};
312 +
313 +struct ltq_mii_mii_cfg_reg {
314 +       union {
315 +               struct {
316 +                       unsigned res:1;         /* Hardware reset */
317 +                       unsigned en:1;          /* xMII interface enable */
318 +                       unsigned isol:1;        /* xMII interface isolate */
319 +                       unsigned ldclkdis:1;    /* Link down clock disable */
320 +                       unsigned rsvd:1;
321 +                       unsigned crs:2;         /* CRS sensitivity config */
322 +                       unsigned rgmii_ibs:1;   /* RGMII In Band status */
323 +                       unsigned rmii:1;        /* RMII ref clock direction */
324 +                       unsigned miirate:3;     /* xMII interface clock rate */
325 +                       unsigned miimode:4;     /* xMII interface mode */
326 +               } bits;
327 +               u16 val;
328 +       };
329 +};
330 +
331 +enum ltq_mii_mii_cfg_miirate {
332 +       LTQ_MII_MII_CFG_MIIRATE_M2P5 = 0,
333 +       LTQ_MII_MII_CFG_MIIRATE_M25 = 1,
334 +       LTQ_MII_MII_CFG_MIIRATE_M125 = 2,
335 +       LTQ_MII_MII_CFG_MIIRATE_M50 = 3,
336 +       LTQ_MII_MII_CFG_MIIRATE_AUTO = 4,
337 +};
338 +
339 +enum ltq_mii_mii_cfg_miimode {
340 +       LTQ_MII_MII_CFG_MIIMODE_MIIP = 0,
341 +       LTQ_MII_MII_CFG_MIIMODE_MIIM = 1,
342 +       LTQ_MII_MII_CFG_MIIMODE_RMIIP = 2,
343 +       LTQ_MII_MII_CFG_MIIMODE_RMIIM = 3,
344 +       LTQ_MII_MII_CFG_MIIMODE_RGMII = 4,
345 +};
346 +
347 +struct ltq_eth_priv {
348 +       struct ltq_dma_device *dma_dev;
349 +       struct mii_dev *bus;
350 +       struct eth_device *dev;
351 +       struct phy_device *phymap[LTQ_ETHSW_MAX_GMAC];
352 +       int rx_num;
353 +};
354 +
355 +enum ltq_mdio_mbusy {
356 +       LTQ_MDIO_MBUSY_IDLE = 0,
357 +       LTQ_MDIO_MBUSY_BUSY = 1,
358 +};
359 +
360 +enum ltq_mdio_op {
361 +       LTQ_MDIO_OP_WRITE = 1,
362 +       LTQ_MDIO_OP_READ = 2,
363 +};
364 +
365 +struct ltq_mdio_access {
366 +       union {
367 +               struct {
368 +                       unsigned rsvd:3;
369 +                       unsigned mbusy:1;
370 +                       unsigned op:2;
371 +                       unsigned phyad:5;
372 +                       unsigned regad:5;
373 +               } bits;
374 +               u16 val;
375 +       };
376 +};
377 +
378 +enum LTQ_ETH_PORT_FLAGS {
379 +       LTQ_ETH_PORT_NONE       = 0,
380 +       LTQ_ETH_PORT_PHY        = 1,
381 +       LTQ_ETH_PORT_SWITCH     = (1 << 1),
382 +       LTQ_ETH_PORT_MAC        = (1 << 2),
383 +};
384 +
385 +struct ltq_eth_port_config {
386 +       u8 num;
387 +       u8 phy_addr;
388 +       u16 flags;
389 +       phy_interface_t phy_if;
390 +};
391 +
392 +struct ltq_eth_board_config {
393 +       const struct ltq_eth_port_config *ports;
394 +       int num_ports;
395 +};
396 +
397 +static const struct ltq_eth_port_config eth_port_config[] = {
398 +       /* GMAC0: external Lantiq PEF7071 10/100/1000 PHY for LAN port 0 */
399 +       { 0, 0x0, LTQ_ETH_PORT_PHY, PHY_INTERFACE_MODE_RGMII },
400 +       /* GMAC1: external Lantiq PEF7071 10/100/1000 PHY for LAN port 1 */
401 +       { 1, 0x1, LTQ_ETH_PORT_PHY, PHY_INTERFACE_MODE_RGMII },
402 +};
403 +
404 +static const struct ltq_eth_board_config board_config = {
405 +       .ports = eth_port_config,
406 +       .num_ports = ARRAY_SIZE(eth_port_config),
407 +};
408 +
409 +static struct ltq_ethsw_mac_pdi_regs *ltq_ethsw_mac_pdi_regs =
410 +       (struct ltq_ethsw_mac_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MAC_PDI_0_BASE);
411 +
412 +static struct ltq_ethsw_mdio_pdi_regs *ltq_ethsw_mdio_pdi_regs =
413 +       (struct ltq_ethsw_mdio_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MDIO_PDI_BASE);
414 +
415 +static struct ltq_ethsw_mii_pdi_regs *ltq_ethsw_mii_pdi_regs =
416 +       (struct ltq_ethsw_mii_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MII_PDI_BASE);
417 +
418 +static struct ltq_ethsw_pmac_pdi_regs *ltq_ethsw_pmac_pdi_regs =
419 +       (struct ltq_ethsw_pmac_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_PMAC_PDI_BASE);
420 +
421 +
422 +#define MAX_DMA_CHAN           0x8
423 +#define MAX_DMA_CRC_LEN                0x4
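+/* 0x600 = 1536 bytes: room for a full Ethernet frame including VLAN tag and FCS */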
424 +#define MAX_DMA_DATA_LEN       0x600
425 +
426 +/* use 2 static channels for TX/RX;
427 +   depending on the SoC we need to use different DMA channels for ethernet */
428 +#define LTQ_ETOP_TX_CHANNEL    1
429 +#define LTQ_ETOP_RX_CHANNEL    0
430 +
431 +#define IS_TX(x)               (x == LTQ_ETOP_TX_CHANNEL)
432 +#define IS_RX(x)               (x == LTQ_ETOP_RX_CHANNEL)
433 +
434 +#define DRV_VERSION    "1.0"
435 +
436 +static void __iomem *ltq_vrx200_membase;
437 +
438 +struct ltq_vrx200_chan {
439 +       int idx;
440 +       int tx_free;
441 +       struct net_device *netdev;
442 +       struct napi_struct napi;
443 +       struct ltq_dma_channel dma;
444 +       struct sk_buff *skb[LTQ_DESC_NUM];
445 +};
446 +
447 +struct ltq_vrx200_priv {
448 +       struct net_device *netdev;
449 +       struct ltq_eth_data *pldata;
450 +       struct resource *res;
451 +
452 +       struct mii_bus *mii_bus;
453 +       struct phy_device *phydev;
454 +
455 +       struct ltq_vrx200_chan ch[MAX_DMA_CHAN];
456 +       int tx_free[MAX_DMA_CHAN >> 1];
457 +
458 +       spinlock_t lock;
459 +
460 +       struct clk *clk_ppe;
461 +};
462 +
463 +static int ltq_vrx200_mdio_wr(struct mii_bus *bus, int phy_addr,
464 +                               int phy_reg, u16 phy_data);
465 +
466 +static int
467 +ltq_vrx200_alloc_skb(struct ltq_vrx200_chan *ch)
468 +{
469 +       ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
470 +       if (!ch->skb[ch->dma.desc])
471 +               return -ENOMEM;
472 +       ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
473 +               ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
474 +               DMA_FROM_DEVICE);
475 +       ch->dma.desc_base[ch->dma.desc].addr =
476 +               CPHYSADDR(ch->skb[ch->dma.desc]->data);
477 +       ch->dma.desc_base[ch->dma.desc].ctl =
478 +               LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
479 +               MAX_DMA_DATA_LEN;
480 +       skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
481 +       return 0;
482 +}
483 +
484 +static void
485 +ltq_vrx200_hw_receive(struct ltq_vrx200_chan *ch)
486 +{
487 +       struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
488 +       struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
489 +       struct sk_buff *skb = ch->skb[ch->dma.desc];
490 +       int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
491 +       unsigned long flags;
492 +
493 +       spin_lock_irqsave(&priv->lock, flags);
494 +       if (ltq_vrx200_alloc_skb(ch)) {
495 +               netdev_err(ch->netdev,
496 +                       "failed to allocate new rx buffer, stopping DMA\n");
497 +               ltq_dma_close(&ch->dma);
498 +       }
499 +       ch->dma.desc++;
500 +       ch->dma.desc %= LTQ_DESC_NUM;
501 +       spin_unlock_irqrestore(&priv->lock, flags);
502 +
503 +       skb_put(skb, len);
504 +       skb->dev = ch->netdev;
505 +       skb->protocol = eth_type_trans(skb, ch->netdev);
506 +       netif_receive_skb(skb);
507 +}
508 +
509 +static int
510 +ltq_vrx200_poll_rx(struct napi_struct *napi, int budget)
511 +{
512 +       struct ltq_vrx200_chan *ch = container_of(napi,
513 +                               struct ltq_vrx200_chan, napi);
514 +       struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
515 +       int rx = 0;
516 +       int complete = 0;
517 +       unsigned long flags;
518 +
519 +       while ((rx < budget) && !complete) {
520 +               struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
521 +
522 +               if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
523 +                       ltq_vrx200_hw_receive(ch);
524 +                       rx++;
525 +               } else {
526 +                       complete = 1;
527 +               }
528 +       }
529 +       if (complete || !rx) {
530 +               napi_complete(&ch->napi);
531 +               spin_lock_irqsave(&priv->lock, flags);
532 +               ltq_dma_ack_irq(&ch->dma);
533 +               spin_unlock_irqrestore(&priv->lock, flags);
534 +       }
535 +       return rx;
536 +}
537 +
538 +static int
539 +ltq_vrx200_poll_tx(struct napi_struct *napi, int budget)
540 +{
541 +       struct ltq_vrx200_chan *ch =
542 +               container_of(napi, struct ltq_vrx200_chan, napi);
543 +       struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
544 +       struct netdev_queue *txq =
545 +               netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
546 +       unsigned long flags;
547 +
548 +       spin_lock_irqsave(&priv->lock, flags);
549 +       while ((ch->dma.desc_base[ch->tx_free].ctl &
550 +                       (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
551 +               dev_kfree_skb_any(ch->skb[ch->tx_free]);
552 +               ch->skb[ch->tx_free] = NULL;
553 +               memset(&ch->dma.desc_base[ch->tx_free], 0,
554 +                       sizeof(struct ltq_dma_desc));
555 +               ch->tx_free++;
556 +               ch->tx_free %= LTQ_DESC_NUM;
557 +       }
558 +       spin_unlock_irqrestore(&priv->lock, flags);
559 +
560 +       if (netif_tx_queue_stopped(txq))
561 +               netif_tx_start_queue(txq);
562 +       napi_complete(&ch->napi);
563 +       spin_lock_irqsave(&priv->lock, flags);
564 +       ltq_dma_ack_irq(&ch->dma);
565 +       spin_unlock_irqrestore(&priv->lock, flags);
566 +       return 1;
567 +}
568 +
569 +static irqreturn_t
570 +ltq_vrx200_dma_irq(int irq, void *_priv)
571 +{
572 +       struct ltq_vrx200_priv *priv = _priv;
573 +       int ch = irq - LTQ_DMA_ETOP;
574 +
575 +       napi_schedule(&priv->ch[ch].napi);
576 +       return IRQ_HANDLED;
577 +}
578 +
579 +static void
580 +ltq_vrx200_free_channel(struct net_device *dev, struct ltq_vrx200_chan *ch)
581 +{
582 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
583 +
584 +       ltq_dma_free(&ch->dma);
585 +       if (ch->dma.irq)
586 +               free_irq(ch->dma.irq, priv);
587 +       if (IS_RX(ch->idx)) {
588 +               int desc;
589 +               for (desc = 0; desc < LTQ_DESC_NUM; desc++)
590 +                       dev_kfree_skb_any(ch->skb[desc]);
591 +       }
592 +}
593 +
594 +static void
595 +ltq_vrx200_hw_exit(struct net_device *dev)
596 +{
597 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
598 +       int i;
599 +
600 +       clk_disable(priv->clk_ppe);
601 +
602 +       for (i = 0; i < MAX_DMA_CHAN; i++)
603 +               if (IS_TX(i) || IS_RX(i))
604 +                       ltq_vrx200_free_channel(dev, &priv->ch[i]);
605 +}
606 +
607 +static void *ltq_eth_phy_addr_reg(int num)
608 +{
609 +       switch (num) {
610 +       case 0:
611 +               return &ltq_ethsw_mdio_pdi_regs->phy_addr_0;
612 +       case 1:
613 +               return &ltq_ethsw_mdio_pdi_regs->phy_addr_1;
614 +       case 2:
615 +               return &ltq_ethsw_mdio_pdi_regs->phy_addr_2;
616 +       case 3:
617 +               return &ltq_ethsw_mdio_pdi_regs->phy_addr_3;
618 +       case 4:
619 +               return &ltq_ethsw_mdio_pdi_regs->phy_addr_4;
620 +       case 5:
621 +               return &ltq_ethsw_mdio_pdi_regs->phy_addr_5;
622 +       }
623 +
624 +       return NULL;
625 +}
626 +
627 +static void *ltq_eth_mii_cfg_reg(int num)
628 +{
629 +       switch (num) {
630 +       case 0:
631 +               return &ltq_ethsw_mii_pdi_regs->mii_cfg0;
632 +       case 1:
633 +               return &ltq_ethsw_mii_pdi_regs->mii_cfg1;
634 +       case 2:
635 +               return &ltq_ethsw_mii_pdi_regs->mii_cfg2;
636 +       case 3:
637 +               return &ltq_ethsw_mii_pdi_regs->mii_cfg3;
638 +       case 4:
639 +               return &ltq_ethsw_mii_pdi_regs->mii_cfg4;
640 +       case 5:
641 +               return &ltq_ethsw_mii_pdi_regs->mii_cfg5;
642 +       }
643 +
644 +       return NULL;
645 +}
646 +
647 +static void ltq_eth_gmac_update(struct phy_device *phydev, int num)
648 +{
649 +       struct ltq_mdio_phy_addr_reg phy_addr_reg;
650 +       struct ltq_mii_mii_cfg_reg mii_cfg_reg;
651 +       void *phy_addr = ltq_eth_phy_addr_reg(num);
652 +       void *mii_cfg = ltq_eth_mii_cfg_reg(num);
653 +
654 +       phy_addr_reg.val = ltq_r32(phy_addr);
655 +       mii_cfg_reg.val = ltq_r32(mii_cfg);
656 +
657 +       phy_addr_reg.bits.addr = phydev->addr;
658 +
659 +       if (phydev->link)
660 +               phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_UP;
661 +       else
662 +               phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_DOWN;
663 +
664 +       switch (phydev->speed) {
665 +       case SPEED_1000:
666 +               phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_G1;
667 +               mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M125;
668 +               break;
669 +       case SPEED_100:
670 +               phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M100;
671 +               switch (mii_cfg_reg.bits.miimode) {
672 +               case LTQ_MII_MII_CFG_MIIMODE_RMIIM:
673 +               case LTQ_MII_MII_CFG_MIIMODE_RMIIP:
674 +                       mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M50;
675 +                       break;
676 +               default:
677 +                       mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M25;
678 +                       break;
679 +               }
680 +               break;
681 +       default:
682 +               phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M10;
683 +               mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M2P5;
684 +               break;
685 +       }
686 +
687 +       if (phydev->duplex == DUPLEX_FULL)
688 +               phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_ENABLE;
689 +       else
690 +               phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_DISABLE;
691 +
692 +       dbg_ltq_writel(phy_addr, phy_addr_reg.val);
693 +       dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
694 +       udelay(1);
695 +}
696 +
697 +
698 +static void ltq_eth_port_config(struct ltq_vrx200_priv *priv,
699 +       const struct ltq_eth_port_config *port)
700 +{
701 +       struct ltq_mii_mii_cfg_reg mii_cfg_reg;
702 +       void *mii_cfg = ltq_eth_mii_cfg_reg(port->num);
703 +       int setup_gpio = 0;
704 +
705 +       mii_cfg_reg.val = ltq_r32(mii_cfg);
706 +
707 +
708 +       switch (port->num) {
709 +       case 0: /* xMII0 */
710 +       case 1: /* xMII1 */
711 +               switch (port->phy_if) {
712 +               case PHY_INTERFACE_MODE_MII:
713 +                       if (port->flags & LTQ_ETH_PORT_PHY)
714 +                               /* MII MAC mode, connected to external PHY */
715 +                               mii_cfg_reg.bits.miimode =
716 +                                       LTQ_MII_MII_CFG_MIIMODE_MIIM;
717 +                       else
718 +                               /* MII PHY mode, connected to external MAC */
719 +                               mii_cfg_reg.bits.miimode =
720 +                                       LTQ_MII_MII_CFG_MIIMODE_MIIP;
721 +                       setup_gpio = 1;
722 +                       break;
723 +               case PHY_INTERFACE_MODE_RMII:
724 +                       if (port->flags & LTQ_ETH_PORT_PHY)
725 +                               /* RMII MAC mode, connected to external PHY */
726 +                               mii_cfg_reg.bits.miimode =
727 +                                       LTQ_MII_MII_CFG_MIIMODE_RMIIM;
728 +                       else
729 +                               /* RMII PHY mode, connected to external MAC */
730 +                               mii_cfg_reg.bits.miimode =
731 +                                       LTQ_MII_MII_CFG_MIIMODE_RMIIP;
732 +                       setup_gpio = 1;
733 +                       break;
734 +               case PHY_INTERFACE_MODE_RGMII:
735 +                       /* RGMII MAC mode, connected to external PHY */
736 +                       mii_cfg_reg.bits.miimode =
737 +                               LTQ_MII_MII_CFG_MIIMODE_RGMII;
738 +                       setup_gpio = 1;
739 +                       break;
740 +               default:
741 +                       break;
742 +               }
743 +               break;
744 +       case 2: /* internal GPHY0 */
745 +       case 3: /* internal GPHY0 */
746 +       case 4: /* internal GPHY1 */
747 +               switch (port->phy_if) {
748 +                       case PHY_INTERFACE_MODE_MII:
749 +                       case PHY_INTERFACE_MODE_GMII:
750 +                               /* MII MAC mode, connected to internal GPHY */
751 +                               mii_cfg_reg.bits.miimode =
752 +                                       LTQ_MII_MII_CFG_MIIMODE_MIIM;
753 +                               setup_gpio = 1;
754 +                               break;
755 +                       default:
756 +                               break;
757 +               }
758 +               break;
759 +       case 5: /* internal GPHY1 or xMII2 */
760 +               switch (port->phy_if) {
761 +               case PHY_INTERFACE_MODE_MII:
762 +                       /* MII MAC mode, connected to internal GPHY */
763 +                       mii_cfg_reg.bits.miimode =
764 +                               LTQ_MII_MII_CFG_MIIMODE_MIIM;
765 +                       setup_gpio = 1;
766 +                       break;
767 +               case PHY_INTERFACE_MODE_RGMII:
768 +                       /* RGMII MAC mode, connected to external PHY */
769 +                       mii_cfg_reg.bits.miimode =
770 +                               LTQ_MII_MII_CFG_MIIMODE_RGMII;
771 +                       setup_gpio = 1;
772 +                       break;
773 +               default:
774 +                       break;
775 +               }
776 +               break;
777 +       default:
778 +               break;
779 +       }
780 +
781 +       /* Enable MII interface */
782 +       mii_cfg_reg.bits.en = port->flags ? 1 : 0;
783 +       dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
784 +
785 +}
786 +
787 +static void ltq_eth_gmac_init(int num)
788 +{
789 +       struct ltq_mdio_phy_addr_reg phy_addr_reg;
790 +       struct ltq_mii_mii_cfg_reg mii_cfg_reg;
791 +       void *phy_addr = ltq_eth_phy_addr_reg(num);
792 +       void *mii_cfg = ltq_eth_mii_cfg_reg(num);
793 +       struct ltq_ethsw_mac_pdi_x_regs *mac_pdi_regs;
794 +
795 +       mac_pdi_regs = &ltq_ethsw_mac_pdi_regs->mac[num];
796 +
797 +       /* Reset PHY status to link down */
798 +       phy_addr_reg.val = ltq_r32(phy_addr);
799 +       phy_addr_reg.bits.addr = num;
800 +       phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_DOWN;
801 +       phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M10;
802 +       phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_DISABLE;
803 +       dbg_ltq_writel(phy_addr, phy_addr_reg.val);
804 +
805 +       /* Reset and disable MII interface */
806 +       mii_cfg_reg.val = ltq_r32(mii_cfg);
807 +       mii_cfg_reg.bits.en = 0;
808 +       mii_cfg_reg.bits.res = 1;
809 +       mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M2P5;
810 +       dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
811 +
812 +       /*
813 +       * Enable padding of short frames, enable frame checksum generation
814 +       * in transmit direction
815 +       */
816 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_0, LTQ_ETHSW_MAC_CTRL0_PADEN |
817 +               LTQ_ETHSW_MAC_CTRL0_FCS);
818 +
819 +       /* Set inter packet gap size to 12 bytes */
820 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_1, 12);
821 +
822 +       /*
823 +       * Configure frame length checks:
824 +       * - allow jumbo frames
825 +       * - enable long length check
826 +       * - enable short length without VLAN tags
827 +       */
828 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_2, LTQ_ETHSW_MAC_CTRL2_MLEN |
829 +               LTQ_ETHSW_MAC_CTRL2_LCHKL |
830 +               LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG);
831 +}
832 +
833 +
834 +static void ltq_eth_pmac_init(void)
835 +{
836 +       struct ltq_ethsw_mac_pdi_x_regs *mac_pdi_regs;
837 +
838 +       mac_pdi_regs = &ltq_ethsw_mac_pdi_regs->mac[LTQ_ETHSW_PMAC];
839 +
840 +       /*
841 +       * Enable padding of short frames, enable frame checksum generation
842 +       * in transmit direction
843 +       */
844 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_0, LTQ_ETHSW_MAC_CTRL0_PADEN |
845 +               LTQ_ETHSW_MAC_CTRL0_FCS);
846 +
847 +       /*
848 +       * Configure frame length checks:
849 +       * - allow jumbo frames
850 +       * - enable long length check
851 +       * - enable short length without VLAN tags
852 +       */
853 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_2, LTQ_ETHSW_MAC_CTRL2_MLEN |
854 +               LTQ_ETHSW_MAC_CTRL2_LCHKL |
855 +               LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG);
856 +
857 +       /*
858 +       * Apply workaround for buffer congestion:
859 +       * - shorten preamble to 1 byte
860 +       * - set minimum inter packet gap size to 7 bytes
861 +       * - enable receive buffer bypass mode
862 +       */
863 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_1, LTQ_ETHSW_MAC_CTRL1_SHORTPRE | 7);
864 +       dbg_ltq_writel(&mac_pdi_regs->ctrl_6,
865 +               (6 << LTQ_ETHSW_MAC_CTRL6_RBUF_DLY_WP_SHIFT) |
866 +               LTQ_ETHSW_MAC_CTRL6_RXBUF_BYPASS);
867 +
868 +       /* Set request assertion threshold to 8, IPG counter to 11 */
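+       /* 0x8B = (8 << 4) | 11, assuming the threshold sits in bits 7:4 and the IPG counter in bits 3:0 */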
869 +       dbg_ltq_writel(&ltq_ethsw_pmac_pdi_regs->rx_ipg, 0x8B);
870 +
871 +       /*
872 +       * Configure frame header control:
873 +       * - enable reaction on pause frames (flow control)
874 +       * - remove CRC for packets from PMAC to DMA
875 +       * - add CRC for packets from DMA to PMAC
876 +       */
877 +       dbg_ltq_writel(&ltq_ethsw_pmac_pdi_regs->hd_ctl, LTQ_ETHSW_PMAC_HD_CTL_FC |
878 +               /*LTQ_ETHSW_PMAC_HD_CTL_RC | */LTQ_ETHSW_PMAC_HD_CTL_AC);
879 +}
880 +
881 +static int
882 +ltq_vrx200_hw_init(struct net_device *dev)
883 +{
884 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
885 +       int err = 0;
886 +       int i;
887 +
888 +       netdev_info(dev, "setting up dma\n");
889 +       ltq_dma_init_port(DMA_PORT_ETOP);
890 +
891 +       netdev_info(dev, "setting up pmu\n");
892 +       clk_enable(priv->clk_ppe);
893 +
894 +       /* Reset ethernet and switch subsystems */
895 +       netdev_info(dev, "reset core\n");
896 +       ltq_reset_once(BIT(8), 10);
897 +
898 +       /* Enable switch macro */
899 +       ltq_setbits(&ltq_ethsw_mdio_pdi_regs->glob_ctrl,
900 +               LTQ_ETHSW_GLOB_CTRL_SE);
901 +
902 +       /* Disable MDIO auto-polling for all ports */
903 +       dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdc_cfg_0, 0);
904 +
905 +       /*
906 +        * Enable and set MDIO management clock to 2.5 MHz. This is the
907 +        * maximum clock for FE PHYs.
908 +        * Formula for clock is:
909 +        *
910 +        *      50 MHz
911 +        * x = ----------- - 1
912 +        *      2 * f_MDC
913 +        */
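+       /* for f_MDC = 2.5 MHz: x = 50 MHz / (2 * 2.5 MHz) - 1 = 9, the value written below */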
914 +       dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdc_cfg_1,
915 +               LTQ_ETHSW_MDC_CFG1_MCEN | 9);
916 +
917 +       /* Init MAC connected to CPU  */
918 +       ltq_eth_pmac_init();
919 +
920 +       /* Init MACs connected to external MII interfaces */
921 +       for (i = 0; i < LTQ_ETHSW_MAX_GMAC; i++)
922 +               ltq_eth_gmac_init(i);
923 +
924 +       for (i = 0; i < MAX_DMA_CHAN && !err; i++) {
925 +               int irq = LTQ_DMA_ETOP + i;
926 +               struct ltq_vrx200_chan *ch = &priv->ch[i];
927 +
928 +               ch->idx = ch->dma.nr = i;
929 +
930 +               if (IS_TX(i)) {
931 +                       ltq_dma_alloc_tx(&ch->dma);
932 +                       err = request_irq(irq, ltq_vrx200_dma_irq, IRQF_DISABLED,
933 +                               "vrx200_tx", priv);
934 +               } else if (IS_RX(i)) {
935 +                       ltq_dma_alloc_rx(&ch->dma);
936 +                       for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
937 +                                       ch->dma.desc++)
938 +                               if (ltq_vrx200_alloc_skb(ch))
939 +                                       err = -ENOMEM;
940 +                       ch->dma.desc = 0;
941 +                       err = request_irq(irq, ltq_vrx200_dma_irq, IRQF_DISABLED,
942 +                               "vrx200_rx", priv);
943 +               }
944 +               if (!err)
945 +                       ch->dma.irq = irq;
946 +       }
947 +       for (i = 0; i < board_config.num_ports; i++)
948 +               ltq_eth_port_config(priv, &board_config.ports[i]);
949 +       return err;
950 +}
951 +
952 +static void
953 +ltq_vrx200_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
954 +{
955 +       strcpy(info->driver, "ltq_vrx200");
956 +       strcpy(info->bus_info, "internal");
957 +       strcpy(info->version, DRV_VERSION);
958 +}
959 +
960 +static int
961 +ltq_vrx200_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
962 +{
963 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
964 +
965 +       return phy_ethtool_gset(priv->phydev, cmd);
966 +}
967 +
968 +static int
969 +ltq_vrx200_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
970 +{
971 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
972 +
973 +       return phy_ethtool_sset(priv->phydev, cmd);
974 +}
975 +
976 +static int
977 +ltq_vrx200_nway_reset(struct net_device *dev)
978 +{
979 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
980 +
981 +       return phy_start_aneg(priv->phydev);
982 +}
983 +
984 +static const struct ethtool_ops ltq_vrx200_ethtool_ops = {
985 +       .get_drvinfo = ltq_vrx200_get_drvinfo,
986 +       .get_settings = ltq_vrx200_get_settings,
987 +       .set_settings = ltq_vrx200_set_settings,
988 +       .nway_reset = ltq_vrx200_nway_reset,
989 +};
990 +
991 +static inline int ltq_mdio_poll(struct mii_bus *bus)
992 +{
993 +       struct ltq_mdio_access acc;
994 +       unsigned cnt = 10000;
995 +
996 +       while (likely(cnt--)) {
997 +               acc.val = ltq_r32(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl);
998 +               if (!acc.bits.mbusy)
999 +                       return 0;
1000 +       }
1001 +
1002 +       return 1;
1003 +}
1004 +
1005 +static int
1006 +ltq_vrx200_mdio_wr(struct mii_bus *bus, int addr, int regnum, u16 val)
1007 +{
1008 +       struct ltq_mdio_access acc;
1009 +       int ret;
1010 +
1011 +       acc.val = 0;
1012 +       acc.bits.mbusy = LTQ_MDIO_MBUSY_BUSY;
1013 +       acc.bits.op = LTQ_MDIO_OP_WRITE;
1014 +       acc.bits.phyad = addr;
1015 +       acc.bits.regad = regnum;
1016 +
1017 +       ret = ltq_mdio_poll(bus);
1018 +       if (ret)
1019 +               return ret;
1020 +
1021 +       dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_write, val);
1022 +       dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl, acc.val);
1023 +
1024 +       return 0;
1025 +}
1026 +
1027 +static int
1028 +ltq_vrx200_mdio_rd(struct mii_bus *bus, int addr, int regnum)
1029 +{
1030 +       struct ltq_mdio_access acc;
1031 +       int ret;
1032 +
1033 +       acc.val = 0;
1034 +       acc.bits.mbusy = LTQ_MDIO_MBUSY_BUSY;
1035 +       acc.bits.op = LTQ_MDIO_OP_READ;
1036 +       acc.bits.phyad = addr;
1037 +       acc.bits.regad = regnum;
1038 +
1039 +       ret = ltq_mdio_poll(bus);
1040 +       if (ret)
1041 +               goto timeout;
1042 +
1043 +       dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl, acc.val);
1044 +
1045 +       ret = ltq_mdio_poll(bus);
1046 +       if (ret)
1047 +               goto timeout;
1048 +
1049 +       ret = ltq_r32(&ltq_ethsw_mdio_pdi_regs->mdio_read);
1050 +
1051 +       return ret;
1052 +timeout:
1053 +       return -1;
1054 +}
1055 +
1056 +static void
1057 +ltq_vrx200_mdio_link(struct net_device *dev)
1058 +{
1059 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1060 +       ltq_eth_gmac_update(priv->phydev, 0);
1061 +}
1062 +
1063 +static int
1064 +ltq_vrx200_mdio_probe(struct net_device *dev)
1065 +{
1066 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1067 +       struct phy_device *phydev = NULL;
1068 +       int val;
1069 +
1070 +       phydev = priv->mii_bus->phy_map[0];
1071 +
1072 +       if (!phydev) {
1073 +               netdev_err(dev, "no PHY found\n");
1074 +               return -ENODEV;
1075 +       }
1076 +
1077 +       phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_vrx200_mdio_link,
1078 +                       0, 0);
1079 +
1080 +       if (IS_ERR(phydev)) {
1081 +               netdev_err(dev, "Could not attach to PHY\n");
1082 +               return PTR_ERR(phydev);
1083 +       }
1084 +
1085 +       phydev->supported &= (SUPPORTED_10baseT_Half
1086 +                             | SUPPORTED_10baseT_Full
1087 +                             | SUPPORTED_100baseT_Half
1088 +                             | SUPPORTED_100baseT_Full
1089 +                             | SUPPORTED_1000baseT_Half
1090 +                             | SUPPORTED_1000baseT_Full
1091 +                             | SUPPORTED_Autoneg
1092 +                             | SUPPORTED_MII
1093 +                             | SUPPORTED_TP);
1094 +       phydev->advertising = phydev->supported;
1095 +       priv->phydev = phydev;
1096 +
1097 +       pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
1098 +              dev->name, phydev->drv->name,
1099 +              dev_name(&phydev->dev), phydev->irq);
1100 +
1101 +       val = ltq_vrx200_mdio_rd(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
1102 +       val |= ADVERTIZE_MPD;
1103 +       ltq_vrx200_mdio_wr(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
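+       /* 0x1040 = BMCR_ANENABLE | BMCR_SPEED1000 */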
1104 +       ltq_vrx200_mdio_wr(priv->mii_bus, 0, 0, 0x1040);
1105 +
1106 +        phy_start_aneg(phydev);
1107 +
1108 +       return 0;
1109 +}
1110 +
1111 +static int
1112 +ltq_vrx200_mdio_init(struct net_device *dev)
1113 +{
1114 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1115 +       int i;
1116 +       int err;
1117 +
1118 +       priv->mii_bus = mdiobus_alloc();
1119 +       if (!priv->mii_bus) {
1120 +               netdev_err(dev, "failed to allocate mii bus\n");
1121 +               err = -ENOMEM;
1122 +               goto err_out;
1123 +       }
1124 +
1125 +       priv->mii_bus->priv = dev;
1126 +       priv->mii_bus->read = ltq_vrx200_mdio_rd;
1127 +       priv->mii_bus->write = ltq_vrx200_mdio_wr;
1128 +       priv->mii_bus->name = "ltq_mii";
1129 +       snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
1130 +       priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1131 +       if (!priv->mii_bus->irq) {
1132 +               err = -ENOMEM;
1133 +               goto err_out_free_mdiobus;
1134 +       }
1135 +
1136 +       for (i = 0; i < PHY_MAX_ADDR; ++i)
1137 +               priv->mii_bus->irq[i] = PHY_POLL;
1138 +
1139 +       if (mdiobus_register(priv->mii_bus)) {
1140 +               err = -ENXIO;
1141 +               goto err_out_free_mdio_irq;
1142 +       }
1143 +
1144 +       if (ltq_vrx200_mdio_probe(dev)) {
1145 +               err = -ENXIO;
1146 +               goto err_out_unregister_bus;
1147 +       }
1148 +       return 0;
1149 +
1150 +err_out_unregister_bus:
1151 +       mdiobus_unregister(priv->mii_bus);
1152 +err_out_free_mdio_irq:
1153 +       kfree(priv->mii_bus->irq);
1154 +err_out_free_mdiobus:
1155 +       mdiobus_free(priv->mii_bus);
1156 +err_out:
1157 +       return err;
1158 +}
1159 +
1160 +static void
1161 +ltq_vrx200_mdio_cleanup(struct net_device *dev)
1162 +{
1163 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1164 +
1165 +       phy_disconnect(priv->phydev);
1166 +       mdiobus_unregister(priv->mii_bus);
1167 +       kfree(priv->mii_bus->irq);
1168 +       mdiobus_free(priv->mii_bus);
1169 +}
1170 +
1171 +static void phy_dump(struct net_device *dev)
1172 +{
1173 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1174 +       int i;
1175 +       for (i = 0; i <= 0x1F; i++) {
1176 +               unsigned int val = ltq_vrx200_mdio_rd(priv->mii_bus, 0, i);
1177 +               printk(KERN_DEBUG "%d %4X\n", i, val);
1178 +       }
1179 +}
1180 +
1181 +static int
1182 +ltq_vrx200_open(struct net_device *dev)
1183 +{
1184 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1185 +       int i;
1186 +       unsigned long flags;
1187 +
1188 +       for (i = 0; i < MAX_DMA_CHAN; i++) {
1189 +               struct ltq_vrx200_chan *ch = &priv->ch[i];
1190 +
1191 +               if (!IS_TX(i) && (!IS_RX(i)))
1192 +                       continue;
1193 +               napi_enable(&ch->napi);
1194 +               spin_lock_irqsave(&priv->lock, flags);
1195 +               ltq_dma_open(&ch->dma);
1196 +               spin_unlock_irqrestore(&priv->lock, flags);
1197 +       }
1198 +       if (priv->phydev) {
1199 +               phy_start(priv->phydev);
1200 +               phy_dump(dev);
1201 +       }
1202 +       netif_tx_start_all_queues(dev);
1203 +       return 0;
1204 +}
1205 +
1206 +static int
1207 +ltq_vrx200_stop(struct net_device *dev)
1208 +{
1209 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1210 +       int i;
1211 +       unsigned long flags;
1212 +
1213 +       netif_tx_stop_all_queues(dev);
1214 +       if (priv->phydev)
1215 +               phy_stop(priv->phydev);
1216 +       for (i = 0; i < MAX_DMA_CHAN; i++) {
1217 +               struct ltq_vrx200_chan *ch = &priv->ch[i];
1218 +
1219 +               if (!IS_RX(i) && !IS_TX(i))
1220 +                       continue;
1221 +               napi_disable(&ch->napi);
1222 +               spin_lock_irqsave(&priv->lock, flags);
1223 +               ltq_dma_close(&ch->dma);
1224 +               spin_unlock_irqrestore(&priv->lock, flags);
1225 +       }
1226 +       return 0;
1227 +}
1228 +
1229 +static int
1230 +ltq_vrx200_tx(struct sk_buff *skb, struct net_device *dev)
1231 +{
1232 +       int queue = skb_get_queue_mapping(skb);
1233 +       struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
1234 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1235 +       struct ltq_vrx200_chan *ch = &priv->ch[(queue << 1) | 1];
1236 +       struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
1237 +       unsigned long flags;
1238 +       u32 byte_offset;
1239 +       int len;
1240 +
1241 +       len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
1242 +
1243 +       if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
1244 +               netdev_err(dev, "tx ring full\n");
1245 +               netif_tx_stop_queue(txq);
1246 +               return NETDEV_TX_BUSY;
1247 +       }
1248 +
1249 +       /* dma needs to start on a 16 byte aligned address */
1250 +       byte_offset = CPHYSADDR(skb->data) % 16;
1251 +       ch->skb[ch->dma.desc] = skb;
1252 +
1253 +       dev->trans_start = jiffies;
1254 +
1255 +       spin_lock_irqsave(&priv->lock, flags);
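+       /* align the DMA start address down to 16 bytes; the remainder is re-applied via LTQ_DMA_TX_OFFSET below */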
1256 +       desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
1257 +                                               DMA_TO_DEVICE)) - byte_offset;
1258 +       wmb();
1259 +       desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
1260 +               LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
1261 +       ch->dma.desc++;
1262 +       ch->dma.desc %= LTQ_DESC_NUM;
1263 +       spin_unlock_irqrestore(&priv->lock, flags);
1264 +
1265 +       if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
1266 +               netif_tx_stop_queue(txq);
1267 +
1268 +       return NETDEV_TX_OK;
1269 +}
1270 +
1271 +static int
1272 +ltq_vrx200_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1273 +{
1274 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1275 +
1276 +       /* TODO: mii-tool reports "No MII transceiver present!" ?! */
1277 +       return phy_mii_ioctl(priv->phydev, rq, cmd);
1278 +}
1279 +
1280 +static u16
1281 +ltq_vrx200_select_queue(struct net_device *dev, struct sk_buff *skb)
1282 +{
1283 +       /* we are currently only using the first queue */
1284 +       return 0;
1285 +}
1286 +
1287 +static int
1288 +ltq_vrx200_init(struct net_device *dev)
1289 +{
1290 +       struct ltq_vrx200_priv *priv = netdev_priv(dev);
1291 +       struct sockaddr mac;
1292 +       int err;
1293 +
1294 +       ether_setup(dev);
1295 +       dev->watchdog_timeo = 10 * HZ;
1296 +
1297 +       err = ltq_vrx200_hw_init(dev);
1298 +       if (err)
1299 +               goto err_hw;
1300 +
1301 +       memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
1302 +       if (!is_valid_ether_addr(mac.sa_data)) {
1303 +               pr_warn("vrx200: invalid MAC, using random\n");
1304 +               random_ether_addr(mac.sa_data);
1305 +       }
1306 +       eth_mac_addr(dev, &mac);
1307 +
1308 +       if (!ltq_vrx200_mdio_init(dev))
1309 +               dev->ethtool_ops = &ltq_vrx200_ethtool_ops;
1310 +       else
1311 +               pr_warn("vrx200: mdio probe failed\n");
1312 +       return 0;
1313 +
1314 +err_hw:
1315 +       ltq_vrx200_hw_exit(dev);
1316 +       return err;
1317 +}
1318 +
1319 +static void
1320 +ltq_vrx200_tx_timeout(struct net_device *dev)
1321 +{
1322 +       int err;
1323 +
1324 +       ltq_vrx200_hw_exit(dev);
1325 +       err = ltq_vrx200_hw_init(dev);
1326 +       if (err)
1327 +               goto err_hw;
1328 +       dev->trans_start = jiffies;
1329 +       netif_wake_queue(dev);
1330 +       return;
1331 +
1332 +err_hw:
1333 +       ltq_vrx200_hw_exit(dev);
1334 +       netdev_err(dev, "failed to restart vrx200 after TX timeout\n");
1335 +}
1336 +
1337 +static const struct net_device_ops ltq_eth_netdev_ops = {
1338 +       .ndo_open = ltq_vrx200_open,
1339 +       .ndo_stop = ltq_vrx200_stop,
1340 +       .ndo_start_xmit = ltq_vrx200_tx,
1341 +       .ndo_change_mtu = eth_change_mtu,
1342 +       .ndo_do_ioctl = ltq_vrx200_ioctl,
1343 +       .ndo_set_mac_address = eth_mac_addr,
1344 +       .ndo_validate_addr = eth_validate_addr,
1345 +       .ndo_select_queue = ltq_vrx200_select_queue,
1346 +       .ndo_init = ltq_vrx200_init,
1347 +       .ndo_tx_timeout = ltq_vrx200_tx_timeout,
1348 +};
1349 +
1350 +static int __devinit
1351 +ltq_vrx200_probe(struct platform_device *pdev)
1352 +{
1353 +       struct net_device *dev;
1354 +       struct ltq_vrx200_priv *priv;
1355 +       struct resource *res;
1356 +       int err;
1357 +       int i;
1358 +
1359 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1360 +       if (!res) {
1361 +               dev_err(&pdev->dev, "failed to get vrx200 resource\n");
1362 +               err = -ENOENT;
1363 +               goto err_out;
1364 +       }
1365 +
1366 +       res = devm_request_mem_region(&pdev->dev, res->start,
1367 +               resource_size(res), dev_name(&pdev->dev));
1368 +       if (!res) {
1369 +               dev_err(&pdev->dev, "failed to request vrx200 resource\n");
1370 +               err = -EBUSY;
1371 +               goto err_out;
1372 +       }
1373 +
1374 +       ltq_vrx200_membase = devm_ioremap_nocache(&pdev->dev,
1375 +               res->start, resource_size(res));
1376 +       if (!ltq_vrx200_membase) {
1377 +               dev_err(&pdev->dev, "failed to remap vrx200 engine %d\n",
1378 +                       pdev->id);
1379 +               err = -ENOMEM;
1380 +               goto err_out;
1381 +       }
1382 +
1383 +       if (ltq_gpio_request(&pdev->dev, 42, 2, 1, "MDIO") ||
1384 +                       ltq_gpio_request(&pdev->dev, 43, 2, 1, "MDC")) {
1385 +               dev_err(&pdev->dev, "failed to request MDIO gpios\n");
1386 +               err = -EBUSY;
1387 +               goto err_out;
1388 +       }
1389 +
1390 +       dev = alloc_etherdev_mq(sizeof(struct ltq_vrx200_priv), 4);
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_out;
+       }
1391 +       strcpy(dev->name, "eth%d");
1392 +       dev->netdev_ops = &ltq_eth_netdev_ops;
1393 +       priv = netdev_priv(dev);
1394 +       priv->res = res;
1395 +       priv->pldata = dev_get_platdata(&pdev->dev);
1396 +       priv->netdev = dev;
1397 +
1398 +       priv->clk_ppe = clk_get(&pdev->dev, NULL);
1399 +       if (IS_ERR(priv->clk_ppe)) {
+               err = PTR_ERR(priv->clk_ppe);
+               goto err_free;
+       }
1401 +
1402 +       spin_lock_init(&priv->lock);
1403 +
1404 +       for (i = 0; i < MAX_DMA_CHAN; i++) {
1405 +               if (IS_TX(i))
1406 +                       netif_napi_add(dev, &priv->ch[i].napi,
1407 +                               ltq_vrx200_poll_tx, 8);
1408 +               else if (IS_RX(i))
1409 +                       netif_napi_add(dev, &priv->ch[i].napi,
1410 +                               ltq_vrx200_poll_rx, 32);
1411 +               priv->ch[i].netdev = dev;
1412 +       }
1413 +
1414 +       err = register_netdev(dev);
1415 +       if (err)
1416 +               goto err_free;
1417 +
1418 +       platform_set_drvdata(pdev, dev);
1419 +       return 0;
1420 +
1421 +err_free:
1422 +       free_netdev(dev);
1423 +err_out:
1424 +       return err;
1425 +}
1426 +
1427 +static int __devexit
1428 +ltq_vrx200_remove(struct platform_device *pdev)
1429 +{
1430 +       struct net_device *dev = platform_get_drvdata(pdev);
1431 +
1432 +       if (dev) {
1433 +               netif_tx_stop_all_queues(dev);
1434 +               ltq_vrx200_hw_exit(dev);
1435 +               ltq_vrx200_mdio_cleanup(dev);
1436 +               unregister_netdev(dev);
1437 +       }
1438 +       return 0;
1439 +}
1440 +
1441 +static struct platform_driver ltq_mii_driver = {
1442 +       .probe = ltq_vrx200_probe,
1443 +       .remove = __devexit_p(ltq_vrx200_remove),
1444 +       .driver = {
1445 +               .name = "ltq_vrx200",
1446 +               .owner = THIS_MODULE,
1447 +       },
1448 +};
1449 +
1450 +module_platform_driver(ltq_mii_driver);
1451 +
1452 +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1453 +MODULE_DESCRIPTION("Lantiq VRX200 (VR9) SoC ethernet driver");
1454 +MODULE_LICENSE("GPL");