target/linux/brcm63xx/patches-2.6.27/009-add_integrated_ethernet_mac_support.patch
1 From 49aa7ffcd9bd2d9a0af99fced7b8511160dbf345 Mon Sep 17 00:00:00 2001
2 From: Maxime Bizon <mbizon@freebox.fr>
3 Date: Sun, 21 Sep 2008 03:43:26 +0200
4 Subject: [PATCH] [MIPS] BCM63XX: Add integrated ethernet mac support.
5
6 Signed-off-by: Maxime Bizon <mbizon@freebox.fr>
7 ---
8  arch/mips/bcm63xx/Makefile                       |    1 +
9  arch/mips/bcm63xx/dev-enet.c                     |  158 ++
10  drivers/net/Kconfig                              |    9 +
11  drivers/net/Makefile                             |    1 +
12  drivers/net/bcm63xx_enet.c                       | 1894 ++++++++++++++++++++++
13  drivers/net/bcm63xx_enet.h                       |  294 ++++
14  include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h |   45 +
15  7 files changed, 2402 insertions(+), 0 deletions(-)
16  create mode 100644 arch/mips/bcm63xx/dev-enet.c
17  create mode 100644 drivers/net/bcm63xx_enet.c
18  create mode 100644 drivers/net/bcm63xx_enet.h
19  create mode 100644 include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
20
21 diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
22 index 99e335d..5358093 100644
23 --- a/arch/mips/bcm63xx/Makefile
24 +++ b/arch/mips/bcm63xx/Makefile
25 @@ -3,4 +3,5 @@ obj-y           += dev-uart.o
26  obj-y          += dev-pcmcia.o
27  obj-y          += dev-usb-ohci.o
28  obj-y          += dev-usb-ehci.o
29 +obj-y          += dev-enet.o
30  obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
31 diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
32 new file mode 100644
33 index 0000000..c6e472e
34 --- /dev/null
35 +++ b/arch/mips/bcm63xx/dev-enet.c
36 @@ -0,0 +1,158 @@
37 +/*
38 + * This file is subject to the terms and conditions of the GNU General Public
39 + * License.  See the file "COPYING" in the main directory of this archive
40 + * for more details.
41 + *
42 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
43 + */
44 +
45 +#include <linux/init.h>
46 +#include <linux/kernel.h>
47 +#include <linux/platform_device.h>
48 +#include <bcm63xx_dev_enet.h>
49 +#include <bcm63xx_io.h>
50 +#include <bcm63xx_regs.h>
51 +
52 +static struct resource shared_res[] = {
53 +       {
54 +               .start          = -1, /* filled at runtime */
55 +               .end            = -1, /* filled at runtime */
56 +               .flags          = IORESOURCE_MEM,
57 +       },
58 +};
59 +
60 +static struct platform_device bcm63xx_enet_shared_device = {
61 +       .name           = "bcm63xx_enet_shared",
62 +       .id             = 0,
63 +       .num_resources  = ARRAY_SIZE(shared_res),
64 +       .resource       = shared_res,
65 +};
66 +
67 +static int shared_device_registered = 0;
68 +
69 +static struct resource enet0_res[] = {
70 +       {
71 +               .start          = -1, /* filled at runtime */
72 +               .end            = -1, /* filled at runtime */
73 +               .flags          = IORESOURCE_MEM,
74 +       },
75 +       {
76 +               .start          = -1, /* filled at runtime */
77 +               .flags          = IORESOURCE_IRQ,
78 +       },
79 +       {
80 +               .start          = -1, /* filled at runtime */
82 +               .flags          = IORESOURCE_IRQ,
83 +       },
84 +       {
85 +               .start          = -1, /* filled at runtime */
87 +               .flags          = IORESOURCE_IRQ,
88 +       },
89 +};
90 +
91 +static struct bcm63xx_enet_platform_data enet0_pd;
92 +
93 +static struct platform_device bcm63xx_enet0_device = {
94 +       .name           = "bcm63xx_enet",
95 +       .id             = 0,
96 +       .num_resources  = ARRAY_SIZE(enet0_res),
97 +       .resource       = enet0_res,
98 +       .dev            = {
99 +               .platform_data = &enet0_pd,
100 +       },
101 +};
102 +
103 +static struct resource enet1_res[] = {
104 +       {
105 +               .start          = -1, /* filled at runtime */
106 +               .end            = -1, /* filled at runtime */
107 +               .flags          = IORESOURCE_MEM,
108 +       },
109 +       {
110 +               .start          = -1, /* filled at runtime */
111 +               .flags          = IORESOURCE_IRQ,
112 +       },
113 +       {
114 +               .start          = -1, /* filled at runtime */
115 +               .flags          = IORESOURCE_IRQ,
116 +       },
117 +       {
118 +               .start          = -1, /* filled at runtime */
119 +               .flags          = IORESOURCE_IRQ,
120 +       },
121 +};
122 +
123 +static struct bcm63xx_enet_platform_data enet1_pd;
124 +
125 +static struct platform_device bcm63xx_enet1_device = {
126 +       .name           = "bcm63xx_enet",
127 +       .id             = 1,
128 +       .num_resources  = ARRAY_SIZE(enet1_res),
129 +       .resource       = enet1_res,
130 +       .dev            = {
131 +               .platform_data = &enet1_pd,
132 +       },
133 +};
134 +
135 +int __init bcm63xx_enet_register(int unit,
136 +                                const struct bcm63xx_enet_platform_data *pd)
137 +{
138 +       struct platform_device *pdev;
139 +       struct bcm63xx_enet_platform_data *dpd;
140 +       int ret;
141 +
142 +       if (unit > 1)
143 +               return -ENODEV;
144 +
145 +       if (!shared_device_registered) {
146 +               shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
147 +               shared_res[0].end = shared_res[0].start;
148 +               shared_res[0].end += RSET_ENETDMA_SIZE - 1;
149 +
150 +               ret = platform_device_register(&bcm63xx_enet_shared_device);
151 +               if (ret)
152 +                       return ret;
153 +               shared_device_registered = 1;
154 +       }
155 +
156 +       if (unit == 0) {
157 +               enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
158 +               enet0_res[0].end = enet0_res[0].start;
159 +               enet0_res[0].end += RSET_ENET_SIZE - 1;
160 +               enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
161 +               enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
162 +               enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
163 +               pdev = &bcm63xx_enet0_device;
164 +       } else {
165 +               enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
166 +               enet1_res[0].end = enet1_res[0].start;
167 +               enet1_res[0].end += RSET_ENET_SIZE - 1;
168 +               enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
169 +               enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
170 +               enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
171 +               pdev = &bcm63xx_enet1_device;
172 +       }
173 +
174 +       /* copy given platform data */
175 +       dpd = pdev->dev.platform_data;
176 +       memcpy(dpd, pd, sizeof (*pd));
177 +
178 +       /* adjust them in case internal phy is used */
179 +       if (dpd->use_internal_phy) {
180 +
181 +               /* internal phy only exists for enet0 */
182 +               if (unit == 1)
183 +                       return -ENODEV;
184 +
185 +               dpd->phy_id = 1;
186 +               dpd->has_phy_interrupt = 1;
187 +               dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
188 +       }
189 +
190 +       ret = platform_device_register(pdev);
191 +       if (ret)
192 +               return ret;
193 +       return 0;
194 +}
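For context, board-support code is expected to call bcm63xx_enet_register()
once per MAC at init time; the first call also registers the shared DMA
device. A minimal sketch (the init hook and file are illustrative, not part
of this patch; the remaining platform-data fields are declared in
bcm63xx_dev_enet.h):

        /* e.g. from a board setup file such as board_bcm963xx.c */
        static struct bcm63xx_enet_platform_data enet0_pd = {
                .use_internal_phy = 1,  /* only valid for unit 0 */
        };

        static int __init board_enet_init(void)
        {
                return bcm63xx_enet_register(0, &enet0_pd);
        }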
195 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
196 index bbd0e67..01f7e5a 100644
197 --- a/drivers/net/Kconfig
198 +++ b/drivers/net/Kconfig
199 @@ -1840,6 +1840,15 @@ config NE_H8300
200           Say Y here if you want to use the NE2000 compatible
201           controller on the Renesas H8/300 processor.
202  
203 +config BCM63XX_ENET
204 +       tristate "Broadcom 63xx internal mac support"
205 +       depends on BCM63XX
206 +       select MII
207 +       select PHYLIB
208 +       help
209 +         This driver supports the ethernet MACs in the Broadcom 63xx
210 +         MIPS chipset family (BCM63XX).
211 +
212  source "drivers/net/fs_enet/Kconfig"
213  
214  endif # NET_ETHERNET
215 diff --git a/drivers/net/Makefile b/drivers/net/Makefile
216 index 284ed83..ab22f32 100644
217 --- a/drivers/net/Makefile
218 +++ b/drivers/net/Makefile
219 @@ -123,6 +123,7 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
220  obj-$(CONFIG_B44) += b44.o
221  obj-$(CONFIG_FORCEDETH) += forcedeth.o
222  obj-$(CONFIG_NE_H8300) += ne-h8300.o
223 +obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
224  obj-$(CONFIG_AX88796) += ax88796.o
225  
226  obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
227 diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
228 new file mode 100644
229 index 0000000..40c2565
230 --- /dev/null
231 +++ b/drivers/net/bcm63xx_enet.c
232 @@ -0,0 +1,1894 @@
233 +/*
234 + * Driver for BCM963xx builtin Ethernet mac
235 + *
236 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
237 + *
238 + * This program is free software; you can redistribute it and/or modify
239 + * it under the terms of the GNU General Public License as published by
240 + * the Free Software Foundation; either version 2 of the License, or
241 + * (at your option) any later version.
242 + *
243 + * This program is distributed in the hope that it will be useful,
244 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
245 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
246 + * GNU General Public License for more details.
247 + *
248 + * You should have received a copy of the GNU General Public License
249 + * along with this program; if not, write to the Free Software
250 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
251 + */
252 +#include <linux/init.h>
253 +#include <linux/module.h>
254 +#include <linux/clk.h>
255 +#include <linux/etherdevice.h>
256 +#include <linux/delay.h>
257 +#include <linux/ethtool.h>
258 +#include <linux/crc32.h>
259 +#include <linux/err.h>
260 +#include <linux/dma-mapping.h>
261 +#include <linux/platform_device.h>
262 +
263 +#include <bcm63xx_dev_enet.h>
264 +#include "bcm63xx_enet.h"
265 +
266 +static char bcm_enet_driver_name[] = "bcm63xx_enet";
267 +static char bcm_enet_driver_version[] = "1.0";
268 +
269 +static int copybreak __read_mostly = 128;
270 +module_param(copybreak, int, 0);
271 +MODULE_PARM_DESC(copybreak, "Receive copy threshold");
272 +
273 +/* io memory shared between all devices */
274 +static void __iomem *bcm_enet_shared_base;
275 +
276 +/*
277 + * io helpers to access mac registers
278 + */
279 +static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
280 +{
281 +       return bcm_readl(priv->base + off);
282 +}
283 +
284 +static inline void enet_writel(struct bcm_enet_priv *priv,
285 +                              u32 val, u32 off)
286 +{
287 +       bcm_writel(val, priv->base + off);
288 +}
289 +
290 +/*
291 + * io helpers to access shared registers
292 + */
293 +static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
294 +{
295 +       return bcm_readl(bcm_enet_shared_base + off);
296 +}
297 +
298 +static inline void enet_dma_writel(struct bcm_enet_priv *priv,
299 +                                      u32 val, u32 off)
300 +{
301 +       bcm_writel(val, bcm_enet_shared_base + off);
302 +}
303 +
304 +/*
305 + * write given data into mii register and wait for transfer to end
306 + * with timeout (average measured transfer time is 25us)
307 + */
308 +static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
309 +{
310 +       int limit;
311 +
312 +       /* make sure mii interrupt status is cleared */
313 +       enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
314 +
315 +       enet_writel(priv, data, ENET_MIIDATA_REG);
316 +       wmb();
317 +
318 +       /* busy wait on mii interrupt bit, with timeout */
319 +       limit = 1000;
320 +       do {
321 +               if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
322 +                       break;
323 +               udelay(1);
324 +       } while (limit-- >= 0);
325 +
326 +       return (limit < 0) ? 1 : 0;
327 +}
328 +
329 +/*
330 + * MII internal read callback
331 + */
332 +static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
333 +                             int regnum)
334 +{
335 +       u32 tmp, val;
336 +
337 +       tmp = regnum << ENET_MIIDATA_REG_SHIFT;
338 +       tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
339 +       tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
340 +       tmp |= ENET_MIIDATA_OP_READ_MASK;
341 +
342 +       if (do_mdio_op(priv, tmp))
343 +               return -1;
344 +
345 +       val = enet_readl(priv, ENET_MIIDATA_REG);
346 +       val &= 0xffff;
347 +       return val;
348 +}
349 +
350 +/*
351 + * MII internal write callback
352 + */
353 +static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
354 +                              int regnum, u16 value)
355 +{
356 +       u32 tmp;
357 +
358 +       tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
359 +       tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
360 +       tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
361 +       tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
362 +       tmp |= ENET_MIIDATA_OP_WRITE_MASK;
363 +
364 +       (void)do_mdio_op(priv, tmp);
365 +       return 0;
366 +}
367 +
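The word written to ENET_MIIDATA_REG mirrors an IEEE 802.3 clause-22 MDIO
frame: opcode, PHY address, register number, the "10" turnaround pattern
and, for writes, the 16-bit payload. Schematically, for the read path above
(the *_SHIFT/*_MASK values are defined in bcm63xx_enet.h, which is not shown
in this excerpt):

        /* bcm_enet_mdio_read(priv, 1, MII_BMSR) composes roughly: */
        u32 frame = (MII_BMSR << ENET_MIIDATA_REG_SHIFT)  /* reg 0x01 */
                  | (0x2 << ENET_MIIDATA_TA_SHIFT)        /* TA = "10" */
                  | (1 << ENET_MIIDATA_PHYID_SHIFT)       /* phy addr 1 */
                  | ENET_MIIDATA_OP_READ_MASK;            /* read opcode */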
368 +/*
369 + * MII read callback from phylib
370 + */
371 +static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
372 +                                    int regnum)
373 +{
374 +       return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
375 +}
376 +
377 +/*
378 + * MII write callback from phylib
379 + */
380 +static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
381 +                                     int regnum, u16 value)
382 +{
383 +       return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
384 +}
385 +
386 +/*
387 + * MII read callback from mii core
388 + */
389 +static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
390 +                                 int regnum)
391 +{
392 +       return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
393 +}
394 +
395 +/*
396 + * MII write callback from mii core
397 + */
398 +static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
399 +                                   int regnum, int value)
400 +{
401 +       bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
402 +}
403 +
404 +/*
405 + * refill rx queue
406 + */
407 +static int bcm_enet_refill_rx(struct net_device *dev)
408 +{
409 +       struct bcm_enet_priv *priv;
410 +
411 +       priv = netdev_priv(dev);
412 +
413 +       while (priv->rx_desc_count < priv->rx_ring_size) {
414 +               struct bcm_enet_desc *desc;
415 +               struct sk_buff *skb;
416 +               dma_addr_t p;
417 +               int desc_idx;
418 +               u32 len_stat;
419 +
420 +               desc_idx = priv->rx_dirty_desc;
421 +               desc = &priv->rx_desc_cpu[desc_idx];
422 +
423 +               if (!priv->rx_skb[desc_idx]) {
424 +                       skb = netdev_alloc_skb(dev, BCMENET_MAX_RX_SIZE);
425 +                       if (!skb)
426 +                               break;
427 +                       priv->rx_skb[desc_idx] = skb;
428 +
429 +                       p = dma_map_single(&priv->pdev->dev, skb->data,
430 +                                          BCMENET_MAX_RX_SIZE,
431 +                                          DMA_FROM_DEVICE);
432 +                       desc->address = p;
433 +               }
434 +
435 +               len_stat = BCMENET_MAX_RX_SIZE << DMADESC_LENGTH_SHIFT;
436 +               len_stat |= DMADESC_OWNER_MASK;
437 +               if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
438 +                       len_stat |= DMADESC_WRAP_MASK;
439 +                       priv->rx_dirty_desc = 0;
440 +               } else {
441 +                       priv->rx_dirty_desc++;
442 +               }
443 +               wmb();
444 +               desc->len_stat = len_stat;
445 +
446 +               priv->rx_desc_count++;
447 +
448 +               /* tell dma engine we allocated one buffer */
449 +               enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
450 +       }
451 +
452 +       /* If rx ring is still empty, set a timer to try allocating
453 +        * again at a later time. */
454 +       if (priv->rx_desc_count == 0 && netif_running(dev)) {
455 +               dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
456 +               priv->rx_timeout.expires = jiffies + HZ;
457 +               add_timer(&priv->rx_timeout);
458 +       }
459 +
460 +       return 0;
461 +}
462 +
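The refill and receive paths share a simple ring protocol: rx_dirty_desc is
the next slot to (re)arm, rx_curr_desc the next slot to reap, and
rx_desc_count the number of slots currently owned by the hardware:

        /* invariant kept by bcm_enet_refill_rx()/bcm_enet_receive_queue():
         *   rx_desc_count == rx_dirty_desc - rx_curr_desc  (mod rx_ring_size)
         * hardware owns slots [rx_curr_desc, rx_dirty_desc); a count of 0
         * with the interface up means the ring is starved and the retry
         * timer above takes over. */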
463 +/*
464 + * timer callback to defer refill rx queue in case we're OOM
465 + */
466 +static void bcm_enet_refill_rx_timer(unsigned long data)
467 +{
468 +       struct net_device *dev;
469 +       struct bcm_enet_priv *priv;
470 +
471 +       dev = (struct net_device *)data;
472 +       priv = netdev_priv(dev);
473 +
474 +       spin_lock(&priv->rx_lock);
475 +       bcm_enet_refill_rx((struct net_device *)data);
476 +       spin_unlock(&priv->rx_lock);
477 +}
478 +
479 +/*
480 + * extract packet from rx queue
481 + */
482 +static int bcm_enet_receive_queue(struct net_device *dev, int budget)
483 +{
484 +       struct bcm_enet_priv *priv;
485 +       struct device *kdev;
486 +       int processed;
487 +
488 +       priv = netdev_priv(dev);
489 +       kdev = &priv->pdev->dev;
490 +       processed = 0;
491 +
492 +       /* don't scan ring further than number of refilled
493 +        * descriptors */
494 +       if (budget > priv->rx_desc_count)
495 +               budget = priv->rx_desc_count;
496 +
497 +       do {
498 +               struct bcm_enet_desc *desc;
499 +               struct sk_buff *skb;
500 +               int desc_idx;
501 +               u32 len_stat;
502 +               unsigned int len;
503 +
504 +               desc_idx = priv->rx_curr_desc;
505 +               desc = &priv->rx_desc_cpu[desc_idx];
506 +
507 +               /* make sure we actually read the descriptor status at
508 +                * each loop */
509 +               rmb();
510 +
511 +               len_stat = desc->len_stat;
512 +
513 +               /* break if dma ownership belongs to hw */
514 +               if (len_stat & DMADESC_OWNER_MASK)
515 +                       break;
516 +
517 +               processed++;
518 +               priv->rx_curr_desc++;
519 +               if (priv->rx_curr_desc == priv->rx_ring_size)
520 +                       priv->rx_curr_desc = 0;
521 +               priv->rx_desc_count--;
522 +
523 +               /* if the packet does not have start of packet _and_
524 +                * end of packet flag set, then just recycle it */
525 +               if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
526 +                       priv->stats.rx_dropped++;
527 +                       continue;
528 +               }
529 +
530 +               /* recycle packet if it's marked as bad */
531 +               if (unlikely(len_stat & DMADESC_ERR_MASK)) {
532 +                       priv->stats.rx_errors++;
533 +
534 +                       if (len_stat & DMADESC_OVSIZE_MASK)
535 +                               priv->stats.rx_length_errors++;
536 +                       if (len_stat & DMADESC_CRC_MASK)
537 +                               priv->stats.rx_crc_errors++;
538 +                       if (len_stat & DMADESC_UNDER_MASK)
539 +                               priv->stats.rx_frame_errors++;
540 +                       if (len_stat & DMADESC_OV_MASK)
541 +                               priv->stats.rx_fifo_errors++;
542 +                       continue;
543 +               }
544 +
545 +               /* valid packet */
546 +               skb = priv->rx_skb[desc_idx];
547 +               len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
548 +               /* don't include FCS */
549 +               len -= 4;
550 +
551 +               if (len < copybreak) {
552 +                       struct sk_buff *nskb;
553 +
554 +                       nskb = netdev_alloc_skb(dev, len + 2);
555 +                       if (!nskb) {
556 +                               /* forget packet, just rearm desc */
557 +                               priv->stats.rx_dropped++;
558 +                               continue;
559 +                       }
560 +
561 +                       /* since we're copying the data, we can align
562 +                        * them properly */
563 +                       skb_reserve(nskb, NET_IP_ALIGN);
564 +                       dma_sync_single_for_cpu(kdev, desc->address,
565 +                                               len, DMA_FROM_DEVICE);
566 +                       memcpy(nskb->data, skb->data, len);
567 +                       dma_sync_single_for_device(kdev, desc->address,
568 +                                                  len, DMA_FROM_DEVICE);
569 +                       skb = nskb;
570 +               } else {
571 +                       dma_unmap_single(&priv->pdev->dev, desc->address,
572 +                                        BCMENET_MAX_RX_SIZE, DMA_FROM_DEVICE);
573 +                       priv->rx_skb[desc_idx] = NULL;
574 +               }
575 +
576 +               skb_put(skb, len);
577 +               skb->dev = dev;
578 +               skb->protocol = eth_type_trans(skb, dev);
579 +               priv->stats.rx_packets++;
580 +               priv->stats.rx_bytes += len;
581 +               dev->last_rx = jiffies;
582 +               netif_receive_skb(skb);
583 +
584 +       } while (--budget > 0);
585 +
586 +       if (processed || !priv->rx_desc_count) {
587 +               bcm_enet_refill_rx(dev);
588 +
589 +               /* kick rx dma */
590 +               enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
591 +                               ENETDMA_CHANCFG_REG(priv->rx_chan));
592 +       }
593 +
594 +       return processed;
595 +}
596 +
597 +
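The copybreak test above trades a memcpy for buffer recycling: frames
shorter than the threshold are copied into a fresh, IP-aligned skb so the
large mapped rx buffer can stay in the ring, while longer frames are
unmapped and handed up directly. Since copybreak is a module parameter, the
threshold can be tuned at load time, e.g. "insmod bcm63xx_enet.ko
copybreak=256" (0 disables copying altogether).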
598 +/*
599 + * try to or force reclaim of transmitted buffers
600 + */
601 +static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
602 +{
603 +       struct bcm_enet_priv *priv;
604 +       int released;
605 +
606 +       priv = netdev_priv(dev);
607 +       released = 0;
608 +
609 +       while (priv->tx_desc_count < priv->tx_ring_size) {
610 +               struct bcm_enet_desc *desc;
611 +               struct sk_buff *skb;
612 +
613 +               /* We run in a bh and fight against start_xmit, which
614 +                * is called with bh disabled  */
615 +               spin_lock(&priv->tx_lock);
616 +
617 +               desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
618 +
619 +               if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
620 +                       spin_unlock(&priv->tx_lock);
621 +                       break;
622 +               }
623 +
624 +               /* ensure other fields of the descriptor were not read
625 +                * before we checked ownership */
626 +               rmb();
627 +
628 +               skb = priv->tx_skb[priv->tx_dirty_desc];
629 +               priv->tx_skb[priv->tx_dirty_desc] = NULL;
630 +               dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
631 +                                DMA_TO_DEVICE);
632 +
633 +               priv->tx_dirty_desc++;
634 +               if (priv->tx_dirty_desc == priv->tx_ring_size)
635 +                       priv->tx_dirty_desc = 0;
636 +               priv->tx_desc_count++;
637 +
638 +               spin_unlock(&priv->tx_lock);
639 +
640 +               if (desc->len_stat & DMADESC_UNDER_MASK)
641 +                       priv->stats.tx_errors++;
642 +
643 +               dev_kfree_skb(skb);
644 +               released++;
645 +       }
646 +
647 +       if (netif_queue_stopped(dev) && released)
648 +               netif_wake_queue(dev);
649 +
650 +       return released;
651 +}
652 +
653 +/*
654 + * poll func, called by network core
655 + */
656 +static int bcm_enet_poll(struct napi_struct *napi, int budget)
657 +{
658 +       struct bcm_enet_priv *priv;
659 +       struct net_device *dev;
660 +       int tx_work_done, rx_work_done;
661 +
662 +       priv = container_of(napi, struct bcm_enet_priv, napi);
663 +       dev = priv->net_dev;
664 +
665 +       /* ack interrupts */
666 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
667 +                       ENETDMA_IR_REG(priv->rx_chan));
668 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
669 +                       ENETDMA_IR_REG(priv->tx_chan));
670 +
671 +       /* reclaim sent skb */
672 +       tx_work_done = bcm_enet_tx_reclaim(dev, 0);
673 +
674 +       spin_lock(&priv->rx_lock);
675 +       rx_work_done = bcm_enet_receive_queue(dev, budget);
676 +       spin_unlock(&priv->rx_lock);
677 +
678 +       if (rx_work_done >= budget || tx_work_done > 0) {
679 +               /* rx/tx queue is not yet empty/clean */
680 +               return rx_work_done;
681 +       }
682 +
683 +       /* no more packets in rx/tx queue, remove device from poll
684 +        * queue */
685 +       __netif_rx_complete(dev, napi);
686 +
687 +       /* restore rx/tx interrupt */
688 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
689 +                       ENETDMA_IRMASK_REG(priv->rx_chan));
690 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
691 +                       ENETDMA_IRMASK_REG(priv->tx_chan));
692 +
693 +       return rx_work_done;
694 +}
695 +
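Together with bcm_enet_isr_dma() below, this is the standard NAPI handshake:
the hard interrupt handler masks both DMA channel interrupts and schedules
the poll, and the poll re-enables them only once a pass has reclaimed no tx
buffers and reaped fewer rx packets than the budget, so the interrupts stay
off while work is still queued.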
696 +/*
697 + * mac interrupt handler
698 + */
699 +static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
700 +{
701 +       struct net_device *dev;
702 +       struct bcm_enet_priv *priv;
703 +       u32 stat;
704 +
705 +       dev = dev_id;
706 +       priv = netdev_priv(dev);
707 +
708 +       stat = enet_readl(priv, ENET_IR_REG);
709 +       if (!(stat & ENET_IR_MIB))
710 +               return IRQ_NONE;
711 +
712 +       /* clear & mask interrupt */
713 +       enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
714 +       enet_writel(priv, 0, ENET_IRMASK_REG);
715 +
716 +       /* read mib registers in workqueue */
717 +       schedule_work(&priv->mib_update_task);
718 +
719 +       return IRQ_HANDLED;
720 +}
721 +
722 +/*
723 + * rx/tx dma interrupt handler
724 + */
725 +static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
726 +{
727 +       struct net_device *dev;
728 +       struct bcm_enet_priv *priv;
729 +
730 +       dev = dev_id;
731 +       priv = netdev_priv(dev);
732 +
733 +       /* mask rx/tx interrupts */
734 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
735 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
736 +
737 +       netif_rx_schedule(dev, &priv->napi);
738 +
739 +       return IRQ_HANDLED;
740 +}
741 +
742 +/*
743 + * tx request callback
744 + */
745 +static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
746 +{
747 +       struct bcm_enet_priv *priv;
748 +       struct bcm_enet_desc *desc;
749 +       u32 len_stat;
750 +       int ret;
751 +
752 +       priv = netdev_priv(dev);
753 +
754 +       /* lock against tx reclaim */
755 +       spin_lock(&priv->tx_lock);
756 +
757 +       /* make sure the tx hw queue is not full, should not happen
758 +        * since we stop the queue before it fills up */
759 +       if (unlikely(!priv->tx_desc_count)) {
760 +               netif_stop_queue(dev);
761 +               dev_err(&priv->pdev->dev, "xmit called with no tx desc "
762 +                       "available?\n");
763 +               ret = NETDEV_TX_BUSY;
764 +               goto out_unlock;
765 +       }
766 +
767 +       /* point to the next available desc */
768 +       desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
769 +       priv->tx_skb[priv->tx_curr_desc] = skb;
770 +
771 +       /* fill descriptor */
772 +       desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
773 +                                      DMA_TO_DEVICE);
774 +
775 +       len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
776 +       len_stat |= DMADESC_ESOP_MASK |
777 +               DMADESC_APPEND_CRC |
778 +               DMADESC_OWNER_MASK;
779 +
780 +       priv->tx_curr_desc++;
781 +       if (priv->tx_curr_desc == priv->tx_ring_size) {
782 +               priv->tx_curr_desc = 0;
783 +               len_stat |= DMADESC_WRAP_MASK;
784 +       }
785 +       priv->tx_desc_count--;
786 +
787 +       /* dma might be already polling, make sure we update desc
788 +        * fields in correct order */
789 +       wmb();
790 +       desc->len_stat = len_stat;
791 +       wmb();
792 +
793 +       /* kick tx dma */
794 +       enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
795 +                       ENETDMA_CHANCFG_REG(priv->tx_chan));
796 +
797 +       /* stop queue if no more desc available */
798 +       if (!priv->tx_desc_count)
799 +               netif_stop_queue(dev);
800 +
801 +       priv->stats.tx_bytes += skb->len;
802 +       priv->stats.tx_packets++;
803 +       dev->trans_start = jiffies;
804 +       ret = NETDEV_TX_OK;
805 +
806 +out_unlock:
807 +       spin_unlock(&priv->tx_lock);
808 +       return ret;
809 +}
810 +
811 +/*
812 + * Change the interface's mac address.
813 + */
814 +static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
815 +{
816 +       struct bcm_enet_priv *priv;
817 +       struct sockaddr *addr = p;
818 +       u32 val;
819 +
820 +       priv = netdev_priv(dev);
821 +       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
822 +
823 +       /* use perfect match register 0 to store my mac address */
824 +       val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
825 +               (dev->dev_addr[4] << 8) | dev->dev_addr[5];
826 +       enet_writel(priv, val, ENET_PML_REG(0));
827 +
828 +       val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
829 +       val |= ENET_PMH_DATAVALID_MASK;
830 +       enet_writel(priv, val, ENET_PMH_REG(0));
831 +
832 +       return 0;
833 +}
834 +
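The perfect-match filter splits the six address bytes across two registers:
the low four bytes go into ENET_PML_REG(0) and the top two, plus a valid
bit, into ENET_PMH_REG(0). A worked example, following the shifts above for
the (made-up) address 00:10:18:2b:3c:4d:

        /* dev_addr = { 0x00, 0x10, 0x18, 0x2b, 0x3c, 0x4d }
         * enet_writel(priv, 0x182b3c4d, ENET_PML_REG(0));      bytes 2..5
         * enet_writel(priv, 0x0010 | ENET_PMH_DATAVALID_MASK,
         *             ENET_PMH_REG(0));                        bytes 0..1 */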
835 +/*
836 + * Change rx mode (promiscuous/allmulti) and update multicast list
837 + */
838 +static void bcm_enet_set_multicast_list(struct net_device *dev)
839 +{
840 +       struct bcm_enet_priv *priv;
841 +       struct dev_mc_list *mc_list;
842 +       u32 val;
843 +       int i;
844 +
845 +       priv = netdev_priv(dev);
846 +
847 +       val = enet_readl(priv, ENET_RXCFG_REG);
848 +
849 +       if (dev->flags & IFF_PROMISC)
850 +               val |= ENET_RXCFG_PROMISC_MASK;
851 +       else
852 +               val &= ~ENET_RXCFG_PROMISC_MASK;
853 +
854 +       /* only 3 perfect match registers left, first one is used for
855 +        * own mac address */
856 +       if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
857 +               val |= ENET_RXCFG_ALLMCAST_MASK;
858 +       else
859 +               val &= ~ENET_RXCFG_ALLMCAST_MASK;
860 +
861 +       /* no need to set perfect match registers if we catch all
862 +        * multicast */
863 +       if (val & ENET_RXCFG_ALLMCAST_MASK) {
864 +               enet_writel(priv, val, ENET_RXCFG_REG);
865 +               return;
866 +       }
867 +
868 +       for (i = 0, mc_list = dev->mc_list;
869 +            (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
870 +            i++, mc_list = mc_list->next) {
871 +               u8 *dmi_addr;
872 +               u32 tmp;
873 +
874 +               /* filter non-ethernet addresses */
875 +               if (mc_list->dmi_addrlen != 6)
876 +                       continue;
877 +
878 +               /* update perfect match registers */
879 +               dmi_addr = mc_list->dmi_addr;
880 +               tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
881 +                       (dmi_addr[4] << 8) | dmi_addr[5];
882 +               enet_writel(priv, tmp, ENET_PML_REG(i + 1));
883 +
884 +               tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
885 +               tmp |= ENET_PMH_DATAVALID_MASK;
886 +               enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
887 +       }
888 +
889 +       for (; i < 3; i++) {
890 +               enet_writel(priv, 0, ENET_PML_REG(i + 1));
891 +               enet_writel(priv, 0, ENET_PMH_REG(i + 1));
892 +       }
893 +
894 +       enet_writel(priv, val, ENET_RXCFG_REG);
895 +}
896 +
897 +/*
898 + * set mac duplex parameters
899 + */
900 +static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
901 +{
902 +       u32 val;
903 +
904 +       val = enet_readl(priv, ENET_TXCTL_REG);
905 +       if (fullduplex)
906 +               val |= ENET_TXCTL_FD_MASK;
907 +       else
908 +               val &= ~ENET_TXCTL_FD_MASK;
909 +       enet_writel(priv, val, ENET_TXCTL_REG);
910 +}
911 +
912 +/*
913 + * set mac flow control parameters
914 + */
915 +static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
916 +{
917 +       u32 val;
918 +
919 +       /* rx flow control (pause frame handling) */
920 +       val = enet_readl(priv, ENET_RXCFG_REG);
921 +       if (rx_en)
922 +               val |= ENET_RXCFG_ENFLOW_MASK;
923 +       else
924 +               val &= ~ENET_RXCFG_ENFLOW_MASK;
925 +       enet_writel(priv, val, ENET_RXCFG_REG);
926 +
927 +       /* tx flow control (pause frame generation) */
928 +       val = enet_dma_readl(priv, ENETDMA_CFG_REG);
929 +       if (tx_en)
930 +               val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
931 +       else
932 +               val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
933 +       enet_dma_writel(priv, val, ENETDMA_CFG_REG);
934 +}
935 +
936 +/*
937 + * link changed callback (from phylib)
938 + */
939 +static void bcm_enet_adjust_phy_link(struct net_device *dev)
940 +{
941 +       struct bcm_enet_priv *priv;
942 +       struct phy_device *phydev;
943 +       int status_changed;
944 +
945 +       priv = netdev_priv(dev);
946 +       phydev = priv->phydev;
947 +       status_changed = 0;
948 +
949 +       if (priv->old_link != phydev->link) {
950 +               status_changed = 1;
951 +               priv->old_link = phydev->link;
952 +       }
953 +
954 +       /* reflect duplex change in mac configuration */
955 +       if (phydev->link && phydev->duplex != priv->old_duplex) {
956 +               bcm_enet_set_duplex(priv,
957 +                                   (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
958 +               status_changed = 1;
959 +               priv->old_duplex = phydev->duplex;
960 +       }
961 +
962 +       /* enable flow control if remote advertise it (trust phylib to
963 +        * check that duplex is full) */
964 +       if (phydev->link && phydev->pause != priv->old_pause) {
965 +               int rx_pause_en, tx_pause_en;
966 +
967 +               if (phydev->pause) {
968 +                       /* pause was advertised by lpa and us */
969 +                       rx_pause_en = 1;
970 +                       tx_pause_en = 1;
971 +               } else if (!priv->pause_auto) {
972 +                       /* pause setting overridden by user */
973 +                       rx_pause_en = priv->pause_rx;
974 +                       tx_pause_en = priv->pause_tx;
975 +               } else {
976 +                       rx_pause_en = 0;
977 +                       tx_pause_en = 0;
978 +               }
979 +
980 +               bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
981 +               status_changed = 1;
982 +               priv->old_pause = phydev->pause;
983 +       }
984 +
985 +       if (status_changed) {
986 +               pr_info("%s: link %s", dev->name, phydev->link ?
987 +                       "UP" : "DOWN");
988 +               if (phydev->link)
989 +                       printk(" - %d/%s - flow control %s", phydev->speed,
990 +                              DUPLEX_FULL == phydev->duplex ? "full" : "half",
991 +                              phydev->pause == 1 ? "rx&tx" : "off");
992 +
993 +               printk("\n");
994 +       }
995 +}
996 +
997 +/*
998 + * link changed callback (if phylib is not used)
999 + */
1000 +static void bcm_enet_adjust_link(struct net_device *dev)
1001 +{
1002 +       struct bcm_enet_priv *priv;
1003 +
1004 +       priv = netdev_priv(dev);
1005 +       bcm_enet_set_duplex(priv, priv->force_duplex_full);
1006 +       bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
1007 +
1008 +       pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
1009 +               dev->name,
1010 +               priv->force_speed_100 ? 100 : 10,
1011 +               priv->force_duplex_full ? "full" : "half",
1012 +               priv->pause_rx ? "rx" : "off",
1013 +               priv->pause_tx ? "tx" : "off");
1014 +}
1015 +
1016 +/*
1017 + * open callback, allocate dma rings & buffers and start rx operation
1018 + */
1019 +static int bcm_enet_open(struct net_device *dev)
1020 +{
1021 +       struct bcm_enet_priv *priv;
1022 +       struct sockaddr addr;
1023 +       struct device *kdev;
1024 +       struct phy_device *phydev;
1025 +       int irq_requested, i, ret;
1026 +       unsigned int size;
1027 +       char phy_id[BUS_ID_SIZE];
1028 +       void *p;
1029 +       u32 val;
1030 +
1031 +       priv = netdev_priv(dev);
1032 +       priv->rx_desc_cpu = priv->tx_desc_cpu = NULL;
1033 +       priv->rx_skb = priv->tx_skb = NULL;
1034 +
1035 +       kdev = &priv->pdev->dev;
1036 +
1037 +       if (priv->has_phy) {
1038 +               /* connect to PHY */
1039 +               snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
1040 +                        priv->mac_id ? "1" : "0", priv->phy_id);
1041 +
1042 +               phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
1043 +                                    PHY_INTERFACE_MODE_MII);
1044 +
1045 +               if (IS_ERR(phydev)) {
1046 +                       dev_err(kdev, "could not attach to PHY\n");
1047 +                       return PTR_ERR(phydev);
1048 +               }
1049 +
1050 +               /* mask with MAC supported features */
1051 +               phydev->supported &= (SUPPORTED_10baseT_Half |
1052 +                                     SUPPORTED_10baseT_Full |
1053 +                                     SUPPORTED_100baseT_Half |
1054 +                                     SUPPORTED_100baseT_Full |
1055 +                                     SUPPORTED_Autoneg |
1056 +                                     SUPPORTED_Pause |
1057 +                                     SUPPORTED_MII);
1058 +               phydev->advertising = phydev->supported;
1059 +
1060 +               if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
1061 +                       phydev->advertising |= SUPPORTED_Pause;
1062 +               else
1063 +                       phydev->advertising &= ~SUPPORTED_Pause;
1064 +
1065 +               dev_info(kdev, "attached PHY at address %d [%s]\n",
1066 +                        phydev->addr, phydev->drv->name);
1067 +
1068 +               priv->old_link = 0;
1069 +               priv->old_duplex = -1;
1070 +               priv->old_pause = -1;
1071 +               priv->phydev = phydev;
1072 +       }
1073 +
1074 +       /* mask all interrupts and request them */
1075 +       enet_writel(priv, 0, ENET_IRMASK_REG);
1076 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1077 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1078 +
1079 +       irq_requested = 0;
1080 +       ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
1081 +       if (ret)
1082 +               goto out;
1083 +       irq_requested++;
1084 +
1085 +       ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
1086 +                         IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
1087 +       if (ret)
1088 +               goto out;
1089 +       irq_requested++;
1090 +
1091 +       ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
1092 +                         IRQF_DISABLED, dev->name, dev);
1093 +       if (ret)
1094 +               goto out;
1095 +       irq_requested++;
1096 +
1097 +       /* initialize perfect match registers */
1098 +       for (i = 0; i < 4; i++) {
1099 +               enet_writel(priv, 0, ENET_PML_REG(i));
1100 +               enet_writel(priv, 0, ENET_PMH_REG(i));
1101 +       }
1102 +
1103 +       /* write device mac address */
1104 +       memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
1105 +       bcm_enet_set_mac_address(dev, &addr);
1106 +
1107 +       /* allocate rx dma ring */
1108 +       size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
1109 +       p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
1110 +       if (!p) {
1111 +               dev_err(kdev, "cannot allocate rx ring %u\n", size);
1112 +               ret = -ENOMEM;
1113 +               goto out;
1114 +       }
1115 +
1116 +       memset(p, 0, size);
1117 +       priv->rx_desc_alloc_size = size;
1118 +       priv->rx_desc_cpu = p;
1119 +
1120 +       /* allocate tx dma ring */
1121 +       size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
1122 +       p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
1123 +       if (!p) {
1124 +               dev_err(kdev, "cannot allocate tx ring\n");
1125 +               ret = -ENOMEM;
1126 +               goto out;
1127 +       }
1128 +
1129 +       memset(p, 0, size);
1130 +       priv->tx_desc_alloc_size = size;
1131 +       priv->tx_desc_cpu = p;
1132 +
1133 +       priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
1134 +                              GFP_KERNEL);
1135 +       if (!priv->tx_skb) {
1136 +               dev_err(kdev, "cannot allocate tx skb queue\n");
1137 +               ret = -ENOMEM;
1138 +               goto out;
1139 +       }
1140 +
1141 +       priv->tx_desc_count = priv->tx_ring_size;
1142 +       priv->tx_dirty_desc = 0;
1143 +       priv->tx_curr_desc = 0;
1144 +       spin_lock_init(&priv->tx_lock);
1145 +
1146 +       /* init & fill rx ring with skbs */
1147 +       priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
1148 +                              GFP_KERNEL);
1149 +       if (!priv->rx_skb) {
1150 +               dev_err(kdev, "cannot allocate rx skb queue\n");
1151 +               ret = -ENOMEM;
1152 +               goto out;
1153 +       }
1154 +
1155 +       priv->rx_desc_count = 0;
1156 +       priv->rx_dirty_desc = 0;
1157 +       priv->rx_curr_desc = 0;
1158 +
1159 +       /* initialize flow control buffer allocation */
1160 +       enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1161 +                       ENETDMA_BUFALLOC_REG(priv->rx_chan));
1162 +
1163 +       if (bcm_enet_refill_rx(dev)) {
1164 +               dev_err(kdev, "cannot allocate rx skb queue\n");
1165 +               ret = -ENOMEM;
1166 +               goto out;
1167 +       }
1168 +
1169 +       /* write rx & tx ring addresses */
1170 +       enet_dma_writel(priv, priv->rx_desc_dma,
1171 +                       ENETDMA_RSTART_REG(priv->rx_chan));
1172 +       enet_dma_writel(priv, priv->tx_desc_dma,
1173 +                       ENETDMA_RSTART_REG(priv->tx_chan));
1174 +
1175 +       /* clear remaining state ram for rx & tx channel */
1176 +       enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
1177 +       enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
1178 +       enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
1179 +       enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
1180 +       enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
1181 +       enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
1182 +
1183 +       /* set max rx/tx length */
1184 +       enet_writel(priv, BCMENET_MAX_RX_SIZE, ENET_RXMAXLEN_REG);
1185 +       enet_writel(priv, BCMENET_MAX_TX_SIZE, ENET_TXMAXLEN_REG);
1186 +
1187 +       /* set dma maximum burst len */
1188 +       enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
1189 +                       ENETDMA_MAXBURST_REG(priv->rx_chan));
1190 +       enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
1191 +                       ENETDMA_MAXBURST_REG(priv->tx_chan));
1192 +
1193 +       /* set correct transmit fifo watermark */
1194 +       enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1195 +
1196 +       /* set flow control low/high threshold to 1/3 / 2/3 */
1197 +       val = priv->rx_ring_size / 3;
1198 +       enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1199 +       val = (priv->rx_ring_size * 2) / 3;
1200 +       enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1201 +
1202 +       /* all set, enable mac and interrupts, start dma engine and
1203 +        * kick rx dma channel */
1204 +       wmb();
1205 +       enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
1206 +       enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1207 +       enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
1208 +                       ENETDMA_CHANCFG_REG(priv->rx_chan));
1209 +
1210 +       /* watch "mib counters about to overflow" interrupt */
1211 +       enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1212 +       enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1213 +
1214 +       /* watch "packet transferred" interrupt in rx and tx */
1215 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1216 +                       ENETDMA_IR_REG(priv->rx_chan));
1217 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1218 +                       ENETDMA_IR_REG(priv->tx_chan));
1219 +
1220 +       /* make sure we enable napi before rx interrupt  */
1221 +       napi_enable(&priv->napi);
1222 +
1223 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1224 +                       ENETDMA_IRMASK_REG(priv->rx_chan));
1225 +       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1226 +                       ENETDMA_IRMASK_REG(priv->tx_chan));
1227 +
1228 +       if (priv->has_phy)
1229 +               phy_start(priv->phydev);
1230 +       else
1231 +               bcm_enet_adjust_link(dev);
1232 +
1233 +       netif_start_queue(dev);
1234 +       return 0;
1235 +
1236 +out:
1237 +       if (priv->has_phy)
     +               phy_disconnect(priv->phydev);
1238 +       if (irq_requested > 2)
1239 +               free_irq(priv->irq_tx, dev);
1240 +       if (irq_requested > 1)
1241 +               free_irq(priv->irq_rx, dev);
1242 +       if (irq_requested > 0)
1243 +               free_irq(dev->irq, dev);
1244 +       for (i = 0; priv->rx_skb && i < priv->rx_ring_size; i++) {
1245 +               struct bcm_enet_desc *desc;
1246 +
1247 +               if (!priv->rx_skb[i])
1248 +                       continue;
1249 +
1250 +               desc = &priv->rx_desc_cpu[i];
1251 +               dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
1252 +                                DMA_FROM_DEVICE);
1253 +               kfree_skb(priv->rx_skb[i]);
1254 +       }
1255 +       if (priv->rx_desc_cpu)
1256 +               dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1257 +                                 priv->rx_desc_cpu, priv->rx_desc_dma);
1258 +       if (priv->tx_desc_cpu)
1259 +               dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1260 +                                 priv->tx_desc_cpu, priv->tx_desc_dma);
1261 +       kfree(priv->rx_skb);
1262 +       kfree(priv->tx_skb);
1263 +       return ret;
1264 +}
1265 +
1266 +/*
1267 + * disable mac
1268 + */
1269 +static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1270 +{
1271 +       int limit;
1272 +       u32 val;
1273 +
1274 +       val = enet_readl(priv, ENET_CTL_REG);
1275 +       val |= ENET_CTL_DISABLE_MASK;
1276 +       enet_writel(priv, val, ENET_CTL_REG);
1277 +
1278 +       limit = 1000;
1279 +       do {
1280 +               u32 val;
1281 +
1282 +               val = enet_readl(priv, ENET_CTL_REG);
1283 +               if (!(val & ENET_CTL_DISABLE_MASK))
1284 +                       break;
1285 +               udelay(1);
1286 +       } while (limit--);
1287 +}
1288 +
1289 +/*
1290 + * disable dma in given channel
1291 + */
1292 +static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1293 +{
1294 +       int limit;
1295 +
1296 +       enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
1297 +
1298 +       limit = 1000;
1299 +       do {
1300 +               u32 val;
1301 +
1302 +               val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
1303 +               if (!(val & ENETDMA_CHANCFG_EN_MASK))
1304 +                       break;
1305 +               udelay(1);
1306 +       } while (limit--);
1307 +}
1308 +
1309 +/*
1310 + * stop callback
1311 + */
1312 +static int bcm_enet_stop(struct net_device *dev)
1313 +{
1314 +       struct bcm_enet_priv *priv;
1315 +       struct device *kdev;
1316 +       int i;
1317 +
1318 +       priv = netdev_priv(dev);
1319 +       kdev = &priv->pdev->dev;
1320 +
1321 +       netif_stop_queue(dev);
1322 +       napi_disable(&priv->napi);
1323 +       if (priv->has_phy)
1324 +               phy_stop(priv->phydev);
1325 +       del_timer_sync(&priv->rx_timeout);
1326 +
1327 +       /* mask all interrupts */
1328 +       enet_writel(priv, 0, ENET_IRMASK_REG);
1329 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1330 +       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1331 +
1332 +       /* make sure no mib update is scheduled */
1333 +       flush_scheduled_work();
1334 +
1335 +       /* disable dma & mac */
1336 +       bcm_enet_disable_dma(priv, priv->tx_chan);
1337 +       bcm_enet_disable_dma(priv, priv->rx_chan);
1338 +       bcm_enet_disable_mac(priv);
1339 +
1340 +       /* force reclaim of all tx buffers */
1341 +       bcm_enet_tx_reclaim(dev, 1);
1342 +
1343 +       /* free the rx skb ring */
1344 +       for (i = 0; i < priv->rx_ring_size; i++) {
1345 +               struct bcm_enet_desc *desc;
1346 +
1347 +               if (!priv->rx_skb[i])
1348 +                       continue;
1349 +
1350 +               desc = &priv->rx_desc_cpu[i];
1351 +               dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
1352 +                                DMA_FROM_DEVICE);
1353 +               kfree_skb(priv->rx_skb[i]);
1354 +       }
1355 +
1356 +       /* free remaining allocated memory */
1357 +       kfree(priv->rx_skb);
1358 +       kfree(priv->tx_skb);
1359 +       dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1360 +                         priv->rx_desc_cpu, priv->rx_desc_dma);
1361 +       dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1362 +                         priv->tx_desc_cpu, priv->tx_desc_dma);
1363 +       free_irq(priv->irq_tx, dev);
1364 +       free_irq(priv->irq_rx, dev);
1365 +       free_irq(dev->irq, dev);
1366 +
1367 +       /* release phy */
1368 +       if (priv->has_phy) {
1369 +               phy_disconnect(priv->phydev);
1370 +               priv->phydev = NULL;
1371 +       }
1372 +
1373 +       return 0;
1374 +}
1375 +
1376 +/*
1377 + * core request to return device rx/tx stats
1378 + */
1379 +static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
1380 +{
1381 +       struct bcm_enet_priv *priv;
1382 +
1383 +       priv = netdev_priv(dev);
1384 +       return &priv->stats;
1385 +}
1386 +
1387 +/*
1388 + * ethtool callbacks
1389 + */
1390 +struct bcm_enet_stats {
1391 +       char stat_string[ETH_GSTRING_LEN];
1392 +       int sizeof_stat;
1393 +       int stat_offset;
1394 +       int mib_reg;
1395 +};
1396 +
1397 +#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),            \
1398 +                    offsetof(struct bcm_enet_priv, m)
1399 +
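GEN_STAT() fills the sizeof_stat/stat_offset pair from a member path, which
is what lets update_mib_counters() below pick u32 versus u64 accumulation
per entry. For instance, GEN_STAT(stats.rx_packets) expands to:

        sizeof(((struct bcm_enet_priv *)0)->stats.rx_packets),
        offsetof(struct bcm_enet_priv, stats.rx_packets)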
1400 +static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1401 +       { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
1402 +       { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
1403 +       { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
1404 +       { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
1405 +       { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
1406 +       { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
1407 +       { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
1408 +       { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
1409 +
1410 +       { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1411 +       { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1412 +       { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1413 +       { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1414 +       { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1415 +       { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1416 +       { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1417 +       { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1418 +       { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1419 +       { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1420 +       { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1421 +       { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1422 +       { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1423 +       { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1424 +       { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1425 +       { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1426 +       { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1427 +       { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1428 +       { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1429 +       { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1430 +       { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1431 +
1432 +       { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1433 +       { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1434 +       { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1435 +       { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1436 +       { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1437 +       { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1438 +       { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1439 +       { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1440 +       { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1441 +       { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1442 +       { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1443 +       { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1444 +       { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1445 +       { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1446 +       { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1447 +       { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1448 +       { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1449 +       { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1450 +       { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1451 +       { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1452 +       { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1453 +       { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1454 +
1455 +};
1456 +
1457 +#define BCM_ENET_STATS_LEN     \
1458 +       (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1459 +
1460 +static const u32 unused_mib_regs[] = {
1461 +       ETH_MIB_TX_ALL_OCTETS,
1462 +       ETH_MIB_TX_ALL_PKTS,
1463 +       ETH_MIB_RX_ALL_OCTETS,
1464 +       ETH_MIB_RX_ALL_PKTS,
1465 +};
1466 +
1467 +
1468 +static void bcm_enet_get_drvinfo(struct net_device *netdev,
1469 +                                struct ethtool_drvinfo *drvinfo)
1470 +{
1471 +       strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
1472 +       strncpy(drvinfo->version, bcm_enet_driver_version, 32);
1473 +       strncpy(drvinfo->fw_version, "N/A", 32);
1474 +       strncpy(drvinfo->bus_info, "bcm63xx", 32);
1475 +       drvinfo->n_stats = BCM_ENET_STATS_LEN;
1476 +}
1477 +
1478 +static int bcm_enet_get_stats_count(struct net_device *netdev)
1479 +{
1480 +       return BCM_ENET_STATS_LEN;
1481 +}
1482 +
1483 +static void bcm_enet_get_strings(struct net_device *netdev,
1484 +                                u32 stringset, u8 *data)
1485 +{
1486 +       int i;
1487 +
1488 +       switch (stringset) {
1489 +       case ETH_SS_STATS:
1490 +               for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1491 +                       memcpy(data + i * ETH_GSTRING_LEN,
1492 +                              bcm_enet_gstrings_stats[i].stat_string,
1493 +                              ETH_GSTRING_LEN);
1494 +               }
1495 +               break;
1496 +       }
1497 +}
1498 +
1499 +static void update_mib_counters(struct bcm_enet_priv *priv)
1500 +{
1501 +       int i;
1502 +
1503 +       for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1504 +               const struct bcm_enet_stats *s;
1505 +               u32 val;
1506 +               char *p;
1507 +
1508 +               s = &bcm_enet_gstrings_stats[i];
1509 +               if (s->mib_reg == -1)
1510 +                       continue;
1511 +
1512 +               val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1513 +               p = (char *)priv + s->stat_offset;
1514 +
1515 +               if (s->sizeof_stat == sizeof(u64))
1516 +                       *(u64 *)p += val;
1517 +               else
1518 +                       *(u32 *)p += val;
1519 +       }
1520 +
1521 +       /* also read the unused mib counters to make sure the mib
1522 +        * counter overflow interrupt is cleared */
1523 +       for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1524 +               (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1525 +}
1526 +
1527 +static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1528 +{
1529 +       struct bcm_enet_priv *priv;
1530 +
1531 +       priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1532 +       mutex_lock(&priv->mib_update_lock);
1533 +       update_mib_counters(priv);
1534 +       mutex_unlock(&priv->mib_update_lock);
1535 +
1536 +       /* reenable mib interrupt */
1537 +       if (netif_running(priv->net_dev))
1538 +               enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1539 +}
1540 +
1541 +static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1542 +                                      struct ethtool_stats *stats,
1543 +                                      u64 *data)
1544 +{
1545 +       struct bcm_enet_priv *priv;
1546 +       int i;
1547 +
1548 +       priv = netdev_priv(netdev);
1549 +
1550 +       mutex_lock(&priv->mib_update_lock);
1551 +       update_mib_counters(priv);
1552 +
1553 +       for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1554 +               const struct bcm_enet_stats *s;
1555 +               char *p;
1556 +
1557 +               s = &bcm_enet_gstrings_stats[i];
1558 +               p = (char *)priv + s->stat_offset;
1559 +               data[i] = (s->sizeof_stat == sizeof(u64)) ?
1560 +                       *(u64 *)p : *(u32 *)p;
1561 +       }
1562 +       mutex_unlock(&priv->mib_update_lock);
1563 +}
1564 +
1565 +static int bcm_enet_get_settings(struct net_device *dev,
1566 +                                struct ethtool_cmd *cmd)
1567 +{
1568 +       struct bcm_enet_priv *priv;
1569 +
1570 +       priv = netdev_priv(dev);
1571 +
1572 +       cmd->maxrxpkt = 0;
1573 +       cmd->maxtxpkt = 0;
1574 +
1575 +       if (priv->has_phy) {
1576 +               if (!priv->phydev)
1577 +                       return -ENODEV;
1578 +               return phy_ethtool_gset(priv->phydev, cmd);
1579 +       } else {
1580 +               cmd->autoneg = 0;
1581 +               cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
1582 +               cmd->duplex = (priv->force_duplex_full) ?
1583 +                       DUPLEX_FULL : DUPLEX_HALF;
1584 +               cmd->supported = SUPPORTED_10baseT_Half |
1585 +                       SUPPORTED_10baseT_Full |
1586 +                       SUPPORTED_100baseT_Half |
1587 +                       SUPPORTED_100baseT_Full;
1588 +               cmd->advertising = 0;
1589 +               cmd->port = PORT_MII;
1590 +               cmd->transceiver = XCVR_EXTERNAL;
1591 +       }
1592 +       return 0;
1593 +}
1594 +
1595 +static int bcm_enet_set_settings(struct net_device *dev,
1596 +                                struct ethtool_cmd *cmd)
1597 +{
1598 +       struct bcm_enet_priv *priv;
1599 +
1600 +       priv = netdev_priv(dev);
1601 +       if (priv->has_phy) {
1602 +               if (!priv->phydev)
1603 +                       return -ENODEV;
1604 +               return phy_ethtool_sset(priv->phydev, cmd);
1605 +       } else {
1606 +
1607 +               if (cmd->autoneg ||
1608 +                   (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1609 +                   cmd->port != PORT_MII)
1610 +                       return -EINVAL;
1611 +
1612 +               priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1613 +               priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1614 +
1615 +               if (netif_running(dev))
1616 +                       bcm_enet_adjust_link(dev);
1617 +               return 0;
1618 +       }
1619 +}
1620 +
1621 +static void bcm_enet_get_ringparam(struct net_device *dev,
1622 +                                  struct ethtool_ringparam *ering)
1623 +{
1624 +       struct bcm_enet_priv *priv;
1625 +
1626 +       priv = netdev_priv(dev);
1627 +
1628 +       /* rx/tx ring is actually only limited by memory */
1629 +       ering->rx_max_pending = 8192;
1630 +       ering->tx_max_pending = 8192;
1631 +       ering->rx_mini_max_pending = 0;
1632 +       ering->rx_jumbo_max_pending = 0;
1633 +       ering->rx_pending = priv->rx_ring_size;
1634 +       ering->tx_pending = priv->tx_ring_size;
1635 +}
1636 +
1637 +static int bcm_enet_set_ringparam(struct net_device *dev,
1638 +                                 struct ethtool_ringparam *ering)
1639 +{
1640 +       struct bcm_enet_priv *priv;
1641 +       int was_running;
1642 +
1643 +       priv = netdev_priv(dev);
1644 +
1645 +       was_running = 0;
1646 +       if (netif_running(dev)) {
1647 +               bcm_enet_stop(dev);
1648 +               was_running = 1;
1649 +       }
1650 +
1651 +       priv->rx_ring_size = ering->rx_pending;
1652 +       priv->tx_ring_size = ering->tx_pending;
1653 +
1654 +       if (was_running) {
1655 +               int err;
1656 +
1657 +               err = bcm_enet_open(dev);
1658 +               if (err)
1659 +                       dev_close(dev);
1660 +               else
1661 +                       bcm_enet_set_multicast_list(dev);
1662 +       }
1663 +       return 0;
1664 +}
1665 +
1666 +static void bcm_enet_get_pauseparam(struct net_device *dev,
1667 +                                   struct ethtool_pauseparam *ecmd)
1668 +{
1669 +       struct bcm_enet_priv *priv;
1670 +
1671 +       priv = netdev_priv(dev);
1672 +       ecmd->autoneg = priv->pause_auto;
1673 +       ecmd->rx_pause = priv->pause_rx;
1674 +       ecmd->tx_pause = priv->pause_tx;
1675 +}
1676 +
1677 +static int bcm_enet_set_pauseparam(struct net_device *dev,
1678 +                                  struct ethtool_pauseparam *ecmd)
1679 +{
1680 +       struct bcm_enet_priv *priv;
1681 +
1682 +       priv = netdev_priv(dev);
1683 +
1684 +       if (priv->has_phy) {
1685 +               if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1686 +                       /* asymmetric pause mode not supported;
1687 +                        * it is actually possible, but the integrated
1688 +                        * PHY has a read-only asym_pause bit */
1689 +                       return -EINVAL;
1690 +               }
1691 +       } else {
1692 +               /* no pause autoneg on direct mii connection */
1693 +               if (ecmd->autoneg)
1694 +                       return -EINVAL;
1695 +       }
1696 +
1697 +       priv->pause_auto = ecmd->autoneg;
1698 +       priv->pause_rx = ecmd->rx_pause;
1699 +       priv->pause_tx = ecmd->tx_pause;
1700 +
1701 +       return 0;
1702 +}
1703 +
1704 +static struct ethtool_ops bcm_enet_ethtool_ops = {
1705 +       .get_strings            = bcm_enet_get_strings,
1706 +       .get_stats_count        = bcm_enet_get_stats_count,
1707 +       .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1708 +       .get_settings           = bcm_enet_get_settings,
1709 +       .set_settings           = bcm_enet_set_settings,
1710 +       .get_drvinfo            = bcm_enet_get_drvinfo,
1711 +       .get_link               = ethtool_op_get_link,
1712 +       .get_ringparam          = bcm_enet_get_ringparam,
1713 +       .set_ringparam          = bcm_enet_set_ringparam,
1714 +       .get_pauseparam         = bcm_enet_get_pauseparam,
1715 +       .set_pauseparam         = bcm_enet_set_pauseparam,
1716 +};
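+
+/* the ops above back the standard ethtool interface: for example
+ * "ethtool -S ethX" ends up in bcm_enet_get_ethtool_stats and dumps
+ * the mib counters collected above */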
1717 +
1718 +static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1719 +{
1720 +       struct bcm_enet_priv *priv;
1721 +
1722 +       priv = netdev_priv(dev);
1723 +       if (priv->has_phy) {
1724 +               if (!priv->phydev)
1725 +                       return -ENODEV;
1726 +               return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1727 +       } else {
1728 +               struct mii_if_info mii;
1729 +
1730 +               mii.dev = dev;
1731 +               mii.mdio_read = bcm_enet_mdio_read_mii;
1732 +               mii.mdio_write = bcm_enet_mdio_write_mii;
1733 +               mii.phy_id = 0;
1734 +               mii.phy_id_mask = 0x3f;
1735 +               mii.reg_num_mask = 0x1f;
1736 +               return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1737 +       }
1738 +}
1739 +
1740 +/*
1741 + * preinit hardware to allow mii operation while the device is down
1742 + */
1743 +static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1744 +{
1745 +       u32 val;
1746 +       int limit;
1747 +
1748 +       /* make sure mac is disabled */
1749 +       bcm_enet_disable_mac(priv);
1750 +
1751 +       /* soft reset mac */
1752 +       val = ENET_CTL_SRESET_MASK;
1753 +       enet_writel(priv, val, ENET_CTL_REG);
1754 +       wmb();
1755 +
1756 +       limit = 1000;
1757 +       do {
1758 +               val = enet_readl(priv, ENET_CTL_REG);
1759 +               if (!(val & ENET_CTL_SRESET_MASK))
1760 +                       break;
1761 +               udelay(1);
1762 +       } while (limit--);
1763 +
1764 +       /* select correct mii interface */
1765 +       val = enet_readl(priv, ENET_CTL_REG);
1766 +       if (priv->use_external_mii)
1767 +               val |= ENET_CTL_EPHYSEL_MASK;
1768 +       else
1769 +               val &= ~ENET_CTL_EPHYSEL_MASK;
1770 +       enet_writel(priv, val, ENET_CTL_REG);
1771 +
1772 +       /* turn on mdc clock */
1773 +       enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1774 +                   ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1775 +
1776 +       /* set mib counters to self-clear when read */
1777 +       val = enet_readl(priv, ENET_MIBCTL_REG);
1778 +       val |= ENET_MIBCTL_RDCLEAR_MASK;
1779 +       enet_writel(priv, val, ENET_MIBCTL_REG);
1780 +}
1781 +
1782 +/*
1783 + * allocate the netdevice, request register memory and register the device.
1784 + */
1785 +static int __devinit bcm_enet_probe(struct platform_device *pdev)
1786 +{
1787 +       struct bcm_enet_priv *priv;
1788 +       struct net_device *dev;
1789 +       struct bcm63xx_enet_platform_data *pd;
1790 +       struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1791 +       struct mii_bus *bus;
1792 +       const char *clk_name;
1793 +       unsigned int iomem_size;
1794 +       int i, ret, mdio_registered, mem_requested;
1795 +
1796 +       /* stop if the shared driver failed; assume driver->probe is
1797 +        * called in the same order we registered the devices (correct?) */
1798 +       if (!bcm_enet_shared_base)
1799 +               return -ENODEV;
1800 +
1801 +       mdio_registered = mem_requested = 0;
1802 +
1803 +       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1804 +       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1805 +       res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1806 +       res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1807 +       if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1808 +               return -ENODEV;
1809 +
1810 +       ret = 0;
1811 +       dev = alloc_etherdev(sizeof(*priv));
1812 +       if (!dev)
1813 +               return -ENOMEM;
1814 +       priv = netdev_priv(dev);
1815 +       memset(priv, 0, sizeof(*priv));
1816 +
1817 +       iomem_size = res_mem->end - res_mem->start + 1;
1818 +       if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1819 +               ret = -EBUSY;
1820 +               goto err;
1821 +       }
1822 +       mem_requested = 1;
1823 +
1824 +       priv->base = ioremap(res_mem->start, iomem_size);
1825 +       if (priv->base == NULL) {
1826 +               ret = -ENOMEM;
1827 +               goto err;
1828 +       }
1829 +       dev->irq = priv->irq = res_irq->start;
1830 +       priv->irq_rx = res_irq_rx->start;
1831 +       priv->irq_tx = res_irq_tx->start;
1832 +       priv->mac_id = pdev->id;
1833 +
1834 +       /* get rx & tx dma channel id for this mac */
1835 +       if (priv->mac_id == 0) {
1836 +               priv->rx_chan = 0;
1837 +               priv->tx_chan = 1;
1838 +               clk_name = "enet0";
1839 +       } else {
1840 +               priv->rx_chan = 2;
1841 +               priv->tx_chan = 3;
1842 +               clk_name = "enet1";
1843 +       }
1844 +
1845 +       priv->mac_clk = clk_get(&pdev->dev, clk_name);
1846 +       if (IS_ERR(priv->mac_clk)) {
1847 +               ret = PTR_ERR(priv->mac_clk);
1848 +               priv->mac_clk = NULL;
1849 +               goto err;
1850 +       }
1851 +       clk_enable(priv->mac_clk);
1852 +
1853 +       /* initialize defaults and fetch platform data */
1854 +       priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1855 +       priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1856 +
1857 +       pd = pdev->dev.platform_data;
1858 +       if (pd) {
1859 +               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1860 +               priv->has_phy = pd->has_phy;
1861 +               priv->phy_id = pd->phy_id;
1862 +               priv->has_phy_interrupt = pd->has_phy_interrupt;
1863 +               priv->phy_interrupt = pd->phy_interrupt;
1864 +               priv->use_external_mii = !pd->use_internal_phy;
1865 +               priv->pause_auto = pd->pause_auto;
1866 +               priv->pause_rx = pd->pause_rx;
1867 +               priv->pause_tx = pd->pause_tx;
1868 +               priv->force_duplex_full = pd->force_duplex_full;
1869 +               priv->force_speed_100 = pd->force_speed_100;
1870 +       }
1871 +
1872 +       if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1873 +               /* using internal PHY, enable clock */
1874 +               priv->phy_clk = clk_get(&pdev->dev, "ephy");
1875 +               if (IS_ERR(priv->phy_clk)) {
1876 +                       ret = PTR_ERR(priv->phy_clk);
1877 +                       priv->phy_clk = NULL;
1878 +                       goto err;
1879 +               }
1880 +               clk_enable(priv->phy_clk);
1881 +       }
1882 +
1883 +       /* do minimal hardware init to be able to probe mii bus */
1884 +       bcm_enet_hw_preinit(priv);
1885 +
1886 +       /* MII bus registration */
1887 +       if (priv->has_phy) {
1888 +               bus = &priv->mii_bus;
1889 +               bus->name = "bcm63xx_enet MII bus";
1890 +               bus->dev = &pdev->dev;
1891 +               bus->priv = priv;
1892 +               bus->read = bcm_enet_mdio_read_phylib;
1893 +               bus->write = bcm_enet_mdio_write_phylib;
1894 +               sprintf(bus->id, "%d", priv->mac_id);
1895 +
1896 +               /* only probe the bus where we think the PHY is, because
1897 +                * the mdio read operation returns 0 instead of 0xffff
1898 +                * when a slave is not present on the hw */
1899 +               bus->phy_mask = ~(1 << priv->phy_id);
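+               /* e.g. a phy_id of 1 gives a phy_mask of ~0x2, so phylib
+                * only scans mii address 1 during mdiobus_register */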
1900 +
1901 +               bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1902 +               if (!bus->irq) {
1903 +                       ret = -ENOMEM;
1904 +                       goto err;
1905 +               }
1906 +
1907 +               if (priv->has_phy_interrupt)
1908 +                       bus->irq[priv->phy_id] = priv->phy_interrupt;
1909 +               else
1910 +                       bus->irq[priv->phy_id] = PHY_POLL;
1911 +
1912 +               ret = mdiobus_register(bus);
1913 +               if (ret) {
1914 +                       dev_err(&pdev->dev, "unable to register mdio bus\n");
1915 +                       goto err;
1916 +               }
1917 +               mdio_registered = 1;
1918 +       } else {
1919 +
1920 +               /* run platform code to initialize PHY device */
1921 +               if (pd && pd->mii_config &&
1922 +                   pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1923 +                                  bcm_enet_mdio_write_mii)) {
1924 +                       dev_err(&pdev->dev, "unable to configure mdio bus\n");
1925 +                       ret = -ENODEV;
+                       goto err;
1926 +               }
1927 +       }
1928 +
1929 +       spin_lock_init(&priv->rx_lock);
1930 +
1931 +       /* init rx timeout (used for oom) */
1932 +       init_timer(&priv->rx_timeout);
1933 +       priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1934 +       priv->rx_timeout.data = (unsigned long)dev;
1935 +
1936 +       /* init the mib update lock & work */
1937 +       mutex_init(&priv->mib_update_lock);
1938 +       INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1939 +
1940 +       /* zero mib counters */
1941 +       for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1942 +               enet_writel(priv, 0, ENET_MIB_REG(i));
1943 +
1944 +       /* register netdevice */
1945 +       dev->open = bcm_enet_open;
1946 +       dev->stop = bcm_enet_stop;
1947 +       dev->hard_start_xmit = bcm_enet_start_xmit;
1948 +       dev->get_stats = bcm_enet_get_stats;
1949 +       dev->set_mac_address = bcm_enet_set_mac_address;
1950 +       dev->set_multicast_list = bcm_enet_set_multicast_list;
1951 +       netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1952 +       dev->do_ioctl = bcm_enet_ioctl;
1953 +#ifdef CONFIG_NET_POLL_CONTROLLER
1954 +       dev->poll_controller = bcm_enet_netpoll;
1955 +#endif
1956 +
1957 +       SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1958 +
1959 +       ret = register_netdev(dev);
1960 +       if (ret)
1961 +               goto err;
1962 +
1963 +       platform_set_drvdata(pdev, dev);
1964 +       priv->pdev = pdev;
1965 +       priv->net_dev = dev;
1966 +       SET_NETDEV_DEV(dev, &pdev->dev);
1967 +
1968 +       return 0;
1969 +
1970 +err:
1971 +       if (mem_requested)
1972 +               release_mem_region(res_mem->start, iomem_size);
1973 +       if (mdio_registered)
1974 +               mdiobus_unregister(&priv->mii_bus);
1975 +       kfree(priv->mii_bus.irq);
1976 +       if (priv->mac_clk) {
1977 +               clk_disable(priv->mac_clk);
1978 +               clk_put(priv->mac_clk);
1979 +       }
1980 +       if (priv->phy_clk) {
1981 +               clk_disable(priv->phy_clk);
1982 +               clk_put(priv->phy_clk);
1983 +       }
1984 +       if (priv->base) {
1985 +               /* turn off mdc clock */
1986 +               enet_writel(priv, 0, ENET_MIISC_REG);
1987 +               iounmap(priv->base);
1988 +       }
1989 +       free_netdev(dev);
1990 +       return ret;
1991 +}
1992 +
1993 +
1994 +/*
1995 + * exit func, stops hardware and unregisters netdevice
1996 + */
1997 +static int __devexit bcm_enet_remove(struct platform_device *pdev)
1998 +{
1999 +       struct bcm_enet_priv *priv;
2000 +       struct net_device *dev;
2001 +       struct resource *res;
2002 +
2003 +       /* stop netdevice */
2004 +       dev = platform_get_drvdata(pdev);
2005 +       priv = netdev_priv(dev);
2006 +       unregister_netdev(dev);
2007 +
2008 +       /* turn off mdc clock */
2009 +       enet_writel(priv, 0, ENET_MIISC_REG);
2010 +
2011 +       if (priv->has_phy) {
2012 +               mdiobus_unregister(&priv->mii_bus);
2013 +               kfree(priv->mii_bus.irq);
2014 +       } else {
2015 +               struct bcm63xx_enet_platform_data *pd;
2016 +
2017 +               pd = pdev->dev.platform_data;
2018 +               if (pd && pd->mii_config)
2019 +                       pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
2020 +                                      bcm_enet_mdio_write_mii);
2021 +       }
2022 +
2023 +       /* release device resources */
2024 +       iounmap(priv->base);
2025 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2026 +       release_mem_region(res->start, res->end - res->start + 1);
2027 +
2028 +       /* disable hw block clocks */
2029 +       if (priv->phy_clk) {
2030 +               clk_disable(priv->phy_clk);
2031 +               clk_put(priv->phy_clk);
2032 +       }
2033 +       clk_disable(priv->mac_clk);
2034 +       clk_put(priv->mac_clk);
2035 +
2036 +       free_netdev(dev);
2037 +       return 0;
2038 +}
2039 +
2040 +struct platform_driver bcm63xx_enet_driver = {
2041 +       .probe  = bcm_enet_probe,
2042 +       .remove = __devexit_p(bcm_enet_remove),
2043 +       .driver = {
2044 +               .name   = "bcm63xx_enet",
2045 +               .owner  = THIS_MODULE,
2046 +       },
2047 +};
2048 +
2049 +/*
2050 + * reserve & remap memory space shared between all macs
2051 + */
2052 +static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
2053 +{
2054 +       struct resource *res;
2055 +       unsigned int iomem_size;
2056 +
2057 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2058 +       if (!res)
2059 +               return -ENODEV;
2060 +
2061 +       iomem_size = res->end - res->start + 1;
2062 +       if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
2063 +               return -EBUSY;
2064 +
2065 +       bcm_enet_shared_base = ioremap(res->start, iomem_size);
2066 +       if (!bcm_enet_shared_base) {
2067 +               release_mem_region(res->start, iomem_size);
2068 +               return -ENOMEM;
2069 +       }
2070 +       return 0;
2071 +}
2072 +
2073 +static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
2074 +{
2075 +       struct resource *res;
2076 +
2077 +       iounmap(bcm_enet_shared_base);
2078 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2079 +       release_mem_region(res->start, res->end - res->start + 1);
2080 +       return 0;
2081 +}
2082 +
2083 +/*
2084 + * this "shared" driver is needed because both macs share a single
2085 + * address space
2086 + */
2087 +struct platform_driver bcm63xx_enet_shared_driver = {
2088 +       .probe  = bcm_enet_shared_probe,
2089 +       .remove = __devexit_p(bcm_enet_shared_remove),
2090 +       .driver = {
2091 +               .name   = "bcm63xx_enet_shared",
2092 +               .owner  = THIS_MODULE,
2093 +       },
2094 +};
2095 +
2096 +/*
2097 + * entry point
2098 + */
2099 +static int __init bcm_enet_init(void)
2100 +{
2101 +       int ret;
2102 +
2103 +       ret = platform_driver_register(&bcm63xx_enet_shared_driver);
2104 +       if (ret)
2105 +               return ret;
2106 +
2107 +       ret = platform_driver_register(&bcm63xx_enet_driver);
2108 +       if (ret)
2109 +               platform_driver_unregister(&bcm63xx_enet_shared_driver);
2110 +
2111 +       return ret;
2112 +}
2113 +
2114 +static void __exit bcm_enet_exit(void)
2115 +{
2116 +       platform_driver_unregister(&bcm63xx_enet_driver);
2117 +       platform_driver_unregister(&bcm63xx_enet_shared_driver);
2118 +}
2119 +
2120 +
2121 +module_init(bcm_enet_init);
2122 +module_exit(bcm_enet_exit);
2123 +
2124 +MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2125 +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2126 +MODULE_LICENSE("GPL");
2127 diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h
2128 new file mode 100644
2129 index 0000000..fe7ffc1
2130 --- /dev/null
2131 +++ b/drivers/net/bcm63xx_enet.h
2132 @@ -0,0 +1,294 @@
2133 +#ifndef BCM63XX_ENET_H_
2134 +#define BCM63XX_ENET_H_
2135 +
2136 +#include <linux/types.h>
2137 +#include <linux/mii.h>
2138 +#include <linux/mutex.h>
2139 +#include <linux/phy.h>
2140 +#include <linux/platform_device.h>
2141 +
2142 +#include <bcm63xx_regs.h>
2143 +#include <bcm63xx_irq.h>
2144 +#include <bcm63xx_io.h>
2145 +
2146 +/* default number of descriptors */
2147 +#define BCMENET_DEF_RX_DESC    64
2148 +#define BCMENET_DEF_TX_DESC    32
2149 +
2150 +/* maximum dma burst length (in units of 4 bytes) */
2151 +#define BCMENET_DMA_MAXBURST   16
2152 +
2153 +/* tx transmit threshold (in units of 4 bytes); the fifo is 256
2154 + * bytes, so the value must be low enough that a DMA transfer of the
2155 + * above burst length cannot overflow the fifo */
2156 +#define BCMENET_TX_FIFO_TRESH  32
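+
+/* with the values above, a burst is 16 * 4 = 64 bytes and the
+ * threshold is 32 * 4 = 128 bytes: 128 + 64 = 192 bytes, safely
+ * below the 256 byte fifo */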
2157 +
2158 +/* maximum rx/tx packet size */
2159 +#define BCMENET_MAX_RX_SIZE    (ETH_FRAME_LEN + 4)
2160 +#define BCMENET_MAX_TX_SIZE    (ETH_FRAME_LEN + 4)
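+
+/* eth_frame_len is 1514; the 4 extra bytes presumably leave room for
+ * the hardware-appended fcs, i.e. the full 1518 byte wire frame */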
2161 +
2162 +/*
2163 + * rx/tx dma descriptor
2164 + */
2165 +struct bcm_enet_desc {
2166 +       u32 len_stat;
2167 +       u32 address;
2168 +};
2169 +
2170 +#define DMADESC_LENGTH_SHIFT   16
2171 +#define DMADESC_LENGTH_MASK    (0xfff << DMADESC_LENGTH_SHIFT)
2172 +#define DMADESC_OWNER_MASK     (1 << 15)
2173 +#define DMADESC_EOP_MASK       (1 << 14)
2174 +#define DMADESC_SOP_MASK       (1 << 13)
2175 +#define DMADESC_ESOP_MASK      (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
2176 +#define DMADESC_WRAP_MASK      (1 << 12)
2177 +
2178 +#define DMADESC_UNDER_MASK     (1 << 9)
2179 +#define DMADESC_APPEND_CRC     (1 << 8)
2180 +#define DMADESC_OVSIZE_MASK    (1 << 4)
2181 +#define DMADESC_RXER_MASK      (1 << 2)
2182 +#define DMADESC_CRC_MASK       (1 << 1)
2183 +#define DMADESC_OV_MASK                (1 << 0)
2184 +#define DMADESC_ERR_MASK       (DMADESC_UNDER_MASK | \
2185 +                               DMADESC_OVSIZE_MASK | \
2186 +                               DMADESC_RXER_MASK | \
2187 +                               DMADESC_CRC_MASK | \
2188 +                               DMADESC_OV_MASK)
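+
+/* illustrative encoding (hypothetical packet): a single-fragment
+ * 60 byte tx packet would be handed to the dma with len_stat set to
+ * (60 << DMADESC_LENGTH_SHIFT) | DMADESC_OWNER_MASK |
+ * DMADESC_ESOP_MASK | DMADESC_APPEND_CRC, with DMADESC_WRAP_MASK
+ * added on the last descriptor of the ring */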
2189 +
2190 +
2191 +/*
2192 + * MIB Counters register definitions
2193 +*/
2194 +#define ETH_MIB_TX_GD_OCTETS                   0
2195 +#define ETH_MIB_TX_GD_PKTS                     1
2196 +#define ETH_MIB_TX_ALL_OCTETS                  2
2197 +#define ETH_MIB_TX_ALL_PKTS                    3
2198 +#define ETH_MIB_TX_BRDCAST                     4
2199 +#define ETH_MIB_TX_MULT                                5
2200 +#define ETH_MIB_TX_64                          6
2201 +#define ETH_MIB_TX_65_127                      7
2202 +#define ETH_MIB_TX_128_255                     8
2203 +#define ETH_MIB_TX_256_511                     9
2204 +#define ETH_MIB_TX_512_1023                    10
2205 +#define ETH_MIB_TX_1024_MAX                    11
2206 +#define ETH_MIB_TX_JAB                         12
2207 +#define ETH_MIB_TX_OVR                         13
2208 +#define ETH_MIB_TX_FRAG                                14
2209 +#define ETH_MIB_TX_UNDERRUN                    15
2210 +#define ETH_MIB_TX_COL                         16
2211 +#define ETH_MIB_TX_1_COL                       17
2212 +#define ETH_MIB_TX_M_COL                       18
2213 +#define ETH_MIB_TX_EX_COL                      19
2214 +#define ETH_MIB_TX_LATE                                20
2215 +#define ETH_MIB_TX_DEF                         21
2216 +#define ETH_MIB_TX_CRS                         22
2217 +#define ETH_MIB_TX_PAUSE                       23
2218 +
2219 +#define ETH_MIB_RX_GD_OCTETS                   32
2220 +#define ETH_MIB_RX_GD_PKTS                     33
2221 +#define ETH_MIB_RX_ALL_OCTETS                  34
2222 +#define ETH_MIB_RX_ALL_PKTS                    35
2223 +#define ETH_MIB_RX_BRDCAST                     36
2224 +#define ETH_MIB_RX_MULT                                37
2225 +#define ETH_MIB_RX_64                          38
2226 +#define ETH_MIB_RX_65_127                      39
2227 +#define ETH_MIB_RX_128_255                     40
2228 +#define ETH_MIB_RX_256_511                     41
2229 +#define ETH_MIB_RX_512_1023                    42
2230 +#define ETH_MIB_RX_1024_MAX                    43
2231 +#define ETH_MIB_RX_JAB                         44
2232 +#define ETH_MIB_RX_OVR                         45
2233 +#define ETH_MIB_RX_FRAG                                46
2234 +#define ETH_MIB_RX_DROP                                47
2235 +#define ETH_MIB_RX_CRC_ALIGN                   48
2236 +#define ETH_MIB_RX_UND                         49
2237 +#define ETH_MIB_RX_CRC                         50
2238 +#define ETH_MIB_RX_ALIGN                       51
2239 +#define ETH_MIB_RX_SYM                         52
2240 +#define ETH_MIB_RX_PAUSE                       53
2241 +#define ETH_MIB_RX_CNTRL                       54
2242 +
2243 +
2244 +struct bcm_enet_mib_counters {
2245 +       u64 tx_gd_octets;
2246 +       u32 tx_gd_pkts;
2247 +       u32 tx_all_octets;
2248 +       u32 tx_all_pkts;
2249 +       u32 tx_brdcast;
2250 +       u32 tx_mult;
2251 +       u32 tx_64;
2252 +       u32 tx_65_127;
2253 +       u32 tx_128_255;
2254 +       u32 tx_256_511;
2255 +       u32 tx_512_1023;
2256 +       u32 tx_1024_max;
2257 +       u32 tx_jab;
2258 +       u32 tx_ovr;
2259 +       u32 tx_frag;
2260 +       u32 tx_underrun;
2261 +       u32 tx_col;
2262 +       u32 tx_1_col;
2263 +       u32 tx_m_col;
2264 +       u32 tx_ex_col;
2265 +       u32 tx_late;
2266 +       u32 tx_def;
2267 +       u32 tx_crs;
2268 +       u32 tx_pause;
2269 +       u64 rx_gd_octets;
2270 +       u32 rx_gd_pkts;
2271 +       u32 rx_all_octets;
2272 +       u32 rx_all_pkts;
2273 +       u32 rx_brdcast;
2274 +       u32 rx_mult;
2275 +       u32 rx_64;
2276 +       u32 rx_65_127;
2277 +       u32 rx_128_255;
2278 +       u32 rx_256_511;
2279 +       u32 rx_512_1023;
2280 +       u32 rx_1024_max;
2281 +       u32 rx_jab;
2282 +       u32 rx_ovr;
2283 +       u32 rx_frag;
2284 +       u32 rx_drop;
2285 +       u32 rx_crc_align;
2286 +       u32 rx_und;
2287 +       u32 rx_crc;
2288 +       u32 rx_align;
2289 +       u32 rx_sym;
2290 +       u32 rx_pause;
2291 +       u32 rx_cntrl;
2292 +};
2293 +
2294 +
2295 +struct bcm_enet_priv {
2296 +
2297 +       /* mac id (from platform device id) */
2298 +       int mac_id;
2299 +
2300 +       /* base remapped address of device */
2301 +       void __iomem *base;
2302 +
2303 +       /* mac irq, rx_dma irq, tx_dma irq */
2304 +       int irq;
2305 +       int irq_rx;
2306 +       int irq_tx;
2307 +
2308 +       /* hw view of rx & tx dma ring */
2309 +       dma_addr_t rx_desc_dma;
2310 +       dma_addr_t tx_desc_dma;
2311 +
2312 +       /* allocated size (in bytes) for rx & tx dma ring */
2313 +       unsigned int rx_desc_alloc_size;
2314 +       unsigned int tx_desc_alloc_size;
2315 +
2316 +
2317 +       struct napi_struct napi;
2318 +
2319 +       /* dma channel id for rx */
2320 +       int rx_chan;
2321 +
2322 +       /* number of dma desc in rx ring */
2323 +       int rx_ring_size;
2324 +
2325 +       /* cpu view of rx dma ring */
2326 +       struct bcm_enet_desc *rx_desc_cpu;
2327 +
2328 +       /* current number of armed descriptors given to hardware for rx */
2329 +       int rx_desc_count;
2330 +
2331 +       /* next rx descriptor to fetch from hardware */
2332 +       int rx_curr_desc;
2333 +
2334 +       /* next dirty rx descriptor to refill */
2335 +       int rx_dirty_desc;
2336 +
2337 +       /* list of skb given to hw for rx */
2338 +       struct sk_buff **rx_skb;
2339 +
2340 +       /* used when rx skb allocation fails, so that the rx queue
2341 +        * refill can be deferred */
2342 +       struct timer_list rx_timeout;
2343 +
2344 +       /* lock rx_timeout against rx normal operation */
2345 +       spinlock_t rx_lock;
2346 +
2347 +
2348 +       /* dma channel id for tx */
2349 +       int tx_chan;
2350 +
2351 +       /* number of dma desc in tx ring */
2352 +       int tx_ring_size;
2353 +
2354 +       /* cpu view of tx dma ring */
2355 +       struct bcm_enet_desc *tx_desc_cpu;
2356 +
2357 +       /* number of available descriptors for tx */
2358 +       int tx_desc_count;
2359 +
2360 +       /* next available tx descriptor */
2361 +       int tx_curr_desc;
2362 +
2363 +       /* next dirty tx descriptor to reclaim */
2364 +       int tx_dirty_desc;
2365 +
2366 +       /* list of skb given to hw for tx */
2367 +       struct sk_buff **tx_skb;
2368 +
2369 +       /* lock used by tx reclaim and xmit */
2370 +       spinlock_t tx_lock;
2371 +
2372 +
2373 +       /* set if internal phy is ignored and external mii interface
2374 +        * is selected */
2375 +       int use_external_mii;
2376 +
2377 +       /* set if a phy is connected; its address must be known,
2378 +        * since probing is not possible */
2379 +       int has_phy;
2380 +       int phy_id;
2381 +
2382 +       /* set if connected phy has an associated irq */
2383 +       int has_phy_interrupt;
2384 +       int phy_interrupt;
2385 +
2386 +       /* used when a phy is connected (phylib used) */
2387 +       struct mii_bus mii_bus;
2388 +       struct phy_device *phydev;
2389 +       int old_link;
2390 +       int old_duplex;
2391 +       int old_pause;
2392 +
2393 +       /* used when no phy is connected */
2394 +       int force_speed_100;
2395 +       int force_duplex_full;
2396 +
2397 +       /* pause parameters */
2398 +       int pause_auto;
2399 +       int pause_rx;
2400 +       int pause_tx;
2401 +
2402 +       /* stats */
2403 +       struct net_device_stats stats;
2404 +       struct bcm_enet_mib_counters mib;
2405 +
2406 +       /* after a mib interrupt, the mib register update is done from
2407 +        * this work queue */
2408 +       struct work_struct mib_update_task;
2409 +
2410 +       /* lock mib update between userspace request and workqueue */
2411 +       struct mutex mib_update_lock;
2412 +
2413 +       /* mac clock */
2414 +       struct clk *mac_clk;
2415 +
2416 +       /* phy clock if internal phy is used */
2417 +       struct clk *phy_clk;
2418 +
2419 +       /* network device reference */
2420 +       struct net_device *net_dev;
2421 +
2422 +       /* platform device reference */
2423 +       struct platform_device *pdev;
2424 +};
2425 +
2426 +#endif /* ! BCM63XX_ENET_H_ */
2427 diff --git a/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h b/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
2428 new file mode 100644
2429 index 0000000..d53f611
2430 --- /dev/null
2431 +++ b/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
2432 @@ -0,0 +1,45 @@
2433 +#ifndef BCM63XX_DEV_ENET_H_
2434 +#define BCM63XX_DEV_ENET_H_
2435 +
2436 +#include <linux/if_ether.h>
2437 +#include <linux/init.h>
2438 +
2439 +/*
2440 + * on-board ethernet platform data
2441 + */
2442 +struct bcm63xx_enet_platform_data {
2443 +       char mac_addr[ETH_ALEN];
2444 +
2445 +       int has_phy;
2446 +
2447 +       /* if has_phy, then set use_internal_phy */
2448 +       int use_internal_phy;
2449 +
2450 +       /* or fill phy info to use an external one */
2451 +       int phy_id;
2452 +       int has_phy_interrupt;
2453 +       int phy_interrupt;
2454 +
2455 +       /* if has_phy, use autonegotiated pause parameters or force
2456 +        * them */
2457 +       int pause_auto;
2458 +       int pause_rx;
2459 +       int pause_tx;
2460 +
2461 +       /* if !has_phy, set desired forced speed/duplex */
2462 +       int force_speed_100;
2463 +       int force_duplex_full;
2464 +
2465 +       /* if !has_phy, set callback to perform mii device
2466 +        * init/remove */
2467 +       int (*mii_config)(struct net_device *dev, int probe,
2468 +                         int (*mii_read)(struct net_device *dev,
2469 +                                         int phy_id, int reg),
2470 +                         void (*mii_write)(struct net_device *dev,
2471 +                                           int phy_id, int reg, int val));
2472 +};
2473 +
2474 +int __init bcm63xx_enet_register(int unit,
2475 +                                const struct bcm63xx_enet_platform_data *pd);
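+
+/*
+ * usage sketch (illustrative, field values are hypothetical):
+ *
+ *	static struct bcm63xx_enet_platform_data enet0_pd = {
+ *		.has_phy		= 1,
+ *		.use_internal_phy	= 1,
+ *		.pause_auto		= 1,
+ *	};
+ *
+ * board code fills enet0_pd.mac_addr with a valid address and then
+ * calls bcm63xx_enet_register(0, &enet0_pd) to instantiate the
+ * first mac.
+ */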
2476 +
2477 +#endif /* ! BCM63XX_DEV_ENET_H_ */
2478 -- 
2479 1.5.4.3
2480