[brcm47xx] refresh kernel patches
[openwrt.git] target/linux/brcm47xx/patches-2.6.23/120-b44_ssb_support.patch
1 Index: linux-2.6.23.17/drivers/net/b44.c
2 ===================================================================
3 --- linux-2.6.23.17.orig/drivers/net/b44.c
4 +++ linux-2.6.23.17/drivers/net/b44.c
5 @@ -1,7 +1,9 @@
6 -/* b44.c: Broadcom 4400 device driver.
7 +/* b44.c: Broadcom 4400/47xx device driver.
8   *
9   * Copyright (C) 2002 David S. Miller (davem@redhat.com)
10 - * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
11 + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
12 + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
13 + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
14   * Copyright (C) 2006 Broadcom Corporation.
15   *
16   * Distribute under GPL.
17 @@ -21,11 +23,13 @@
18  #include <linux/delay.h>
19  #include <linux/init.h>
20  #include <linux/dma-mapping.h>
21 +#include <linux/ssb/ssb.h>
22  
23  #include <asm/uaccess.h>
24  #include <asm/io.h>
25  #include <asm/irq.h>
26  
27 +
28  #include "b44.h"
29  
30  #define DRV_MODULE_NAME                "b44"
31 @@ -87,8 +91,8 @@
32  static char version[] __devinitdata =
33         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
34  
35 -MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
36 -MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
37 +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
38 +MODULE_DESCRIPTION("Broadcom 4400/47xx 10/100 PCI ethernet driver");
39  MODULE_LICENSE("GPL");
40  MODULE_VERSION(DRV_MODULE_VERSION);
41  
42 @@ -96,18 +100,11 @@ static int b44_debug = -1; /* -1 == use 
43  module_param(b44_debug, int, 0);
44  MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
45  
46 -static struct pci_device_id b44_pci_tbl[] = {
47 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
48 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
49 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
50 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
51 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
52 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
53 -       { }     /* terminate list with empty entry */
54 +static struct ssb_device_id b44_ssb_tbl[] = {
55 +       SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
56 +       SSB_DEVTABLE_END
57  };
58  
59 -MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
60 -
61  static void b44_halt(struct b44 *);
62  static void b44_init_rings(struct b44 *);
63  
64 @@ -119,6 +116,7 @@ static void b44_init_hw(struct b44 *, in
65  
66  static int dma_desc_align_mask;
67  static int dma_desc_sync_size;
68 +static int instance;
69  
70  static const char b44_gstrings[][ETH_GSTRING_LEN] = {
71  #define _B44(x...)     # x,
72 @@ -126,35 +124,24 @@ B44_STAT_REG_DECLARE
73  #undef _B44
74  };
75  
76 -static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
77 -                                                dma_addr_t dma_base,
78 -                                                unsigned long offset,
79 -                                                enum dma_data_direction dir)
80 -{
81 -       dma_sync_single_range_for_device(&pdev->dev, dma_base,
82 -                                        offset & dma_desc_align_mask,
83 -                                        dma_desc_sync_size, dir);
84 -}
85 -
86 -static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
87 -                                             dma_addr_t dma_base,
88 -                                             unsigned long offset,
89 -                                             enum dma_data_direction dir)
90 -{
91 -       dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
92 -                                     offset & dma_desc_align_mask,
93 -                                     dma_desc_sync_size, dir);
94 -}
95 -
96 -static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
97 -{
98 -       return readl(bp->regs + reg);
99 -}
100 -
101 -static inline void bw32(const struct b44 *bp,
102 -                       unsigned long reg, unsigned long val)
103 -{
104 -       writel(val, bp->regs + reg);
105 +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
106 +                                              dma_addr_t dma_base,
107 +                                              unsigned long offset,
108 +                                              enum dma_data_direction dir)
109 +{
110 +       dma_sync_single_range_for_device(sdev->dev, dma_base,
111 +                                       offset & dma_desc_align_mask,
112 +                                       dma_desc_sync_size, dir);
113 +}
114 +
115 +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
116 +                                           dma_addr_t dma_base,
117 +                                           unsigned long offset,
118 +                                           enum dma_data_direction dir)
119 +{
120 +       dma_sync_single_range_for_cpu(sdev->dev, dma_base,
121 +                                    offset & dma_desc_align_mask,
122 +                                    dma_desc_sync_size, dir);
123  }
124  
125  static int b44_wait_bit(struct b44 *bp, unsigned long reg,
126 @@ -182,117 +169,29 @@ static int b44_wait_bit(struct b44 *bp, 
127         return 0;
128  }
129  
130 -/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
131 - * buzz words used on this company's website :-)
132 - *
133 - * All of these routines must be invoked with bp->lock held and
134 - * interrupts disabled.
135 - */
136 -
137 -#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
138 -#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */
139 -
140 -static u32 ssb_get_core_rev(struct b44 *bp)
141 -{
142 -       return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
143 -}
144 -
145 -static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
146 -{
147 -       u32 bar_orig, pci_rev, val;
148 -
149 -       pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
150 -       pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
151 -       pci_rev = ssb_get_core_rev(bp);
152 -
153 -       val = br32(bp, B44_SBINTVEC);
154 -       val |= cores;
155 -       bw32(bp, B44_SBINTVEC, val);
156 -
157 -       val = br32(bp, SSB_PCI_TRANS_2);
158 -       val |= SSB_PCI_PREF | SSB_PCI_BURST;
159 -       bw32(bp, SSB_PCI_TRANS_2, val);
160 -
161 -       pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
162 -
163 -       return pci_rev;
164 -}
165 -
166 -static void ssb_core_disable(struct b44 *bp)
167 -{
168 -       if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
169 -               return;
170 -
171 -       bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
172 -       b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
173 -       b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
174 -       bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
175 -                           SBTMSLOW_REJECT | SBTMSLOW_RESET));
176 -       br32(bp, B44_SBTMSLOW);
177 -       udelay(1);
178 -       bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
179 -       br32(bp, B44_SBTMSLOW);
180 -       udelay(1);
181 -}
182 -
183 -static void ssb_core_reset(struct b44 *bp)
184 +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
185  {
186         u32 val;
187  
188 -       ssb_core_disable(bp);
189 -       bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
190 -       br32(bp, B44_SBTMSLOW);
191 -       udelay(1);
192 -
193 -       /* Clear SERR if set, this is a hw bug workaround.  */
194 -       if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
195 -               bw32(bp, B44_SBTMSHIGH, 0);
196 -
197 -       val = br32(bp, B44_SBIMSTATE);
198 -       if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
199 -               bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
200 -
201 -       bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
202 -       br32(bp, B44_SBTMSLOW);
203 -       udelay(1);
204 +       bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
205 +                           (index << CAM_CTRL_INDEX_SHIFT)));
206  
207 -       bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
208 -       br32(bp, B44_SBTMSLOW);
209 -       udelay(1);
210 -}
211 +       b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
212  
213 -static int ssb_core_unit(struct b44 *bp)
214 -{
215 -#if 0
216 -       u32 val = br32(bp, B44_SBADMATCH0);
217 -       u32 base;
218 +       val = br32(bp, B44_CAM_DATA_LO);
219  
220 -       type = val & SBADMATCH0_TYPE_MASK;
221 -       switch (type) {
222 -       case 0:
223 -               base = val & SBADMATCH0_BS0_MASK;
224 -               break;
225 +       data[2] = (val >> 24) & 0xFF;
226 +       data[3] = (val >> 16) & 0xFF;
227 +       data[4] = (val >> 8) & 0xFF;
228 +       data[5] = (val >> 0) & 0xFF;
229  
230 -       case 1:
231 -               base = val & SBADMATCH0_BS1_MASK;
232 -               break;
233 +       val = br32(bp, B44_CAM_DATA_HI);
234  
235 -       case 2:
236 -       default:
237 -               base = val & SBADMATCH0_BS2_MASK;
238 -               break;
239 -       };
240 -#endif
241 -       return 0;
242 +       data[0] = (val >> 8) & 0xFF;
243 +       data[1] = (val >> 0) & 0xFF;
244  }
245  
246 -static int ssb_is_core_up(struct b44 *bp)
247 -{
248 -       return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
249 -               == SBTMSLOW_CLOCK);
250 -}
251 -
252 -static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
253 +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
254  {
255         u32 val;
256  
257 @@ -328,14 +227,14 @@ static void b44_enable_ints(struct b44 *
258         bw32(bp, B44_IMASK, bp->imask);
259  }
260  
261 -static int b44_readphy(struct b44 *bp, int reg, u32 *val)
262 +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
263  {
264         int err;
265  
266         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
267         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
268                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
269 -                            (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
270 +                            (phy_addr << MDIO_DATA_PMD_SHIFT) |
271                              (reg << MDIO_DATA_RA_SHIFT) |
272                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
273         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
274 @@ -344,18 +243,34 @@ static int b44_readphy(struct b44 *bp, i
275         return err;
276  }
277  
278 -static int b44_writephy(struct b44 *bp, int reg, u32 val)
279 +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
280  {
281         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
282         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
283                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
284 -                            (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
285 +                            (phy_addr << MDIO_DATA_PMD_SHIFT) |
286                              (reg << MDIO_DATA_RA_SHIFT) |
287                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
288                              (val & MDIO_DATA_DATA)));
289         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
290  }
291  
292 +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
293 +{
294 +       if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
295 +               return 0;
296 +
297 +       return __b44_readphy(bp, bp->phy_addr, reg, val);
298 +}
299 +
300 +static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
301 +{
302 +       if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
303 +               return 0;
304 +
305 +       return __b44_writephy(bp, bp->phy_addr, reg, val);
306 +}
307 +
308  /* miilib interface */
309  /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
310   * due to code existing before miilib use was added to this driver.
311 @@ -384,6 +299,8 @@ static int b44_phy_reset(struct b44 *bp)
312         u32 val;
313         int err;
314  
315 +       if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
316 +               return 0;
317         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
318         if (err)
319                 return err;
320 @@ -442,11 +359,27 @@ static void b44_set_flow_ctrl(struct b44
321         __b44_set_flow_ctrl(bp, pause_enab);
322  }
323  
324 +
325 +extern char *nvram_get(char *name); //FIXME: move elsewhere
326  static int b44_setup_phy(struct b44 *bp)
327  {
328         u32 val;
329         int err;
330  
331 +       /*
332 +        * workaround for bad hardware design in Linksys WAP54G v1.0
333 +        * see https://dev.openwrt.org/ticket/146
334 +        * check and reset bit "isolate"
335 +        */
336 +       if ((atoi(nvram_get("boardnum")) == 2) &&
337 +                       (__b44_readphy(bp, 0, MII_BMCR, &val) == 0) &&
338 +                       (val & BMCR_ISOLATE) &&
339 +                       (__b44_writephy(bp, 0, MII_BMCR, val & ~BMCR_ISOLATE) != 0)) {
340 +               printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
341 +       }
342 +
343 +       if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
344 +               return 0;
345         if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
346                 goto out;
347         if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
348 @@ -542,6 +475,19 @@ static void b44_check_phy(struct b44 *bp
349  {
350         u32 bmsr, aux;
351  
352 +       if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
353 +               bp->flags |= B44_FLAG_100_BASE_T;
354 +               bp->flags |= B44_FLAG_FULL_DUPLEX;
355 +               if (!netif_carrier_ok(bp->dev)) {
356 +                       u32 val = br32(bp, B44_TX_CTRL);
357 +                       val |= TX_CTRL_DUPLEX;
358 +                       bw32(bp, B44_TX_CTRL, val);
359 +                       netif_carrier_on(bp->dev);
360 +                       b44_link_report(bp);
361 +               }
362 +               return;
363 +       }
364 +
365         if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
366             !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
367             (bmsr != 0xffff)) {
368 @@ -617,10 +563,10 @@ static void b44_tx(struct b44 *bp)
369  
370                 BUG_ON(skb == NULL);
371  
372 -               pci_unmap_single(bp->pdev,
373 +               dma_unmap_single(bp->sdev->dev,
374                                  pci_unmap_addr(rp, mapping),
375                                  skb->len,
376 -                                PCI_DMA_TODEVICE);
377 +                                DMA_TO_DEVICE);
378                 rp->skb = NULL;
379                 dev_kfree_skb_irq(skb);
380         }
381 @@ -657,9 +603,9 @@ static int b44_alloc_rx_skb(struct b44 *
382         if (skb == NULL)
383                 return -ENOMEM;
384  
385 -       mapping = pci_map_single(bp->pdev, skb->data,
386 +       mapping = dma_map_single(bp->sdev->dev, skb->data,
387                                  RX_PKT_BUF_SZ,
388 -                                PCI_DMA_FROMDEVICE);
389 +                                DMA_FROM_DEVICE);
390  
391         /* Hardware bug work-around, the chip is unable to do PCI DMA
392            to/from anything above 1GB :-( */
393 @@ -667,18 +613,18 @@ static int b44_alloc_rx_skb(struct b44 *
394                 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
395                 /* Sigh... */
396                 if (!dma_mapping_error(mapping))
397 -                       pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
398 +                       dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
399                 dev_kfree_skb_any(skb);
400                 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
401                 if (skb == NULL)
402                         return -ENOMEM;
403 -               mapping = pci_map_single(bp->pdev, skb->data,
404 +               mapping = dma_map_single(bp->sdev->dev, skb->data,
405                                          RX_PKT_BUF_SZ,
406 -                                        PCI_DMA_FROMDEVICE);
407 +                                        DMA_FROM_DEVICE);
408                 if (dma_mapping_error(mapping) ||
409                         mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
410                         if (!dma_mapping_error(mapping))
411 -                               pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
412 +                               dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
413                         dev_kfree_skb_any(skb);
414                         return -ENOMEM;
415                 }
416 @@ -705,9 +651,9 @@ static int b44_alloc_rx_skb(struct b44 *
417         dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
418  
419         if (bp->flags & B44_FLAG_RX_RING_HACK)
420 -               b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
421 -                                            dest_idx * sizeof(dp),
422 -                                            DMA_BIDIRECTIONAL);
423 +               b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
424 +                                           dest_idx * sizeof(dp),
425 +                                           DMA_BIDIRECTIONAL);
426  
427         return RX_PKT_BUF_SZ;
428  }
429 @@ -734,9 +680,9 @@ static void b44_recycle_rx(struct b44 *b
430                            pci_unmap_addr(src_map, mapping));
431  
432         if (bp->flags & B44_FLAG_RX_RING_HACK)
433 -               b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
434 -                                         src_idx * sizeof(src_desc),
435 -                                         DMA_BIDIRECTIONAL);
436 +               b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
437 +                                        src_idx * sizeof(src_desc),
438 +                                        DMA_BIDIRECTIONAL);
439  
440         ctrl = src_desc->ctrl;
441         if (dest_idx == (B44_RX_RING_SIZE - 1))
442 @@ -750,13 +696,13 @@ static void b44_recycle_rx(struct b44 *b
443         src_map->skb = NULL;
444  
445         if (bp->flags & B44_FLAG_RX_RING_HACK)
446 -               b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
447 -                                            dest_idx * sizeof(dest_desc),
448 -                                            DMA_BIDIRECTIONAL);
449 +               b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
450 +                                           dest_idx * sizeof(dest_desc),
451 +                                           DMA_BIDIRECTIONAL);
452  
453 -       pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
454 +       dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
455                                        RX_PKT_BUF_SZ,
456 -                                      PCI_DMA_FROMDEVICE);
457 +                                      DMA_FROM_DEVICE);
458  }
459  
460  static int b44_rx(struct b44 *bp, int budget)
461 @@ -776,9 +722,9 @@ static int b44_rx(struct b44 *bp, int bu
462                 struct rx_header *rh;
463                 u16 len;
464  
465 -               pci_dma_sync_single_for_cpu(bp->pdev, map,
466 +               dma_sync_single_for_cpu(bp->sdev->dev, map,
467                                             RX_PKT_BUF_SZ,
468 -                                           PCI_DMA_FROMDEVICE);
469 +                                           DMA_FROM_DEVICE);
470                 rh = (struct rx_header *) skb->data;
471                 len = le16_to_cpu(rh->len);
472                 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
473 @@ -810,8 +756,8 @@ static int b44_rx(struct b44 *bp, int bu
474                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
475                         if (skb_size < 0)
476                                 goto drop_it;
477 -                       pci_unmap_single(bp->pdev, map,
478 -                                        skb_size, PCI_DMA_FROMDEVICE);
479 +                       dma_unmap_single(bp->sdev->dev, map,
480 +                                        skb_size, DMA_FROM_DEVICE);
481                         /* Leave out rx_header */
482                         skb_put(skb, len + RX_PKT_OFFSET);
483                         skb_pull(skb, RX_PKT_OFFSET);
484 @@ -982,24 +928,24 @@ static int b44_start_xmit(struct sk_buff
485                 goto err_out;
486         }
487  
488 -       mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
489 +       mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
490         if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
491                 struct sk_buff *bounce_skb;
492  
493                 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
494                 if (!dma_mapping_error(mapping))
495 -                       pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
496 +                       dma_unmap_single(bp->sdev->dev, mapping, len, DMA_TO_DEVICE);
497  
498                 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
499                 if (!bounce_skb)
500                         goto err_out;
501  
502 -               mapping = pci_map_single(bp->pdev, bounce_skb->data,
503 -                                        len, PCI_DMA_TODEVICE);
504 +               mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
505 +                                        len, DMA_TO_DEVICE);
506                 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
507                         if (!dma_mapping_error(mapping))
508 -                               pci_unmap_single(bp->pdev, mapping,
509 -                                                len, PCI_DMA_TODEVICE);
510 +                               dma_unmap_single(bp->sdev->dev, mapping,
511 +                                                len, DMA_TO_DEVICE);
512                         dev_kfree_skb_any(bounce_skb);
513                         goto err_out;
514                 }
515 @@ -1022,9 +968,9 @@ static int b44_start_xmit(struct sk_buff
516         bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
517  
518         if (bp->flags & B44_FLAG_TX_RING_HACK)
519 -               b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
520 -                                            entry * sizeof(bp->tx_ring[0]),
521 -                                            DMA_TO_DEVICE);
522 +               b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
523 +                                           entry * sizeof(bp->tx_ring[0]),
524 +                                           DMA_TO_DEVICE);
525  
526         entry = NEXT_TX(entry);
527  
528 @@ -1097,10 +1043,10 @@ static void b44_free_rings(struct b44 *b
529  
530                 if (rp->skb == NULL)
531                         continue;
532 -               pci_unmap_single(bp->pdev,
533 +               dma_unmap_single(bp->sdev->dev,
534                                  pci_unmap_addr(rp, mapping),
535                                  RX_PKT_BUF_SZ,
536 -                                PCI_DMA_FROMDEVICE);
537 +                                DMA_FROM_DEVICE);
538                 dev_kfree_skb_any(rp->skb);
539                 rp->skb = NULL;
540         }
541 @@ -1111,10 +1057,10 @@ static void b44_free_rings(struct b44 *b
542  
543                 if (rp->skb == NULL)
544                         continue;
545 -               pci_unmap_single(bp->pdev,
546 +               dma_unmap_single(bp->sdev->dev,
547                                  pci_unmap_addr(rp, mapping),
548                                  rp->skb->len,
549 -                                PCI_DMA_TODEVICE);
550 +                                DMA_TO_DEVICE);
551                 dev_kfree_skb_any(rp->skb);
552                 rp->skb = NULL;
553         }
554 @@ -1136,14 +1082,14 @@ static void b44_init_rings(struct b44 *b
555         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
556  
557         if (bp->flags & B44_FLAG_RX_RING_HACK)
558 -               dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
559 -                                          DMA_TABLE_BYTES,
560 -                                          PCI_DMA_BIDIRECTIONAL);
561 +               dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
562 +                                         DMA_TABLE_BYTES,
563 +                                         DMA_BIDIRECTIONAL);
564  
565         if (bp->flags & B44_FLAG_TX_RING_HACK)
566 -               dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
567 -                                          DMA_TABLE_BYTES,
568 -                                          PCI_DMA_TODEVICE);
569 +               dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
570 +                                         DMA_TABLE_BYTES,
571 +                                         DMA_TO_DEVICE);
572  
573         for (i = 0; i < bp->rx_pending; i++) {
574                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
575 @@ -1163,24 +1109,24 @@ static void b44_free_consistent(struct b
576         bp->tx_buffers = NULL;
577         if (bp->rx_ring) {
578                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
579 -                       dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
580 -                                        DMA_TABLE_BYTES,
581 -                                        DMA_BIDIRECTIONAL);
582 +                       dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
583 +                                       DMA_TABLE_BYTES,
584 +                                       DMA_BIDIRECTIONAL);
585                         kfree(bp->rx_ring);
586                 } else
587 -                       pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
588 +                       dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
589                                             bp->rx_ring, bp->rx_ring_dma);
590                 bp->rx_ring = NULL;
591                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
592         }
593         if (bp->tx_ring) {
594                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
595 -                       dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
596 -                                        DMA_TABLE_BYTES,
597 -                                        DMA_TO_DEVICE);
598 +                       dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
599 +                                       DMA_TABLE_BYTES,
600 +                                       DMA_TO_DEVICE);
601                         kfree(bp->tx_ring);
602                 } else
603 -                       pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
604 +                       dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
605                                             bp->tx_ring, bp->tx_ring_dma);
606                 bp->tx_ring = NULL;
607                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
608 @@ -1206,7 +1152,7 @@ static int b44_alloc_consistent(struct b
609                 goto out_err;
610  
611         size = DMA_TABLE_BYTES;
612 -       bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
613 +       bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, GFP_ATOMIC);
614         if (!bp->rx_ring) {
615                 /* Allocation may have failed due to pci_alloc_consistent
616                    insisting on use of GFP_DMA, which is more restrictive
617 @@ -1218,9 +1164,9 @@ static int b44_alloc_consistent(struct b
618                 if (!rx_ring)
619                         goto out_err;
620  
621 -               rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
622 -                                            DMA_TABLE_BYTES,
623 -                                            DMA_BIDIRECTIONAL);
624 +               rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
625 +                                           DMA_TABLE_BYTES,
626 +                                           DMA_BIDIRECTIONAL);
627  
628                 if (dma_mapping_error(rx_ring_dma) ||
629                         rx_ring_dma + size > DMA_30BIT_MASK) {
630 @@ -1233,9 +1179,9 @@ static int b44_alloc_consistent(struct b
631                 bp->flags |= B44_FLAG_RX_RING_HACK;
632         }
633  
634 -       bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
635 +       bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, GFP_ATOMIC);
636         if (!bp->tx_ring) {
637 -               /* Allocation may have failed due to pci_alloc_consistent
638 +               /* Allocation may have failed due to dma_alloc_coherent
639                    insisting on use of GFP_DMA, which is more restrictive
640                    than necessary...  */
641                 struct dma_desc *tx_ring;
642 @@ -1245,9 +1191,9 @@ static int b44_alloc_consistent(struct b
643                 if (!tx_ring)
644                         goto out_err;
645  
646 -               tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
647 -                                            DMA_TABLE_BYTES,
648 -                                            DMA_TO_DEVICE);
649 +               tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
650 +                                           DMA_TABLE_BYTES,
651 +                                           DMA_TO_DEVICE);
652  
653                 if (dma_mapping_error(tx_ring_dma) ||
654                         tx_ring_dma + size > DMA_30BIT_MASK) {
655 @@ -1282,7 +1228,9 @@ static void b44_clear_stats(struct b44 *
656  /* bp->lock is held. */
657  static void b44_chip_reset(struct b44 *bp)
658  {
659 -       if (ssb_is_core_up(bp)) {
660 +       struct ssb_device *sdev = bp->sdev;
661 +
662 +       if (ssb_device_is_enabled(bp->sdev)) {
663                 bw32(bp, B44_RCV_LAZY, 0);
664                 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
665                 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
666 @@ -1294,19 +1242,24 @@ static void b44_chip_reset(struct b44 *b
667                 }
668                 bw32(bp, B44_DMARX_CTRL, 0);
669                 bp->rx_prod = bp->rx_cons = 0;
670 -       } else {
671 -               ssb_pci_setup(bp, (bp->core_unit == 0 ?
672 -                                  SBINTVEC_ENET0 :
673 -                                  SBINTVEC_ENET1));
674         }
675  
676 -       ssb_core_reset(bp);
677 -
678 +       ssb_device_enable(bp->sdev, 0);
679         b44_clear_stats(bp);
680  
681 -       /* Make PHY accessible. */
682 -       bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
683 +       switch (sdev->bus->bustype) {
684 +       case SSB_BUSTYPE_SSB:
685 +                       bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
686 +                            (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
687 +                            & MDIO_CTRL_MAXF_MASK)));
688 +               break;
689 +       case SSB_BUSTYPE_PCI:
690 +       case SSB_BUSTYPE_PCMCIA:
691 +               bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
692                              (0x0d & MDIO_CTRL_MAXF_MASK)));
693 +               break;
694 +       }
695 +
696         br32(bp, B44_MDIO_CTRL);
697  
698         if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
699 @@ -1349,6 +1302,7 @@ static int b44_set_mac_addr(struct net_d
700  {
701         struct b44 *bp = netdev_priv(dev);
702         struct sockaddr *addr = p;
703 +       u32 val;
704  
705         if (netif_running(dev))
706                 return -EBUSY;
707 @@ -1359,7 +1313,11 @@ static int b44_set_mac_addr(struct net_d
708         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
709  
710         spin_lock_irq(&bp->lock);
711 -       __b44_set_mac_addr(bp);
712 +
713 +       val = br32(bp, B44_RXCONFIG);
714 +       if (!(val & RXCONFIG_CAM_ABSENT))
715 +               __b44_set_mac_addr(bp);
716 +
717         spin_unlock_irq(&bp->lock);
718  
719         return 0;
720 @@ -1445,18 +1403,6 @@ out:
721         return err;
722  }
723  
724 -#if 0
725 -/*static*/ void b44_dump_state(struct b44 *bp)
726 -{
727 -       u32 val32, val32_2, val32_3, val32_4, val32_5;
728 -       u16 val16;
729 -
730 -       pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
731 -       printk("DEBUG: PCI status [%04x] \n", val16);
732 -
733 -}
734 -#endif
735 -
736  #ifdef CONFIG_NET_POLL_CONTROLLER
737  /*
738   * Polling receive - used by netconsole and other diagnostic tools
739 @@ -1570,7 +1516,6 @@ static void b44_setup_pseudo_magicp(stru
740  static void b44_setup_wol(struct b44 *bp)
741  {
742         u32 val;
743 -       u16 pmval;
744  
745         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
746  
747 @@ -1594,13 +1539,6 @@ static void b44_setup_wol(struct b44 *bp
748         } else {
749                 b44_setup_pseudo_magicp(bp);
750         }
751 -
752 -       val = br32(bp, B44_SBTMSLOW);
753 -       bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
754 -
755 -       pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
756 -       pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
757 -
758  }
759  
760  static int b44_close(struct net_device *dev)
761 @@ -1700,7 +1638,7 @@ static void __b44_set_rx_mode(struct net
762  
763         val = br32(bp, B44_RXCONFIG);
764         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
765 -       if (dev->flags & IFF_PROMISC) {
766 +       if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
767                 val |= RXCONFIG_PROMISC;
768                 bw32(bp, B44_RXCONFIG, val);
769         } else {
770 @@ -1747,12 +1685,8 @@ static void b44_set_msglevel(struct net_
771  
772  static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
773  {
774 -       struct b44 *bp = netdev_priv(dev);
775 -       struct pci_dev *pci_dev = bp->pdev;
776 -
777         strcpy (info->driver, DRV_MODULE_NAME);
778         strcpy (info->version, DRV_MODULE_VERSION);
779 -       strcpy (info->bus_info, pci_name(pci_dev));
780  }
781  
782  static int b44_nway_reset(struct net_device *dev)
783 @@ -2035,6 +1969,245 @@ static const struct ethtool_ops b44_etht
784         .get_ethtool_stats      = b44_get_ethtool_stats,
785  };
786  
787 +static int b44_ethtool_ioctl (struct net_device *dev, void __user *useraddr)
788 +{
789 +       struct b44 *bp = dev->priv;
790 +       u32 ethcmd;
791 +
792 +       if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
793 +               return -EFAULT;
794 +
795 +       switch (ethcmd) {
796 +       case ETHTOOL_GDRVINFO: {
797 +               struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
798 +               strcpy (info.driver, DRV_MODULE_NAME);
799 +               strcpy (info.version, DRV_MODULE_VERSION);
800 +               memset(&info.fw_version, 0, sizeof(info.fw_version));
801 +               info.eedump_len = 0;
802 +               info.regdump_len = 0;
803 +               if (copy_to_user (useraddr, &info, sizeof (info)))
804 +                       return -EFAULT;
805 +               return 0;
806 +       }
807 +
808 +       case ETHTOOL_GSET: {
809 +               struct ethtool_cmd cmd = { ETHTOOL_GSET };
810 +
811 +               if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
812 +                       return -EAGAIN;
813 +               cmd.supported = (SUPPORTED_Autoneg);
814 +               cmd.supported |= (SUPPORTED_100baseT_Half |
815 +                                 SUPPORTED_100baseT_Full |
816 +                                 SUPPORTED_10baseT_Half |
817 +                                 SUPPORTED_10baseT_Full |
818 +                                 SUPPORTED_MII);
819 +
820 +               cmd.advertising = 0;
821 +               if (bp->flags & B44_FLAG_ADV_10HALF)
822 +                       cmd.advertising |= ADVERTISE_10HALF;
823 +               if (bp->flags & B44_FLAG_ADV_10FULL)
824 +                       cmd.advertising |= ADVERTISE_10FULL;
825 +               if (bp->flags & B44_FLAG_ADV_100HALF)
826 +                       cmd.advertising |= ADVERTISE_100HALF;
827 +               if (bp->flags & B44_FLAG_ADV_100FULL)
828 +                       cmd.advertising |= ADVERTISE_100FULL;
829 +               cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
830 +               cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
831 +                       SPEED_100 : SPEED_10;
832 +               cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
833 +                       DUPLEX_FULL : DUPLEX_HALF;
834 +               cmd.port = 0;
835 +               cmd.phy_address = bp->phy_addr;
836 +               cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
837 +                       XCVR_INTERNAL : XCVR_EXTERNAL;
838 +               cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
839 +                       AUTONEG_DISABLE : AUTONEG_ENABLE;
840 +               cmd.maxtxpkt = 0;
841 +               cmd.maxrxpkt = 0;
842 +               if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
843 +                       return -EFAULT;
844 +               return 0;
845 +       }
846 +       case ETHTOOL_SSET: {
847 +               struct ethtool_cmd cmd;
848 +
849 +               if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
850 +                       return -EAGAIN;
851 +
852 +               if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
853 +                       return -EFAULT;
854 +
855 +               /* We do not support gigabit. */
856 +               if (cmd.autoneg == AUTONEG_ENABLE) {
857 +                       if (cmd.advertising &
858 +                           (ADVERTISED_1000baseT_Half |
859 +                            ADVERTISED_1000baseT_Full))
860 +                               return -EINVAL;
861 +               } else if ((cmd.speed != SPEED_100 &&
862 +                           cmd.speed != SPEED_10) ||
863 +                          (cmd.duplex != DUPLEX_HALF &&
864 +                           cmd.duplex != DUPLEX_FULL)) {
865 +                               return -EINVAL;
866 +               }
867 +
868 +               spin_lock_irq(&bp->lock);
869 +
870 +               if (cmd.autoneg == AUTONEG_ENABLE) {
871 +                       bp->flags &= ~B44_FLAG_FORCE_LINK;
872 +                       bp->flags &= ~(B44_FLAG_ADV_10HALF |
873 +                                      B44_FLAG_ADV_10FULL |
874 +                                      B44_FLAG_ADV_100HALF |
875 +                                      B44_FLAG_ADV_100FULL);
876 +                       if (cmd.advertising & ADVERTISE_10HALF)
877 +                               bp->flags |= B44_FLAG_ADV_10HALF;
878 +                       if (cmd.advertising & ADVERTISE_10FULL)
879 +                               bp->flags |= B44_FLAG_ADV_10FULL;
880 +                       if (cmd.advertising & ADVERTISE_100HALF)
881 +                               bp->flags |= B44_FLAG_ADV_100HALF;
882 +                       if (cmd.advertising & ADVERTISE_100FULL)
883 +                               bp->flags |= B44_FLAG_ADV_100FULL;
884 +               } else {
885 +                       bp->flags |= B44_FLAG_FORCE_LINK;
886 +                       if (cmd.speed == SPEED_100)
887 +                               bp->flags |= B44_FLAG_100_BASE_T;
888 +                       if (cmd.duplex == DUPLEX_FULL)
889 +                               bp->flags |= B44_FLAG_FULL_DUPLEX;
890 +               }
891 +
892 +               b44_setup_phy(bp);
893 +
894 +               spin_unlock_irq(&bp->lock);
895 +
896 +               return 0;
897 +       }
898 +
899 +       case ETHTOOL_GMSGLVL: {
900 +               struct ethtool_value edata = { ETHTOOL_GMSGLVL };
901 +               edata.data = bp->msg_enable;
902 +               if (copy_to_user(useraddr, &edata, sizeof(edata)))
903 +                       return -EFAULT;
904 +               return 0;
905 +       }
906 +       case ETHTOOL_SMSGLVL: {
907 +               struct ethtool_value edata;
908 +               if (copy_from_user(&edata, useraddr, sizeof(edata)))
909 +                       return -EFAULT;
910 +               bp->msg_enable = edata.data;
911 +               return 0;
912 +       }
913 +       case ETHTOOL_NWAY_RST: {
914 +               u32 bmcr;
915 +               int r;
916 +
917 +               spin_lock_irq(&bp->lock);
918 +               b44_readphy(bp, MII_BMCR, &bmcr);
919 +               b44_readphy(bp, MII_BMCR, &bmcr);
920 +               r = -EINVAL;
921 +               if (bmcr & BMCR_ANENABLE) {
922 +                       b44_writephy(bp, MII_BMCR,
923 +                                    bmcr | BMCR_ANRESTART);
924 +                       r = 0;
925 +               }
926 +               spin_unlock_irq(&bp->lock);
927 +
928 +               return r;
929 +       }
930 +       case ETHTOOL_GLINK: {
931 +               struct ethtool_value edata = { ETHTOOL_GLINK };
932 +               edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
933 +               if (copy_to_user(useraddr, &edata, sizeof(edata)))
934 +                       return -EFAULT;
935 +               return 0;
936 +       }
937 +       case ETHTOOL_GRINGPARAM: {
938 +               struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
939 +
940 +               ering.rx_max_pending = B44_RX_RING_SIZE - 1;
941 +               ering.rx_pending = bp->rx_pending;
942 +
943 +               /* XXX ethtool lacks a tx_max_pending, oops... */
944 +
945 +               if (copy_to_user(useraddr, &ering, sizeof(ering)))
946 +                       return -EFAULT;
947 +               return 0;
948 +       }
949 +       case ETHTOOL_SRINGPARAM: {
950 +               struct ethtool_ringparam ering;
951 +
952 +               if (copy_from_user(&ering, useraddr, sizeof(ering)))
953 +                       return -EFAULT;
954 +
955 +               if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
956 +                   (ering.rx_mini_pending != 0) ||
957 +                   (ering.rx_jumbo_pending != 0) ||
958 +                   (ering.tx_pending > B44_TX_RING_SIZE - 1))
959 +                       return -EINVAL;
960 +
961 +               spin_lock_irq(&bp->lock);
962 +
963 +               bp->rx_pending = ering.rx_pending;
964 +               bp->tx_pending = ering.tx_pending;
965 +
966 +               b44_halt(bp);
967 +               b44_init_rings(bp);
968 +               b44_init_hw(bp, 1);
969 +               netif_wake_queue(bp->dev);
970 +               spin_unlock_irq(&bp->lock);
971 +
972 +               b44_enable_ints(bp);
973 +
974 +               return 0;
975 +       }
976 +       case ETHTOOL_GPAUSEPARAM: {
977 +               struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
978 +
979 +               epause.autoneg =
980 +                       (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
981 +               epause.rx_pause =
982 +                       (bp->flags & B44_FLAG_RX_PAUSE) != 0;
983 +               epause.tx_pause =
984 +                       (bp->flags & B44_FLAG_TX_PAUSE) != 0;
985 +               if (copy_to_user(useraddr, &epause, sizeof(epause)))
986 +                       return -EFAULT;
987 +               return 0;
988 +       }
989 +       case ETHTOOL_SPAUSEPARAM: {
990 +               struct ethtool_pauseparam epause;
991 +
992 +               if (copy_from_user(&epause, useraddr, sizeof(epause)))
993 +                       return -EFAULT;
994 +
995 +               spin_lock_irq(&bp->lock);
996 +               if (epause.autoneg)
997 +                       bp->flags |= B44_FLAG_PAUSE_AUTO;
998 +               else
999 +                       bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1000 +               if (epause.rx_pause)
1001 +                       bp->flags |= B44_FLAG_RX_PAUSE;
1002 +               else
1003 +                       bp->flags &= ~B44_FLAG_RX_PAUSE;
1004 +               if (epause.tx_pause)
1005 +                       bp->flags |= B44_FLAG_TX_PAUSE;
1006 +               else
1007 +                       bp->flags &= ~B44_FLAG_TX_PAUSE;
1008 +               if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1009 +                       b44_halt(bp);
1010 +                       b44_init_rings(bp);
1011 +                       b44_init_hw(bp, 1);
1012 +               } else {
1013 +                       __b44_set_flow_ctrl(bp, bp->flags);
1014 +               }
1015 +               spin_unlock_irq(&bp->lock);
1016 +
1017 +               b44_enable_ints(bp);
1018 +
1019 +               return 0;
1020 +       }
1021 +       };
1022 +
1023 +       return -EOPNOTSUPP;
1024 +}
1025 +
1026  static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1027  {
1028         struct mii_ioctl_data *data = if_mii(ifr);
1029 @@ -2044,40 +2217,64 @@ static int b44_ioctl(struct net_device *
1030         if (!netif_running(dev))
1031                 goto out;
1032  
1033 -       spin_lock_irq(&bp->lock);
1034 -       err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1035 -       spin_unlock_irq(&bp->lock);
1036 -out:
1037 -       return err;
1038 -}
1039 +       switch (cmd) {
1040 +       case SIOCETHTOOL:
1041 +              return b44_ethtool_ioctl(dev, (void __user*) ifr->ifr_data);
1042  
1043 -/* Read 128-bytes of EEPROM. */
1044 -static int b44_read_eeprom(struct b44 *bp, u8 *data)
1045 -{
1046 -       long i;
1047 -       __le16 *ptr = (__le16 *) data;
1048 +       case SIOCGMIIPHY:
1049 +              data->phy_id = bp->phy_addr;
1050  
1051 -       for (i = 0; i < 128; i += 2)
1052 -               ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
1053 +              /* fallthru */
1054 +       case SIOCGMIIREG: {
1055 +              u32 mii_regval;
1056 +              spin_lock_irq(&bp->lock);
1057 +              err = __b44_readphy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, &mii_regval);
1058 +              spin_unlock_irq(&bp->lock);
1059  
1060 -       return 0;
1061 +              data->val_out = mii_regval;
1062 +
1063 +              return err;
1064 +       }
1065 +
1066 +       case SIOCSMIIREG:
1067 +              if (!capable(CAP_NET_ADMIN))
1068 +                     return -EPERM;
1069 +
1070 +              spin_lock_irq(&bp->lock);
1071 +              err = __b44_writephy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1072 +              spin_unlock_irq(&bp->lock);
1073 +
1074 +              return err;
1075 +
1076 +       default:
1077 +              break;
1078 +       };
1079 +       return -EOPNOTSUPP;
1080 +
1081 +out:
1082 +       return err;
1083  }
1084  
1085  static int __devinit b44_get_invariants(struct b44 *bp)
1086  {
1087 -       u8 eeprom[128];
1088 -       int err;
1089 +       struct ssb_device *sdev = bp->sdev;
1090 +       int err = 0;
1091 +       u8 *addr;
1092  
1093 -       err = b44_read_eeprom(bp, &eeprom[0]);
1094 -       if (err)
1095 -               goto out;
1096 +       bp->dma_offset = ssb_dma_translation(sdev);
1097  
1098 -       bp->dev->dev_addr[0] = eeprom[79];
1099 -       bp->dev->dev_addr[1] = eeprom[78];
1100 -       bp->dev->dev_addr[2] = eeprom[81];
1101 -       bp->dev->dev_addr[3] = eeprom[80];
1102 -       bp->dev->dev_addr[4] = eeprom[83];
1103 -       bp->dev->dev_addr[5] = eeprom[82];
1104 +       switch (instance) {
1105 +       case 1:
1106 +              addr = sdev->bus->sprom.et0mac;
1107 +              bp->phy_addr = sdev->bus->sprom.et0phyaddr;
1108 +              break;
1109 +       default:
1110 +              addr = sdev->bus->sprom.et1mac;
1111 +              bp->phy_addr = sdev->bus->sprom.et1phyaddr;
1112 +              break;
1113 +       }
1114 +
1115 +       memcpy(bp->dev->dev_addr, addr, 6);
1116  
1117         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1118                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1119 @@ -2086,103 +2283,52 @@ static int __devinit b44_get_invariants(
1120  
1121         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1122  
1123 -       bp->phy_addr = eeprom[90] & 0x1f;
1124 -
1125         bp->imask = IMASK_DEF;
1126  
1127 -       bp->core_unit = ssb_core_unit(bp);
1128 -       bp->dma_offset = SB_PCI_DMA;
1129 -
1130         /* XXX - really required?
1131            bp->flags |= B44_FLAG_BUGGY_TXPTR;
1132           */
1133  
1134 -       if (ssb_get_core_rev(bp) >= 7)
1135 +       if (bp->sdev->id.revision >= 7)
1136                 bp->flags |= B44_FLAG_B0_ANDLATER;
1137  
1138 -out:
1139         return err;
1140  }
1141  
1142 -static int __devinit b44_init_one(struct pci_dev *pdev,
1143 -                                 const struct pci_device_id *ent)
1144 +static int __devinit b44_init_one(struct ssb_device *sdev,
1145 +                                 const struct ssb_device_id *ent)
1146  {
1147         static int b44_version_printed = 0;
1148 -       unsigned long b44reg_base, b44reg_len;
1149         struct net_device *dev;
1150         struct b44 *bp;
1151         int err, i;
1152  
1153 +       instance++;
1154 +
1155         if (b44_version_printed++ == 0)
1156                 printk(KERN_INFO "%s", version);
1157  
1158 -       err = pci_enable_device(pdev);
1159 -       if (err) {
1160 -               dev_err(&pdev->dev, "Cannot enable PCI device, "
1161 -                      "aborting.\n");
1162 -               return err;
1163 -       }
1164 -
1165 -       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1166 -               dev_err(&pdev->dev,
1167 -                       "Cannot find proper PCI device "
1168 -                      "base address, aborting.\n");
1169 -               err = -ENODEV;
1170 -               goto err_out_disable_pdev;
1171 -       }
1172 -
1173 -       err = pci_request_regions(pdev, DRV_MODULE_NAME);
1174 -       if (err) {
1175 -               dev_err(&pdev->dev,
1176 -                       "Cannot obtain PCI resources, aborting.\n");
1177 -               goto err_out_disable_pdev;
1178 -       }
1179 -
1180 -       pci_set_master(pdev);
1181 -
1182 -       err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1183 -       if (err) {
1184 -               dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1185 -               goto err_out_free_res;
1186 -       }
1187 -
1188 -       err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1189 -       if (err) {
1190 -               dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1191 -               goto err_out_free_res;
1192 -       }
1193 -
1194 -       b44reg_base = pci_resource_start(pdev, 0);
1195 -       b44reg_len = pci_resource_len(pdev, 0);
1196 -
1197         dev = alloc_etherdev(sizeof(*bp));
1198         if (!dev) {
1199 -               dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
1200 +               dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
1201                 err = -ENOMEM;
1202 -               goto err_out_free_res;
1203 +               goto out;
1204         }
1205  
1206         SET_MODULE_OWNER(dev);
1207 -       SET_NETDEV_DEV(dev,&pdev->dev);
1208 +       SET_NETDEV_DEV(dev,sdev->dev);
1209  
1210         /* No interesting netdevice features in this card... */
1211         dev->features |= 0;
1212  
1213         bp = netdev_priv(dev);
1214 -       bp->pdev = pdev;
1215 +       bp->sdev = sdev;
1216         bp->dev = dev;
1217  
1218         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1219  
1220         spin_lock_init(&bp->lock);
1221  
1222 -       bp->regs = ioremap(b44reg_base, b44reg_len);
1223 -       if (bp->regs == 0UL) {
1224 -               dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
1225 -               err = -ENOMEM;
1226 -               goto err_out_free_dev;
1227 -       }
1228 -
1229         bp->rx_pending = B44_DEF_RX_RING_PENDING;
1230         bp->tx_pending = B44_DEF_TX_RING_PENDING;
1231  
1232 @@ -2201,16 +2347,16 @@ static int __devinit b44_init_one(struct
1233         dev->poll_controller = b44_poll_controller;
1234  #endif
1235         dev->change_mtu = b44_change_mtu;
1236 -       dev->irq = pdev->irq;
1237 +       dev->irq = sdev->irq;
1238         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
1239  
1240         netif_carrier_off(dev);
1241  
1242         err = b44_get_invariants(bp);
1243         if (err) {
1244 -               dev_err(&pdev->dev,
1245 +               dev_err(sdev->dev,
1246                         "Problem fetching invariants of chip, aborting.\n");
1247 -               goto err_out_iounmap;
1248 +               goto err_out_free_dev;
1249         }
1250  
1251         bp->mii_if.dev = dev;
1252 @@ -2229,61 +2375,52 @@ static int __devinit b44_init_one(struct
1253  
1254         err = register_netdev(dev);
1255         if (err) {
1256 -               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1257 -               goto err_out_iounmap;
1258 +               dev_err(sdev->dev, "Cannot register net device, aborting.\n");
1259 +               goto out;
1260         }
1261  
1262 -       pci_set_drvdata(pdev, dev);
1263 -
1264 -       pci_save_state(bp->pdev);
1265 +       ssb_set_drvdata(sdev, dev);
1266  
1267         /* Chip reset provides power to the b44 MAC & PCI cores, which
1268          * is necessary for MAC register access.
1269          */
1270         b44_chip_reset(bp);
1271  
1272 -       printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
1273 +       printk(KERN_INFO "%s: Broadcom 10/100BaseT Ethernet ", dev->name);
1274         for (i = 0; i < 6; i++)
1275                 printk("%2.2x%c", dev->dev_addr[i],
1276                        i == 5 ? '\n' : ':');
1277  
1278 -       return 0;
1279 +       /* Initialize phy */
1280 +       spin_lock_irq(&bp->lock);
1281 +       b44_chip_reset(bp);
1282 +       spin_unlock_irq(&bp->lock);
1283  
1284 -err_out_iounmap:
1285 -       iounmap(bp->regs);
1286 +       return 0;
1287  
1288  err_out_free_dev:
1289         free_netdev(dev);
1290  
1291 -err_out_free_res:
1292 -       pci_release_regions(pdev);
1293 -
1294 -err_out_disable_pdev:
1295 -       pci_disable_device(pdev);
1296 -       pci_set_drvdata(pdev, NULL);
1297 +out:
1298         return err;
1299  }
1300  
1301 -static void __devexit b44_remove_one(struct pci_dev *pdev)
1302 +static void __devexit b44_remove_one(struct ssb_device *pdev)
1303  {
1304 -       struct net_device *dev = pci_get_drvdata(pdev);
1305 -       struct b44 *bp = netdev_priv(dev);
1306 +       struct net_device *dev = ssb_get_drvdata(pdev);
1307  
1308         unregister_netdev(dev);
1309 -       iounmap(bp->regs);
1310         free_netdev(dev);
1311 -       pci_release_regions(pdev);
1312 -       pci_disable_device(pdev);
1313 -       pci_set_drvdata(pdev, NULL);
1314 +       ssb_set_drvdata(pdev, NULL);
1315  }
1316  
1317 -static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
1318 +static int b44_suspend(struct ssb_device *pdev, pm_message_t state)
1319  {
1320 -       struct net_device *dev = pci_get_drvdata(pdev);
1321 +       struct net_device *dev = ssb_get_drvdata(pdev);
1322         struct b44 *bp = netdev_priv(dev);
1323  
1324          if (!netif_running(dev))
1325 -                 return 0;
1326 +               return 0;
1327  
1328         del_timer_sync(&bp->timer);
1329  
@@ -2301,33 +2438,22 @@ static int b44_suspend(struct pci_dev *p
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }
-       pci_disable_device(pdev);
+
        return 0;
 }
 
-static int b44_resume(struct pci_dev *pdev)
+static int b44_resume(struct ssb_device *pdev)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = ssb_get_drvdata(pdev);
        struct b44 *bp = netdev_priv(dev);
        int rc = 0;
 
-       pci_restore_state(pdev);
-       rc = pci_enable_device(pdev);
-       if (rc) {
-               printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
-                       dev->name);
-               return rc;
-       }
-
-       pci_set_master(pdev);
-
        if (!netif_running(dev))
                return 0;
 
        rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc) {
                printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
-               pci_disable_device(pdev);
                return rc;
        }
 
@@ -2346,29 +2472,31 @@ static int b44_resume(struct pci_dev *pd
        return 0;
 }
 
-static struct pci_driver b44_driver = {
+static struct ssb_driver b44_driver = {
        .name           = DRV_MODULE_NAME,
-       .id_table       = b44_pci_tbl,
+       .id_table       = b44_ssb_tbl,
        .probe          = b44_init_one,
        .remove         = __devexit_p(b44_remove_one),
-        .suspend        = b44_suspend,
-        .resume         = b44_resume,
+       .suspend        = b44_suspend,
+       .resume         = b44_resume,
 };
 
 static int __init b44_init(void)
 {
        unsigned int dma_desc_align_size = dma_get_cache_alignment();
 
+       instance = 0;
+
        /* Setup paramaters for syncing RX/TX DMA descriptors */
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
-       return pci_register_driver(&b44_driver);
+       return ssb_driver_register(&b44_driver);
 }
 
 static void __exit b44_cleanup(void)
 {
-       pci_unregister_driver(&b44_driver);
+       ssb_driver_unregister(&b44_driver);
 }
 
 module_init(b44_init);
Index: linux-2.6.23.17/drivers/net/b44.h
===================================================================
--- linux-2.6.23.17.orig/drivers/net/b44.h
+++ linux-2.6.23.17/drivers/net/b44.h
@@ -129,6 +129,7 @@
 #define  RXCONFIG_FLOW         0x00000020 /* Flow Control Enable */
 #define  RXCONFIG_FLOW_ACCEPT  0x00000040 /* Accept Unicast Flow Control Frame */
 #define  RXCONFIG_RFILT                0x00000080 /* Reject Filter */
+#define  RXCONFIG_CAM_ABSENT   0x00000100 /* CAM Absent */
 #define B44_RXMAXLEN   0x0404UL /* EMAC RX Max Packet Length */
 #define B44_TXMAXLEN   0x0408UL /* EMAC TX Max Packet Length */
 #define B44_MDIO_CTRL  0x0410UL /* EMAC MDIO Control */
@@ -227,75 +228,9 @@
 #define B44_RX_PAUSE   0x05D4UL /* MIB RX Pause Packets */
 #define B44_RX_NPAUSE  0x05D8UL /* MIB RX Non-Pause Packets */
 
-/* Silicon backplane register definitions */
-#define B44_SBIMSTATE  0x0F90UL /* SB Initiator Agent State */
-#define  SBIMSTATE_PC          0x0000000f /* Pipe Count */
-#define  SBIMSTATE_AP_MASK     0x00000030 /* Arbitration Priority */
-#define  SBIMSTATE_AP_BOTH     0x00000000 /* Use both timeslices and token */
-#define  SBIMSTATE_AP_TS       0x00000010 /* Use timeslices only */
-#define  SBIMSTATE_AP_TK       0x00000020 /* Use token only */
-#define  SBIMSTATE_AP_RSV      0x00000030 /* Reserved */
-#define  SBIMSTATE_IBE         0x00020000 /* In Band Error */
-#define  SBIMSTATE_TO          0x00040000 /* Timeout */
-#define B44_SBINTVEC   0x0F94UL /* SB Interrupt Mask */
-#define  SBINTVEC_PCI          0x00000001 /* Enable interrupts for PCI */
-#define  SBINTVEC_ENET0                0x00000002 /* Enable interrupts for enet 0 */
-#define  SBINTVEC_ILINE20      0x00000004 /* Enable interrupts for iline20 */
-#define  SBINTVEC_CODEC                0x00000008 /* Enable interrupts for v90 codec */
-#define  SBINTVEC_USB          0x00000010 /* Enable interrupts for usb */
-#define  SBINTVEC_EXTIF                0x00000020 /* Enable interrupts for external i/f */
-#define  SBINTVEC_ENET1                0x00000040 /* Enable interrupts for enet 1 */
-#define B44_SBTMSLOW   0x0F98UL /* SB Target State Low */
-#define  SBTMSLOW_RESET                0x00000001 /* Reset */
-#define  SBTMSLOW_REJECT       0x00000002 /* Reject */
-#define  SBTMSLOW_CLOCK                0x00010000 /* Clock Enable */
-#define  SBTMSLOW_FGC          0x00020000 /* Force Gated Clocks On */
-#define  SBTMSLOW_PE           0x40000000 /* Power Management Enable */
-#define  SBTMSLOW_BE           0x80000000 /* BIST Enable */
-#define B44_SBTMSHIGH  0x0F9CUL /* SB Target State High */
-#define  SBTMSHIGH_SERR                0x00000001 /* S-error */
-#define  SBTMSHIGH_INT         0x00000002 /* Interrupt */
-#define  SBTMSHIGH_BUSY                0x00000004 /* Busy */
-#define  SBTMSHIGH_GCR         0x20000000 /* Gated Clock Request */
-#define  SBTMSHIGH_BISTF       0x40000000 /* BIST Failed */
-#define  SBTMSHIGH_BISTD       0x80000000 /* BIST Done */
-#define B44_SBIDHIGH   0x0FFCUL /* SB Identification High */
-#define  SBIDHIGH_RC_MASK      0x0000000f /* Revision Code */
-#define  SBIDHIGH_CC_MASK      0x0000fff0 /* Core Code */
-#define  SBIDHIGH_CC_SHIFT     4
-#define  SBIDHIGH_VC_MASK      0xffff0000 /* Vendor Code */
-#define  SBIDHIGH_VC_SHIFT     16
-
-/* SSB PCI config space registers.  */
-#define SSB_PMCSR              0x44
-#define  SSB_PE                        0x100
-#define        SSB_BAR0_WIN            0x80
-#define        SSB_BAR1_WIN            0x84
-#define        SSB_SPROM_CONTROL       0x88
-#define        SSB_BAR1_CONTROL        0x8c
-
-/* SSB core and host control registers.  */
-#define SSB_CONTROL            0x0000UL
-#define SSB_ARBCONTROL         0x0010UL
-#define SSB_ISTAT              0x0020UL
-#define SSB_IMASK              0x0024UL
-#define SSB_MBOX               0x0028UL
-#define SSB_BCAST_ADDR         0x0050UL
-#define SSB_BCAST_DATA         0x0054UL
-#define SSB_PCI_TRANS_0                0x0100UL
-#define SSB_PCI_TRANS_1                0x0104UL
-#define SSB_PCI_TRANS_2                0x0108UL
-#define SSB_SPROM              0x0800UL
-
-#define SSB_PCI_MEM            0x00000000
-#define SSB_PCI_IO             0x00000001
-#define SSB_PCI_CFG0           0x00000002
-#define SSB_PCI_CFG1           0x00000003
-#define SSB_PCI_PREF           0x00000004
-#define SSB_PCI_BURST          0x00000008
-#define SSB_PCI_MASK0          0xfc000000
-#define SSB_PCI_MASK1          0xfc000000
-#define SSB_PCI_MASK2          0xc0000000
+#define br32(bp, REG)  ssb_read32((bp)->sdev, (REG))
+#define bw32(bp, REG, VAL)     ssb_write32((bp)->sdev, (REG), (VAL))
+#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
 
 /* 4400 PHY registers */
 #define B44_MII_AUXCTRL                24      /* Auxiliary Control */
@@ -346,10 +281,12 @@ struct rx_header {
 
 struct ring_info {
        struct sk_buff          *skb;
-       DECLARE_PCI_UNMAP_ADDR(mapping);
+       dma_addr_t      mapping;
 };
 
 #define B44_MCAST_TABLE_SIZE   32
+#define B44_PHY_ADDR_NO_PHY    30
+#define B44_MDC_RATIO          5000000
 
 #define        B44_STAT_REG_DECLARE            \
        _B44(tx_good_octets)            \
@@ -425,9 +362,10 @@ struct b44 {
 
        u32                     dma_offset;
        u32                     flags;
-#define B44_FLAG_B0_ANDLATER   0x00000001
+#define B44_FLAG_INIT_COMPLETE 0x00000001
 #define B44_FLAG_BUGGY_TXPTR   0x00000002
 #define B44_FLAG_REORDER_BUG   0x00000004
+#define B44_FLAG_B0_ANDLATER    0x00000008
 #define B44_FLAG_PAUSE_AUTO    0x00008000
 #define B44_FLAG_FULL_DUPLEX   0x00010000
 #define B44_FLAG_100_BASE_T    0x00020000
@@ -450,8 +388,7 @@ struct b44 {
        struct net_device_stats stats;
        struct b44_hw_stats     hw_stats;
 
-       void __iomem            *regs;
-       struct pci_dev          *pdev;
+       struct ssb_device       *sdev;
        struct net_device       *dev;
 
        dma_addr_t              rx_ring_dma, tx_ring_dma;
Index: linux-2.6.23.17/drivers/net/Kconfig
===================================================================
--- linux-2.6.23.17.orig/drivers/net/Kconfig
+++ linux-2.6.23.17/drivers/net/Kconfig
@@ -1577,7 +1577,7 @@ config APRICOT
 
 config B44
        tristate "Broadcom 4400 ethernet support"
-       depends on NET_PCI && PCI
+       depends on SSB && EXPERIMENTAL
        select MII
        help
          If you have a network (Ethernet) controller of this type, say Y and