Update to 2.6.35.3
openwrt.git: target/linux/generic/patches-2.6.35/975-ssb_update.patch
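This update removes the ssb_dma_* wrapper API: drivers now call the generic DMA API directly on the new ssb_device::dma_dev pointer, which ssb_devices_register() points at the host PCI device (or at the ssb device itself on SSB-native buses). A minimal sketch of the resulting b44-style call pattern, for illustration only (names taken from the driver; the error path is abbreviated, the real code first retries with a GFP_DMA bounce buffer):

	/* old wrapper, removed by this patch */
	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);

	/* new: generic DMA API on the backing struct device */
	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + len > DMA_BIT_MASK(30))	/* chip can only DMA below 1GB */
		goto err_out;			/* abbreviated error handling */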
---
 drivers/net/b44.c                   |  146 +++++++++++++++------------------
 drivers/ssb/driver_chipcommon.c     |   24 +++++
 drivers/ssb/driver_chipcommon_pmu.c |   17 +--
 drivers/ssb/main.c                  |   76 -----------------
 include/linux/ssb/ssb.h             |  159 ------------------------------------
 5 files changed, 104 insertions(+), 318 deletions(-)

--- linux-2.6.35.3.orig/drivers/net/b44.c
+++ linux-2.6.35.3/drivers/net/b44.c
@@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *)
 
 static void b44_init_hw(struct b44 *, int);
 
-static int dma_desc_align_mask;
 static int dma_desc_sync_size;
 static int instance;
 
@@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for
                                                unsigned long offset,
                                                enum dma_data_direction dir)
 {
-       ssb_dma_sync_single_range_for_device(sdev, dma_base,
-                                            offset & dma_desc_align_mask,
-                                            dma_desc_sync_size, dir);
+       dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+                                  dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for
                                             unsigned long offset,
                                             enum dma_data_direction dir)
 {
-       ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-                                         offset & dma_desc_align_mask,
-                                         dma_desc_sync_size, dir);
+       dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+                               dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
 
                BUG_ON(skb == NULL);
 
-               ssb_dma_unmap_single(bp->sdev,
-                                    rp->mapping,
-                                    skb->len,
-                                    DMA_TO_DEVICE);
+               dma_unmap_single(bp->sdev->dma_dev,
+                                rp->mapping,
+                                skb->len,
+                                DMA_TO_DEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }
@@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *
        if (skb == NULL)
                return -ENOMEM;
 
-       mapping = ssb_dma_map_single(bp->sdev, skb->data,
-                                    RX_PKT_BUF_SZ,
-                                    DMA_FROM_DEVICE);
+       mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+                                RX_PKT_BUF_SZ,
+                                DMA_FROM_DEVICE);
 
        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
-       if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+       if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                /* Sigh... */
-               if (!ssb_dma_mapping_error(bp->sdev, mapping))
-                       ssb_dma_unmap_single(bp->sdev, mapping,
+               if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+                       dma_unmap_single(bp->sdev->dma_dev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
-               mapping = ssb_dma_map_single(bp->sdev, skb->data,
-                                            RX_PKT_BUF_SZ,
-                                            DMA_FROM_DEVICE);
-               if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-                       mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-                       if (!ssb_dma_mapping_error(bp->sdev, mapping))
-                               ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+               mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+                                        RX_PKT_BUF_SZ,
+                                        DMA_FROM_DEVICE);
+               if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+                   mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+                       if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+                               dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
@@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *b
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);
 
-       ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-                                      RX_PKT_BUF_SZ,
-                                      DMA_FROM_DEVICE);
+       dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+                                  RX_PKT_BUF_SZ,
+                                  DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int bu
                struct rx_header *rh;
                u16 len;
 
-               ssb_dma_sync_single_for_cpu(bp->sdev, map,
-                                           RX_PKT_BUF_SZ,
-                                           DMA_FROM_DEVICE);
+               dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+                                       RX_PKT_BUF_SZ,
+                                       DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int bu
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
-                       ssb_dma_unmap_single(bp->sdev, map,
-                                            skb_size, DMA_FROM_DEVICE);
+                       dma_unmap_single(bp->sdev->dma_dev, map,
+                                        skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct
                goto err_out;
        }
 
-       mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-       if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+       mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                struct sk_buff *bounce_skb;
 
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
-               if (!ssb_dma_mapping_error(bp->sdev, mapping))
-                       ssb_dma_unmap_single(bp->sdev, mapping, len,
+               if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+                       dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);
 
                bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;
 
-               mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-                                            len, DMA_TO_DEVICE);
-               if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-                       if (!ssb_dma_mapping_error(bp->sdev, mapping))
-                               ssb_dma_unmap_single(bp->sdev, mapping,
+               mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+                                        len, DMA_TO_DEVICE);
+               if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+                       if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+                               dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                     len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
@@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *b
 
                if (rp->skb == NULL)
                        continue;
-               ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-                                    DMA_FROM_DEVICE);
+               dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+                                DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
@@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *b
 
                if (rp->skb == NULL)
                        continue;
-               ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-                                    DMA_TO_DEVICE);
+               dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+                                DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
@@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *b
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
        if (bp->flags & B44_FLAG_RX_RING_HACK)
-               ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-                                              DMA_TABLE_BYTES,
-                                              DMA_BIDIRECTIONAL);
+               dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+                                          DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
        if (bp->flags & B44_FLAG_TX_RING_HACK)
-               ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-                                              DMA_TABLE_BYTES,
-                                              DMA_TO_DEVICE);
+               dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+                                          DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
-                       ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-                                            DMA_TABLE_BYTES,
-                                            DMA_BIDIRECTIONAL);
+                       dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+                                        DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
-                       ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-                                               bp->rx_ring, bp->rx_ring_dma,
-                                               GFP_KERNEL);
+                       dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+                                         bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
-                       ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-                                            DMA_TABLE_BYTES,
-                                            DMA_TO_DEVICE);
+                       dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+                                        DMA_TABLE_BYTES, DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
-                       ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-                                               bp->tx_ring, bp->tx_ring_dma,
-                                               GFP_KERNEL);
+                       dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+                                         bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
@@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b
                goto out_err;
 
        size = DMA_TABLE_BYTES;
-       bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+       bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+                                        &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b
                if (!rx_ring)
                        goto out_err;
 
-               rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-                                                DMA_TABLE_BYTES,
-                                                DMA_BIDIRECTIONAL);
+               rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+                                            DMA_TABLE_BYTES,
+                                            DMA_BIDIRECTIONAL);
 
-               if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+               if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
                        rx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(rx_ring);
                        goto out_err;
@@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }
 
-       bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+       bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+                                        &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b
                if (!tx_ring)
                        goto out_err;
 
-               tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-                                           DMA_TABLE_BYTES,
-                                           DMA_TO_DEVICE);
+               tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+                                            DMA_TABLE_BYTES,
+                                            DMA_TO_DEVICE);
 
-               if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+               if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
                        tx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(tx_ring);
                        goto out_err;
@@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct
                        "Failed to powerup the bus\n");
                goto err_out_free_dev;
        }
-       err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-       if (err) {
+
+       if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+           dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
                dev_err(sdev->dev,
                        "Required 30BIT DMA mask unsupported by the system\n");
                goto err_out_powerdown;
        }
+
        err = b44_get_invariants(bp);
        if (err) {
                dev_err(sdev->dev,
@@ -2344,7 +2339,6 @@ static int __init b44_init(void)
        int err;
 
        /* Setup paramaters for syncing RX/TX DMA descriptors */
-       dma_desc_align_mask = ~(dma_desc_align_size - 1);
        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
        err = b44_pci_init();
--- linux-2.6.35.3.orig/drivers/ssb/driver_chipcommon.c
+++ linux-2.6.35.3/drivers/ssb/driver_chipcommon.c
@@ -209,6 +209,24 @@ static void chipco_powercontrol_init(str
        }
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
+static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
+{
+       struct ssb_bus *bus = cc->dev->bus;
+
+       switch (bus->chip_id) {
+       case 0x4312:
+       case 0x4322:
+       case 0x4328:
+               return 7000;
+       case 0x4325:
+               /* TODO: */
+       default:
+               return 15000;
+       }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
 static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
 {
        struct ssb_bus *bus = cc->dev->bus;
@@ -218,6 +236,12 @@ static void calc_fast_powerup_delay(stru
 
        if (bus->bustype != SSB_BUSTYPE_PCI)
                return;
+
+       if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
+               cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
+               return;
+       }
+
        if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
                return;
 
--- linux-2.6.35.3.orig/drivers/ssb/driver_chipcommon_pmu.c
+++ linux-2.6.35.3/drivers/ssb/driver_chipcommon_pmu.c
@@ -502,9 +502,9 @@ static void ssb_pmu_resources_init(struc
                chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
 void ssb_pmu_init(struct ssb_chipcommon *cc)
 {
-       struct ssb_bus *bus = cc->dev->bus;
        u32 pmucap;
 
        if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
@@ -516,15 +516,12 @@ void ssb_pmu_init(struct ssb_chipcommon
        ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
                    cc->pmu.rev, pmucap);
 
-       if (cc->pmu.rev >= 1) {
-               if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
-                       chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
-                                     ~SSB_CHIPCO_PMU_CTL_NOILPONW);
-               } else {
-                       chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
-                                    SSB_CHIPCO_PMU_CTL_NOILPONW);
-               }
-       }
+       if (cc->pmu.rev == 1)
+               chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+                             ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+       else
+               chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+                            SSB_CHIPCO_PMU_CTL_NOILPONW);
        ssb_pmu_pll_init(cc);
        ssb_pmu_resources_init(cc);
 }
--- linux-2.6.35.3.orig/drivers/ssb/main.c
+++ linux-2.6.35.3/drivers/ssb/main.c
@@ -486,6 +486,7 @@ static int ssb_devices_register(struct s
 #ifdef CONFIG_SSB_PCIHOST
                        sdev->irq = bus->host_pci->irq;
                        dev->parent = &bus->host_pci->dev;
+                       sdev->dma_dev = dev->parent;
 #endif
                        break;
                case SSB_BUSTYPE_PCMCIA:
@@ -501,6 +502,7 @@ static int ssb_devices_register(struct s
                        break;
                case SSB_BUSTYPE_SSB:
                        dev->dma_mask = &dev->coherent_dma_mask;
+                       sdev->dma_dev = dev;
                        break;
                }
 
@@ -1226,80 +1228,6 @@ u32 ssb_dma_translation(struct ssb_devic
 }
 EXPORT_SYMBOL(ssb_dma_translation);
 
-int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
-{
-#ifdef CONFIG_SSB_PCIHOST
-       int err;
-#endif
-
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               err = pci_set_dma_mask(dev->bus->host_pci, mask);
-               if (err)
-                       return err;
-               err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
-               return err;
-#endif
-       case SSB_BUSTYPE_SSB:
-               return dma_set_mask(dev->dev, mask);
-       default:
-               __ssb_dma_not_implemented(dev);
-       }
-       return -ENOSYS;
-}
-EXPORT_SYMBOL(ssb_dma_set_mask);
-
-void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
-                               dma_addr_t *dma_handle, gfp_t gfp_flags)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               if (gfp_flags & GFP_DMA) {
-                       /* Workaround: The PCI API does not support passing
-                        * a GFP flag. */
-                       return dma_alloc_coherent(&dev->bus->host_pci->dev,
-                                                 size, dma_handle, gfp_flags);
-               }
-               return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
-#endif
-       case SSB_BUSTYPE_SSB:
-               return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
-       default:
-               __ssb_dma_not_implemented(dev);
-       }
-       return NULL;
-}
-EXPORT_SYMBOL(ssb_dma_alloc_consistent);
-
-void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
-                            void *vaddr, dma_addr_t dma_handle,
-                            gfp_t gfp_flags)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               if (gfp_flags & GFP_DMA) {
-                       /* Workaround: The PCI API does not support passing
-                        * a GFP flag. */
-                       dma_free_coherent(&dev->bus->host_pci->dev,
-                                         size, vaddr, dma_handle);
-                       return;
-               }
-               pci_free_consistent(dev->bus->host_pci, size,
-                                   vaddr, dma_handle);
-               return;
-#endif
-       case SSB_BUSTYPE_SSB:
-               dma_free_coherent(dev->dev, size, vaddr, dma_handle);
-               return;
-       default:
-               __ssb_dma_not_implemented(dev);
-       }
-}
-EXPORT_SYMBOL(ssb_dma_free_consistent);
-
 int ssb_bus_may_powerdown(struct ssb_bus *bus)
 {
        struct ssb_chipcommon *cc;
--- linux-2.6.35.3.orig/include/linux/ssb/ssb.h
+++ linux-2.6.35.3/include/linux/ssb/ssb.h
@@ -167,7 +167,7 @@ struct ssb_device {
         * is an optimization. */
        const struct ssb_bus_ops *ops;
 
-       struct device *dev;
+       struct device *dev, *dma_dev;
 
        struct ssb_bus *bus;
        struct ssb_device_id id;
@@ -470,14 +470,6 @@ extern u32 ssb_dma_translation(struct ss
 #define SSB_DMA_TRANSLATION_MASK       0xC0000000
 #define SSB_DMA_TRANSLATION_SHIFT      30
 
-extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
-
-extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t gfp_flags);
-extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
-                                   void *vaddr, dma_addr_t dma_handle,
-                                   gfp_t gfp_flags);
-
 static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
 {
 #ifdef CONFIG_SSB_DEBUG
@@ -486,155 +478,6 @@ static inline void __cold __ssb_dma_not_
 #endif /* DEBUG */
 }
 
-static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               return pci_dma_mapping_error(dev->bus->host_pci, addr);
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               return dma_mapping_error(dev->dev, addr);
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-       return -ENOSYS;
-}
-
-static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
-                                           size_t size, enum dma_data_direction dir)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               return pci_map_single(dev->bus->host_pci, p, size, dir);
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               return dma_map_single(dev->dev, p, size, dir);
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-       return 0;
-}
-
-static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
-                                       size_t size, enum dma_data_direction dir)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
-               return;
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               dma_unmap_single(dev->dev, dma_addr, size, dir);
-               return;
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
-                                              dma_addr_t dma_addr,
-                                              size_t size,
-                                              enum dma_data_direction dir)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
-                                           size, dir);
-               return;
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
-               return;
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
-                                                 dma_addr_t dma_addr,
-                                                 size_t size,
-                                                 enum dma_data_direction dir)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
-                                              size, dir);
-               return;
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
-               return;
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
-                                                    dma_addr_t dma_addr,
-                                                    unsigned long offset,
-                                                    size_t size,
-                                                    enum dma_data_direction dir)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               /* Just sync everything. That's all the PCI API can do. */
-               pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
-                                           offset + size, dir);
-               return;
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
-                                             size, dir);
-               return;
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
-                                                       dma_addr_t dma_addr,
-                                                       unsigned long offset,
-                                                       size_t size,
-                                                       enum dma_data_direction dir)
-{
-       switch (dev->bus->bustype) {
-       case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
-               /* Just sync everything. That's all the PCI API can do. */
-               pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
-                                              offset + size, dir);
-               return;
-#endif
-               break;
-       case SSB_BUSTYPE_SSB:
-               dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
-                                                size, dir);
-               return;
-       default:
-               break;
-       }
-       __ssb_dma_not_implemented(dev);
-}
-
-
 #ifdef CONFIG_SSB_PCIHOST
 /* PCI-host wrapper driver */
 extern int ssb_pcihost_register(struct pci_driver *driver);