/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
/* 32bit DMA ops. */
static
struct bcm43xx_dmadesc_generic * op32_idx2desc(struct bcm43xx_dmaring *ring,
					       int slot,
					       struct bcm43xx_dmadesc_meta **meta)
{
	struct bcm43xx_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct bcm43xx_dmadesc_generic *)desc;
}
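/* Note: ring->descbase points at the DMA-coherent array of hardware
 * descriptors, while ring->meta is an ordinary kernel array holding the
 * host-side bookkeeping (skb pointer, DMA address) for the same slot
 * index. The two arrays are kept strictly parallel, so a single slot
 * number addresses both the hardware view and the driver view. */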
static void op32_fill_descriptor(struct bcm43xx_dmaring *ring,
				 struct bcm43xx_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct bcm43xx_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	assert(slot >= 0 && slot < ring->nr_slots);

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	      & BCM43xx_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= BCM43xx_DMA32_DCTL_IRQ;
	ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
	       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
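/* A worked example of the address split above, assuming the routing
 * constants from the SSB headers (SSB_DMA_TRANSLATION_MASK == 0xC0000000,
 * SSB_DMA_TRANSLATION_SHIFT == 30): for dmaaddr 0x8F001000 the top two
 * bits (0b10) are peeled off into addrext, the remaining 0x0F001000 is
 * OR'ed with the core's translation base from ssb_dma_translation(),
 * and addrext is written to the descriptor's ADDREXT control bits so
 * the core can route the access to the right backplane window. */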
static void op32_poke_tx(struct bcm43xx_dmaring *ring, int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}
static void op32_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
			  | BCM43xx_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
			  & ~BCM43xx_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct bcm43xx_dmaring *ring)
{
	u32 val;

	val = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
	val &= BCM43xx_DMA32_RXDPTR;

	return (val / sizeof(struct bcm43xx_dmadesc32));
}
static void op32_set_current_rxslot(struct bcm43xx_dmaring *ring,
				    int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}
static const struct bcm43xx_dma_ops dma32_ops = {
	.idx2desc		= op32_idx2desc,
	.fill_descriptor	= op32_fill_descriptor,
	.poke_tx		= op32_poke_tx,
	.tx_suspend		= op32_tx_suspend,
	.tx_resume		= op32_tx_resume,
	.get_current_rxslot	= op32_get_current_rxslot,
	.set_current_rxslot	= op32_set_current_rxslot,
};
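/* dma32_ops and dma64_ops (below) hide the two descriptor layouts
 * behind one interface. The generic ring code only ever calls through
 * ring->ops, chosen once in bcm43xx_setup_dmaring(), which is what
 * keeps the TX and RX paths identical for the 32bit and 64bit DMA
 * engines. */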
/* 64bit DMA ops. */
static
struct bcm43xx_dmadesc_generic * op64_idx2desc(struct bcm43xx_dmaring *ring,
					       int slot,
					       struct bcm43xx_dmadesc_meta **meta)
{
	struct bcm43xx_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct bcm43xx_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct bcm43xx_dmaring *ring,
				 struct bcm43xx_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct bcm43xx_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	assert(slot >= 0 && slot < ring->nr_slots);

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ssb_dma_translation(ring->dev->dev);
	if (slot == ring->nr_slots - 1)
		ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& BCM43xx_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
		& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
static void op64_poke_tx(struct bcm43xx_dmaring *ring, int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
}

static void op64_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
			  | BCM43xx_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
			  & ~BCM43xx_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct bcm43xx_dmaring *ring)
{
	u32 val;

	val = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
	val &= BCM43xx_DMA64_RXSTATDPTR;

	return (val / sizeof(struct bcm43xx_dmadesc64));
}

static void op64_set_current_rxslot(struct bcm43xx_dmaring *ring,
				    int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
}
static const struct bcm43xx_dma_ops dma64_ops = {
	.idx2desc		= op64_idx2desc,
	.fill_descriptor	= op64_fill_descriptor,
	.poke_tx		= op64_poke_tx,
	.tx_suspend		= op64_tx_suspend,
	.tx_resume		= op64_tx_resume,
	.get_current_rxslot	= op64_get_current_rxslot,
	.set_current_rxslot	= op64_set_current_rxslot,
};
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
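/* Slot indices simply wrap around the ring. next_slot() accepts -1
 * because a fresh TX ring starts with ring->current_slot == -1, so the
 * very first request_slot() call advances it to slot 0. */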
/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(!ring->stopped);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_MAC80211_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	ring->used_slots--;
}
/* Mac80211-queue to bcm43xx-ring mapping */
static struct bcm43xx_dmaring * priority_to_txring(struct bcm43xx_wldev *dev,
						   int queue_priority)
{
	struct bcm43xx_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		assert(0);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}
/* Bcm43xx-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct bcm43xx_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}
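/* Each of the six DMA controllers owns a fixed window of MMIO
 * registers starting at the base returned here; for example
 * bcm43xx_dmacontroller_base(1, 0) yields BCM43xx_MMIO_DMA64_BASE0.
 * The bcm43xx_dma_read()/_write() accessors then add per-register
 * offsets such as BCM43xx_DMA64_TXCTL on top of ring->mmio_base. */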
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf, size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr, size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_wldev *dev,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "ERROR: DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_wldev *dev,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "ERROR: DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
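/* Note the two phases above: the TX engine is first given time to
 * drain into a quiescent state (DISABLED, IDLEWAIT or STOPPED) before
 * TXCTL is cleared, and only then do we wait for the final DISABLED
 * state. The RX reset has no drain phase, presumably because there are
 * no in-flight TX frames that need to finish there. */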
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc_generic *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr_fw4 *rxhdr;
	struct bcm43xx_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (dma_mapping_error(dmaaddr)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}
	if (dma_mapping_error(dmaaddr)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct bcm43xx_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
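/* Zeroing rxhdr->frame_len and txstat->cookie marks the freshly mapped
 * buffer as "not yet written by hardware": dma_rx() later busy-waits on
 * exactly these two fields to detect when the device has actually
 * landed data in the buffer, so both views of the buffer must start out
 * cleared here. */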
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			printk(KERN_ERR PFX "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}
static u64 supported_dma_mask(struct bcm43xx_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = bcm43xx_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = bcm43xx_dmacontroller_base(0, 0);
	bcm43xx_write32(dev,
			mmio_base + BCM43xx_DMA32_TXCTL,
			BCM43xx_DMA32_TXADDREXT_MASK);
	tmp = bcm43xx_read32(dev,
			     mmio_base + BCM43xx_DMA32_TXCTL);
	if (tmp & BCM43xx_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}
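/* The probe above works as follows: a core that sets SSB_TMSHIGH_DMA64
 * has the 64bit engine and gets a full 64bit mask. Otherwise we write
 * the ADDREXT bits of a 32bit TXCTL register and read them back; if
 * they stick, the address extension is implemented and 32bit DMA
 * addressing works, else the engine is limited to 30 bits (1GB). */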
/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
					       int controller_index,
					       int for_tx,
					       int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct bcm43xx_txhdr_fw4),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_test)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    sizeof(struct bcm43xx_txhdr_fw4),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;
			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
						  DMA_TO_DEVICE);
			if (dma_mapping_error(dma_test))
				goto err_kfree_txhdr_cache;
		}
		dma_unmap_single(dev->dev->dev,
				 dma_test, sizeof(struct bcm43xx_txhdr_fw4),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (dma64)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with the rx handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
void bcm43xx_dma_free(struct bcm43xx_wldev *dev)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(dev))
		return;
	dma = &dev->dma;

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
int bcm43xx_dma_init(struct bcm43xx_wldev *dev)
{
	struct bcm43xx_dma *dma = &dev->dma;
	struct bcm43xx_dmaring *ring;
	int err;
	u64 dmamask;
	int dma64 = 0;

	dmamask = supported_dma_mask(dev);
	if (dmamask == DMA_64BIT_MASK)
		dma64 = 1;

	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
#ifdef BCM43XX_MAC80211_PIO
		printk(KERN_WARNING PFX "DMA for this device not supported. "
					"Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		printk(KERN_ERR PFX "DMA for this device not supported and "
				    "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(dev, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(dev, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(dev, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(dev, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(dev, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(dev, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(dev, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = bcm43xx_setup_dmaring(dev, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
		(dmamask == DMA_64BIT_MASK) ? 64 :
		(dmamask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}
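/* Worked example, assuming the 0xA000..0xF000 controller IDs chosen
 * above: a frame queued on tx_ring2 (ring->index == 2) in slot 0x01A
 * gets cookie 0xC000 | 0x01A == 0xC01A. The hardware echoes the cookie
 * back in its TX status report, and parse_cookie() below splits it
 * into (tx_ring2, slot 0x01A) again. */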
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_wldev *dev,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = &dev->dma;
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(ring && *slot >= 0 && *slot < ring->nr_slots);

	return ring;
}
static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	u8 *header;
	int slot;
	int err;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
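	/* Each frame consumes two ring slots: the first carries the
	 * device TX header (from txhdr_cache) with FRAMESTART set, the
	 * second carries the 802.11 payload with FRAMEEND and IRQ set.
	 * All free-space checks in the TX path therefore count in units
	 * of SLOTS_PER_PACKET. */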
	assert(skb_shinfo(skb)->nr_frags == 0);

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(struct bcm43xx_txhdr_fw4)]);
	bcm43xx_generate_txhdr(ring->dev, header,
			       skb->data, skb->len, ctl,
			       generate_cookie(ring, slot));

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct bcm43xx_txhdr_fw4), 1);
	if (dma_mapping_error(meta_hdr->dmaaddr))
		return -EIO;
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct bcm43xx_txhdr_fw4), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (dma_mapping_error(meta->dmaaddr)) {
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct bcm43xx_txhdr_fw4), 1);
	return err;
}
static inline
int should_inject_overflow(struct bcm43xx_dmaring *ring)
{
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	if (unlikely(bcm43xx_debug(ring->dev, BCM43xx_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			dprintk(KERN_DEBUG PFX "Injecting TX ring overflow on "
				"DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_BCM43XX_MAC80211_DEBUG */
	return 0;
}
int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
		   struct sk_buff *skb,
		   struct ieee80211_tx_control *ctl)
{
	struct bcm43xx_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, ctl->queue);
	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		printkl(KERN_ERR PFX "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	assert(!ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err)) {
		printkl(KERN_ERR PFX "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}
void bcm43xx_dma_handle_txstatus(struct bcm43xx_wldev *dev,
				 const struct bcm43xx_txstatus *status)
{
	const struct bcm43xx_dma_ops *ops;
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	assert(irqs_disabled());
	spin_lock(&ring->lock);

	assert(ring->tx);
	ops = ring->ops;
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr, sizeof(struct bcm43xx_txhdr_fw4), 1);

		if (meta->is_last_fragment) {
			assert(meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked)
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			assert(meta->skb == NULL);
		}
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		assert(free_slots(ring) >= SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
	}

	spin_unlock(&ring->lock);
}
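/* A TX status report names only the cookie of the frame's first slot.
 * The loop above therefore starts at that slot and walks forward,
 * unmapping the header slot and then the payload slot, until it hits
 * the meta marked is_last_fragment, which is where the skb itself
 * lives and where mac80211 gets notified. */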
void bcm43xx_dma_get_tx_stats(struct bcm43xx_wldev *dev,
			      struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct bcm43xx_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwtxstatus *hw = (struct bcm43xx_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		bcm43xx_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
			"(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	bcm43xx_rx(ring->dev, skb, rxhdr);
drop:
	return;
}
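/* The skb handed to bcm43xx_rx() above is first grown with skb_put()
 * to cover frameoffset + len bytes of device data and then immediately
 * skb_pull()'ed by frameoffset, so the leading area that the hardware
 * fills with its RX header is skipped and skb->data ends up pointing
 * at the start of the actual 802.11 frame. */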
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
static void bcm43xx_dma_tx_suspend_ring(struct bcm43xx_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void bcm43xx_dma_tx_resume_ring(struct bcm43xx_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
void bcm43xx_dma_tx_suspend(struct bcm43xx_wldev *dev)
{
	bcm43xx_power_saving_ctl_bits(dev, -1, 1);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring0);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring1);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring2);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring3);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring4);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void bcm43xx_dma_tx_resume(struct bcm43xx_wldev *dev)
{
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring5);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring4);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring3);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring2);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring1);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring0);
	bcm43xx_power_saving_ctl_bits(dev, -1, -1);
}