brcm2708: update against latest rpi-3.10.y branch
openwrt.git: target/linux/brcm2708/patches-3.10/0114-dmaengine-Add-support-for-BCM2708.patch
From 5fa4eb7d17acff35c1ea9a7def5dec5802566565 Mon Sep 17 00:00:00 2001
From: Florian Meier <florian.meier@koalo.de>
Date: Fri, 22 Nov 2013 14:22:53 +0100
Subject: [PATCH 114/174] dmaengine: Add support for BCM2708

Add support for the DMA controller of the BCM2708 as used in the
Raspberry Pi. Currently it only supports cyclic DMA.
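
For illustration, a client (such as the I2S PCM driver) is expected to
drive this engine through the generic dmaengine slave API; a minimal
sketch, not part of this patch, in which the channel, FIFO bus address,
DREQ number, buffer and callback are all placeholders supplied by the
caller:

        /* Hypothetical client code; all parameters are placeholders. */
        static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t fifo_bus_addr,
                                   unsigned int dreq, dma_addr_t buf_phys,
                                   size_t buf_len, size_t period_len,
                                   dma_async_tx_callback period_elapsed)
        {
                struct dma_slave_config cfg = {
                        .direction      = DMA_MEM_TO_DEV,
                        .dst_addr       = fifo_bus_addr,  /* peripheral FIFO, bus address */
                        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, /* only width accepted */
                        .slave_id       = dreq,           /* DREQ line, becomes PER_MAP */
                };
                struct dma_async_tx_descriptor *desc;
                int ret;

                ret = dmaengine_slave_config(chan, &cfg);
                if (ret)
                        return ret;

                desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
                                                 period_len, DMA_MEM_TO_DEV,
                                                 DMA_PREP_INTERRUPT);
                if (!desc)
                        return -EINVAL;

                desc->callback = period_elapsed; /* invoked once per period */
                dmaengine_submit(desc);
                dma_async_issue_pending(chan);
                return 0;
        }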

Signed-off-by: Florian Meier <florian.meier@koalo.de>
---
 drivers/dma/Kconfig             |   6 +
 drivers/dma/Makefile            |   1 +
 drivers/dma/bcm2708-dmaengine.c | 588 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 595 insertions(+)
 create mode 100644 drivers/dma/bcm2708-dmaengine.c

--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -305,6 +305,12 @@ config DMA_OMAP
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS

+config DMA_BCM2708
+       tristate "BCM2708 DMA engine support"
+       depends on MACH_BCM2708
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+
 config MMP_PDMA
        bool "MMP PDMA support"
        depends on (ARCH_MMP || ARCH_PXA)
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -37,4 +37,5 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2708) += bcm2708-dmaengine.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
--- /dev/null
+++ b/drivers/dma/bcm2708-dmaengine.c
@@ -0,0 +1,588 @@
+/*
+ * BCM2708 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author:      Florian Meier <florian.meier@koalo.de>
+ *              Copyright 2013
+ *
+ * Based on
+ *     OMAP DMAengine support by Russell King
+ *
+ *     BCM2708 DMA Driver
+ *     Copyright (C) 2010 Broadcom
+ *
+ *     Raspberry Pi PCM I2S ALSA Driver
+ *     Copyright (c) by Phil Poole 2013
+ *
+ *     MARVELL MMP Peripheral DMA Driver
+ *     Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+
+#include "virt-dma.h"
+
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+struct bcm2708_dmadev {
+       struct dma_device ddev;
+       spinlock_t lock;
+       void __iomem *base;
+       struct device_dma_parameters dma_parms;
+};
+
+struct bcm2708_chan {
+       struct virt_dma_chan vc;
+       struct list_head node;
+
+       struct dma_slave_config cfg;
+       bool cyclic;
+
+       int ch;
+       struct bcm2708_desc *desc;
+
+       void __iomem *chan_base;
+       int irq_number;
+};
+
+struct bcm2708_desc {
+       struct virt_dma_desc vd;
+       enum dma_transfer_direction dir;
+
+       unsigned int control_block_size;
+       struct bcm2708_dma_cb *control_block_base;
+       dma_addr_t control_block_base_phys;
+
+       unsigned frames;
+       size_t size;
+};
+
+#define BCM2708_DMA_DATA_TYPE_S8       1
+#define BCM2708_DMA_DATA_TYPE_S16      2
+#define BCM2708_DMA_DATA_TYPE_S32      4
+#define BCM2708_DMA_DATA_TYPE_S128     16
+
+static inline struct bcm2708_dmadev *to_bcm2708_dma_dev(struct dma_device *d)
+{
+       return container_of(d, struct bcm2708_dmadev, ddev);
+}
+
+static inline struct bcm2708_chan *to_bcm2708_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct bcm2708_chan, vc.chan);
+}
+
+static inline struct bcm2708_desc *to_bcm2708_dma_desc(
+               struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct bcm2708_desc, vd.tx);
+}
+
+static void bcm2708_dma_desc_free(struct virt_dma_desc *vd)
+{
+       struct bcm2708_desc *desc = container_of(vd, struct bcm2708_desc, vd);
+       dma_free_coherent(desc->vd.tx.chan->device->dev,
+                       desc->control_block_size,
+                       desc->control_block_base,
+                       desc->control_block_base_phys);
+       kfree(desc);
+}
+
+static void bcm2708_dma_start_desc(struct bcm2708_chan *c)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+       struct bcm2708_desc *d;
+
+       if (!vd) {
+               c->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       c->desc = d = to_bcm2708_dma_desc(&vd->tx);
+
+       bcm_dma_start(c->chan_base, d->control_block_base_phys);
+}
+
+static irqreturn_t bcm2708_dma_callback(int irq, void *data)
+{
+       struct bcm2708_chan *c = data;
+       struct bcm2708_desc *d;
+       unsigned long flags;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /* Acknowledge interrupt */
+       writel(BCM2708_DMA_INT, c->chan_base + BCM2708_DMA_CS);
+
+       d = c->desc;
+
+       if (d) {
+               /* TODO Only works for cyclic DMA */
+               vchan_cyclic_callback(&d->vd);
+       }
+
+       /* Keep the DMA engine running */
+       dsb(); /* ARM synchronization barrier */
+       writel(BCM2708_DMA_ACTIVE, c->chan_base + BCM2708_DMA_CS);
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm2708_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct bcm2708_chan *c = to_bcm2708_dma_chan(chan);
+
+       return request_irq(c->irq_number,
+                       bcm2708_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2708_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct bcm2708_chan *c = to_bcm2708_dma_chan(chan);
+
+       vchan_free_chan_resources(&c->vc);
+       free_irq(c->irq_number, c);
+
+       dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2708_dma_desc_size(struct bcm2708_desc *d)
+{
+       return d->size;
+}
+
+static size_t bcm2708_dma_desc_size_pos(struct bcm2708_desc *d, dma_addr_t addr)
+{
+       unsigned i;
+       size_t size;
+
+       for (size = i = 0; i < d->frames; i++) {
+               struct bcm2708_dma_cb *control_block =
+                       &d->control_block_base[i];
+               size_t this_size = control_block->length;
+               dma_addr_t dma;
+
+               if (d->dir == DMA_DEV_TO_MEM)
+                       dma = control_block->dst;
+               else
+                       dma = control_block->src;
+
+               if (size)
+                       size += this_size;
+               else if (addr >= dma && addr < dma + this_size)
+                       size += dma + this_size - addr;
+       }
+
+       return size;
+}
+
+static enum dma_status bcm2708_dma_tx_status(struct dma_chan *chan,
+       dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct bcm2708_chan *c = to_bcm2708_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_SUCCESS || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vd = vchan_find_desc(&c->vc, cookie);
+       if (vd) {
+               txstate->residue =
+                       bcm2708_dma_desc_size(to_bcm2708_dma_desc(&vd->tx));
+       } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+               struct bcm2708_desc *d = c->desc;
+               dma_addr_t pos;
+
+               if (d->dir == DMA_MEM_TO_DEV)
+                       pos = readl(c->chan_base + BCM2708_DMA_SOURCE_AD);
+               else if (d->dir == DMA_DEV_TO_MEM)
+                       pos = readl(c->chan_base + BCM2708_DMA_DEST_AD);
+               else
+                       pos = 0;
+
+               txstate->residue = bcm2708_dma_desc_size_pos(d, pos);
+       } else {
+               txstate->residue = 0;
+       }
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return ret;
+}
+
+static void bcm2708_dma_issue_pending(struct dma_chan *chan)
+{
+       struct bcm2708_chan *c = to_bcm2708_dma_chan(chan);
+       unsigned long flags;
+
+       c->cyclic = true; /* Nothing else is implemented */
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (vchan_issue_pending(&c->vc) && !c->desc)
+               bcm2708_dma_start_desc(c);
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2708_dma_prep_dma_cyclic(
+       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags, void *context)
+{
+       struct bcm2708_chan *c = to_bcm2708_dma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct bcm2708_desc *d;
+       dma_addr_t dev_addr;
+       unsigned es, sync_type;
+       unsigned frame;
+
+       /* Grab configuration */
+       if (direction == DMA_DEV_TO_MEM) {
+               dev_addr = c->cfg.src_addr;
+               dev_width = c->cfg.src_addr_width;
+               sync_type = BCM2708_DMA_S_DREQ;
+       } else if (direction == DMA_MEM_TO_DEV) {
+               dev_addr = c->cfg.dst_addr;
+               dev_width = c->cfg.dst_addr_width;
+               sync_type = BCM2708_DMA_D_DREQ;
+       } else {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = BCM2708_DMA_DATA_TYPE_S32;
+               break;
+       default:
+               return NULL;
+       }
+
+       /* Now allocate and setup the descriptor. */
+       d = kzalloc(sizeof(*d), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->dir = direction;
+       d->frames = buf_len / period_len;
+
+       /* Allocate memory for control blocks */
+       d->control_block_size = d->frames * sizeof(struct bcm2708_dma_cb);
+       d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+                       d->control_block_size, &d->control_block_base_phys,
+                       GFP_NOWAIT);
+
+       if (!d->control_block_base) {
+               kfree(d);
+               return NULL;
+       }
+
+       /*
+        * Iterate over all frames, create a control block
+        * for each frame and link them together.
+        */
+       for (frame = 0; frame < d->frames; frame++) {
+               struct bcm2708_dma_cb *control_block =
+                       &d->control_block_base[frame];
+
+               /* Set up addresses */
+               if (d->dir == DMA_DEV_TO_MEM) {
+                       control_block->info = BCM2708_DMA_D_INC;
+                       control_block->src = dev_addr;
+                       control_block->dst = buf_addr + frame * period_len;
+               } else {
+                       control_block->info = BCM2708_DMA_S_INC;
+                       control_block->src = buf_addr + frame * period_len;
+                       control_block->dst = dev_addr;
+               }
+
+               /* Enable interrupt */
+               control_block->info |= BCM2708_DMA_INT_EN;
+
+               /* Setup synchronization */
+               if (sync_type != 0)
+                       control_block->info |= sync_type;
+
+               /* Setup DREQ channel */
+               if (c->cfg.slave_id != 0)
+                       control_block->info |=
+                               BCM2708_DMA_PER_MAP(c->cfg.slave_id);
+
+               /* Length of a frame */
+               control_block->length = period_len;
+               d->size += control_block->length;
+
+               /*
+                * Next block is the next frame.
+                * This DMA engine driver currently only supports cyclic DMA.
+                * Therefore, wrap around at number of frames.
+                */
+               control_block->next = d->control_block_base_phys +
+                       sizeof(struct bcm2708_dma_cb)
+                       * ((frame + 1) % d->frames);
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2708_dma_slave_config(struct bcm2708_chan *c,
+               struct dma_slave_config *cfg)
+{
+       if ((cfg->direction == DMA_DEV_TO_MEM &&
+            cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+           (cfg->direction == DMA_MEM_TO_DEV &&
+            cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+           !is_slave_direction(cfg->direction)) {
+               return -EINVAL;
+       }
+
+       c->cfg = *cfg;
+
+       return 0;
+}
+
+static int bcm2708_dma_terminate_all(struct bcm2708_chan *c)
+{
+       struct bcm2708_dmadev *d = to_bcm2708_dma_dev(c->vc.chan.device);
+       unsigned long flags;
+       int timeout = 10000;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /* Prevent this channel being scheduled */
+       spin_lock(&d->lock);
+       list_del_init(&c->node);
+       spin_unlock(&d->lock);
+
+       /*
+        * Stop DMA activity: we assume the callback will not be called
+        * after bcm_dma_abort() returns (even if it does, it will see
+        * c->desc is NULL and exit.)
+        */
+       if (c->desc) {
+               c->desc = NULL;
+               bcm_dma_abort(c->chan_base);
+
+               /* Wait for stopping */
+               while (timeout > 0) {
+                       timeout--;
+                       if (!(readl(c->chan_base + BCM2708_DMA_CS) &
+                                               BCM2708_DMA_ACTIVE))
+                               break;
+
+                       cpu_relax();
+               }
+
+               if (timeout <= 0)
+                       dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+       }
+
+       vchan_get_all_descriptors(&c->vc, &head);
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
+
+       return 0;
+}
+
+static int bcm2708_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct bcm2708_chan *c = to_bcm2708_dma_chan(chan);
+
+       switch (cmd) {
+       case DMA_SLAVE_CONFIG:
+               return bcm2708_dma_slave_config(c,
+                               (struct dma_slave_config *)arg);
+
+       case DMA_TERMINATE_ALL:
+               return bcm2708_dma_terminate_all(c);
+
+       default:
+               return -ENXIO;
+       }
+}
+
+static int bcm2708_dma_chan_init(struct bcm2708_dmadev *d,
+               void __iomem *chan_base, int chan_id, int irq)
+{
+       struct bcm2708_chan *c;
+
+       c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return -ENOMEM;
+
+       c->vc.desc_free = bcm2708_dma_desc_free;
+       vchan_init(&c->vc, &d->ddev);
+       INIT_LIST_HEAD(&c->node);
+
+       d->ddev.chancnt++;
+
+       c->chan_base = chan_base;
+       c->ch = chan_id;
+       c->irq_number = irq;
+
+       return 0;
+}
+
+static void bcm2708_dma_free(struct bcm2708_dmadev *od)
+{
+       while (!list_empty(&od->ddev.channels)) {
+               struct bcm2708_chan *c = list_first_entry(&od->ddev.channels,
+                       struct bcm2708_chan, vc.chan.device_node);
+
+               list_del(&c->vc.chan.device_node);
+               tasklet_kill(&c->vc.task);
+       }
+}
+
+static int bcm2708_dma_probe(struct platform_device *pdev)
+{
+       struct bcm2708_dmadev *od;
+       int rc, i;
+
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+       rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+       od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+       if (!od)
+               return -ENOMEM;
+
+       pdev->dev.dma_parms = &od->dma_parms;
+       dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+       dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+       od->ddev.device_alloc_chan_resources = bcm2708_dma_alloc_chan_resources;
+       od->ddev.device_free_chan_resources = bcm2708_dma_free_chan_resources;
+       od->ddev.device_tx_status = bcm2708_dma_tx_status;
+       od->ddev.device_issue_pending = bcm2708_dma_issue_pending;
+       od->ddev.device_prep_dma_cyclic = bcm2708_dma_prep_dma_cyclic;
+       od->ddev.device_control = bcm2708_dma_control;
+       od->ddev.dev = &pdev->dev;
+       INIT_LIST_HEAD(&od->ddev.channels);
+       spin_lock_init(&od->lock);
+
+       platform_set_drvdata(pdev, od);
+
+       for (i = 0; i < 16; i++) {
+               void __iomem *chan_base;
+               int chan_id, irq;
+
+               chan_id = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST,
+                       &chan_base,
+                       &irq);
+
+               if (chan_id < 0)
+                       break;
+
+               rc = bcm2708_dma_chan_init(od, chan_base, chan_id, irq);
+               if (rc) {
+                       bcm2708_dma_free(od);
+                       return rc;
+               }
+       }
+
+       rc = dma_async_device_register(&od->ddev);
+       if (rc) {
+               dev_err(&pdev->dev,
+                       "Failed to register slave DMA engine device: %d\n", rc);
+               bcm2708_dma_free(od);
+               return rc;
+       }
+
+       dev_dbg(&pdev->dev, "Loaded BCM2708 DMA engine driver\n");
+
+       return rc;
+}
+
+static int bcm2708_dma_remove(struct platform_device *pdev)
+{
+       struct bcm2708_dmadev *od = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&od->ddev);
+       bcm2708_dma_free(od);
+
+       return 0;
+}
+
+static struct platform_driver bcm2708_dma_driver = {
+       .probe  = bcm2708_dma_probe,
+       .remove = bcm2708_dma_remove,
+       .driver = {
+               .name = "bcm2708-dmaengine",
+               .owner = THIS_MODULE,
+       },
+};
+
+static struct platform_device *pdev;
+
+static const struct platform_device_info bcm2708_dma_dev_info = {
+       .name = "bcm2708-dmaengine",
+       .id = -1,
+};
+
+static int bcm2708_dma_init(void)
+{
+       int rc = platform_driver_register(&bcm2708_dma_driver);
+
+       if (rc == 0) {
+               pdev = platform_device_register_full(&bcm2708_dma_dev_info);
+               if (IS_ERR(pdev)) {
+                       platform_driver_unregister(&bcm2708_dma_driver);
+                       rc = PTR_ERR(pdev);
+               }
+       }
+
+       return rc;
+}
+subsys_initcall(bcm2708_dma_init);
+
+static void __exit bcm2708_dma_exit(void)
+{
+       platform_device_unregister(pdev);
+       platform_driver_unregister(&bcm2708_dma_driver);
+}
+module_exit(bcm2708_dma_exit);
+
+MODULE_ALIAS("platform:bcm2708-dma");
+MODULE_DESCRIPTION("BCM2708 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
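
Note: since mach-bcm2708 has no devicetree bindings at this point, a
client would obtain a channel by capability mask alone. A rough sketch
of allocation and teardown under that assumption (error handling mostly
elided, not part of this patch):

        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        dma_cap_set(DMA_CYCLIC, mask);
        chan = dma_request_channel(mask, NULL, NULL); /* any matching channel */
        if (!chan)
                return -ENODEV;

        /* ... configure and run a cyclic transfer via the slave API ... */

        dmaengine_terminate_all(chan); /* routed to DMA_TERMINATE_ALL above */
        dma_release_channel(chan);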