From cf93418a4bd5e69f069a65da92537bd4d6191223 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Sun, 27 Jul 2014 09:29:51 +0100
Subject: [PATCH 54/57] DMA: ralink: add rt2880 dma engine

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/dma/Kconfig       |   6 +
 drivers/dma/Makefile      |   1 +
 drivers/dma/dmaengine.c   |  26 ++
 drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |   1 +
 5 files changed, 611 insertions(+)
 create mode 100644 drivers/dma/ralink-gdma.c
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -351,6 +351,12 @@ config MOXART_DMA
 	  Enable support for the MOXA ART SoC DMA controller.
 
+config DMA_RALINK
+	tristate "RALINK DMA support"
+	depends on RALINK && SOC_MT7620
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
--- /dev/null
+++ b/drivers/dma/ralink-gdma.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ * GDMA4740 DMAC support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define GDMA_NR_CHANS			16
+
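+/*
+ * Each channel owns a bank of four 32-bit registers (source address,
+ * destination address, CTRL0, CTRL1) at a stride of 0x10.
+ */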
+#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
+#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)
+
+#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
+#define GDMA_REG_CTRL0_TX_MASK		0xffff
+#define GDMA_REG_CTRL0_TX_SHIFT		16
+#define GDMA_REG_CTRL0_CURR_MASK	0xff
+#define GDMA_REG_CTRL0_CURR_SHIFT	8
+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
+#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
+#define GDMA_REG_CTRL0_BURST_MASK	0x7
+#define GDMA_REG_CTRL0_BURST_SHIFT	3
+#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
+#define GDMA_REG_CTRL0_ENABLE		BIT(1)
+#define GDMA_REG_CTRL0_HW_MODE		0
+
+#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
+#define GDMA_REG_CTRL1_SEG_MASK		0xf
+#define GDMA_REG_CTRL1_SEG_SHIFT	22
+#define GDMA_REG_CTRL1_REQ_MASK		0x3f
+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
+#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
+#define GDMA_REG_CTRL1_CONTINUOUS	BIT(14)
+#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
+#define GDMA_REG_CTRL1_NEXT_SHIFT	3
+#define GDMA_REG_CTRL1_COHERENT		BIT(2)
+#define GDMA_REG_CTRL1_FAIL		BIT(1)
+#define GDMA_REG_CTRL1_MASK		BIT(0)
+
+#define GDMA_REG_UNMASK_INT		0x200
+#define GDMA_REG_DONE_INT		0x204
+
+#define GDMA_REG_GCT			0x220
+#define GDMA_REG_GCT_CHAN_MASK		0x3
+#define GDMA_REG_GCT_CHAN_SHIFT		3
+#define GDMA_REG_GCT_VER_MASK		0x3
+#define GDMA_REG_GCT_VER_SHIFT		1
+#define GDMA_REG_GCT_ARBIT_RR		BIT(0)
+
+enum gdma_dma_transfer_size {
+	GDMA_TRANSFER_SIZE_4BYTE	= 0,
+	GDMA_TRANSFER_SIZE_8BYTE	= 1,
+	GDMA_TRANSFER_SIZE_16BYTE	= 2,
+	GDMA_TRANSFER_SIZE_32BYTE	= 3,
+};
+
+struct gdma_dma_sg {
+	dma_addr_t addr;
+	u32 len;
+};
+
+struct gdma_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	unsigned int num_sgs;
+	struct gdma_dma_sg sg[];
+};
+
+struct gdma_dmaengine_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+
+	dma_addr_t fifo_addr;
+	unsigned int transfer_shift;
+
+	struct gdma_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct gdma_dma_dev {
+	struct dma_device ddev;
+	void __iomem *base;
+
+	struct gdma_dmaengine_chan chan[GDMA_NR_CHANS];
+};
+
+static struct gdma_dma_dev *gdma_dma_chan_get_dev(
+	struct gdma_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
+		ddev);
+}
+
+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
+}
+
+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct gdma_dma_desc, vdesc);
+}
+
+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
+	unsigned int reg)
+{
+	return readl(dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
+	unsigned reg, uint32_t val)
+{
+	writel(val, dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val, uint32_t mask)
+{
+	uint32_t tmp;
+
+	tmp = gdma_dma_read(dma_dev, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	gdma_dma_write(dma_dev, reg, tmp);
+}
+
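+/*
+ * Descriptors carry a flexible array of per-segment entries and are
+ * allocated GFP_ATOMIC, since the prep callbacks may run in atomic context.
+ */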
+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct gdma_dma_desc) +
+		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
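+/*
+ * Map the dmaengine maxburst value onto the hardware burst-size encoding
+ * that is programmed into the CTRL0 BURST field.
+ */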
+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
+{
+	if (maxburst <= 7)
+		return GDMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst <= 15)
+		return GDMA_TRANSFER_SIZE_8BYTE;
+	else if (maxburst <= 31)
+		return GDMA_TRANSFER_SIZE_16BYTE;
+
+	return GDMA_TRANSFER_SIZE_32BYTE;
+}
+
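+/*
+ * Precompute CTRL0/CTRL1 for the channel: the peripheral side uses the
+ * request line from slave_id and a fixed FIFO address, while the memory
+ * side is programmed with request number 32. DONE interrupts are enabled
+ * and the channel is chained to itself via the NEXT field, with
+ * continuous mode off.
+ */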
+static int gdma_dma_slave_config(struct dma_chan *c,
+	const struct dma_slave_config *config)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	enum gdma_dma_transfer_size transfer_size;
+	uint32_t flags;
+	uint32_t ctrl0, ctrl1;
+
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_DST_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->dst_maxburst);
+		chan->fifo_addr = config->dst_addr;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->src_maxburst);
+		chan->fifo_addr = config->src_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	chan->transfer_shift = 1 + transfer_size;
+
+	ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE;
+	ctrl0 |= GDMA_REG_CTRL0_DONE_INT;
+
+	ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT);
+	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+	ctrl1 |= GDMA_REG_CTRL1_FAIL;
+	ctrl1 &= ~GDMA_REG_CTRL1_CONTINUOUS;
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	return 0;
+}
+
+static int gdma_dma_terminate_all(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+		GDMA_REG_CTRL0_ENABLE);
+	chan->desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
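+/*
+ * This driver targets the pre-4.0 dmaengine API, in which slave
+ * configuration and termination are multiplexed through a single
+ * device_control callback.
+ */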
+static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return gdma_dma_slave_config(chan, config);
+	case DMA_TERMINATE_ALL:
+		return gdma_dma_terminate_all(chan);
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct virt_dma_desc *vdesc;
+	struct gdma_dma_sg *sg;
+
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+		GDMA_REG_CTRL0_ENABLE);
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_gdma_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg = &chan->desc->sg[chan->next_sg];
+
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->addr;
+		dst_addr = chan->fifo_addr;
+	} else {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->addr;
+	}
+
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id),
+		(sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE,
+		GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT);
+	chan->next_sg++;
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0,
+		GDMA_REG_CTRL1_MASK);
+
+	return 0;
+}
+
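+/*
+ * Per-channel completion handling: cyclic descriptors fire a callback per
+ * period, sg descriptors complete once the last segment has finished, and
+ * the next pending segment or descriptor is started in either case.
+ */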
+static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan)
+{
+	spin_lock(&chan->vchan.lock);
+	if (chan->desc && chan->desc->cyclic) {
+		vchan_cyclic_callback(&chan->desc->vdesc);
+	} else {
+		if (chan->desc && chan->next_sg == chan->desc->num_sgs) {
+			vchan_cookie_complete(&chan->desc->vdesc);
+			chan->desc = NULL;
+		}
+	}
+	gdma_dma_start_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
+}
+
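+/*
+ * Top-level interrupt handler: DONE_INT has one status bit per channel.
+ * Every channel with its bit set gets serviced, and the handled bits are
+ * then written back to acknowledge them.
+ */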
+static irqreturn_t gdma_dma_irq(int irq, void *devid)
+{
+	struct gdma_dma_dev *dma_dev = devid;
+	uint32_t unmask, done;
+	unsigned int i;
+
+	unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT);
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask);
+	done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT);
+
+	for (i = 0; i < GDMA_NR_CHANS; ++i)
+		if (done & BIT(i))
+			gdma_dma_chan_irq(&dma_dev->chan[i]);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done);
+
+	return IRQ_HANDLED;
+}
+
+static void gdma_dma_issue_pending(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+		gdma_dma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
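+/*
+ * Build one hardware segment per scatterlist entry; the segments are then
+ * walked one at a time from the completion interrupt.
+ */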
+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = gdma_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg[i].addr = sg_dma_address(sg);
+		desc->sg[i].len = sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
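+/*
+ * Cyclic transfers reuse the sg machinery: the buffer is split into
+ * period-sized segments and the descriptor is flagged cyclic, so the irq
+ * handler restarts it instead of completing it.
+ */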
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].addr = buf_addr;
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
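+/*
+ * Residue is the sum of the not-yet-started segments plus the in-flight
+ * portion read back from the hardware; the CTRL0 CURR field is assumed to
+ * count outstanding transfer units of 1 << transfer_shift bytes each.
+ */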
+static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan,
+	struct gdma_dma_desc *desc, unsigned int next_sg)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned int residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg[i].len;
+
+	if (next_sg != 0) {
+		count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+		count >>= GDMA_REG_CTRL0_CURR_SHIFT;
+		count &= GDMA_REG_CTRL0_CURR_MASK;
+		residue += count << chan->transfer_shift;
+	}
+
+	return residue;
+}
+
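+/*
+ * tx_status reports the standard cookie state and, when a dma_tx_state is
+ * supplied, fills in the residue for the running or still-queued
+ * descriptor under the vchan lock.
+ */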
+static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_SUCCESS || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		state->residue = gdma_dma_desc_residue(chan, chan->desc,
+			chan->next_sg);
+	} else if (vdesc) {
+		state->residue = gdma_dma_desc_residue(chan,
+			to_gdma_dma_desc(vdesc), 0);
+	} else {
+		state->residue = 0;
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return status;
+}
+
+static int gdma_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	return 0;
+}
+
+static void gdma_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
+}
+
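+/*
+ * OF translation: a single cell in the client's "dmas" property selects
+ * the channel directly by index.
+ */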
+static struct dma_chan *
+of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+			struct of_dma *ofdma)
+{
+	struct gdma_dma_dev *dma_dev = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= GDMA_NR_CHANS)
+		return NULL;
+
+	return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan));
+}
+
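+/*
+ * Probe order: map the register window, register the dma_device and the
+ * OF translator, hook up the interrupt, unmask and ack all channels, then
+ * switch the global control register to round-robin arbitration.
+ */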
+static int gdma_dma_probe(struct platform_device *pdev)
+{
+	struct gdma_dmaengine_chan *chan;
+	struct gdma_dma_dev *dma_dev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	uint32_t gct;
+	int ret;
+	int irq;
+
+	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
+	if (!dma_dev)
+		return -ENOMEM;
+
+	dd = &dma_dev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_dev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dma_dev->base))
+		return PTR_ERR(dma_dev->base);
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
+	dd->device_tx_status = gdma_dma_tx_status;
+	dd->device_issue_pending = gdma_dma_issue_pending;
+	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
+	dd->device_control = gdma_dma_control;
+	dd->dev = &pdev->dev;
+	dd->chancnt = GDMA_NR_CHANS;
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < dd->chancnt; i++) {
+		chan = &dma_dev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = gdma_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	irq = platform_get_irq(pdev, 0);
+	ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev), dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1);
+
+	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
+	dev_info(&pdev->dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
+			GDMA_REG_GCT_CHAN_MASK));
+	platform_set_drvdata(pdev, dma_dev);
+
+	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int gdma_dma_remove(struct platform_device *pdev)
+{
+	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	free_irq(irq, dma_dev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dma_dev->ddev);
+
+	return 0;
+}
+
+static const struct of_device_id gdma_of_match_table[] = {
+	{ .compatible = "ralink,rt2880-gdma" },
+	{ },
+};
+
+static struct platform_driver gdma_dma_driver = {
+	.probe = gdma_dma_probe,
+	.remove = gdma_dma_remove,
+	.driver = {
+		.name = "gdma-rt2880",
+		.owner = THIS_MODULE,
+		.of_match_table = gdma_of_match_table,
+	},
+};
+module_platform_driver(gdma_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("GDMA4740 DMA driver");
+MODULE_LICENSE("GPL v2");
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1073,6 +1073,7 @@ struct dma_chan *dma_request_slave_chann
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 
 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)