ipq806x: Add support for IPQ806x chip family
[openwrt.git] target/linux/ipq806x/patches/0177-dmaengine-Add-QCOM-ADM-DMA-driver.patch
1 From 8984e3fc6db029479d6aa78882b39235379aebff Mon Sep 17 00:00:00 2001
2 From: Andy Gross <agross@codeaurora.org>
3 Date: Wed, 14 May 2014 13:45:07 -0500
4 Subject: [PATCH 177/182] dmaengine: Add QCOM ADM DMA driver
5
6 Add the DMA engine driver for the QCOM Application Data Mover (ADM) DMA
7 controller found in the MSM8960 and IPQ/APQ8064 platforms.
8
9 The ADM supports both memory to memory transactions and memory
10 to/from peripheral device transactions.  The controller also provides flow
11 control capabilities for transactions to/from peripheral devices.
12
13 The initial release of this driver supports slave transfers to/from peripherals
14 and also incorporates CRCI (client rate control interface) flow control.
15
16 Signed-off-by: Andy Gross <agross@codeaurora.org>
17 ---
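Note (editor's sketch, not part of the original submission): a minimal example of how a
client peripheral driver might use this engine through the standard dmaengine slave API.
The dmas entry consumed by adm_dma_xlate() takes two cells, the ADM channel number and
the CRCI to use; the channel name "rx", FIFO address, scatterlist and completion callback
below are placeholder assumptions, not names defined by this patch.

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *txd;
	struct dma_slave_config cfg = {
		.direction    = DMA_DEV_TO_MEM,
		.src_addr     = fifo_phys,	/* placeholder: peripheral FIFO address */
		.src_maxburst = 16,		/* one of the CRCI block sizes accepted by adm_slave_config() */
		.device_fc    = true,		/* use CRCI flow control */
	};

	chan = dma_request_slave_channel(dev, "rx");	/* "rx" is a placeholder dmas name */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(txd))	/* this driver returns ERR_PTR() on some failures */
		return -EINVAL;

	txd->callback = xfer_done;	/* placeholder completion handler */
	txd->callback_param = ctx;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);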
18  drivers/dma/Kconfig    |   10 +
19  drivers/dma/Makefile   |    1 +
20  drivers/dma/qcom_adm.c |  871 ++++++++++++++++++++++++++++++++++++++++++++++++
21  3 files changed, 882 insertions(+)
22  create mode 100644 drivers/dma/qcom_adm.c
23
24 diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
25 index f87cef9..79155fa 100644
26 --- a/drivers/dma/Kconfig
27 +++ b/drivers/dma/Kconfig
28 @@ -410,4 +410,14 @@ config QCOM_BAM_DMA
29           Enable support for the QCOM BAM DMA controller.  This controller
30           provides DMA capabilities for a variety of on-chip devices.
31  
32 +config QCOM_ADM
33 +       tristate "Qualcomm ADM support"
34 +       depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
35 +       select DMA_ENGINE
36 +       select DMA_VIRTUAL_CHANNELS
37 +       ---help---
38 +         Enable support for the Qualcomm ADM DMA controller.  This controller
39 +         provides DMA capabilities for both general purpose and on-chip
40 +         peripheral devices.
41 +
42  endif
43 diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
44 index 5150c82..4a4f521 100644
45 --- a/drivers/dma/Makefile
46 +++ b/drivers/dma/Makefile
47 @@ -46,3 +46,4 @@ obj-$(CONFIG_K3_DMA) += k3dma.o
48  obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
49  obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
50  obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
51 +obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
52 diff --git a/drivers/dma/qcom_adm.c b/drivers/dma/qcom_adm.c
53 new file mode 100644
54 index 0000000..035f606
55 --- /dev/null
56 +++ b/drivers/dma/qcom_adm.c
57 @@ -0,0 +1,871 @@
58 +/*
59 + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
60 + *
61 + * This program is free software; you can redistribute it and/or modify
62 + * it under the terms of the GNU General Public License version 2 and
63 + * only version 2 as published by the Free Software Foundation.
64 + *
65 + * This program is distributed in the hope that it will be useful,
66 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
67 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
68 + * GNU General Public License for more details.
69 + *
70 + */
71 +
72 +#include <linux/kernel.h>
73 +#include <linux/io.h>
74 +#include <linux/init.h>
75 +#include <linux/slab.h>
76 +#include <linux/module.h>
77 +#include <linux/interrupt.h>
78 +#include <linux/dma-mapping.h>
79 +#include <linux/scatterlist.h>
80 +#include <linux/device.h>
81 +#include <linux/platform_device.h>
82 +#include <linux/of.h>
83 +#include <linux/of_address.h>
84 +#include <linux/of_irq.h>
85 +#include <linux/of_dma.h>
86 +#include <linux/reset.h>
87 +#include <linux/clk.h>
88 +#include <linux/dmaengine.h>
89 +
90 +#include "dmaengine.h"
91 +#include "virt-dma.h"
92 +
93 +/* ADM registers - calculated from channel number and security domain */
94 +#define HI_CH_CMD_PTR(chan, ee)                (4*chan + 0x20800*ee)
95 +#define HI_CH_RSLT(chan, ee)           (0x40 + 4*chan + 0x20800*ee)
96 +#define HI_CH_FLUSH_STATE0(chan, ee)   (0x80 + 4*chan + 0x20800*ee)
97 +#define HI_CH_FLUSH_STATE1(chan, ee)   (0xc0 + 4*chan + 0x20800*ee)
98 +#define HI_CH_FLUSH_STATE2(chan, ee)   (0x100 + 4*chan + 0x20800*ee)
99 +#define HI_CH_FLUSH_STATE3(chan, ee)   (0x140 + 4*chan + 0x20800*ee)
100 +#define HI_CH_FLUSH_STATE4(chan, ee)   (0x180 + 4*chan + 0x20800*ee)
101 +#define HI_CH_FLUSH_STATE5(chan, ee)   (0x1c0 + 4*chan + 0x20800*ee)
102 +#define HI_CH_STATUS_SD(chan, ee)      (0x200 + 4*chan + 0x20800*ee)
103 +#define HI_CH_CONF(chan)               (0x240 + 4*chan)
104 +#define HI_CH_RSLT_CONF(chan, ee)      (0x300 + 4*chan + 0x20800*ee)
105 +#define HI_SEC_DOMAIN_IRQ_STATUS(ee)   (0x380 + 0x20800*ee)
106 +#define HI_CI_CONF(ci)                 (0x390 + 4*ci)
107 +#define HI_CRCI_CONF0                  0x3d0
108 +#define HI_CRCI_CONF1                  0x3d4
109 +#define HI_GP_CTL                      0x3d8
110 +#define HI_CRCI_CTL(chan, ee)          (0x400 + 0x4*chan + 0x20800*ee)
111 +
112 +/* channel status */
113 +#define CH_STATUS_VALID        BIT(1)
114 +
115 +/* channel result */
116 +#define CH_RSLT_VALID  BIT(31)
117 +#define CH_RSLT_ERR    BIT(3)
118 +#define CH_RSLT_FLUSH  BIT(2)
119 +#define CH_RSLT_TPD    BIT(1)
120 +
121 +/* channel conf */
122 +#define CH_CONF_MPU_DISABLE    BIT(11)
123 +#define CH_CONF_PERM_MPU_CONF  BIT(9)
124 +#define CH_CONF_FLUSH_RSLT_EN  BIT(8)
125 +#define CH_CONF_FORCE_RSLT_EN  BIT(7)
126 +#define CH_CONF_IRQ_EN         BIT(6)
127 +
128 +/* channel result conf */
129 +#define CH_RSLT_CONF_FLUSH_EN  BIT(1)
130 +#define CH_RSLT_CONF_IRQ_EN    BIT(0)
131 +
132 +/* CRCI CTL */
133 +#define CRCI_CTL_RST   BIT(17)
134 +
135 +/* CI configuration */
136 +#define CI_RANGE_END(x)                (x << 24)
137 +#define CI_RANGE_START(x)      (x << 16)
138 +#define CI_BURST_4_WORDS       0x4
139 +#define CI_BURST_8_WORDS       0x8
140 +
141 +/* GP CTL */
142 +#define GP_CTL_LP_EN           BIT(12)
143 +#define GP_CTL_LP_CNT(x)       (x << 8)
144 +
145 +/* Command pointer list entry */
146 +#define CPLE_LP                BIT(31)
147 +
148 +/* Command list entry */
149 +#define CMD_LC                 BIT(31)
150 +#define CMD_DST_CRCI(n)                (((n) & 0xf) << 7)
151 +#define CMD_SRC_CRCI(n)                (((n) & 0xf) << 3)
152 +
153 +#define CMD_TYPE_SINGLE                0x0
154 +#define CMD_TYPE_BOX           0x3
155 +
156 +#define ADM_DESC_ALIGN 8
157 +#define ADM_MAX_XFER   (SZ_64K-1)
158 +#define ADM_MAX_ROWS   (SZ_64K-1)
159 +
160 +/* Command Pointer List Entry */
161 +#define CMD_LP         BIT(31)
162 +#define CMD_PT_MASK    (0x3 << 29)
163 +#define CMD_ADDR_MASK  0x3fffffff
164 +
165 +struct adm_desc_hw {
166 +       u32 cmd;
167 +       u32 src_addr;
168 +       u32 dst_addr;
169 +       u32 row_len;
170 +       u32 num_rows;
171 +       u32 row_offset;
172 +};
173 +
174 +struct adm_cmd_ptr_list {
175 +       u32 cple;                       /* command ptr list entry */
176 +       struct adm_desc_hw desc[0];
177 +};
178 +
179 +struct adm_async_desc {
180 +       struct virt_dma_desc vd;
181 +       struct adm_device *adev;
182 +
183 +       size_t length;
184 +       enum dma_transfer_direction dir;
185 +       dma_addr_t dma_addr;
186 +       size_t dma_len;
187 +
188 +       struct adm_cmd_ptr_list *cpl;
189 +       u32 num_desc;
190 +};
191 +
192 +struct adm_chan {
193 +       struct virt_dma_chan vc;
194 +       struct adm_device *adev;
195 +
196 +       /* parsed from DT */
197 +       u32 id;                 /* channel id */
198 +       u32 crci;               /* CRCI to be used for transfers */
199 +       u32 blk_size;           /* block size for CRCI, default 16 byte */
200 +
201 +       struct adm_async_desc *curr_txd;
202 +       struct dma_slave_config slave;
203 +       struct list_head node;
204 +
205 +       int error;
206 +       int initialized;
207 +};
208 +
209 +static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
210 +{
211 +       return container_of(common, struct adm_chan, vc.chan);
212 +}
213 +
214 +struct adm_device {
215 +       void __iomem *regs;
216 +       struct device *dev;
217 +       struct dma_device common;
218 +       struct device_dma_parameters dma_parms;
219 +       struct adm_chan *channels;
220 +       u32 num_channels;
221 +
222 +       u32 ee;
223 +
224 +       struct clk *core_clk;
225 +       struct clk *iface_clk;
226 +
227 +       struct reset_control *clk_reset;
228 +       struct reset_control *c0_reset;
229 +       struct reset_control *c1_reset;
230 +       struct reset_control *c2_reset;
231 +       int irq;
232 +};
233 +
234 +/**
235 + * adm_alloc_chan - Allocates channel resources for DMA channel
236 + *
237 + * This function is effectively a stub, as we don't need to set up any resources
238 + */
239 +static int adm_alloc_chan(struct dma_chan *chan)
240 +{
241 +       return 0;
242 +}
243 +
244 +/**
245 + * adm_free_chan - Frees dma resources associated with the specific channel
246 + *
247 + * Free all allocated descriptors associated with this channel
248 + *
249 + */
250 +static void adm_free_chan(struct dma_chan *chan)
251 +{
252 +       /* free all queued descriptors */
253 +       vchan_free_chan_resources(to_virt_chan(chan));
254 +}
255 +
256 +/**
257 + * adm_prep_slave_sg - Prep slave sg transaction
258 + *
259 + * @chan: dma channel
260 + * @sgl: scatter gather list
261 + * @sg_len: length of sg
262 + * @direction: DMA transfer direction
263 + * @flags: DMA flags
264 + * @context: transfer context (unused)
265 + */
266 +static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
267 +       struct scatterlist *sgl, unsigned int sg_len,
268 +       enum dma_transfer_direction direction, unsigned long flags,
269 +       void *context)
270 +{
271 +       struct adm_chan *achan = to_adm_chan(chan);
272 +       struct adm_device *adev = achan->adev;
273 +       struct adm_async_desc *async_desc;
274 +       struct scatterlist *sg;
275 +       u32 i, rows, num_desc = 0, idx = 0, desc_offset;
276 +       struct adm_desc_hw *desc;
277 +       struct adm_cmd_ptr_list *cpl;
278 +       u32 burst = ADM_MAX_XFER;
279 +
280 +
281 +       if (!is_slave_direction(direction)) {
282 +               dev_err(adev->dev, "invalid dma direction\n");
283 +               return NULL;
284 +       }
285 +
286 +       /* if using CRCI flow control, validate burst settings */
287 +       if (achan->slave.device_fc) {
288 +               burst = (direction == DMA_MEM_TO_DEV) ?
289 +                       achan->slave.dst_maxburst :
290 +                       achan->slave.src_maxburst;
291 +
292 +               if (!burst) {
293 +                       dev_err(adev->dev, "invalid burst value w/ crci: %d\n",
294 +                               burst);
295 +                       return ERR_PTR(-EINVAL);
296 +               }
297 +       }
298 +
299 +       /* iterate through sgs and compute allocation size of structures */
300 +       for_each_sg(sgl, sg, sg_len, i) {
301 +
302 +               /* calculate boxes using burst */
303 +               rows = DIV_ROUND_UP(sg_dma_len(sg), burst);
304 +               num_desc += DIV_ROUND_UP(rows, ADM_MAX_ROWS);
305 +
306 +               /* flow control requires length as a multiple of burst */
307 +               if (achan->slave.device_fc && (sg_dma_len(sg) % burst)) {
308 +                       dev_err(adev->dev, "length is not multiple of burst\n");
309 +                       return ERR_PTR(-EINVAL);
310 +               }
311 +       }
312 +
313 +       async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
314 +       if (!async_desc)
315 +               return ERR_PTR(-ENOMEM);
316 +
317 +       async_desc->dma_len = num_desc * sizeof(*desc) + sizeof(*cpl) +
318 +                               ADM_DESC_ALIGN;
319 +       async_desc->cpl = dma_alloc_writecombine(adev->dev, async_desc->dma_len,
320 +                       &async_desc->dma_addr, GFP_NOWAIT);
321 +
322 +       if (!async_desc->cpl) {
323 +               kfree(async_desc);
324 +               return ERR_PTR(-ENOMEM);
325 +       }
326 +
327 +       async_desc->num_desc = num_desc;
328 +       async_desc->adev = adev;
329 +       cpl = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
330 +       desc = PTR_ALIGN(&cpl->desc[0], ADM_DESC_ALIGN);
331 +       desc_offset = (u32)desc - (u32)async_desc->cpl;
332 +
333 +       /* init cmd list */
334 +       cpl->cple |= CPLE_LP;
335 +       cpl->cple |= (async_desc->dma_addr + desc_offset) >> 3;
336 +
337 +       for_each_sg(sgl, sg, sg_len, i) {
338 +               unsigned int remainder = sg_dma_len(sg);
339 +               unsigned int curr_offset = 0;
340 +               unsigned int row_len;
341 +
342 +               do {
343 +                       desc[idx].cmd = CMD_TYPE_BOX;
344 +                       desc[idx].row_offset = 0;
345 +
346 +                       if (direction == DMA_DEV_TO_MEM) {
347 +                               desc[idx].dst_addr = sg_dma_address(sg) +
348 +                                                       curr_offset;
349 +                               desc[idx].src_addr = achan->slave.src_addr;
350 +                               desc[idx].cmd |= CMD_SRC_CRCI(achan->crci);
351 +                               desc[idx].row_offset = burst;
352 +                       } else {
353 +                               desc[idx].src_addr = sg_dma_address(sg) +
354 +                                                       curr_offset;
355 +                               desc[idx].dst_addr = achan->slave.dst_addr;
356 +                               desc[idx].cmd |= CMD_DST_CRCI(achan->crci);
357 +                               desc[idx].row_offset = burst << 16;
358 +                       }
359 +
360 +                       if (remainder < burst) {
361 +                               rows = 1;
362 +                               row_len = remainder;
363 +                       } else {
364 +                               rows = remainder / burst;
365 +                               rows = min_t(u32, rows, ADM_MAX_ROWS);
366 +                               row_len = burst;
367 +                       }
368 +
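+                       /*
+                        * The row count and row length are written twice, in the
+                        * upper and lower halfwords (one per transfer side); the
+                        * same value is used for both sides here.
+                        */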
369 +                       desc[idx].num_rows = rows << 16 | rows;
370 +                       desc[idx].row_len = row_len << 16 | row_len;
371 +
372 +                       remainder -= row_len * rows;
373 +                       async_desc->length += row_len * rows;
374 +                       curr_offset += row_len * rows;
375 +
376 +                       idx++;
377 +               } while (remainder > 0);
378 +       }
379 +
380 +       /* set last command flag */
381 +       desc[idx - 1].cmd |= CMD_LC;
382 +
383 +       /* reset channel error */
384 +       achan->error = 0;
385 +
386 +       return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
387 +}
388 +
389 +/**
390 + * adm_slave_config - set slave configuration for channel
391 + * @achan: adm dma channel
392 + * @cfg: slave configuration
393 + *
394 + * Sets slave configuration for channel
395 + *
396 + */
397 +static int adm_slave_config(struct adm_chan *achan,
398 +               struct dma_slave_config *cfg)
399 +{
400 +       int ret = 0;
401 +       u32 burst;
402 +       struct adm_device *adev = achan->adev;
403 +
404 +       memcpy(&achan->slave, cfg, sizeof(*cfg));
405 +
406 +       /* set channel CRCI burst, if applicable */
407 +       if (achan->crci) {
408 +               burst = max_t(u32, cfg->src_maxburst, cfg->dst_maxburst);
409 +
410 +               switch (burst) {
411 +               case 16:
412 +                       achan->blk_size = 0;
413 +                       break;
414 +               case 32:
415 +                       achan->blk_size = 1;
416 +                       break;
417 +               case 64:
418 +                       achan->blk_size = 2;
419 +                       break;
420 +               case 128:
421 +                       achan->blk_size = 3;
422 +                       break;
423 +               case 192:
424 +                       achan->blk_size = 4;
425 +                       break;
426 +               case 256:
427 +                       achan->blk_size = 5;
428 +                       break;
429 +               default:
430 +                       achan->slave.src_maxburst = 0;
431 +                       achan->slave.dst_maxburst = 0;
432 +                       ret = -EINVAL;
433 +                       break;
434 +               }
435 +
436 +               if (!ret)
437 +                       writel(achan->blk_size,
438 +                               adev->regs + HI_CRCI_CTL(achan->crci, adev->ee));
439 +       }
440 +
441 +       return ret;
442 +}
443 +
444 +/**
445 + * adm_terminate_all - terminate all transactions on a channel
446 + * @achan: adm dma channel
447 + *
448 + * Dequeues and frees all transactions, aborts current transaction
449 + * No callbacks are done
450 + *
451 + */
452 +static void adm_terminate_all(struct adm_chan *achan)
453 +{
454 +       struct adm_device *adev = achan->adev;
455 +       unsigned long flags;
456 +       LIST_HEAD(head);
457 +
458 +       /* send flush command to terminate current transaction */
459 +       writel_relaxed(0x0,
460 +               adev->regs + HI_CH_FLUSH_STATE0(achan->id, adev->ee));
461 +
462 +       spin_lock_irqsave(&achan->vc.lock, flags);
463 +       vchan_get_all_descriptors(&achan->vc, &head);
464 +       spin_unlock_irqrestore(&achan->vc.lock, flags);
465 +
466 +       vchan_dma_desc_free_list(&achan->vc, &head);
467 +}
468 +
469 +/**
470 + * adm_control - DMA device control
471 + * @chan: dma channel
472 + * @cmd: control cmd
473 + * @arg: cmd argument
474 + *
475 + * Perform DMA control command
476 + *
477 + */
478 +static int adm_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
479 +       unsigned long arg)
480 +{
481 +       struct adm_chan *achan = to_adm_chan(chan);
482 +       unsigned long flag;
483 +       int ret = 0;
484 +
485 +       switch (cmd) {
486 +       case DMA_SLAVE_CONFIG:
487 +               spin_lock_irqsave(&achan->vc.lock, flag);
488 +               ret = adm_slave_config(achan, (struct dma_slave_config *)arg);
489 +               spin_unlock_irqrestore(&achan->vc.lock, flag);
490 +               break;
491 +
492 +       case DMA_TERMINATE_ALL:
493 +               adm_terminate_all(achan);
494 +               break;
495 +
496 +       default:
497 +               ret = -ENXIO;
498 +               break;
499 +       }
500 +
501 +       return ret;
502 +}
503 +
504 +/**
505 + * adm_start_dma - start next transaction
506 + * @achan: ADM dma channel
507 + */
508 +static void adm_start_dma(struct adm_chan *achan)
509 +{
510 +       struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
511 +       struct adm_device *adev = achan->adev;
512 +       struct adm_async_desc *async_desc;
513 +       struct adm_desc_hw *desc;
514 +       struct adm_cmd_ptr_list *cpl;
515 +
516 +       lockdep_assert_held(&achan->vc.lock);
517 +
518 +       if (!vd)
519 +               return;
520 +
521 +       list_del(&vd->node);
522 +
523 +       /* take the next issued descriptor and make it the current transaction */
524 +       async_desc = container_of(vd, struct adm_async_desc, vd);
525 +       achan->curr_txd = async_desc;
526 +
527 +       cpl = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
528 +       desc = PTR_ALIGN(&cpl->desc[0], ADM_DESC_ALIGN);
529 +
530 +       if (!achan->initialized) {
531 +               /* enable interrupts */
532 +               writel(CH_CONF_IRQ_EN | CH_CONF_FLUSH_RSLT_EN |
533 +                       CH_CONF_FORCE_RSLT_EN | CH_CONF_PERM_MPU_CONF |
534 +                       CH_CONF_MPU_DISABLE,
535 +                       adev->regs + HI_CH_CONF(achan->id));
536 +
537 +               writel(CH_RSLT_CONF_IRQ_EN | CH_RSLT_CONF_FLUSH_EN,
538 +                       adev->regs + HI_CH_RSLT_CONF(achan->id, adev->ee));
539 +
540 +               if (achan->crci)
541 +                       writel(achan->blk_size, adev->regs +
542 +                               HI_CRCI_CTL(achan->crci, adev->ee));
543 +
544 +               achan->initialized = 1;
545 +       }
546 +
547 +       /* make sure IRQ enable doesn't get reordered */
548 +       wmb();
549 +
550 +       /* write next command list out to the CMD FIFO */
551 +       writel(round_up(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
552 +               adev->regs + HI_CH_CMD_PTR(achan->id, adev->ee));
553 +}
554 +
555 +/**
556 + * adm_dma_irq - irq handler for ADM controller
557 + * @irq: IRQ of interrupt
558 + * @data: callback data
559 + *
560 + * IRQ handler for the ADM controller
561 + */
562 +static irqreturn_t adm_dma_irq(int irq, void *data)
563 +{
564 +       struct adm_device *adev = data;
565 +       u32 srcs, i;
566 +       struct adm_async_desc *async_desc;
567 +       unsigned long flags;
568 +
569 +       srcs = readl_relaxed(adev->regs +
570 +                       HI_SEC_DOMAIN_IRQ_STATUS(adev->ee));
571 +
572 +       for (i = 0; i < 16; i++) {
573 +               struct adm_chan *achan = &adev->channels[i];
574 +               u32 status, result;
575 +               if (srcs & BIT(i)) {
576 +                       status = readl_relaxed(adev->regs +
577 +                               HI_CH_STATUS_SD(i, adev->ee));
578 +
579 +                       /* if no result present, skip */
580 +                       if (!(status & CH_STATUS_VALID))
581 +                               continue;
582 +
583 +                       result = readl_relaxed(adev->regs +
584 +                               HI_CH_RSLT(i, adev->ee));
585 +
586 +                       /* no valid results, skip */
587 +                       if (!(result & CH_RSLT_VALID))
588 +                               continue;
589 +
590 +                       /* flag error if transaction was flushed or failed */
591 +                       if (result & (CH_RSLT_ERR | CH_RSLT_FLUSH))
592 +                               achan->error = 1;
593 +
594 +                       spin_lock_irqsave(&achan->vc.lock, flags);
595 +                       async_desc = achan->curr_txd;
596 +
597 +                       achan->curr_txd = NULL;
598 +
599 +                       if (async_desc) {
600 +                               vchan_cookie_complete(&async_desc->vd);
601 +
602 +                               /* kick off next DMA */
603 +                               adm_start_dma(achan);
604 +                       }
605 +
606 +                       spin_unlock_irqrestore(&achan->vc.lock, flags);
607 +               }
608 +       }
609 +
610 +       return IRQ_HANDLED;
611 +}
612 +
613 +/**
614 + * adm_tx_status - returns status of transaction
615 + * @chan: dma channel
616 + * @cookie: transaction cookie
617 + * @txstate: DMA transaction state
618 + *
619 + * Return status of dma transaction
620 + */
621 +static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
622 +       struct dma_tx_state *txstate)
623 +{
624 +       struct adm_chan *achan = to_adm_chan(chan);
625 +       struct virt_dma_desc *vd;
626 +       enum dma_status ret;
627 +       unsigned long flags;
628 +       size_t residue = 0;
629 +
630 +       ret = dma_cookie_status(chan, cookie, txstate);
631 +
632 +       spin_lock_irqsave(&achan->vc.lock, flags);
633 +
634 +       vd = vchan_find_desc(&achan->vc, cookie);
635 +       if (vd)
636 +               residue = container_of(vd, struct adm_async_desc, vd)->length;
637 +       else if (achan->curr_txd && achan->curr_txd->vd.tx.cookie == cookie)
638 +               residue = achan->curr_txd->length;
639 +
640 +       spin_unlock_irqrestore(&achan->vc.lock, flags);
641 +
642 +       dma_set_residue(txstate, residue);
643 +
644 +       if (achan->error)
645 +               return DMA_ERROR;
646 +
647 +       return ret;
648 +}
649 +
650 +static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
651 +       struct of_dma *of)
652 +{
653 +       struct adm_device *adev = container_of(of->of_dma_data,
654 +                       struct adm_device, common);
655 +       struct adm_chan *achan;
656 +       struct dma_chan *chan;
657 +       unsigned int request;
658 +       unsigned int crci;
659 +
660 +       if (dma_spec->args_count != 2) {
661 +               dev_err(adev->dev, "incorrect number of dma arguments\n");
662 +               return NULL;
663 +       }
664 +
665 +       request = dma_spec->args[0];
666 +       if (request >= adev->num_channels)
667 +               return NULL;
668 +
669 +       crci = dma_spec->args[1];
670 +
671 +       chan = dma_get_slave_channel(&(adev->channels[request].vc.chan));
672 +
673 +       if (!chan)
674 +               return NULL;
675 +
676 +       achan = to_adm_chan(chan);
677 +       achan->crci = crci;
678 +
679 +       return chan;
680 +}
681 +
682 +/**
683 + * adm_issue_pending - starts pending transactions
684 + * @chan: dma channel
685 + *
686 + * Issues all pending transactions and starts DMA
687 + */
688 +static void adm_issue_pending(struct dma_chan *chan)
689 +{
690 +       struct adm_chan *achan = to_adm_chan(chan);
691 +       unsigned long flags;
692 +
693 +       spin_lock_irqsave(&achan->vc.lock, flags);
694 +
695 +       if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
696 +               adm_start_dma(achan);
697 +       spin_unlock_irqrestore(&achan->vc.lock, flags);
698 +}
699 +
700 +/**
701 + * adm_dma_free_desc - free descriptor memory
702 + * @vd: virtual descriptor
703 + *
704 + */
705 +static void adm_dma_free_desc(struct virt_dma_desc *vd)
706 +{
707 +       struct adm_async_desc *async_desc = container_of(vd,
708 +                       struct adm_async_desc, vd);
709 +
710 +       dma_free_writecombine(async_desc->adev->dev, async_desc->dma_len,
711 +               async_desc->cpl, async_desc->dma_addr);
712 +       kfree(async_desc);
713 +}
714 +
715 +static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
716 +       u32 index)
717 +{
718 +       achan->id = index;
719 +       achan->adev = adev;
720 +
721 +       vchan_init(&achan->vc, &adev->common);
722 +       achan->vc.desc_free = adm_dma_free_desc;
723 +}
724 +
725 +static int adm_dma_probe(struct platform_device *pdev)
726 +{
727 +       struct adm_device *adev;
728 +       struct resource *iores;
729 +       int ret;
730 +       u32 i;
731 +
732 +       adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
733 +       if (!adev)
734 +               return -ENOMEM;
735 +
736 +       adev->dev = &pdev->dev;
737 +
738 +       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
739 +       adev->regs = devm_ioremap_resource(&pdev->dev, iores);
740 +       if (IS_ERR(adev->regs))
741 +               return PTR_ERR(adev->regs);
742 +
743 +       adev->irq = platform_get_irq(pdev, 0);
744 +       if (adev->irq < 0)
745 +               return adev->irq;
746 +
747 +       ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
748 +       if (ret) {
749 +               dev_err(adev->dev, "Execution environment unspecified\n");
750 +               return ret;
751 +       }
752 +
753 +       adev->core_clk = devm_clk_get(adev->dev, "core");
754 +       if (IS_ERR(adev->core_clk))
755 +               return PTR_ERR(adev->core_clk);
756 +
757 +       ret = clk_prepare_enable(adev->core_clk);
758 +       if (ret) {
759 +               dev_err(adev->dev, "failed to prepare/enable core clock\n");
760 +               return ret;
761 +       }
762 +
763 +       adev->iface_clk = devm_clk_get(adev->dev, "iface");
764 +       if (IS_ERR(adev->iface_clk))
765 +               return PTR_ERR(adev->iface_clk);
766 +
767 +       ret = clk_prepare_enable(adev->iface_clk);
768 +       if (ret) {
769 +               dev_err(adev->dev, "failed to prepare/enable iface clock\n");
770 +               return ret;
771 +       }
772 +
773 +       adev->clk_reset = devm_reset_control_get(&pdev->dev, "clk");
774 +       if (IS_ERR(adev->clk_reset)) {
775 +               dev_err(adev->dev, "failed to get ADM0 reset\n");
776 +               return PTR_ERR(adev->clk_reset);
777 +       }
778 +
779 +       adev->c0_reset = devm_reset_control_get(&pdev->dev, "c0");
780 +       if (IS_ERR(adev->c0_reset)) {
781 +               dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
782 +               return PTR_ERR(adev->c0_reset);
783 +       }
784 +
785 +       adev->c1_reset = devm_reset_control_get(&pdev->dev, "c1");
786 +       if (IS_ERR(adev->c1_reset)) {
787 +               dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
788 +               return PTR_ERR(adev->c1_reset);
789 +       }
790 +
791 +       adev->c2_reset = devm_reset_control_get(&pdev->dev, "c2");
792 +       if (IS_ERR(adev->c2_reset)) {
793 +               dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
794 +               return PTR_ERR(adev->c2_reset);
795 +       }
796 +
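+       /* pulse all ADM resets so the engine comes up in a known state */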
797 +       reset_control_assert(adev->clk_reset);
798 +       reset_control_assert(adev->c0_reset);
799 +       reset_control_assert(adev->c1_reset);
800 +       reset_control_assert(adev->c2_reset);
801 +
802 +       reset_control_deassert(adev->clk_reset);
803 +       reset_control_deassert(adev->c0_reset);
804 +       reset_control_deassert(adev->c1_reset);
805 +       reset_control_deassert(adev->c2_reset);
806 +
807 +       adev->num_channels = 16;
808 +
809 +       adev->channels = devm_kcalloc(adev->dev, adev->num_channels,
810 +                               sizeof(*adev->channels), GFP_KERNEL);
811 +
812 +       if (!adev->channels) {
813 +               ret = -ENOMEM;
814 +               goto err_disable_clk;
815 +       }
816 +
817 +       /* allocate and initialize channels */
818 +       INIT_LIST_HEAD(&adev->common.channels);
819 +
820 +       for (i = 0; i < adev->num_channels; i++)
821 +               adm_channel_init(adev, &adev->channels[i], i);
822 +
823 +       /* reset CRCIs */
824 +       for (i = 0; i < 16; i++)
825 +               writel(CRCI_CTL_RST, adev->regs + HI_CRCI_CTL(i, adev->ee));
826 +
827 +       /* configure client interfaces */
828 +       writel(CI_RANGE_START(0x40) | CI_RANGE_END(0xb0) | CI_BURST_8_WORDS,
829 +               adev->regs + HI_CI_CONF(0));
830 +       writel(CI_RANGE_START(0x2a) | CI_RANGE_END(0x2c) | CI_BURST_8_WORDS,
831 +               adev->regs + HI_CI_CONF(1));
832 +       writel(CI_RANGE_START(0x12) | CI_RANGE_END(0x28) | CI_BURST_8_WORDS,
833 +               adev->regs + HI_CI_CONF(2));
834 +       writel(GP_CTL_LP_EN | GP_CTL_LP_CNT(0xf), adev->regs + HI_GP_CTL);
835 +
836 +       ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
837 +                       0, "adm_dma", adev);
838 +       if (ret)
839 +               goto err_disable_clk;
840 +
841 +       platform_set_drvdata(pdev, adev);
842 +
843 +       adev->common.dev = adev->dev;
844 +       adev->common.dev->dma_parms = &adev->dma_parms;
845 +
846 +       /* set capabilities */
847 +       dma_cap_zero(adev->common.cap_mask);
848 +       dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
849 +       dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
850 +
851 +       /* initialize dmaengine apis */
852 +       adev->common.device_alloc_chan_resources = adm_alloc_chan;
853 +       adev->common.device_free_chan_resources = adm_free_chan;
854 +       adev->common.device_prep_slave_sg = adm_prep_slave_sg;
855 +       adev->common.device_control = adm_control;
856 +       adev->common.device_issue_pending = adm_issue_pending;
857 +       adev->common.device_tx_status = adm_tx_status;
858 +
859 +       ret = dma_async_device_register(&adev->common);
860 +       if (ret) {
861 +               dev_err(adev->dev, "failed to register dma async device\n");
862 +               goto err_disable_clk;
863 +       }
864 +
865 +       ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
866 +                                       &adev->common);
867 +       if (ret)
868 +               goto err_unregister_dma;
869 +
870 +       return 0;
871 +
872 +err_unregister_dma:
873 +       dma_async_device_unregister(&adev->common);
874 +err_disable_clk:
875 +       clk_disable_unprepare(adev->core_clk);
876 +       clk_disable_unprepare(adev->iface_clk);
877 +
878 +       return ret;
879 +}
880 +
881 +static int adm_dma_remove(struct platform_device *pdev)
882 +{
883 +       struct adm_device *adev = platform_get_drvdata(pdev);
884 +       struct adm_chan *achan;
885 +       u32 i;
886 +
887 +       of_dma_controller_free(pdev->dev.of_node);
888 +       dma_async_device_unregister(&adev->common);
889 +
890 +       devm_free_irq(adev->dev, adev->irq, adev);
891 +
892 +       for (i = 0; i < adev->num_channels; i++) {
893 +               achan = &adev->channels[i];
894 +               writel(CH_CONF_FLUSH_RSLT_EN,
895 +                       adev->regs + HI_CH_CONF(achan->id));
896 +               writel(CH_RSLT_CONF_FLUSH_EN,
897 +                       adev->regs + HI_CH_RSLT_CONF(achan->id, adev->ee));
898 +
899 +               adm_terminate_all(&adev->channels[i]);
900 +       }
901 +
902 +       clk_disable_unprepare(adev->core_clk);
903 +       clk_disable_unprepare(adev->iface_clk);
904 +
905 +       return 0;
906 +}
907 +
908 +static const struct of_device_id adm_of_match[] = {
909 +       { .compatible = "qcom,adm", },
910 +       {}
911 +};
912 +MODULE_DEVICE_TABLE(of, adm_of_match);
913 +
914 +static struct platform_driver adm_dma_driver = {
915 +       .probe = adm_dma_probe,
916 +       .remove = adm_dma_remove,
917 +       .driver = {
918 +               .name = "adm-dma-engine",
919 +               .owner = THIS_MODULE,
920 +               .of_match_table = adm_of_match,
921 +       },
922 +};
923 +
924 +module_platform_driver(adm_dma_driver);
925 +
926 +MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
927 +MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
928 +MODULE_LICENSE("GPL v2");
929 -- 
930 1.7.10.4
931