kernel: update 3.14 to 3.14.18
openwrt.git: target/linux/ipq806x/patches/0177-dmaengine-Add-QCOM-ADM-DMA-driver.patch
From 8984e3fc6db029479d6aa78882b39235379aebff Mon Sep 17 00:00:00 2001
From: Andy Gross <agross@codeaurora.org>
Date: Wed, 14 May 2014 13:45:07 -0500
Subject: [PATCH 177/182] dmaengine: Add QCOM ADM DMA driver

Add the DMA engine driver for the QCOM Application Data Mover (ADM) DMA
controller found in the MSM8960 and IPQ/APQ8064 platforms.

The ADM supports both memory-to-memory transactions and memory
to/from peripheral device transactions.  The controller also provides
flow control for transactions to/from peripheral devices.

The initial release of this driver supports slave transfers to/from
peripherals and also incorporates CRCI (client rate control interface)
flow control.

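For context (an illustration only, not part of this patch): a peripheral
driver consumes this controller through the generic dmaengine slave API,
with the channel's two DT cells selecting the ADM channel and CRCI (see
adm_dma_xlate() below).  The sketch that follows assumes
<linux/dmaengine.h>; the FIFO address, channel name "rx", and callback are
made up for the example, and with CRCI flow control the burst must be one
of the sizes accepted by adm_slave_config() (16/32/64/128/192/256):

  static int example_start_rx(struct device *dev, struct scatterlist *sgl,
                              unsigned int sg_len,
                              dma_async_tx_callback rx_done)
  {
          struct dma_slave_config cfg = {
                  .direction    = DMA_DEV_TO_MEM,
                  .src_addr     = 0x12345000,   /* illustrative FIFO address */
                  .src_maxburst = 16,           /* one of 16/32/64/128/192/256 */
                  .device_fc    = true,         /* request CRCI flow control */
          };
          struct dma_async_tx_descriptor *txd;
          struct dma_chan *chan;

          chan = dma_request_slave_channel(dev, "rx");  /* name from dma-names */
          if (!chan)
                  return -ENODEV;

          dmaengine_slave_config(chan, &cfg);
          txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT);
          if (!txd) {
                  dma_release_channel(chan);
                  return -EIO;
          }

          txd->callback = rx_done;      /* hypothetical completion handler */
          dmaengine_submit(txd);
          dma_async_issue_pending(chan);
          return 0;
  }
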
Signed-off-by: Andy Gross <agross@codeaurora.org>
---
 drivers/dma/Kconfig    |   10 +
 drivers/dma/Makefile   |    1 +
 drivers/dma/qcom_adm.c |  871 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 882 insertions(+)
 create mode 100644 drivers/dma/qcom_adm.c

--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -410,4 +410,14 @@ config QCOM_BAM_DMA
          Enable support for the QCOM BAM DMA controller.  This controller
          provides DMA capabilities for a variety of on-chip devices.
 
+config QCOM_ADM
+       tristate "Qualcomm ADM support"
+       depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       ---help---
+         Enable support for the Qualcomm ADM DMA controller.  This controller
+         provides DMA capabilities for both general purpose and on-chip
+         peripheral devices.
+
 endif
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
--- /dev/null
+++ b/drivers/dma/qcom_adm.c
@@ -0,0 +1,871 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define HI_CH_CMD_PTR(chan, ee)                (4*chan + 0x20800*ee)
+#define HI_CH_RSLT(chan, ee)           (0x40 + 4*chan + 0x20800*ee)
+#define HI_CH_FLUSH_STATE0(chan, ee)   (0x80 + 4*chan + 0x20800*ee)
+#define HI_CH_FLUSH_STATE1(chan, ee)   (0xc0 + 4*chan + 0x20800*ee)
+#define HI_CH_FLUSH_STATE2(chan, ee)   (0x100 + 4*chan + 0x20800*ee)
+#define HI_CH_FLUSH_STATE3(chan, ee)   (0x140 + 4*chan + 0x20800*ee)
+#define HI_CH_FLUSH_STATE4(chan, ee)   (0x180 + 4*chan + 0x20800*ee)
+#define HI_CH_FLUSH_STATE5(chan, ee)   (0x1c0 + 4*chan + 0x20800*ee)
+#define HI_CH_STATUS_SD(chan, ee)      (0x200 + 4*chan + 0x20800*ee)
+#define HI_CH_CONF(chan)               (0x240 + 4*chan)
+#define HI_CH_RSLT_CONF(chan, ee)      (0x300 + 4*chan + 0x20800*ee)
+#define HI_SEC_DOMAIN_IRQ_STATUS(ee)   (0x380 + 0x20800*ee)
+#define HI_CI_CONF(ci)                 (0x390 + 4*ci)
+#define HI_CRCI_CONF0                  0x3d0
+#define HI_CRCI_CONF1                  0x3d4
+#define HI_GP_CTL                      0x3d8
+#define HI_CRCI_CTL(chan, ee)          (0x400 + 0x4*chan + 0x20800*ee)
+
+/* channel status */
+#define CH_STATUS_VALID        BIT(1)
+
+/* channel result */
+#define CH_RSLT_VALID  BIT(31)
+#define CH_RSLT_ERR    BIT(3)
+#define CH_RSLT_FLUSH  BIT(2)
+#define CH_RSLT_TPD    BIT(1)
+
+/* channel conf */
+#define CH_CONF_MPU_DISABLE    BIT(11)
+#define CH_CONF_PERM_MPU_CONF  BIT(9)
+#define CH_CONF_FLUSH_RSLT_EN  BIT(8)
+#define CH_CONF_FORCE_RSLT_EN  BIT(7)
+#define CH_CONF_IRQ_EN         BIT(6)
+
+/* channel result conf */
+#define CH_RSLT_CONF_FLUSH_EN  BIT(1)
+#define CH_RSLT_CONF_IRQ_EN    BIT(0)
+
+/* CRCI CTL */
+#define CRCI_CTL_RST   BIT(17)
+
+/* CI configuration */
+#define CI_RANGE_END(x)                (x << 24)
+#define CI_RANGE_START(x)      (x << 16)
+#define CI_BURST_4_WORDS       0x4
+#define CI_BURST_8_WORDS       0x8
+
+/* GP CTL */
+#define GP_CTL_LP_EN           BIT(12)
+#define GP_CTL_LP_CNT(x)       (x << 8)
+
+/* Command pointer list entry */
+#define CPLE_LP                BIT(31)
+
+/* Command list entry */
+#define CMD_LC                 BIT(31)
+#define CMD_DST_CRCI(n)                (((n) & 0xf) << 7)
+#define CMD_SRC_CRCI(n)                (((n) & 0xf) << 3)
+
+#define CMD_TYPE_SINGLE                0x0
+#define CMD_TYPE_BOX           0x3
+
+#define ADM_DESC_ALIGN 8
+#define ADM_MAX_XFER   (SZ_64K-1)
+#define ADM_MAX_ROWS   (SZ_64K-1)
+
+/* Command Pointer List Entry */
+#define CMD_LP         BIT(31)
+#define CMD_PT_MASK    (0x3 << 29)
+#define CMD_ADDR_MASK  0x3fffffff
+
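+/*
+ * Box-mode hardware descriptor.  In row_offset the source value lives in
+ * the upper halfword and the destination value in the lower halfword;
+ * row_len and num_rows pack the same value into both halves (see
+ * adm_prep_slave_sg() below).
+ */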
+struct adm_desc_hw {
+       u32 cmd;
+       u32 src_addr;
+       u32 dst_addr;
+       u32 row_len;
+       u32 num_rows;
+       u32 row_offset;
+};
+
+struct adm_cmd_ptr_list {
+       u32 cple;                       /* command ptr list entry */
+       struct adm_desc_hw desc[0];
+};
+
+struct adm_async_desc {
+       struct virt_dma_desc vd;
+       struct adm_device *adev;
+
+       size_t length;
+       enum dma_transfer_direction dir;
+       dma_addr_t dma_addr;
+       size_t dma_len;
+
+       struct adm_cmd_ptr_list *cpl;
+       u32 num_desc;
+};
+
+struct adm_chan {
+       struct virt_dma_chan vc;
+       struct adm_device *adev;
+
+       /* parsed from DT */
+       u32 id;                 /* channel id */
+       u32 crci;               /* CRCI to be used for transfers */
+       u32 blk_size;           /* block size for CRCI, default 16 byte */
+
+       struct adm_async_desc *curr_txd;
+       struct dma_slave_config slave;
+       struct list_head node;
+
+       int error;
+       int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+       return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+       void __iomem *regs;
+       struct device *dev;
+       struct dma_device common;
+       struct device_dma_parameters dma_parms;
+       struct adm_chan *channels;
+       u32 num_channels;
+
+       u32 ee;
+
+       struct clk *core_clk;
+       struct clk *iface_clk;
+
+       struct reset_control *clk_reset;
+       struct reset_control *c0_reset;
+       struct reset_control *c1_reset;
+       struct reset_control *c2_reset;
+       int irq;
+};
+
+/**
+ * adm_alloc_chan - Allocates channel resources for DMA channel
+ * @chan: dma channel
+ *
+ * This function is effectively a stub, as we don't need to set up any resources
+ */
+static int adm_alloc_chan(struct dma_chan *chan)
+{
+       return 0;
+}
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ * @chan: dma channel
+ *
+ * Free all allocated descriptors associated with this channel
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+       /* free all queued descriptors */
+       vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+       struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction direction, unsigned long flags,
+       void *context)
+{
+       struct adm_chan *achan = to_adm_chan(chan);
+       struct adm_device *adev = achan->adev;
+       struct adm_async_desc *async_desc;
+       struct scatterlist *sg;
+       u32 i, rows, num_desc = 0, idx = 0, desc_offset;
+       struct adm_desc_hw *desc;
+       struct adm_cmd_ptr_list *cpl;
+       u32 burst = ADM_MAX_XFER;
+
+       if (!is_slave_direction(direction)) {
+               dev_err(adev->dev, "invalid dma direction\n");
+               return NULL;
+       }
+
+       /* if using CRCI flow control, validate burst settings */
+       if (achan->slave.device_fc) {
+               burst = (direction == DMA_MEM_TO_DEV) ?
+                       achan->slave.dst_maxburst :
+                       achan->slave.src_maxburst;
+
+               if (!burst) {
+                       dev_err(adev->dev, "invalid burst value w/ crci: %d\n",
+                               burst);
+                       return NULL;
+               }
+       }
+
+       /* iterate through sgs and compute allocation size of structures */
+       for_each_sg(sgl, sg, sg_len, i) {
+
+               /* calculate boxes using burst */
+               rows = DIV_ROUND_UP(sg_dma_len(sg), burst);
+               num_desc += DIV_ROUND_UP(rows, ADM_MAX_ROWS);
+
+               /* flow control requires length as a multiple of burst */
+               if (achan->slave.device_fc && (sg_dma_len(sg) % burst)) {
+                       dev_err(adev->dev, "length is not multiple of burst\n");
+                       return NULL;
+               }
+       }
+
+       async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+       if (!async_desc)
+               return NULL;
+
+       async_desc->dma_len = num_desc * sizeof(*desc) + sizeof(*cpl) +
+                               ADM_DESC_ALIGN;
+       async_desc->cpl = dma_alloc_writecombine(adev->dev, async_desc->dma_len,
+                       &async_desc->dma_addr, GFP_NOWAIT);
+
+       if (!async_desc->cpl) {
+               kfree(async_desc);
+               return NULL;
+       }
+
+       async_desc->num_desc = num_desc;
+       async_desc->adev = adev;
+       cpl = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+       desc = PTR_ALIGN(&cpl->desc[0], ADM_DESC_ALIGN);
+       desc_offset = (u32)desc - (u32)async_desc->cpl;
+
+       /* init cmd list */
+       cpl->cple |= CPLE_LP;
+       cpl->cple |= (async_desc->dma_addr + desc_offset) >> 3;
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               unsigned int remainder = sg_dma_len(sg);
+               unsigned int curr_offset = 0;
+               unsigned int row_len;
+
+               do {
+                       desc[idx].cmd = CMD_TYPE_BOX;
+                       desc[idx].row_offset = 0;
+
+                       if (direction == DMA_DEV_TO_MEM) {
+                               desc[idx].dst_addr = sg_dma_address(sg) +
+                                                       curr_offset;
+                               desc[idx].src_addr = achan->slave.src_addr;
+                               desc[idx].cmd |= CMD_SRC_CRCI(achan->crci);
+                               desc[idx].row_offset = burst;
+                       } else {
+                               desc[idx].src_addr = sg_dma_address(sg) +
+                                                       curr_offset;
+                               desc[idx].dst_addr = achan->slave.dst_addr;
+                               desc[idx].cmd |= CMD_DST_CRCI(achan->crci);
+                               desc[idx].row_offset = burst << 16;
+                       }
+
+                       if (remainder < burst) {
+                               rows = 1;
+                               row_len = remainder;
+                       } else {
+                               rows = remainder / burst;
+                               rows = min_t(u32, rows, ADM_MAX_ROWS);
+                               row_len = burst;
+                       }
+
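+                       /* same row count/length packed into the src (upper)
+                        * and dst (lower) halfwords
+                        */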
+                       desc[idx].num_rows = rows << 16 | rows;
+                       desc[idx].row_len = row_len << 16 | row_len;
+
+                       remainder -= row_len * rows;
+                       async_desc->length += row_len * rows;
+                       curr_offset += row_len * rows;
+
+                       idx++;
+               } while (remainder > 0);
+       }
+
+       /* set last command flag */
+       desc[idx - 1].cmd |= CMD_LC;
+
+       /* reset channel error */
+       achan->error = 0;
+
+       return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+}
+
+/**
+ * adm_slave_config - set slave configuration for channel
+ * @achan: adm dma channel
+ * @cfg: slave configuration
+ */
+static int adm_slave_config(struct adm_chan *achan,
+               struct dma_slave_config *cfg)
+{
+       int ret = 0;
+       u32 burst;
+       struct adm_device *adev = achan->adev;
+
+       memcpy(&achan->slave, cfg, sizeof(*cfg));
+
+       /* set channel CRCI burst, if applicable */
+       if (achan->crci) {
+               burst = max_t(u32, cfg->src_maxburst, cfg->dst_maxburst);
+
+               switch (burst) {
+               case 16:
+                       achan->blk_size = 0;
+                       break;
+               case 32:
+                       achan->blk_size = 1;
+                       break;
+               case 64:
+                       achan->blk_size = 2;
+                       break;
+               case 128:
+                       achan->blk_size = 3;
+                       break;
+               case 192:
+                       achan->blk_size = 4;
+                       break;
+               case 256:
+                       achan->blk_size = 5;
+                       break;
+               default:
+                       achan->slave.src_maxburst = 0;
+                       achan->slave.dst_maxburst = 0;
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (!ret)
+                       writel(achan->blk_size,
+                               adev->regs + HI_CRCI_CTL(achan->id, adev->ee));
+       }
+
+       return ret;
+}
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @achan: adm dma channel
+ *
+ * Dequeues and frees all transactions, aborts current transaction.
+ * No callbacks are done.
+ */
+static void adm_terminate_all(struct adm_chan *achan)
+{
+       struct adm_device *adev = achan->adev;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       /* send flush command to terminate current transaction */
+       writel_relaxed(0x0,
+               adev->regs + HI_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+       spin_lock_irqsave(&achan->vc.lock, flags);
+       vchan_get_all_descriptors(&achan->vc, &head);
+       spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&achan->vc, &head);
+}
+
+/**
+ * adm_control - DMA device control
+ * @chan: dma channel
+ * @cmd: control cmd
+ * @arg: cmd argument
+ *
+ * Perform DMA control command
+ */
+static int adm_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct adm_chan *achan = to_adm_chan(chan);
+       unsigned long flag;
+       int ret = 0;
+
+       switch (cmd) {
+       case DMA_SLAVE_CONFIG:
+               spin_lock_irqsave(&achan->vc.lock, flag);
+               ret = adm_slave_config(achan, (struct dma_slave_config *)arg);
+               spin_unlock_irqrestore(&achan->vc.lock, flag);
+               break;
+
+       case DMA_TERMINATE_ALL:
+               adm_terminate_all(achan);
+               break;
+
+       default:
+               ret = -ENXIO;
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: adm dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+       struct adm_device *adev = achan->adev;
+       struct adm_async_desc *async_desc;
+       struct adm_desc_hw *desc;
+       struct adm_cmd_ptr_list *cpl;
+
+       lockdep_assert_held(&achan->vc.lock);
+
+       if (!vd)
+               return;
+
+       list_del(&vd->node);
+
+       /* select the next queued descriptor as the current transaction */
+       async_desc = container_of(vd, struct adm_async_desc, vd);
+       achan->curr_txd = async_desc;
+
+       cpl = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+       desc = PTR_ALIGN(&cpl->desc[0], ADM_DESC_ALIGN);
+
+       if (!achan->initialized) {
+               /* enable interrupts */
+               writel(CH_CONF_IRQ_EN | CH_CONF_FLUSH_RSLT_EN |
+                       CH_CONF_FORCE_RSLT_EN | CH_CONF_PERM_MPU_CONF |
+                       CH_CONF_MPU_DISABLE,
+                       adev->regs + HI_CH_CONF(achan->id));
+
+               writel(CH_RSLT_CONF_IRQ_EN | CH_RSLT_CONF_FLUSH_EN,
+                       adev->regs + HI_CH_RSLT_CONF(achan->id, adev->ee));
+
+               if (achan->crci)
+                       writel(achan->blk_size, adev->regs +
+                               HI_CRCI_CTL(achan->crci, adev->ee));
+
+               achan->initialized = 1;
+       }
+
+       /* make sure IRQ enable doesn't get reordered */
+       wmb();
+
+       /* write next command list out to the CMD FIFO */
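+       /* (the hardware takes the command list address in 8-byte units) */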
+       writel(round_up(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+               adev->regs + HI_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+       struct adm_device *adev = data;
+       u32 srcs, i;
+       struct adm_async_desc *async_desc;
+       unsigned long flags;
+
+       srcs = readl_relaxed(adev->regs +
+                       HI_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+       for (i = 0; i < 16; i++) {
+               struct adm_chan *achan = &adev->channels[i];
+               u32 status, result;
+
+               if (srcs & BIT(i)) {
+                       status = readl_relaxed(adev->regs +
+                               HI_CH_STATUS_SD(i, adev->ee));
+
+                       /* if no result present, skip */
+                       if (!(status & CH_STATUS_VALID))
+                               continue;
+
+                       result = readl_relaxed(adev->regs +
+                               HI_CH_RSLT(i, adev->ee));
+
+                       /* no valid results, skip */
+                       if (!(result & CH_RSLT_VALID))
+                               continue;
+
+                       /* flag error if transaction was flushed or failed */
+                       if (result & (CH_RSLT_ERR | CH_RSLT_FLUSH))
+                               achan->error = 1;
+
+                       spin_lock_irqsave(&achan->vc.lock, flags);
+                       async_desc = achan->curr_txd;
+
+                       achan->curr_txd = NULL;
+
+                       if (async_desc) {
+                               vchan_cookie_complete(&async_desc->vd);
+
+                               /* kick off next DMA */
+                               adm_start_dma(achan);
+                       }
+
+                       spin_unlock_irqrestore(&achan->vc.lock, flags);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       struct dma_tx_state *txstate)
+{
+       struct adm_chan *achan = to_adm_chan(chan);
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
+       size_t residue = 0;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       spin_lock_irqsave(&achan->vc.lock, flags);
+
+       vd = vchan_find_desc(&achan->vc, cookie);
+       if (vd)
+               residue = container_of(vd, struct adm_async_desc, vd)->length;
+       else if (achan->curr_txd && achan->curr_txd->vd.tx.cookie == cookie)
+               residue = achan->curr_txd->length;
+
+       spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+       dma_set_residue(txstate, residue);
+
+       if (achan->error)
+               return DMA_ERROR;
+
+       return ret;
+}
+
+static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
+       struct of_dma *of)
+{
+       struct adm_device *adev = container_of(of->of_dma_data,
+                       struct adm_device, common);
+       struct adm_chan *achan;
+       struct dma_chan *chan;
+       unsigned int request;
+       unsigned int crci;
+
+       if (dma_spec->args_count != 2) {
+               dev_err(adev->dev, "incorrect number of dma arguments\n");
+               return NULL;
+       }
+
+       request = dma_spec->args[0];
+       if (request >= adev->num_channels)
+               return NULL;
+
+       crci = dma_spec->args[1];
+
+       chan = dma_get_slave_channel(&(adev->channels[request].vc.chan));
+
+       if (!chan)
+               return NULL;
+
+       achan = to_adm_chan(chan);
+       achan->crci = crci;
+
+       return chan;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+       struct adm_chan *achan = to_adm_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&achan->vc.lock, flags);
+
+       if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+               adm_start_dma(achan);
+       spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+       struct adm_async_desc *async_desc = container_of(vd,
+                       struct adm_async_desc, vd);
+
+       dma_free_writecombine(async_desc->adev->dev, async_desc->dma_len,
+               async_desc->cpl, async_desc->dma_addr);
+       kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+       u32 index)
+{
+       achan->id = index;
+       achan->adev = adev;
+
+       vchan_init(&achan->vc, &adev->common);
+       achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+       struct adm_device *adev;
+       struct resource *iores;
+       int ret;
+       u32 i;
+
+       adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+       if (!adev)
+               return -ENOMEM;
+
+       adev->dev = &pdev->dev;
+
+       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       adev->regs = devm_ioremap_resource(&pdev->dev, iores);
+       if (IS_ERR(adev->regs))
+               return PTR_ERR(adev->regs);
+
+       adev->irq = platform_get_irq(pdev, 0);
+       if (adev->irq < 0)
+               return adev->irq;
+
+       ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+       if (ret) {
+               dev_err(adev->dev, "Execution environment unspecified\n");
+               return ret;
+       }
+
+       adev->core_clk = devm_clk_get(adev->dev, "core");
+       if (IS_ERR(adev->core_clk))
+               return PTR_ERR(adev->core_clk);
+
+       ret = clk_prepare_enable(adev->core_clk);
+       if (ret) {
+               dev_err(adev->dev, "failed to prepare/enable core clock\n");
+               return ret;
+       }
+
+       adev->iface_clk = devm_clk_get(adev->dev, "iface");
+       if (IS_ERR(adev->iface_clk)) {
+               ret = PTR_ERR(adev->iface_clk);
+               goto err_disable_core_clk;
+       }
+
+       ret = clk_prepare_enable(adev->iface_clk);
+       if (ret) {
+               dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+               goto err_disable_core_clk;
+       }
+
+       adev->clk_reset = devm_reset_control_get(&pdev->dev, "clk");
+       if (IS_ERR(adev->clk_reset)) {
+               dev_err(adev->dev, "failed to get ADM0 reset\n");
+               ret = PTR_ERR(adev->clk_reset);
+               goto err_disable_clks;
+       }
+
+       adev->c0_reset = devm_reset_control_get(&pdev->dev, "c0");
+       if (IS_ERR(adev->c0_reset)) {
+               dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
+               ret = PTR_ERR(adev->c0_reset);
+               goto err_disable_clks;
+       }
+
+       adev->c1_reset = devm_reset_control_get(&pdev->dev, "c1");
+       if (IS_ERR(adev->c1_reset)) {
+               dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
+               ret = PTR_ERR(adev->c1_reset);
+               goto err_disable_clks;
+       }
+
+       adev->c2_reset = devm_reset_control_get(&pdev->dev, "c2");
+       if (IS_ERR(adev->c2_reset)) {
+               dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
+               ret = PTR_ERR(adev->c2_reset);
+               goto err_disable_clks;
+       }
+
+       reset_control_assert(adev->clk_reset);
+       reset_control_assert(adev->c0_reset);
+       reset_control_assert(adev->c1_reset);
+       reset_control_assert(adev->c2_reset);
+
+       reset_control_deassert(adev->clk_reset);
+       reset_control_deassert(adev->c0_reset);
+       reset_control_deassert(adev->c1_reset);
+       reset_control_deassert(adev->c2_reset);
+
+       adev->num_channels = 16;
+
+       adev->channels = devm_kcalloc(adev->dev, adev->num_channels,
+                               sizeof(*adev->channels), GFP_KERNEL);
+
+       if (!adev->channels) {
+               ret = -ENOMEM;
+               goto err_disable_clks;
+       }
+
+       /* allocate and initialize channels */
+       INIT_LIST_HEAD(&adev->common.channels);
+
+       for (i = 0; i < adev->num_channels; i++)
+               adm_channel_init(adev, &adev->channels[i], i);
+
+       /* reset CRCIs */
+       for (i = 0; i < 16; i++)
+               writel(CRCI_CTL_RST, adev->regs + HI_CRCI_CTL(i, adev->ee));
+
+       /* configure client interfaces */
+       writel(CI_RANGE_START(0x40) | CI_RANGE_END(0xb0) | CI_BURST_8_WORDS,
+               adev->regs + HI_CI_CONF(0));
+       writel(CI_RANGE_START(0x2a) | CI_RANGE_END(0x2c) | CI_BURST_8_WORDS,
+               adev->regs + HI_CI_CONF(1));
+       writel(CI_RANGE_START(0x12) | CI_RANGE_END(0x28) | CI_BURST_8_WORDS,
+               adev->regs + HI_CI_CONF(2));
+       writel(GP_CTL_LP_EN | GP_CTL_LP_CNT(0xf), adev->regs + HI_GP_CTL);
+
+       ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+                       0, "adm_dma", adev);
+       if (ret)
+               goto err_disable_clks;
+
+       platform_set_drvdata(pdev, adev);
+
+       adev->common.dev = adev->dev;
+       adev->common.dev->dma_parms = &adev->dma_parms;
+
+       /* set capabilities */
+       dma_cap_zero(adev->common.cap_mask);
+       dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+       dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+       /* initialize dmaengine apis */
+       adev->common.device_alloc_chan_resources = adm_alloc_chan;
+       adev->common.device_free_chan_resources = adm_free_chan;
+       adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+       adev->common.device_control = adm_control;
+       adev->common.device_issue_pending = adm_issue_pending;
+       adev->common.device_tx_status = adm_tx_status;
+
+       ret = dma_async_device_register(&adev->common);
+       if (ret) {
+               dev_err(adev->dev, "failed to register dma async device\n");
+               goto err_disable_clks;
+       }
+
+       ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
+                                       &adev->common);
+       if (ret)
+               goto err_unregister_dma;
+
+       return 0;
+
+err_unregister_dma:
+       dma_async_device_unregister(&adev->common);
+err_disable_clks:
+       clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+       clk_disable_unprepare(adev->core_clk);
+
+       return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+       struct adm_device *adev = platform_get_drvdata(pdev);
+       struct adm_chan *achan;
+       u32 i;
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&adev->common);
+
+       devm_free_irq(adev->dev, adev->irq, adev);
+
+       for (i = 0; i < adev->num_channels; i++) {
+               achan = &adev->channels[i];
+               writel(CH_CONF_FLUSH_RSLT_EN,
+                       adev->regs + HI_CH_CONF(achan->id));
+               writel(CH_RSLT_CONF_FLUSH_EN,
+                       adev->regs + HI_CH_RSLT_CONF(achan->id, adev->ee));
+
+               adm_terminate_all(&adev->channels[i]);
+       }
+
+       clk_disable_unprepare(adev->core_clk);
+       clk_disable_unprepare(adev->iface_clk);
+
+       return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+       { .compatible = "qcom,adm", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
+
+static struct platform_driver adm_dma_driver = {
+       .probe = adm_dma_probe,
+       .remove = adm_dma_remove,
+       .driver = {
+               .name = "adm-dma-engine",
+               .owner = THIS_MODULE,
+               .of_match_table = adm_of_match,
+       },
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");