1 From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
2 From: Felix Fietkau <nbd@openwrt.org>
3 Date: Mon, 12 Aug 2013 12:50:22 +0200
4 Subject: [PATCH] MIPS: partially inline dma ops
6 Several DMA ops are no-op on many platforms, and the indirection through
7 the mips_dma_map_ops function table is causing the compiler to emit unnecessary code.
10 Inlining visibly improves network performance in my tests (on a 24Kc
11 based system), and also slightly reduces code size of a few drivers.
13 Signed-off-by: Felix Fietkau <nbd@openwrt.org>
15 arch/mips/Kconfig | 4 +
16 arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
17 arch/mips/mm/dma-default.c | 163 ++--------------
18 3 files changed, 373 insertions(+), 154 deletions(-)
20 --- a/arch/mips/Kconfig
21 +++ b/arch/mips/Kconfig
22 @@ -1618,6 +1618,7 @@ config CPU_CAVIUM_OCTEON
23 select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
24 select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
25 select MIPS_L1_CACHE_SHIFT_7
26 + select SYS_HAS_DMA_OPS
28 The Cavium Octeon processor is a highly integrated chip containing
29 many ethernet hardware widgets for networking tasks. The processor
30 @@ -1913,6 +1914,9 @@ config MIPS_MALTA_PM
34 +config SYS_HAS_DMA_OPS
38 # CPU may reorder R->R, R->W, W->R, W->W
39 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
40 --- a/arch/mips/include/asm/dma-mapping.h
41 +++ b/arch/mips/include/asm/dma-mapping.h
43 #ifndef _ASM_DMA_MAPPING_H
44 #define _ASM_DMA_MAPPING_H
46 +#include <linux/kmemcheck.h>
47 +#include <linux/bug.h>
48 #include <linux/scatterlist.h>
49 +#include <linux/dma-debug.h>
50 +#include <linux/dma-attrs.h>
52 #include <asm/dma-coherence.h>
53 #include <asm/cache.h>
54 +#include <asm/cpu-type.h>
55 +#include <asm-generic/dma-coherent.h>
57 #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
58 #include <dma-coherence.h>
61 extern struct dma_map_ops *mips_dma_map_ops;
63 +void __dma_sync(struct page *page, unsigned long offset, size_t size,
64 + enum dma_data_direction direction);
65 +void *mips_dma_alloc_coherent(struct device *dev, size_t size,
66 + dma_addr_t *dma_handle, gfp_t gfp,
67 + struct dma_attrs *attrs);
68 +void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
69 + dma_addr_t dma_handle, struct dma_attrs *attrs);
71 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
73 +#ifdef CONFIG_SYS_HAS_DMA_OPS
74 if (dev && dev->archdata.dma_ops)
75 return dev->archdata.dma_ops;
77 return mips_dma_map_ops;
84 + * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
85 + * speculatively fill random cachelines with stale data at any time,
86 + * requiring an extra flush post-DMA.
88 + * Warning on the terminology - Linux calls an uncached area coherent;
89 + * MIPS terminology calls memory areas with hardware maintained coherency
92 + * Note that the R14000 and R16000 should also be checked for in this
93 + * condition. However this function is only called on non-I/O-coherent
94 + * systems and only the R10000 and R12000 are used in such systems, the
95 + * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
97 +static inline int cpu_needs_post_dma_flush(struct device *dev)
99 + return !plat_device_is_coherent(dev) &&
100 + (boot_cpu_type() == CPU_R10000 ||
101 + boot_cpu_type() == CPU_R12000 ||
102 + boot_cpu_type() == CPU_BMIPS5000);
105 +static inline struct page *dma_addr_to_page(struct device *dev,
106 + dma_addr_t dma_addr)
108 + return pfn_to_page(
109 + plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
112 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
113 @@ -29,9 +77,399 @@ static inline bool dma_capable(struct de
115 static inline void dma_mark_clean(void *addr, size_t size) {}
117 -#include <asm-generic/dma-mapping-common.h>
118 +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
120 + enum dma_data_direction dir,
121 + struct dma_attrs *attrs)
123 + struct dma_map_ops *ops = get_dma_ops(dev);
124 + unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
125 + struct page *page = virt_to_page(ptr);
128 + kmemcheck_mark_initialized(ptr, size);
129 + BUG_ON(!valid_dma_direction(dir));
131 + addr = ops->map_page(dev, page, offset, size, dir, attrs);
133 + if (!plat_device_is_coherent(dev))
134 + __dma_sync(page, offset, size, dir);
136 + addr = plat_map_dma_mem_page(dev, page) + offset;
138 + debug_dma_map_page(dev, page, offset, size, dir, addr, true);
142 +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
144 + enum dma_data_direction dir,
145 + struct dma_attrs *attrs)
147 + struct dma_map_ops *ops = get_dma_ops(dev);
149 + BUG_ON(!valid_dma_direction(dir));
151 + ops->unmap_page(dev, addr, size, dir, attrs);
153 + if (cpu_needs_post_dma_flush(dev))
154 + __dma_sync(dma_addr_to_page(dev, addr),
155 + addr & ~PAGE_MASK, size, dir);
156 + plat_post_dma_flush(dev);
157 + plat_unmap_dma_mem(dev, addr, size, dir);
159 + debug_dma_unmap_page(dev, addr, size, dir, true);
163 + * dma_maps_sg_attrs returns 0 on error and > 0 on success.
164 + * It should never return a value < 0.
166 +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
167 + int nents, enum dma_data_direction dir,
168 + struct dma_attrs *attrs)
170 + struct dma_map_ops *ops = get_dma_ops(dev);
172 + struct scatterlist *s;
174 + for_each_sg(sg, s, nents, i)
175 + kmemcheck_mark_initialized(sg_virt(s), s->length);
176 + BUG_ON(!valid_dma_direction(dir));
178 + ents = ops->map_sg(dev, sg, nents, dir, attrs);
180 + for_each_sg(sg, s, nents, i) {
181 + struct page *page = sg_page(s);
183 + if (!plat_device_is_coherent(dev))
184 + __dma_sync(page, s->offset, s->length, dir);
185 +#ifdef CONFIG_NEED_SG_DMA_LENGTH
186 + s->dma_length = s->length;
189 + plat_map_dma_mem_page(dev, page) + s->offset;
194 + debug_dma_map_sg(dev, sg, nents, ents, dir);
199 +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
200 + int nents, enum dma_data_direction dir,
201 + struct dma_attrs *attrs)
203 + struct dma_map_ops *ops = get_dma_ops(dev);
204 + struct scatterlist *s;
207 + BUG_ON(!valid_dma_direction(dir));
208 + debug_dma_unmap_sg(dev, sg, nents, dir);
210 + ops->unmap_sg(dev, sg, nents, dir, attrs);
213 + for_each_sg(sg, s, nents, i) {
214 + if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
215 + __dma_sync(sg_page(s), s->offset, s->length, dir);
216 + plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
220 +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
221 + size_t offset, size_t size,
222 + enum dma_data_direction dir)
224 + struct dma_map_ops *ops = get_dma_ops(dev);
227 + kmemcheck_mark_initialized(page_address(page) + offset, size);
228 + BUG_ON(!valid_dma_direction(dir));
230 + addr = ops->map_page(dev, page, offset, size, dir, NULL);
232 + if (!plat_device_is_coherent(dev))
233 + __dma_sync(page, offset, size, dir);
235 + addr = plat_map_dma_mem_page(dev, page) + offset;
237 + debug_dma_map_page(dev, page, offset, size, dir, addr, false);
242 +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
243 + size_t size, enum dma_data_direction dir)
245 + struct dma_map_ops *ops = get_dma_ops(dev);
247 + BUG_ON(!valid_dma_direction(dir));
249 + ops->unmap_page(dev, addr, size, dir, NULL);
251 + if (cpu_needs_post_dma_flush(dev))
252 + __dma_sync(dma_addr_to_page(dev, addr),
253 + addr & ~PAGE_MASK, size, dir);
254 + plat_post_dma_flush(dev);
255 + plat_unmap_dma_mem(dev, addr, size, dir);
257 + debug_dma_unmap_page(dev, addr, size, dir, false);
260 +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
262 + enum dma_data_direction dir)
264 + struct dma_map_ops *ops = get_dma_ops(dev);
266 + BUG_ON(!valid_dma_direction(dir));
268 + ops->sync_single_for_cpu(dev, addr, size, dir);
270 + if (cpu_needs_post_dma_flush(dev))
271 + __dma_sync(dma_addr_to_page(dev, addr),
272 + addr & ~PAGE_MASK, size, dir);
273 + plat_post_dma_flush(dev);
275 + debug_dma_sync_single_for_cpu(dev, addr, size, dir);
278 +static inline void dma_sync_single_for_device(struct device *dev,
279 + dma_addr_t addr, size_t size,
280 + enum dma_data_direction dir)
282 + struct dma_map_ops *ops = get_dma_ops(dev);
284 + BUG_ON(!valid_dma_direction(dir));
286 + ops->sync_single_for_device(dev, addr, size, dir);
287 + else if (!plat_device_is_coherent(dev))
288 + __dma_sync(dma_addr_to_page(dev, addr),
289 + addr & ~PAGE_MASK, size, dir);
290 + debug_dma_sync_single_for_device(dev, addr, size, dir);
293 +static inline void dma_sync_single_range_for_cpu(struct device *dev,
295 + unsigned long offset,
297 + enum dma_data_direction dir)
299 + const struct dma_map_ops *ops = get_dma_ops(dev);
301 + BUG_ON(!valid_dma_direction(dir));
303 + ops->sync_single_for_cpu(dev, addr + offset, size, dir);
305 + if (cpu_needs_post_dma_flush(dev))
306 + __dma_sync(dma_addr_to_page(dev, addr + offset),
307 + (addr + offset) & ~PAGE_MASK, size, dir);
308 + plat_post_dma_flush(dev);
311 + debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
314 +static inline void dma_sync_single_range_for_device(struct device *dev,
316 + unsigned long offset,
318 + enum dma_data_direction dir)
320 + const struct dma_map_ops *ops = get_dma_ops(dev);
322 + BUG_ON(!valid_dma_direction(dir));
324 + ops->sync_single_for_device(dev, addr + offset, size, dir);
325 + else if (!plat_device_is_coherent(dev))
326 + __dma_sync(dma_addr_to_page(dev, addr + offset),
327 + (addr + offset) & ~PAGE_MASK, size, dir);
328 + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
332 +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
333 + int nelems, enum dma_data_direction dir)
335 + struct dma_map_ops *ops = get_dma_ops(dev);
336 + struct scatterlist *s;
339 + BUG_ON(!valid_dma_direction(dir));
341 + ops->sync_sg_for_cpu(dev, sg, nelems, dir);
342 + } else if (cpu_needs_post_dma_flush(dev)) {
343 + for_each_sg(sg, s, nelems, i)
344 + __dma_sync(sg_page(s), s->offset, s->length, dir);
346 + plat_post_dma_flush(dev);
347 + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
351 +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
352 + int nelems, enum dma_data_direction dir)
354 + struct dma_map_ops *ops = get_dma_ops(dev);
355 + struct scatterlist *s;
358 + BUG_ON(!valid_dma_direction(dir));
360 + ops->sync_sg_for_device(dev, sg, nelems, dir);
361 + } else if (!plat_device_is_coherent(dev)) {
362 + for_each_sg(sg, s, nelems, i)
363 + __dma_sync(sg_page(s), s->offset, s->length, dir);
365 + debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
369 +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
370 +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
371 +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
372 +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
374 +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
375 + void *cpu_addr, dma_addr_t dma_addr, size_t size);
378 + * dma_mmap_attrs - map a coherent DMA allocation into user space
379 + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
380 + * @vma: vm_area_struct describing requested user mapping
381 + * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
382 + * @handle: device-view address returned from dma_alloc_attrs
383 + * @size: size of memory originally requested in dma_alloc_attrs
384 + * @attrs: attributes of mapping properties requested in dma_alloc_attrs
386 + * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
387 + * into user space. The coherent DMA buffer must not be freed by the
388 + * driver until the user space mapping has been released.
391 +dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
392 + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
394 + struct dma_map_ops *ops = get_dma_ops(dev);
396 + if (ops && ops->mmap)
397 + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
398 + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
401 +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
404 +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
405 + void *cpu_addr, dma_addr_t dma_addr, size_t size);
408 +dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
409 + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
411 + struct dma_map_ops *ops = get_dma_ops(dev);
413 + if (ops && ops->get_sgtable)
414 + return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
416 + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
419 +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
421 +static inline int dma_supported(struct device *dev, u64 mask)
423 + struct dma_map_ops *ops = get_dma_ops(dev);
425 + return ops->dma_supported(dev, mask);
426 + return plat_dma_supported(dev, mask);
429 +static inline int dma_mapping_error(struct device *dev, u64 mask)
431 + struct dma_map_ops *ops = get_dma_ops(dev);
433 + debug_dma_mapping_error(dev, mask);
435 + return ops->mapping_error(dev, mask);
440 +dma_set_mask(struct device *dev, u64 mask)
442 + struct dma_map_ops *ops = get_dma_ops(dev);
444 + if(!dev->dma_mask || !dma_supported(dev, mask))
447 + if (ops && ops->set_dma_mask)
448 + return ops->set_dma_mask(dev, mask);
450 + *dev->dma_mask = mask;
455 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
456 enum dma_data_direction direction);
458 +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
460 +static inline void *dma_alloc_attrs(struct device *dev, size_t size,
461 + dma_addr_t *dma_handle, gfp_t gfp,
462 + struct dma_attrs *attrs)
465 + struct dma_map_ops *ops = get_dma_ops(dev);
468 + ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
470 + ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
473 + debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
478 +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
480 +static inline void dma_free_attrs(struct device *dev, size_t size,
481 + void *vaddr, dma_addr_t dma_handle,
482 + struct dma_attrs *attrs)
484 + struct dma_map_ops *ops = get_dma_ops(dev);
487 + ops->free(dev, size, vaddr, dma_handle, attrs);
489 + mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
491 + debug_dma_free_coherent(dev, size, vaddr, dma_handle);
494 +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
495 + dma_addr_t *dma_handle, gfp_t gfp)
497 + DEFINE_DMA_ATTRS(attrs);
499 + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
500 + return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
503 +static inline void dma_free_noncoherent(struct device *dev, size_t size,
504 + void *cpu_addr, dma_addr_t dma_handle)
506 + DEFINE_DMA_ATTRS(attrs);
508 + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
509 + dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
513 #endif /* _ASM_DMA_MAPPING_H */
514 --- a/arch/mips/mm/dma-default.c
515 +++ b/arch/mips/mm/dma-default.c
516 @@ -46,35 +46,6 @@ static int __init setnocoherentio(char *
517 early_param("nocoherentio", setnocoherentio);
520 -static inline struct page *dma_addr_to_page(struct device *dev,
521 - dma_addr_t dma_addr)
523 - return pfn_to_page(
524 - plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
528 - * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
529 - * speculatively fill random cachelines with stale data at any time,
530 - * requiring an extra flush post-DMA.
532 - * Warning on the terminology - Linux calls an uncached area coherent;
533 - * MIPS terminology calls memory areas with hardware maintained coherency
536 - * Note that the R14000 and R16000 should also be checked for in this
537 - * condition. However this function is only called on non-I/O-coherent
538 - * systems and only the R10000 and R12000 are used in such systems, the
539 - * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
541 -static inline int cpu_needs_post_dma_flush(struct device *dev)
543 - return !plat_device_is_coherent(dev) &&
544 - (boot_cpu_type() == CPU_R10000 ||
545 - boot_cpu_type() == CPU_R12000 ||
546 - boot_cpu_type() == CPU_BMIPS5000);
549 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
552 @@ -129,7 +100,7 @@ static void *mips_dma_alloc_noncoherent(
556 -static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
557 +void *mips_dma_alloc_coherent(struct device *dev, size_t size,
558 dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
561 @@ -165,6 +136,7 @@ static void *mips_dma_alloc_coherent(str
565 +EXPORT_SYMBOL(mips_dma_alloc_coherent);
568 static void mips_dma_free_noncoherent(struct device *dev, size_t size,
569 @@ -174,7 +146,7 @@ static void mips_dma_free_noncoherent(st
570 free_pages((unsigned long) vaddr, get_order(size));
573 -static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
574 +void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
575 dma_addr_t dma_handle, struct dma_attrs *attrs)
577 unsigned long addr = (unsigned long) vaddr;
578 @@ -196,40 +168,7 @@ static void mips_dma_free_coherent(struc
579 if (!dma_release_from_contiguous(dev, page, count))
580 __free_pages(page, get_order(size));
583 -static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
584 - void *cpu_addr, dma_addr_t dma_addr, size_t size,
585 - struct dma_attrs *attrs)
587 - unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
588 - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
589 - unsigned long addr = (unsigned long)cpu_addr;
590 - unsigned long off = vma->vm_pgoff;
594 - if (!plat_device_is_coherent(dev) && !hw_coherentio)
595 - addr = CAC_ADDR(addr);
597 - pfn = page_to_pfn(virt_to_page((void *)addr));
599 - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
600 - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
602 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
604 - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
607 - if (off < count && user_count <= (count - off)) {
608 - ret = remap_pfn_range(vma, vma->vm_start,
610 - user_count << PAGE_SHIFT,
611 - vma->vm_page_prot);
616 +EXPORT_SYMBOL(mips_dma_free_coherent);
618 static inline void __dma_sync_virtual(void *addr, size_t size,
619 enum dma_data_direction direction)
620 @@ -258,7 +197,7 @@ static inline void __dma_sync_virtual(vo
621 * If highmem is not configured then the bulk of this loop gets
624 -static inline void __dma_sync(struct page *page,
625 +void __dma_sync(struct page *page,
626 unsigned long offset, size_t size, enum dma_data_direction direction)
629 @@ -288,120 +227,7 @@ static inline void __dma_sync(struct pag
634 -static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
635 - size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
637 - if (cpu_needs_post_dma_flush(dev))
638 - __dma_sync(dma_addr_to_page(dev, dma_addr),
639 - dma_addr & ~PAGE_MASK, size, direction);
640 - plat_post_dma_flush(dev);
641 - plat_unmap_dma_mem(dev, dma_addr, size, direction);
644 -static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
645 - int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
648 - struct scatterlist *sg;
650 - for_each_sg(sglist, sg, nents, i) {
651 - if (!plat_device_is_coherent(dev))
652 - __dma_sync(sg_page(sg), sg->offset, sg->length,
654 -#ifdef CONFIG_NEED_SG_DMA_LENGTH
655 - sg->dma_length = sg->length;
657 - sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
664 -static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
665 - unsigned long offset, size_t size, enum dma_data_direction direction,
666 - struct dma_attrs *attrs)
668 - if (!plat_device_is_coherent(dev))
669 - __dma_sync(page, offset, size, direction);
671 - return plat_map_dma_mem_page(dev, page) + offset;
674 -static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
675 - int nhwentries, enum dma_data_direction direction,
676 - struct dma_attrs *attrs)
679 - struct scatterlist *sg;
681 - for_each_sg(sglist, sg, nhwentries, i) {
682 - if (!plat_device_is_coherent(dev) &&
683 - direction != DMA_TO_DEVICE)
684 - __dma_sync(sg_page(sg), sg->offset, sg->length,
686 - plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
690 -static void mips_dma_sync_single_for_cpu(struct device *dev,
691 - dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
693 - if (cpu_needs_post_dma_flush(dev))
694 - __dma_sync(dma_addr_to_page(dev, dma_handle),
695 - dma_handle & ~PAGE_MASK, size, direction);
696 - plat_post_dma_flush(dev);
699 -static void mips_dma_sync_single_for_device(struct device *dev,
700 - dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
702 - if (!plat_device_is_coherent(dev))
703 - __dma_sync(dma_addr_to_page(dev, dma_handle),
704 - dma_handle & ~PAGE_MASK, size, direction);
707 -static void mips_dma_sync_sg_for_cpu(struct device *dev,
708 - struct scatterlist *sglist, int nelems,
709 - enum dma_data_direction direction)
712 - struct scatterlist *sg;
714 - if (cpu_needs_post_dma_flush(dev)) {
715 - for_each_sg(sglist, sg, nelems, i) {
716 - __dma_sync(sg_page(sg), sg->offset, sg->length,
720 - plat_post_dma_flush(dev);
723 -static void mips_dma_sync_sg_for_device(struct device *dev,
724 - struct scatterlist *sglist, int nelems,
725 - enum dma_data_direction direction)
728 - struct scatterlist *sg;
730 - if (!plat_device_is_coherent(dev)) {
731 - for_each_sg(sglist, sg, nelems, i) {
732 - __dma_sync(sg_page(sg), sg->offset, sg->length,
738 -int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
743 -int mips_dma_supported(struct device *dev, u64 mask)
745 - return plat_dma_supported(dev, mask);
747 +EXPORT_SYMBOL(__dma_sync);
749 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
750 enum dma_data_direction direction)
751 @@ -414,24 +240,10 @@ void dma_cache_sync(struct device *dev,
753 EXPORT_SYMBOL(dma_cache_sync);
755 -static struct dma_map_ops mips_default_dma_map_ops = {
756 - .alloc = mips_dma_alloc_coherent,
757 - .free = mips_dma_free_coherent,
758 - .mmap = mips_dma_mmap,
759 - .map_page = mips_dma_map_page,
760 - .unmap_page = mips_dma_unmap_page,
761 - .map_sg = mips_dma_map_sg,
762 - .unmap_sg = mips_dma_unmap_sg,
763 - .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
764 - .sync_single_for_device = mips_dma_sync_single_for_device,
765 - .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
766 - .sync_sg_for_device = mips_dma_sync_sg_for_device,
767 - .mapping_error = mips_dma_mapping_error,
768 - .dma_supported = mips_dma_supported
771 -struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
772 +#ifdef CONFIG_SYS_HAS_DMA_OPS
773 +struct dma_map_ops *mips_dma_map_ops = NULL;
774 EXPORT_SYMBOL(mips_dma_map_ops);
777 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)