target/linux/mvebu/patches-3.18/025-ARM-mvebu-Use-arm_coherent_dma_ops.patch
From 1bd4d8a6de5cda605e8b99fbf081be2ea2959380 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Date: Fri, 16 Jan 2015 17:11:29 +0100
Subject: ARM: mvebu: use arm_coherent_dma_ops and re-enable hardware I/O
 coherency

Now that we have enabled automatic I/O synchronization barriers, we no
longer need any explicit barriers. We can therefore simplify
arch/arm/mach-mvebu/coherency.c by using the existing
arm_coherent_dma_ops instead of our custom mvebu_hwcc_dma_ops, and
re-enable hardware I/O coherency support.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
[Andrew Lunn <andrew@lunn.ch>: Remove forgotten comment]
Signed-off-by: Andrew Lunn <andrew@lunn.ch>

--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -33,6 +33,7 @@
 #include <asm/smp_plat.h>
 #include <asm/cacheflush.h>
 #include <asm/mach/map.h>
+#include <asm/dma-mapping.h>
 #include "armada-370-xp.h"
 #include "coherency.h"
 #include "mvebu-soc-id.h"
@@ -223,59 +224,6 @@ static void __init armada_375_coherency_
        coherency_wa_enabled = true;
 }
 
-static inline void mvebu_hwcc_sync_io_barrier(void)
-{
-       if (coherency_wa_enabled) {
-               mvebu_hwcc_armada375_sync_io_barrier_wa();
-               return;
-       }
-
-       writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
-       while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
-}
-
-static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
-                                 unsigned long offset, size_t size,
-                                 enum dma_data_direction dir,
-                                 struct dma_attrs *attrs)
-{
-       if (dir != DMA_TO_DEVICE)
-               mvebu_hwcc_sync_io_barrier();
-       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-
-static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-                             size_t size, enum dma_data_direction dir,
-                             struct dma_attrs *attrs)
-{
-       if (dir != DMA_TO_DEVICE)
-               mvebu_hwcc_sync_io_barrier();
-}
-
-static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
-                       size_t size, enum dma_data_direction dir)
-{
-       if (dir != DMA_TO_DEVICE)
-               mvebu_hwcc_sync_io_barrier();
-}
-
-static struct dma_map_ops mvebu_hwcc_dma_ops = {
-       .alloc                  = arm_dma_alloc,
-       .free                   = arm_dma_free,
-       .mmap                   = arm_dma_mmap,
-       .map_page               = mvebu_hwcc_dma_map_page,
-       .unmap_page             = mvebu_hwcc_dma_unmap_page,
-       .get_sgtable            = arm_dma_get_sgtable,
-       .map_sg                 = arm_dma_map_sg,
-       .unmap_sg               = arm_dma_unmap_sg,
-       .sync_single_for_cpu    = mvebu_hwcc_dma_sync,
-       .sync_single_for_device = mvebu_hwcc_dma_sync,
-       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
-       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
-       .set_dma_mask           = arm_dma_set_mask,
-};
-
 static int mvebu_hwcc_notifier(struct notifier_block *nb,
                               unsigned long event, void *__dev)
 {
@@ -283,7 +231,7 @@ static int mvebu_hwcc_notifier(struct no
 
        if (event != BUS_NOTIFY_ADD_DEVICE)
                return NOTIFY_DONE;
-       set_dma_ops(dev, &mvebu_hwcc_dma_ops);
+       set_dma_ops(dev, &arm_coherent_dma_ops);
 
        return NOTIFY_OK;
 }
@@ -399,14 +347,9 @@ static int coherency_type(void)
        return type;
 }
 
-/*
- * As a precaution, we currently completely disable hardware I/O
- * coherency, until enough testing is done with automatic I/O
- * synchronization barriers to validate that it is a proper solution.
- */
 int coherency_available(void)
 {
-       return false;
+       return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
 }
 
 int __init coherency_init(void)