target/linux/leon/patches/015-dma_ops.patch
From c6d8f92cfd7f4f19eb3b16545b3b68c561978fe8 Mon Sep 17 00:00:00 2001
From: Kristoffer Glembo <kristoffer@gaisler.com>
Date: Mon, 7 Jun 2010 14:00:30 +0200
Subject: [PATCH] sparc32: Added LEON dma_ops.

Added leon3_dma_ops and mmu_inval_dma_area.
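
On LEON, DMA is cache coherent only when the bus snooping unit is
enabled, so mmu_inval_dma_area() becomes a real function that flushes
the whole data cache when snooping is disabled. The LEON path in
sbus_alloc_coherent() maps the allocated pages with sparc_mapiorange()
and uses the physical address as the DMA address instead of going
through mmu_map_dma_area(), the pci32 streaming operations are reused
for LEON, and leon3_dma_ops is installed as the default dma_ops.
Because LEON can only flush the entire D-cache, the leon3 unmap
callbacks invalidate everything rather than a single mapping. A
dma_set_mask() implementation is also added.

A minimal sketch of how a driver ends up in these ops through the
generic DMA API (example_rx() below is hypothetical and not part of
this patch):

	#include <linux/dma-mapping.h>

	static int example_rx(struct device *dev, void *buf, size_t len)
	{
		/* Streaming map: dispatches to dma_ops->map_page(). */
		dma_addr_t ba = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, ba))
			return -ENOMEM;

		/* ... device DMAs into buf ... */

		/* Dispatches to leon3_unmap_page(), which flushes the
		 * whole D-cache when snooping is disabled. */
		dma_unmap_single(dev, ba, len, DMA_FROM_DEVICE);
		return 0;
	}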
---
 arch/sparc/kernel/ioport.c |  139 +++++++++++++++++++++++++++++++------------
 1 files changed, 100 insertions(+), 39 deletions(-)

--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,15 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
+#ifndef CONFIG_SPARC_LEON
 #define mmu_inval_dma_area(p, l)       /* Anton pulled it out for 2.4.0-xx */
+#else
+static inline void mmu_inval_dma_area(unsigned long va, unsigned long len)
+{
+       if (!sparc_leon3_snooping_enabled()) {
+               leon_flush_dcache_all();
+       }
+}
 #endif
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +259,7 @@ static void *sbus_alloc_coherent(struct
                                 dma_addr_t *dma_addrp, gfp_t gfp)
 {
        struct platform_device *op = to_platform_device(dev);
-       unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+       unsigned long len_total = PAGE_ALIGN(len);
        unsigned long va;
        struct resource *res;
        int order;
@@ -287,15 +292,19 @@ static void *sbus_alloc_coherent(struct
         * XXX That's where sdev would be used. Currently we load
         * all iommu tables with the same translations.
         */
-       if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
-               goto err_noiommu;
-
-       res->name = op->dev.of_node->name;
+#ifdef CONFIG_SPARC_LEON
+       sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+       *dma_addrp = virt_to_phys(va);
+#else
+       if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) {
+               release_resource(res);
+               goto err_nova;
+       }
+#endif
+       res->name = op->node->name;
 
        return (void *)(unsigned long)res->start;
 
-err_noiommu:
-       release_resource(res);
 err_nova:
        free_pages(va, order);
 err_nomem:
@@ -321,7 +330,7 @@ static void sbus_free_coherent(struct de
                return;
        }
 
-       n = (n + PAGE_SIZE-1) & PAGE_MASK;
+       n = PAGE_ALIGN(n);
        if ((res->end-res->start)+1 != n) {
                printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
                    (long)((res->end-res->start)+1), n);
@@ -333,7 +342,12 @@ static void sbus_free_coherent(struct de
 
        /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
        pgv = virt_to_page(p);
-       mmu_unmap_dma_area(dev, ba, n);
+
+#ifdef CONFIG_SPARC_LEON
+       sparc_unmapiorange((unsigned long)p, n);
+#else
+       mmu_unmap_dma_area(dev, ba, n);
+#endif
 
        __free_pages(pgv, get_order(n));
 }
@@ -408,9 +422,6 @@ struct dma_map_ops sbus_dma_ops = {
        .sync_sg_for_device     = sbus_sync_sg_for_device,
 };
 
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static int __init sparc_register_ioport(void)
 {
        register_proc_sparc_ioport();
@@ -422,7 +433,7 @@ arch_initcall(sparc_register_ioport);
 
 #endif /* CONFIG_SBUS */
 
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
 
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,7 +441,7 @@ arch_initcall(sparc_register_ioport);
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
                                  dma_addr_t *pba, gfp_t gfp)
 {
-       unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+       unsigned long len_total = PAGE_ALIGN(len);
        unsigned long va;
        struct resource *res;
        int order;
@@ -463,10 +474,6 @@ static void *pci32_alloc_coherent(struct
                return NULL;
        }
        mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
-  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
        sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
        *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
@@ -498,7 +505,7 @@ static void pci32_free_coherent(struct d
                return;
        }
 
-       n = (n + PAGE_SIZE-1) & PAGE_MASK;
+       n = PAGE_ALIGN(n);
        if ((res->end-res->start)+1 != n) {
                printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
                    (long)((res->end-res->start)+1), (long)n);
@@ -515,6 +522,14 @@ static void pci32_free_coherent(struct d
        free_pages(pgp, get_order(n));
 }
 
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+                            enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       if (dir != PCI_DMA_TODEVICE) {
+               mmu_inval_dma_area((unsigned long)phys_to_virt(ba), PAGE_ALIGN(size));
+       }
+}
+
 /*
  * Same as pci_map_single, but with pages.
  */
@@ -551,8 +566,7 @@ static int pci32_map_sg(struct device *d
 
        /* IIep is write-through, not flushing. */
        for_each_sg(sgl, sg, nents, n) {
-               BUG_ON(page_address(sg_page(sg)) == NULL);
-               sg->dma_address = virt_to_phys(sg_virt(sg));
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nents;
@@ -571,10 +585,7 @@ static void pci32_unmap_sg(struct device
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(
-                           (unsigned long) page_address(sg_page(sg)),
-                           (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+                       mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
@@ -594,7 +605,7 @@ static void pci32_sync_single_for_cpu(st
 {
        if (dir != PCI_DMA_TODEVICE) {
                mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-                   (size + PAGE_SIZE-1) & PAGE_MASK);
+                                  PAGE_ALIGN(size));
        }
 }
 
@@ -621,10 +632,7 @@ static void pci32_sync_sg_for_cpu(struct
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(
-                           (unsigned long) page_address(sg_page(sg)),
-                           (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+                       mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
@@ -637,18 +645,38 @@ static void pci32_sync_sg_for_device(str
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(
-                           (unsigned long) page_address(sg_page(sg)),
-                           (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+                       mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
 
+/* LEON3 unmapping functions.
+ *
+ * We can only invalidate the whole cache, so unmap_page and unmap_sg do the same thing.
+ */
+static void leon3_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+                            enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       if (dir != PCI_DMA_TODEVICE) {
+               mmu_inval_dma_area(0, 0);
+       }
+}
+
+static void leon3_unmap_sg(struct device *dev, struct scatterlist *sgl,
+                          int nents, enum dma_data_direction dir,
+                          struct dma_attrs *attrs)
+{
+
+       if (dir != PCI_DMA_TODEVICE) {
+               mmu_inval_dma_area(0, 0);
+       }
+}
+
 struct dma_map_ops pci32_dma_ops = {
        .alloc_coherent         = pci32_alloc_coherent,
        .free_coherent          = pci32_free_coherent,
        .map_page               = pci32_map_page,
+       .unmap_page             = pci32_unmap_page,
        .map_sg                 = pci32_map_sg,
        .unmap_sg               = pci32_unmap_sg,
        .sync_single_for_cpu    = pci32_sync_single_for_cpu,
@@ -658,7 +686,30 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI */
+struct dma_map_ops leon3_dma_ops = {
+       .alloc_coherent         = sbus_alloc_coherent,
+       .free_coherent          = sbus_free_coherent,
+       .map_page               = pci32_map_page,
+       .unmap_page             = leon3_unmap_page,
+       .map_sg                 = pci32_map_sg,
+       .unmap_sg               = leon3_unmap_sg,
+       .sync_single_for_cpu    = pci32_sync_single_for_cpu,
+       .sync_single_for_device = pci32_sync_single_for_device,
+       .sync_sg_for_cpu        = pci32_sync_sg_for_cpu,
+       .sync_sg_for_device     = pci32_sync_sg_for_device,
+};
+
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &leon3_dma_ops;
+#else
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+#ifdef CONFIG_SBUS
+EXPORT_SYMBOL(dma_ops);
+#endif
 
 /*
  * Return whether the given PCI device DMA address mask can be
@@ -676,6 +727,16 @@ int dma_supported(struct device *dev, u6
 }
 EXPORT_SYMBOL(dma_supported);
 
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+       if (dev->bus == &pci_bus_type)
+               return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+       return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #ifdef CONFIG_PROC_FS
 
 static int sparc_io_proc_show(struct seq_file *m, void *v)
@@ -717,7 +778,7 @@ static const struct file_operations spar
 static struct resource *_sparc_find_resource(struct resource *root,
                                             unsigned long hit)
 {
-        struct resource *tmp;
+       struct resource *tmp;
 
        for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
                if (tmp->start <= hit && tmp->end >= hit)