ixp4xx: remove linux 3.10 support
[openwrt.git] / target / linux / brcm2708 / patches-3.10 / 0041-Add-Simon-Hall-s-dma-helper-module-useful-in-future-.patch
1 From ed83da894caf28e267eab3a01ef037a7198391a1 Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Mon, 19 Nov 2012 18:27:05 +0000
4 Subject: [PATCH 041/196] Add Simon Hall's dma helper module, useful in future
5  for X acceleration
6
7 ---
8  arch/arm/mach-bcm2708/Kconfig                   |   8 +
9  arch/arm/mach-bcm2708/Makefile                  |   3 +
10  arch/arm/mach-bcm2708/dmaer.c                   | 887 ++++++++++++++++++++++++
11  arch/arm/mach-bcm2708/include/mach/vc_support.h |  69 ++
12  arch/arm/mach-bcm2708/vc_support.c              | 319 +++++++++
13  5 files changed, 1286 insertions(+)
14  create mode 100755 arch/arm/mach-bcm2708/dmaer.c
15  create mode 100755 arch/arm/mach-bcm2708/include/mach/vc_support.h
16  create mode 100755 arch/arm/mach-bcm2708/vc_support.c
17
18 diff --git a/arch/arm/mach-bcm2708/Kconfig b/arch/arm/mach-bcm2708/Kconfig
19 index a35ff89..b85bb8d 100644
20 --- a/arch/arm/mach-bcm2708/Kconfig
21 +++ b/arch/arm/mach-bcm2708/Kconfig
22 @@ -38,4 +38,12 @@ config BCM2708_SPIDEV
23          default y
24          help
25            Binds spidev driver to the SPI0 master
26 +
27 +config BCM2708_DMAER
28 +       tristate "BCM2708 DMA helper"
29 +       depends on MACH_BCM2708
30 +        default n
31 +        help
32 +          Enable DMA helper for accelerating X composition
33 +
34  endmenu
35 diff --git a/arch/arm/mach-bcm2708/Makefile b/arch/arm/mach-bcm2708/Makefile
36 index 164ecb2..0da162c 100644
37 --- a/arch/arm/mach-bcm2708/Makefile
38 +++ b/arch/arm/mach-bcm2708/Makefile
39 @@ -6,3 +6,6 @@ obj-$(CONFIG_MACH_BCM2708)      += clock.o bcm2708.o armctrl.o vcio.o power.o dma.o
40  obj-$(CONFIG_BCM2708_GPIO)     += bcm2708_gpio.o
41  obj-$(CONFIG_BCM2708_VCMEM)    += vc_mem.o
42  
43 +obj-$(CONFIG_BCM2708_DMAER)    += dmaer_master.o
44 +dmaer_master-objs              := dmaer.o vc_support.o
45 +
46 diff --git a/arch/arm/mach-bcm2708/dmaer.c b/arch/arm/mach-bcm2708/dmaer.c
47 new file mode 100755
48 index 0000000..d1bc0fa
49 --- /dev/null
50 +++ b/arch/arm/mach-bcm2708/dmaer.c
51 @@ -0,0 +1,887 @@
52 +#include <linux/init.h>
53 +#include <linux/sched.h>
54 +#include <linux/module.h>
55 +#include <linux/types.h>
56 +#include <linux/kdev_t.h>
57 +#include <linux/fs.h>
58 +#include <linux/cdev.h>
59 +#include <linux/mm.h>
60 +#include <linux/slab.h>
61 +#include <linux/pagemap.h>
62 +#include <linux/device.h>
63 +#include <linux/jiffies.h>
64 +#include <linux/timex.h>
65 +#include <linux/dma-mapping.h>
66 +
67 +#include <asm/uaccess.h>
68 +#include <asm/atomic.h>
69 +#include <asm/cacheflush.h>
70 +#include <asm/io.h>
71 +
72 +#include <mach/dma.h>
73 +#include <mach/vc_support.h>
74 +
75 +#ifdef ECLIPSE_IGNORE
76 +
77 +#define __user
78 +#define __init
79 +#define __exit
80 +#define __iomem
81 +#define KERN_DEBUG
82 +#define KERN_ERR
83 +#define KERN_WARNING
84 +#define KERN_INFO
85 +#define _IOWR(a, b, c) b
86 +#define _IOW(a, b, c) b
87 +#define _IO(a, b) b
88 +
89 +#endif
90 +
91 +//#define inline
92 +
93 +#define PRINTK(args...) printk(args)
94 +//#define PRINTK_VERBOSE(args...) printk(args)
95 +//#define PRINTK(args...)
96 +#define PRINTK_VERBOSE(args...)
97 +
98 +/***** TYPES ****/
99 +#define PAGES_PER_LIST 500
100 +struct PageList
101 +{
102 +       struct page *m_pPages[PAGES_PER_LIST];
103 +       unsigned int m_used;
104 +       struct PageList *m_pNext;
105 +};
106 +
107 +struct VmaPageList
108 +{
109 +       //each vma has a linked list of pages associated with it
110 +       struct PageList *m_pPageHead;
111 +       struct PageList *m_pPageTail;
112 +       unsigned int m_refCount;
113 +};
114 +
115 +struct DmaControlBlock
116 +{
117 +       unsigned int m_transferInfo;
118 +       void __user *m_pSourceAddr;
119 +       void __user *m_pDestAddr;
120 +       unsigned int m_xferLen;
121 +       unsigned int m_tdStride;
122 +       struct DmaControlBlock *m_pNext;
123 +       unsigned int m_blank1, m_blank2;
124 +};
125 +
126 +/***** DEFINES ******/
127 +//magic number defining the module
128 +#define DMA_MAGIC              0xdd
129 +
130 +//do user virtual to physical translation of the CB chain
131 +#define DMA_PREPARE            _IOWR(DMA_MAGIC, 0, struct DmaControlBlock *)
132 +
133 +//kick the pre-prepared CB chain
134 +#define DMA_KICK               _IOW(DMA_MAGIC, 1, struct DmaControlBlock *)
135 +
136 +//prepare it, kick it, wait for it
137 +#define DMA_PREPARE_KICK_WAIT  _IOWR(DMA_MAGIC, 2, struct DmaControlBlock *)
138 +
139 +//prepare it, kick it, don't wait for it
140 +#define DMA_PREPARE_KICK       _IOWR(DMA_MAGIC, 3, struct DmaControlBlock *)
141 +
142 +//not currently implemented
143 +#define DMA_WAIT_ONE           _IO(DMA_MAGIC, 4, struct DmaControlBlock *)
144 +
145 +//wait on all kicked CB chains
146 +#define DMA_WAIT_ALL           _IO(DMA_MAGIC, 5)
147 +
148 +//in order to discover the largest AXI burst that should be programmed into the transfer params
149 +#define DMA_MAX_BURST          _IO(DMA_MAGIC, 6)
150 +
151 +//set the address range through which the user address is assumed to already by a physical address
152 +#define DMA_SET_MIN_PHYS       _IOW(DMA_MAGIC, 7, unsigned long)
153 +#define DMA_SET_MAX_PHYS       _IOW(DMA_MAGIC, 8, unsigned long)
154 +#define DMA_SET_PHYS_OFFSET    _IOW(DMA_MAGIC, 9, unsigned long)
155 +
156 +//used to define the size for the CMA-based allocation *in pages*, can only be done once once the file is opened
157 +#define DMA_CMA_SET_SIZE       _IOW(DMA_MAGIC, 10, unsigned long)
158 +
159 +//used to get the version of the module, to test for a capability
160 +#define DMA_GET_VERSION                _IO(DMA_MAGIC, 99)
161 +
162 +#define VERSION_NUMBER 1
163 +
164 +#define VIRT_TO_BUS_CACHE_SIZE 8
165 +
166 +/***** FILE OPS *****/
167 +static int Open(struct inode *pInode, struct file *pFile);
168 +static int Release(struct inode *pInode, struct file *pFile);
169 +static long Ioctl(struct file *pFile, unsigned int cmd, unsigned long arg);
170 +static ssize_t Read(struct file *pFile, char __user *pUser, size_t count, loff_t *offp);
171 +static int Mmap(struct file *pFile, struct vm_area_struct *pVma);
172 +
173 +/***** VMA OPS ****/
174 +static void VmaOpen4k(struct vm_area_struct *pVma);
175 +static void VmaClose4k(struct vm_area_struct *pVma);
176 +static int VmaFault4k(struct vm_area_struct *pVma, struct vm_fault *pVmf);
177 +
178 +/**** DMA PROTOTYPES */
179 +static struct DmaControlBlock __user *DmaPrepare(struct DmaControlBlock __user *pUserCB, int *pError);
180 +static int DmaKick(struct DmaControlBlock __user *pUserCB);
181 +static void DmaWaitAll(void);
182 +
183 +/**** GENERIC ****/
184 +static int __init dmaer_init(void);
185 +static void __exit dmaer_exit(void);
186 +
187 +/*** OPS ***/
188 +static struct vm_operations_struct g_vmOps4k = {
189 +       .open = VmaOpen4k,
190 +       .close = VmaClose4k,
191 +       .fault = VmaFault4k,
192 +};
193 +
194 +static struct file_operations g_fOps = {
195 +       .owner = THIS_MODULE,
196 +       .llseek = 0,
197 +       .read = Read,
198 +       .write = 0,
199 +       .unlocked_ioctl = Ioctl,
200 +       .open = Open,
201 +       .release = Release,
202 +       .mmap = Mmap,
203 +};
204 +
205 +/***** GLOBALS ******/
206 +static dev_t g_majorMinor;
207 +
208 +//tracking usage of the two files
209 +static atomic_t g_oneLock4k = ATOMIC_INIT(1);
210 +
211 +//device operations
212 +static struct cdev g_cDev;
213 +static int g_trackedPages = 0;
214 +
215 +//dma control
216 +static unsigned int *g_pDmaChanBase;
217 +static int g_dmaIrq;
218 +static int g_dmaChan;
219 +
220 +//cma allocation
221 +static int g_cmaHandle;
222 +
223 +//user virtual to bus address translation acceleration
224 +static unsigned long g_virtAddr[VIRT_TO_BUS_CACHE_SIZE];
225 +static unsigned long g_busAddr[VIRT_TO_BUS_CACHE_SIZE];
226 +static unsigned long g_cbVirtAddr;
227 +static unsigned long g_cbBusAddr;
228 +static int g_cacheInsertAt;
229 +static int g_cacheHit, g_cacheMiss;
230 +
231 +//off by default
232 +static void __user *g_pMinPhys;
233 +static void __user *g_pMaxPhys;
234 +static unsigned long g_physOffset;
235 +
236 +/****** CACHE OPERATIONS ********/
237 +static inline void FlushAddrCache(void)
238 +{
239 +       int count = 0;
240 +       for (count = 0; count < VIRT_TO_BUS_CACHE_SIZE; count++)
241 +               g_virtAddr[count] = 0xffffffff;                 //never going to match as we always chop the bottom bits anyway
242 +
243 +       g_cbVirtAddr = 0xffffffff;
244 +
245 +       g_cacheInsertAt = 0;
246 +}
247 +
248 +//translate from a user virtual address to a bus address by mapping the page
249 +//NB this won't lock a page in memory, so to avoid potential paging issues using kernel logical addresses
250 +static inline void __iomem *UserVirtualToBus(void __user *pUser)
251 +{
252 +       int mapped;
253 +       struct page *pPage;
254 +       void *phys;
255 +
256 +       //map it (requiring that the pointer points to something that does not hang off the page boundary)
257 +       mapped = get_user_pages(current, current->mm,
258 +               (unsigned long)pUser, 1,
259 +               1, 0,
260 +               &pPage,
261 +               0);
262 +
263 +       if (mapped <= 0)                //error
264 +               return 0;
265 +
266 +       PRINTK_VERBOSE(KERN_DEBUG "user virtual %p arm phys %p bus %p\n",
267 +                       pUser, page_address(pPage), (void __iomem *)__virt_to_bus(page_address(pPage)));
268 +
269 +       //get the arm physical address
270 +       phys = page_address(pPage) + offset_in_page(pUser);
271 +       page_cache_release(pPage);
272 +
273 +       //and now the bus address
274 +       return (void __iomem *)__virt_to_bus(phys);
275 +}
276 +
277 +static inline void __iomem *UserVirtualToBusViaCbCache(void __user *pUser)
278 +{
279 +       unsigned long virtual_page = (unsigned long)pUser & ~4095;
280 +       unsigned long page_offset = (unsigned long)pUser & 4095;
281 +       unsigned long bus_addr;
282 +
283 +       if (g_cbVirtAddr == virtual_page)
284 +       {
285 +               bus_addr = g_cbBusAddr + page_offset;
286 +               g_cacheHit++;
287 +               return (void __iomem *)bus_addr;
288 +       }
289 +       else
290 +       {
291 +               bus_addr = (unsigned long)UserVirtualToBus(pUser);
292 +               
293 +               if (!bus_addr)
294 +                       return 0;
295 +               
296 +               g_cbVirtAddr = virtual_page;
297 +               g_cbBusAddr = bus_addr & ~4095;
298 +               g_cacheMiss++;
299 +
300 +               return (void __iomem *)bus_addr;
301 +       }
302 +}
303 +
304 +//do the same as above, by query our virt->bus cache
305 +static inline void __iomem *UserVirtualToBusViaCache(void __user *pUser)
306 +{
307 +       int count;
308 +       //get the page and its offset
309 +       unsigned long virtual_page = (unsigned long)pUser & ~4095;
310 +       unsigned long page_offset = (unsigned long)pUser & 4095;
311 +       unsigned long bus_addr;
312 +
313 +       if (pUser >= g_pMinPhys && pUser < g_pMaxPhys)
314 +       {
315 +               PRINTK_VERBOSE(KERN_DEBUG "user->phys passthrough on %p\n", pUser);
316 +               return (void __iomem *)((unsigned long)pUser + g_physOffset);
317 +       }
318 +
319 +       //check the cache for our entry
320 +       for (count = 0; count < VIRT_TO_BUS_CACHE_SIZE; count++)
321 +               if (g_virtAddr[count] == virtual_page)
322 +               {
323 +                       bus_addr = g_busAddr[count] + page_offset;
324 +                       g_cacheHit++;
325 +                       return (void __iomem *)bus_addr;
326 +               }
327 +
328 +       //not found, look up manually and then insert its page address
329 +       bus_addr = (unsigned long)UserVirtualToBus(pUser);
330 +
331 +       if (!bus_addr)
332 +               return 0;
333 +
334 +       g_virtAddr[g_cacheInsertAt] = virtual_page;
335 +       g_busAddr[g_cacheInsertAt] = bus_addr & ~4095;
336 +
337 +       //round robin
338 +       g_cacheInsertAt++;
339 +       if (g_cacheInsertAt == VIRT_TO_BUS_CACHE_SIZE)
340 +               g_cacheInsertAt = 0;
341 +
342 +       g_cacheMiss++;
343 +
344 +       return (void __iomem *)bus_addr;
345 +}
346 +
347 +/***** FILE OPERATIONS ****/
348 +static int Open(struct inode *pInode, struct file *pFile)
349 +{
350 +       PRINTK(KERN_DEBUG "file opening: %d/%d\n", imajor(pInode), iminor(pInode));
351 +       
352 +       //check which device we are
353 +       if (iminor(pInode) == 0)                //4k
354 +       {
355 +               //only one at a time
356 +               if (!atomic_dec_and_test(&g_oneLock4k))
357 +               {
358 +                       atomic_inc(&g_oneLock4k);
359 +                       return -EBUSY;
360 +               }
361 +       }
362 +       else
363 +               return -EINVAL;
364 +       
365 +       //todo there will be trouble if two different processes open the files
366 +
367 +       //reset after any file is opened
368 +       g_pMinPhys = (void __user *)-1;
369 +       g_pMaxPhys = (void __user *)0;
370 +       g_physOffset = 0;
371 +       g_cmaHandle = 0;
372 +
373 +       return 0;
374 +}
375 +
376 +static int Release(struct inode *pInode, struct file *pFile)
377 +{
378 +       PRINTK(KERN_DEBUG "file closing, %d pages tracked\n", g_trackedPages);
379 +       if (g_trackedPages)
380 +               PRINTK(KERN_ERR "we\'re leaking memory!\n");
381 +       
382 +       //wait for any dmas to finish
383 +       DmaWaitAll();
384 +
385 +       //free this memory on the application closing the file or it crashing (implicitly closing the file)
386 +       if (g_cmaHandle)
387 +       {
388 +               PRINTK(KERN_DEBUG "unlocking vc memory\n");
389 +               if (UnlockVcMemory(g_cmaHandle))
390 +                       PRINTK(KERN_ERR "uh-oh, unable to unlock vc memory!\n");
391 +               PRINTK(KERN_DEBUG "releasing vc memory\n");
392 +               if (ReleaseVcMemory(g_cmaHandle))
393 +                       PRINTK(KERN_ERR "uh-oh, unable to release vc memory!\n");
394 +       }
395 +
396 +       if (iminor(pInode) == 0)
397 +               atomic_inc(&g_oneLock4k);
398 +       else
399 +               return -EINVAL;
400 +
401 +       return 0;
402 +}
403 +
404 +static struct DmaControlBlock __user *DmaPrepare(struct DmaControlBlock __user *pUserCB, int *pError)
405 +{
406 +       struct DmaControlBlock kernCB;
407 +       struct DmaControlBlock __user *pUNext;
408 +       void __iomem *pSourceBus, __iomem *pDestBus;
409 +       
410 +       //get the control block into kernel memory so we can work on it
411 +       if (copy_from_user(&kernCB, pUserCB, sizeof(struct DmaControlBlock)) != 0)
412 +       {
413 +               PRINTK(KERN_ERR "copy_from_user failed for user cb %p\n", pUserCB);
414 +               *pError = 1;
415 +               return 0;
416 +       }
417 +       
418 +       if (kernCB.m_pSourceAddr == 0 || kernCB.m_pDestAddr == 0)
419 +       {
420 +               PRINTK(KERN_ERR "faulty source (%p) dest (%p) addresses for user cb %p\n",
421 +                       kernCB.m_pSourceAddr, kernCB.m_pDestAddr, pUserCB);
422 +               *pError = 1;
423 +               return 0;
424 +       }
425 +
426 +       pSourceBus = UserVirtualToBusViaCache(kernCB.m_pSourceAddr);
427 +       pDestBus = UserVirtualToBusViaCache(kernCB.m_pDestAddr);
428 +
429 +       if (!pSourceBus || !pDestBus)
430 +       {
431 +               PRINTK(KERN_ERR "virtual to bus translation failure for source/dest %p/%p->%p/%p\n",
432 +                               kernCB.m_pSourceAddr, kernCB.m_pDestAddr,
433 +                               pSourceBus, pDestBus);
434 +               *pError = 1;
435 +               return 0;
436 +       }
437 +       
438 +       //update the user structure with the new bus addresses
439 +       kernCB.m_pSourceAddr = pSourceBus;
440 +       kernCB.m_pDestAddr = pDestBus;
441 +
442 +       PRINTK_VERBOSE(KERN_DEBUG "final source %p dest %p\n", kernCB.m_pSourceAddr, kernCB.m_pDestAddr);
443 +               
444 +       //sort out the bus address for the next block
445 +       pUNext = kernCB.m_pNext;
446 +       
447 +       if (kernCB.m_pNext)
448 +       {
449 +               void __iomem *pNextBus;
450 +               pNextBus = UserVirtualToBusViaCbCache(kernCB.m_pNext);
451 +
452 +               if (!pNextBus)
453 +               {
454 +                       PRINTK(KERN_ERR "virtual to bus translation failure for m_pNext\n");
455 +                       *pError = 1;
456 +                       return 0;
457 +               }
458 +
459 +               //update the pointer with the bus address
460 +               kernCB.m_pNext = pNextBus;
461 +       }
462 +       
463 +       //write it back to user space
464 +       if (copy_to_user(pUserCB, &kernCB, sizeof(struct DmaControlBlock)) != 0)
465 +       {
466 +               PRINTK(KERN_ERR "copy_to_user failed for cb %p\n", pUserCB);
467 +               *pError = 1;
468 +               return 0;
469 +       }
470 +
471 +       __cpuc_flush_dcache_area(pUserCB, 32);
472 +
473 +       *pError = 0;
474 +       return pUNext;
475 +}
476 +
477 +static int DmaKick(struct DmaControlBlock __user *pUserCB)
478 +{
479 +       void __iomem *pBusCB;
480 +       
481 +       pBusCB = UserVirtualToBusViaCbCache(pUserCB);
482 +       if (!pBusCB)
483 +       {
484 +               PRINTK(KERN_ERR "virtual to bus translation failure for cb\n");
485 +               return 1;
486 +       }
487 +
488 +       //flush_cache_all();
489 +
490 +       bcm_dma_start(g_pDmaChanBase, (dma_addr_t)pBusCB);
491 +       
492 +       return 0;
493 +}
494 +
495 +static void DmaWaitAll(void)
496 +{
497 +       int counter = 0;
498 +       volatile int inner_count;
499 +       volatile unsigned int cs;
500 +       unsigned long time_before, time_after;
501 +
502 +       time_before = jiffies;
503 +       //bcm_dma_wait_idle(g_pDmaChanBase);
504 +       dsb();
505 +       
506 +       cs = readl(g_pDmaChanBase);
507 +       
508 +       while ((cs & 1) == 1)
509 +       {
510 +               cs = readl(g_pDmaChanBase);
511 +               counter++;
512 +
513 +               for (inner_count = 0; inner_count < 32; inner_count++);
514 +
515 +               asm volatile ("MCR p15,0,r0,c7,c0,4 \n");
516 +               //cpu_do_idle();
517 +               if (counter >= 1000000)
518 +               {
519 +                       PRINTK(KERN_WARNING "DMA failed to finish in a timely fashion\n");
520 +                       break;
521 +               }
522 +       }
523 +       time_after = jiffies;
524 +       PRINTK_VERBOSE(KERN_DEBUG "done, counter %d, cs %08x", counter, cs);
525 +       PRINTK_VERBOSE(KERN_DEBUG "took %ld jiffies, %d HZ\n", time_after - time_before, HZ);
526 +}
527 +
528 +static long Ioctl(struct file *pFile, unsigned int cmd, unsigned long arg)
529 +{
530 +       int error = 0;
531 +       PRINTK_VERBOSE(KERN_DEBUG "ioctl cmd %x arg %lx\n", cmd, arg);
532 +
533 +       switch (cmd)
534 +       {
535 +       case DMA_PREPARE:
536 +       case DMA_PREPARE_KICK:
537 +       case DMA_PREPARE_KICK_WAIT:
538 +               {
539 +                       struct DmaControlBlock __user *pUCB = (struct DmaControlBlock *)arg;
540 +                       int steps = 0;
541 +                       unsigned long start_time = jiffies;
542 +                       (void)start_time;
543 +
544 +                       //flush our address cache
545 +                       FlushAddrCache();
546 +
547 +                       PRINTK_VERBOSE(KERN_DEBUG "dma prepare\n");
548 +
549 +                       //do virtual to bus translation for each entry
550 +                       do
551 +                       {
552 +                               pUCB = DmaPrepare(pUCB, &error);
553 +                       } while (error == 0 && ++steps && pUCB);
554 +                       PRINTK_VERBOSE(KERN_DEBUG "prepare done in %d steps, %ld\n", steps, jiffies - start_time);
555 +
556 +                       //carry straight on if we want to kick too
557 +                       if (cmd == DMA_PREPARE || error)
558 +                       {
559 +                               PRINTK_VERBOSE(KERN_DEBUG "falling out\n");
560 +                               return error ? -EINVAL : 0;
561 +                       }
562 +               }
563 +       case DMA_KICK:
564 +               PRINTK_VERBOSE(KERN_DEBUG "dma begin\n");
565 +
566 +               if (cmd == DMA_KICK)
567 +                       FlushAddrCache();
568 +
569 +               DmaKick((struct DmaControlBlock __user *)arg);
570 +               
571 +               if (cmd != DMA_PREPARE_KICK_WAIT)
572 +                       break;
573 +/*     case DMA_WAIT_ONE:
574 +               //PRINTK(KERN_DEBUG "dma wait one\n");
575 +               break;*/
576 +       case DMA_WAIT_ALL:
577 +               //PRINTK(KERN_DEBUG "dma wait all\n");
578 +               DmaWaitAll();
579 +               break;
580 +       case DMA_MAX_BURST:
581 +               if (g_dmaChan == 0)
582 +                       return 10;
583 +               else
584 +                       return 5;
585 +       case DMA_SET_MIN_PHYS:
586 +               g_pMinPhys = (void __user *)arg;
587 +               PRINTK(KERN_DEBUG "min/max user/phys bypass set to %p %p\n", g_pMinPhys, g_pMaxPhys);
588 +               break;
589 +       case DMA_SET_MAX_PHYS:
590 +               g_pMaxPhys = (void __user *)arg;
591 +               PRINTK(KERN_DEBUG "min/max user/phys bypass set to %p %p\n", g_pMinPhys, g_pMaxPhys);
592 +               break;
593 +       case DMA_SET_PHYS_OFFSET:
594 +               g_physOffset = arg;
595 +               PRINTK(KERN_DEBUG "user/phys bypass offset set to %ld\n", g_physOffset);
596 +               break;
597 +       case DMA_CMA_SET_SIZE:
598 +       {
599 +               unsigned int pBusAddr;
600 +
601 +               if (g_cmaHandle)
602 +               {
603 +                       PRINTK(KERN_ERR "memory has already been allocated (handle %d)\n", g_cmaHandle);
604 +                       return -EINVAL;
605 +               }
606 +
607 +               PRINTK(KERN_INFO "allocating %ld bytes of VC memory\n", arg * 4096);
608 +
609 +               //get the memory
610 +               if (AllocateVcMemory(&g_cmaHandle, arg * 4096, 4096, MEM_FLAG_L1_NONALLOCATING | MEM_FLAG_NO_INIT | MEM_FLAG_HINT_PERMALOCK))
611 +               {
612 +                       PRINTK(KERN_ERR "failed to allocate %ld bytes of VC memory\n", arg * 4096);
613 +                       g_cmaHandle = 0;
614 +                       return -EINVAL;
615 +               }
616 +
617 +               //get an address for it
618 +               PRINTK(KERN_INFO "trying to map VC memory\n");
619 +
620 +               if (LockVcMemory(&pBusAddr, g_cmaHandle))
621 +               {
622 +                       PRINTK(KERN_ERR "failed to map CMA handle %d, releasing memory\n", g_cmaHandle);
623 +                       ReleaseVcMemory(g_cmaHandle);
624 +                       g_cmaHandle = 0;
625 +               }
626 +
627 +               PRINTK(KERN_INFO "bus address for CMA memory is %x\n", pBusAddr);
628 +               return pBusAddr;
629 +       }
630 +       case DMA_GET_VERSION:
631 +               PRINTK(KERN_DEBUG "returning version number, %d\n", VERSION_NUMBER);
632 +               return VERSION_NUMBER;
633 +       default:
634 +               PRINTK(KERN_DEBUG "unknown ioctl: %d\n", cmd);
635 +               return -EINVAL;
636 +       }
637 +
638 +       return 0;
639 +}
640 +
641 +static ssize_t Read(struct file *pFile, char __user *pUser, size_t count, loff_t *offp)
642 +{
643 +       return -EIO;
644 +}
645 +
646 +static int Mmap(struct file *pFile, struct vm_area_struct *pVma)
647 +{
648 +       struct PageList *pPages;
649 +       struct VmaPageList *pVmaList;
650 +       
651 +       PRINTK_VERBOSE(KERN_DEBUG "MMAP vma %p, length %ld (%s %d)\n",
652 +               pVma, pVma->vm_end - pVma->vm_start,
653 +               current->comm, current->pid);
654 +       PRINTK_VERBOSE(KERN_DEBUG "MMAP %p %d (tracked %d)\n", pVma, current->pid, g_trackedPages);
655 +
656 +       //make a new page list
657 +       pPages = (struct PageList *)kmalloc(sizeof(struct PageList), GFP_KERNEL);
658 +       if (!pPages)
659 +       {
660 +               PRINTK(KERN_ERR "couldn\'t allocate a new page list (%s %d)\n",
661 +                       current->comm, current->pid);
662 +               return -ENOMEM;
663 +       }
664 +
665 +       //clear the page list
666 +       pPages->m_used = 0;
667 +       pPages->m_pNext = 0;
668 +       
669 +       //insert our vma and new page list somewhere
670 +       if (!pVma->vm_private_data)
671 +       {
672 +               struct VmaPageList *pList;
673 +
674 +               PRINTK_VERBOSE(KERN_DEBUG "new vma list, making new one (%s %d)\n",
675 +                       current->comm, current->pid);
676 +
677 +               //make a new vma list
678 +               pList = (struct VmaPageList *)kmalloc(sizeof(struct VmaPageList), GFP_KERNEL);
679 +               if (!pList)
680 +               {
681 +                       PRINTK(KERN_ERR "couldn\'t allocate vma page list (%s %d)\n",
682 +                               current->comm, current->pid);
683 +                       kfree(pPages);
684 +                       return -ENOMEM;
685 +               }
686 +
687 +               //clear this list
688 +               pVma->vm_private_data = (void *)pList;
689 +               pList->m_refCount = 0;
690 +       }
691 +
692 +       pVmaList = (struct VmaPageList *)pVma->vm_private_data;
693 +
694 +       //add it to the vma list
695 +       pVmaList->m_pPageHead = pPages;
696 +       pVmaList->m_pPageTail = pPages;
697 +
698 +       pVma->vm_ops = &g_vmOps4k;
699 +       pVma->vm_flags |= VM_IO;
700 +
701 +       VmaOpen4k(pVma);
702 +
703 +       return 0;
704 +}
705 +
706 +/****** VMA OPERATIONS ******/
707 +
708 +static void VmaOpen4k(struct vm_area_struct *pVma)
709 +{
710 +       struct VmaPageList *pVmaList;
711 +
712 +       PRINTK_VERBOSE(KERN_DEBUG "vma open %p private %p (%s %d), %d live pages\n", pVma, pVma->vm_private_data, current->comm, current->pid, g_trackedPages);
713 +       PRINTK_VERBOSE(KERN_DEBUG "OPEN %p %d %ld pages (tracked pages %d)\n",
714 +               pVma, current->pid, (pVma->vm_end - pVma->vm_start) >> 12,
715 +               g_trackedPages);
716 +
717 +       pVmaList = (struct VmaPageList *)pVma->vm_private_data;
718 +
719 +       if (pVmaList)
720 +       {
721 +               pVmaList->m_refCount++;
722 +               PRINTK_VERBOSE(KERN_DEBUG "ref count is now %d\n", pVmaList->m_refCount);
723 +       }
724 +       else
725 +       {
726 +               PRINTK_VERBOSE(KERN_DEBUG "err, open but no vma page list\n");
727 +       }
728 +}
729 +
730 +static void VmaClose4k(struct vm_area_struct *pVma)
731 +{
732 +       struct VmaPageList *pVmaList;
733 +       int freed = 0;
734 +       
735 +       PRINTK_VERBOSE(KERN_DEBUG "vma close %p private %p (%s %d)\n", pVma, pVma->vm_private_data, current->comm, current->pid);
736 +       
737 +       //wait for any dmas to finish
738 +       DmaWaitAll();
739 +
740 +       //find our vma in the list
741 +       pVmaList = (struct VmaPageList *)pVma->vm_private_data;
742 +
743 +       //may be a fork
744 +       if (pVmaList)
745 +       {
746 +               struct PageList *pPages;
747 +               
748 +               pVmaList->m_refCount--;
749 +
750 +               if (pVmaList->m_refCount == 0)
751 +               {
752 +                       PRINTK_VERBOSE(KERN_DEBUG "found vma, freeing pages (%s %d)\n",
753 +                               current->comm, current->pid);
754 +
755 +                       pPages = pVmaList->m_pPageHead;
756 +
757 +                       if (!pPages)
758 +                       {
759 +                               PRINTK(KERN_ERR "no page list (%s %d)!\n",
760 +                                       current->comm, current->pid);
761 +                               return;
762 +                       }
763 +
764 +                       while (pPages)
765 +                       {
766 +                               struct PageList *next;
767 +                               int count;
768 +
769 +                               PRINTK_VERBOSE(KERN_DEBUG "page list (%s %d)\n",
770 +                                       current->comm, current->pid);
771 +
772 +                               next = pPages->m_pNext;
773 +                               for (count = 0; count < pPages->m_used; count++)
774 +                               {
775 +                                       PRINTK_VERBOSE(KERN_DEBUG "freeing page %p (%s %d)\n",
776 +                                               pPages->m_pPages[count],
777 +                                               current->comm, current->pid);
778 +                                       __free_pages(pPages->m_pPages[count], 0);
779 +                                       g_trackedPages--;
780 +                                       freed++;
781 +                               }
782 +
783 +                               PRINTK_VERBOSE(KERN_DEBUG "freeing page list (%s %d)\n",
784 +                                       current->comm, current->pid);
785 +                               kfree(pPages);
786 +                               pPages = next;
787 +                       }
788 +                       
789 +                       //remove our vma from the list
790 +                       kfree(pVmaList);
791 +                       pVma->vm_private_data = 0;
792 +               }
793 +               else
794 +               {
795 +                       PRINTK_VERBOSE(KERN_DEBUG "ref count is %d, not closing\n", pVmaList->m_refCount);
796 +               }
797 +       }
798 +       else
799 +       {
800 +               PRINTK_VERBOSE(KERN_ERR "uh-oh, vma %p not found (%s %d)!\n", pVma, current->comm, current->pid);
801 +               PRINTK_VERBOSE(KERN_ERR "CLOSE ERR\n");
802 +       }
803 +
804 +       PRINTK_VERBOSE(KERN_DEBUG "CLOSE %p %d %d pages (tracked pages %d)",
805 +               pVma, current->pid, freed, g_trackedPages);
806 +
807 +       PRINTK_VERBOSE(KERN_DEBUG "%d pages open\n", g_trackedPages);
808 +}
809 +
810 +static int VmaFault4k(struct vm_area_struct *pVma, struct vm_fault *pVmf)
811 +{
812 +       PRINTK_VERBOSE(KERN_DEBUG "vma fault for vma %p private %p at offset %ld (%s %d)\n", pVma, pVma->vm_private_data, pVmf->pgoff,
813 +               current->comm, current->pid);
814 +       PRINTK_VERBOSE(KERN_DEBUG "FAULT\n");
815 +       pVmf->page = alloc_page(GFP_KERNEL);
816 +       
817 +       if (pVmf->page)
818 +       {
819 +               PRINTK_VERBOSE(KERN_DEBUG "alloc page virtual %p\n", page_address(pVmf->page));
820 +       }
821 +
822 +       if (!pVmf->page)
823 +       {
824 +               PRINTK(KERN_ERR "vma fault oom (%s %d)\n", current->comm, current->pid);
825 +               return VM_FAULT_OOM;
826 +       }
827 +       else
828 +       {
829 +               struct VmaPageList *pVmaList;
830 +               
831 +               get_page(pVmf->page);
832 +               g_trackedPages++;
833 +               
834 +               //find our vma in the list
835 +               pVmaList = (struct VmaPageList *)pVma->vm_private_data;
836 +               
837 +               if (pVmaList)
838 +               {
839 +                       PRINTK_VERBOSE(KERN_DEBUG "vma found (%s %d)\n", current->comm, current->pid);
840 +
841 +                       if (pVmaList->m_pPageTail->m_used == PAGES_PER_LIST)
842 +                       {
843 +                               PRINTK_VERBOSE(KERN_DEBUG "making new page list (%s %d)\n", current->comm, current->pid);
844 +                               //making a new page list
845 +                               pVmaList->m_pPageTail->m_pNext = (struct PageList *)kmalloc(sizeof(struct PageList), GFP_KERNEL);
846 +                               if (!pVmaList->m_pPageTail->m_pNext)
847 +                                       return -ENOMEM;
848 +                               
849 +                               //update the tail pointer
850 +                               pVmaList->m_pPageTail = pVmaList->m_pPageTail->m_pNext;
851 +                               pVmaList->m_pPageTail->m_used = 0;
852 +                               pVmaList->m_pPageTail->m_pNext = 0;
853 +                       }
854 +
855 +                       PRINTK_VERBOSE(KERN_DEBUG "adding page to list (%s %d)\n", current->comm, current->pid);
856 +                       
857 +                       pVmaList->m_pPageTail->m_pPages[pVmaList->m_pPageTail->m_used] = pVmf->page;
858 +                       pVmaList->m_pPageTail->m_used++;
859 +               }
860 +               else
861 +                       PRINTK(KERN_ERR "returned page for vma we don\'t know %p (%s %d)\n", pVma, current->comm, current->pid);
862 +               
863 +               return 0;
864 +       }
865 +}
866 +
867 +/****** GENERIC FUNCTIONS ******/
868 +static int __init dmaer_init(void)
869 +{
870 +       int result = alloc_chrdev_region(&g_majorMinor, 0, 1, "dmaer");
871 +       if (result < 0)
872 +       {
873 +               PRINTK(KERN_ERR "unable to get major device number\n");
874 +               return result;
875 +       }
876 +       else
877 +               PRINTK(KERN_DEBUG "major device number %d\n", MAJOR(g_majorMinor));
878 +       
879 +       PRINTK(KERN_DEBUG "vma list size %d, page list size %d, page size %ld\n",
880 +               sizeof(struct VmaPageList), sizeof(struct PageList), PAGE_SIZE);
881 +
882 +       //get a dma channel to work with
883 +       result = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST, (void **)&g_pDmaChanBase, &g_dmaIrq);
884 +
885 +       //uncomment to force to channel 0
886 +       //result = 0;
887 +       //g_pDmaChanBase = 0xce808000;
888 +       
889 +       if (result < 0)
890 +       {
891 +               PRINTK(KERN_ERR "failed to allocate dma channel\n");
892 +               cdev_del(&g_cDev);
893 +               unregister_chrdev_region(g_majorMinor, 1);
894 +       }
895 +       
896 +       //reset the channel
897 +       PRINTK(KERN_DEBUG "allocated dma channel %d (%p), initial state %08x\n", result, g_pDmaChanBase, *g_pDmaChanBase);
898 +       *g_pDmaChanBase = 1 << 31;
899 +       PRINTK(KERN_DEBUG "post-reset %08x\n", *g_pDmaChanBase);
900 +       
901 +       g_dmaChan = result;
902 +
903 +       //clear the cache stats
904 +       g_cacheHit = 0;
905 +       g_cacheMiss = 0;
906 +
907 +       //register our device - after this we are go go go
908 +       cdev_init(&g_cDev, &g_fOps);
909 +       g_cDev.owner = THIS_MODULE;
910 +       g_cDev.ops = &g_fOps;
911 +       
912 +       result = cdev_add(&g_cDev, g_majorMinor, 1);
913 +       if (result < 0)
914 +       {
915 +               PRINTK(KERN_ERR "failed to add character device\n");
916 +               unregister_chrdev_region(g_majorMinor, 1);
917 +               bcm_dma_chan_free(g_dmaChan);
918 +               return result;
919 +       }
920 +               
921 +       return 0;
922 +}
923 +
924 +static void __exit dmaer_exit(void)
925 +{
926 +       PRINTK(KERN_INFO "closing dmaer device, cache stats: %d hits %d misses\n", g_cacheHit, g_cacheMiss);
927 +       //unregister the device
928 +       cdev_del(&g_cDev);
929 +       unregister_chrdev_region(g_majorMinor, 1);
930 +       //free the dma channel
931 +       bcm_dma_chan_free(g_dmaChan);
932 +}
933 +
934 +MODULE_LICENSE("Dual BSD/GPL");
935 +MODULE_AUTHOR("Simon Hall");
936 +module_init(dmaer_init);
937 +module_exit(dmaer_exit);
938 +
939 diff --git a/arch/arm/mach-bcm2708/include/mach/vc_support.h b/arch/arm/mach-bcm2708/include/mach/vc_support.h
940 new file mode 100755
941 index 0000000..70e809f
942 --- /dev/null
943 +++ b/arch/arm/mach-bcm2708/include/mach/vc_support.h
944 @@ -0,0 +1,69 @@
945 +#ifndef _VC_SUPPORT_H_
946 +#define _VC_SUPPORT_H_
947 +
948 +/*
949 + * vc_support.h
950 + *
951 + *  Created on: 25 Nov 2012
952 + *      Author: Simon
953 + */
954 +
955 +enum {
956 +/*
957 +      If a MEM_HANDLE_T is discardable, the memory manager may resize it to size
958 +      0 at any time when it is not locked or retained.
959 +   */
960 +   MEM_FLAG_DISCARDABLE = 1 << 0,
961 +
962 +   /*
963 +      If a MEM_HANDLE_T is allocating (or normal), its block of memory will be
964 +      accessed in an allocating fashion through the cache.
965 +   */
966 +   MEM_FLAG_NORMAL = 0 << 2,
967 +   MEM_FLAG_ALLOCATING = MEM_FLAG_NORMAL,
968 +
969 +   /*
970 +      If a MEM_HANDLE_T is direct, its block of memory will be accessed
971 +      directly, bypassing the cache.
972 +   */
973 +   MEM_FLAG_DIRECT = 1 << 2,
974 +
975 +   /*
976 +      If a MEM_HANDLE_T is coherent, its block of memory will be accessed in a
977 +      non-allocating fashion through the cache.
978 +   */
979 +   MEM_FLAG_COHERENT = 2 << 2,
980 +
981 +   /*
982 +      If a MEM_HANDLE_T is L1-nonallocating, its block of memory will be accessed by
983 +      the VPU in a fashion which is allocating in L2, but only coherent in L1.
984 +   */
985 +   MEM_FLAG_L1_NONALLOCATING = (MEM_FLAG_DIRECT | MEM_FLAG_COHERENT),
986 +
987 +   /*
988 +      If a MEM_HANDLE_T is zero'd, its contents are set to 0 rather than
989 +      MEM_HANDLE_INVALID on allocation and resize up.
990 +   */
991 +   MEM_FLAG_ZERO = 1 << 4,
992 +
993 +   /*
994 +      If a MEM_HANDLE_T is uninitialised, it will not be reset to a defined value
995 +      (either zero, or all 1's) on allocation.
996 +    */
997 +   MEM_FLAG_NO_INIT = 1 << 5,
998 +
999 +   /*
1000 +      Hints.
1001 +   */
1002 +   MEM_FLAG_HINT_PERMALOCK = 1 << 6, /* Likely to be locked for long periods of time. */
1003 +};
1004 +
1005 +unsigned int AllocateVcMemory(unsigned int *pHandle, unsigned int size, unsigned int alignment, unsigned int flags);
1006 +unsigned int ReleaseVcMemory(unsigned int handle);
1007 +unsigned int LockVcMemory(unsigned int *pBusAddress, unsigned int handle);
1008 +unsigned int UnlockVcMemory(unsigned int handle);
1009 +
1010 +unsigned int ExecuteVcCode(unsigned int code,
1011 +               unsigned int r0, unsigned int r1, unsigned int r2, unsigned int r3, unsigned int r4, unsigned int r5);
1012 +
1013 +#endif
1014 diff --git a/arch/arm/mach-bcm2708/vc_support.c b/arch/arm/mach-bcm2708/vc_support.c
1015 new file mode 100755
1016 index 0000000..5cb1335
1017 --- /dev/null
1018 +++ b/arch/arm/mach-bcm2708/vc_support.c
1019 @@ -0,0 +1,319 @@
1020 +/*
1021 + * vc_support.c
1022 + *
1023 + *  Created on: 25 Nov 2012
1024 + *      Author: Simon
1025 + */
1026 +
1027 +#include <linux/module.h>
1028 +#include <mach/vcio.h>
1029 +
1030 +#ifdef ECLIPSE_IGNORE
1031 +
1032 +#define __user
1033 +#define __init
1034 +#define __exit
1035 +#define __iomem
1036 +#define KERN_DEBUG
1037 +#define KERN_ERR
1038 +#define KERN_WARNING
1039 +#define KERN_INFO
1040 +#define _IOWR(a, b, c) b
1041 +#define _IOW(a, b, c) b
1042 +#define _IO(a, b) b
1043 +
1044 +#endif
1045 +
1046 +/****** VC MAILBOX FUNCTIONALITY ******/
1047 +unsigned int AllocateVcMemory(unsigned int *pHandle, unsigned int size, unsigned int alignment, unsigned int flags)
1048 +{
1049 +       struct vc_msg
1050 +       {
1051 +               unsigned int m_msgSize;
1052 +               unsigned int m_response;
1053 +
1054 +               struct vc_tag
1055 +               {
1056 +                       unsigned int m_tagId;
1057 +                       unsigned int m_sendBufferSize;
1058 +                       union {
1059 +                               unsigned int m_sendDataSize;
1060 +                               unsigned int m_recvDataSize;
1061 +                       };
1062 +
1063 +                       struct args
1064 +                       {
1065 +                               union {
1066 +                                       unsigned int m_size;
1067 +                                       unsigned int m_handle;
1068 +                               };
1069 +                               unsigned int m_alignment;
1070 +                               unsigned int m_flags;
1071 +                       } m_args;
1072 +               } m_tag;
1073 +
1074 +               unsigned int m_endTag;
1075 +       } msg;
1076 +       int s;
1077 +
1078 +       msg.m_msgSize = sizeof(msg);
1079 +       msg.m_response = 0;
1080 +       msg.m_endTag = 0;
1081 +
1082 +       //fill in the tag for the allocation command
1083 +       msg.m_tag.m_tagId = 0x3000c;
1084 +       msg.m_tag.m_sendBufferSize = 12;
1085 +       msg.m_tag.m_sendDataSize = 12;
1086 +
1087 +       //fill in our args
1088 +       msg.m_tag.m_args.m_size = size;
1089 +       msg.m_tag.m_args.m_alignment = alignment;
1090 +       msg.m_tag.m_args.m_flags = flags;
1091 +
1092 +       //run the command
1093 +       s = bcm_mailbox_property(&msg, sizeof(msg));
1094 +
1095 +       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004)
1096 +       {
1097 +               *pHandle = msg.m_tag.m_args.m_handle;
1098 +               return 0;
1099 +       }
1100 +       else
1101 +       {
1102 +               printk(KERN_ERR "failed to allocate vc memory: s=%d response=%08x recv data size=%08x\n",
1103 +                               s, msg.m_response, msg.m_tag.m_recvDataSize);
1104 +               return 1;
1105 +       }
1106 +}
1107 +
1108 +unsigned int ReleaseVcMemory(unsigned int handle)
1109 +{
1110 +       struct vc_msg
1111 +       {
1112 +               unsigned int m_msgSize;
1113 +               unsigned int m_response;
1114 +
1115 +               struct vc_tag
1116 +               {
1117 +                       unsigned int m_tagId;
1118 +                       unsigned int m_sendBufferSize;
1119 +                       union {
1120 +                               unsigned int m_sendDataSize;
1121 +                               unsigned int m_recvDataSize;
1122 +                       };
1123 +
1124 +                       struct args
1125 +                       {
1126 +                               union {
1127 +                                       unsigned int m_handle;
1128 +                                       unsigned int m_error;
1129 +                               };
1130 +                       } m_args;
1131 +               } m_tag;
1132 +
1133 +               unsigned int m_endTag;
1134 +       } msg;
1135 +       int s;
1136 +
1137 +       msg.m_msgSize = sizeof(msg);
1138 +       msg.m_response = 0;
1139 +       msg.m_endTag = 0;
1140 +
1141 +       //fill in the tag for the release command
1142 +       msg.m_tag.m_tagId = 0x3000f;
1143 +       msg.m_tag.m_sendBufferSize = 4;
1144 +       msg.m_tag.m_sendDataSize = 4;
1145 +
1146 +       //pass across the handle
1147 +       msg.m_tag.m_args.m_handle = handle;
1148 +
1149 +       s = bcm_mailbox_property(&msg, sizeof(msg));
1150 +
1151 +       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004 && msg.m_tag.m_args.m_error == 0)
1152 +               return 0;
1153 +       else
1154 +       {
1155 +               printk(KERN_ERR "failed to release vc memory: s=%d response=%08x recv data size=%08x error=%08x\n",
1156 +                               s, msg.m_response, msg.m_tag.m_recvDataSize, msg.m_tag.m_args.m_error);
1157 +               return 1;
1158 +       }
1159 +}
1160 +
1161 +unsigned int LockVcMemory(unsigned int *pBusAddress, unsigned int handle)
1162 +{
1163 +       struct vc_msg
1164 +       {
1165 +               unsigned int m_msgSize;
1166 +               unsigned int m_response;
1167 +
1168 +               struct vc_tag
1169 +               {
1170 +                       unsigned int m_tagId;
1171 +                       unsigned int m_sendBufferSize;
1172 +                       union {
1173 +                               unsigned int m_sendDataSize;
1174 +                               unsigned int m_recvDataSize;
1175 +                       };
1176 +
1177 +                       struct args
1178 +                       {
1179 +                               union {
1180 +                                       unsigned int m_handle;
1181 +                                       unsigned int m_busAddress;
1182 +                               };
1183 +                       } m_args;
1184 +               } m_tag;
1185 +
1186 +               unsigned int m_endTag;
1187 +       } msg;
1188 +       int s;
1189 +
1190 +       msg.m_msgSize = sizeof(msg);
1191 +       msg.m_response = 0;
1192 +       msg.m_endTag = 0;
1193 +
1194 +       //fill in the tag for the lock command
1195 +       msg.m_tag.m_tagId = 0x3000d;
1196 +       msg.m_tag.m_sendBufferSize = 4;
1197 +       msg.m_tag.m_sendDataSize = 4;
1198 +
1199 +       //pass across the handle
1200 +       msg.m_tag.m_args.m_handle = handle;
1201 +
1202 +       s = bcm_mailbox_property(&msg, sizeof(msg));
1203 +
1204 +       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004)
1205 +       {
1206 +               //pick out the bus address
1207 +               *pBusAddress = msg.m_tag.m_args.m_busAddress;
1208 +               return 0;
1209 +       }
1210 +       else
1211 +       {
1212 +               printk(KERN_ERR "failed to lock vc memory: s=%d response=%08x recv data size=%08x\n",
1213 +                               s, msg.m_response, msg.m_tag.m_recvDataSize);
1214 +               return 1;
1215 +       }
1216 +}
1217 +
1218 +unsigned int UnlockVcMemory(unsigned int handle)
1219 +{
1220 +       struct vc_msg
1221 +       {
1222 +               unsigned int m_msgSize;
1223 +               unsigned int m_response;
1224 +
1225 +               struct vc_tag
1226 +               {
1227 +                       unsigned int m_tagId;
1228 +                       unsigned int m_sendBufferSize;
1229 +                       union {
1230 +                               unsigned int m_sendDataSize;
1231 +                               unsigned int m_recvDataSize;
1232 +                       };
1233 +
1234 +                       struct args
1235 +                       {
1236 +                               union {
1237 +                                       unsigned int m_handle;
1238 +                                       unsigned int m_error;
1239 +                               };
1240 +                       } m_args;
1241 +               } m_tag;
1242 +
1243 +               unsigned int m_endTag;
1244 +       } msg;
1245 +       int s;
1246 +
1247 +       msg.m_msgSize = sizeof(msg);
1248 +       msg.m_response = 0;
1249 +       msg.m_endTag = 0;
1250 +
1251 +       //fill in the tag for the unlock command
1252 +       msg.m_tag.m_tagId = 0x3000e;
1253 +       msg.m_tag.m_sendBufferSize = 4;
1254 +       msg.m_tag.m_sendDataSize = 4;
1255 +
1256 +       //pass across the handle
1257 +       msg.m_tag.m_args.m_handle = handle;
1258 +
1259 +       s = bcm_mailbox_property(&msg, sizeof(msg));
1260 +
1261 +       //check the error code too
1262 +       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004 && msg.m_tag.m_args.m_error == 0)
1263 +               return 0;
1264 +       else
1265 +       {
1266 +               printk(KERN_ERR "failed to unlock vc memory: s=%d response=%08x recv data size=%08x error%08x\n",
1267 +                               s, msg.m_response, msg.m_tag.m_recvDataSize, msg.m_tag.m_args.m_error);
1268 +               return 1;
1269 +       }
1270 +}
1271 +
1272 +unsigned int ExecuteVcCode(unsigned int code,
1273 +               unsigned int r0, unsigned int r1, unsigned int r2, unsigned int r3, unsigned int r4, unsigned int r5)
1274 +{
1275 +       struct vc_msg
1276 +       {
1277 +               unsigned int m_msgSize;
1278 +               unsigned int m_response;
1279 +
1280 +               struct vc_tag
1281 +               {
1282 +                       unsigned int m_tagId;
1283 +                       unsigned int m_sendBufferSize;
1284 +                       union {
1285 +                               unsigned int m_sendDataSize;
1286 +                               unsigned int m_recvDataSize;
1287 +                       };
1288 +
1289 +                       struct args
1290 +                       {
1291 +                               union {
1292 +                                       unsigned int m_pCode;
1293 +                                       unsigned int m_return;
1294 +                               };
1295 +                               unsigned int m_r0;
1296 +                               unsigned int m_r1;
1297 +                               unsigned int m_r2;
1298 +                               unsigned int m_r3;
1299 +                               unsigned int m_r4;
1300 +                               unsigned int m_r5;
1301 +                       } m_args;
1302 +               } m_tag;
1303 +
1304 +               unsigned int m_endTag;
1305 +       } msg;
1306 +       int s;
1307 +
1308 +       msg.m_msgSize = sizeof(msg);
1309 +       msg.m_response = 0;
1310 +       msg.m_endTag = 0;
1311 +
1312 +       //fill in the tag for the execute-code command
1313 +       msg.m_tag.m_tagId = 0x30010;
1314 +       msg.m_tag.m_sendBufferSize = 28;
1315 +       msg.m_tag.m_sendDataSize = 28;
1316 +
1317 +       //pass across the code address and its six register arguments
1318 +       msg.m_tag.m_args.m_pCode = code;
1319 +       msg.m_tag.m_args.m_r0 = r0;
1320 +       msg.m_tag.m_args.m_r1 = r1;
1321 +       msg.m_tag.m_args.m_r2 = r2;
1322 +       msg.m_tag.m_args.m_r3 = r3;
1323 +       msg.m_tag.m_args.m_r4 = r4;
1324 +       msg.m_tag.m_args.m_r5 = r5;
1325 +
1326 +       s = bcm_mailbox_property(&msg, sizeof(msg));
1327 +
1328 +       //check the response and receive size (this tag has no separate error field)
1329 +       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004)
1330 +               return msg.m_tag.m_args.m_return;
1331 +       else
1332 +       {
1333 +               printk(KERN_ERR "failed to execute: s=%d response=%08x recv data size=%08x\n",
1334 +                               s, msg.m_response, msg.m_tag.m_recvDataSize);
1335 +               return 1;
1336 +       }
1337 +}
1338 +
1339 -- 
1340 1.9.1
1341