14.07/openwrt.git: target/linux/brcm2708/patches-3.10/0041-Add-Simon-Hall-s-dma-helper-module-useful-in-future-.patch
From 87b48ac64b6b2aeb97b53eee080e74cb83ff25fe Mon Sep 17 00:00:00 2001
From: popcornmix <popcornmix@gmail.com>
Date: Mon, 19 Nov 2012 18:27:05 +0000
Subject: [PATCH 041/174] Add Simon Hall's dma helper module, useful in future
 for X acceleration

---
 arch/arm/mach-bcm2708/Kconfig                   |   8 +
 arch/arm/mach-bcm2708/Makefile                  |   3 +
 arch/arm/mach-bcm2708/dmaer.c                   | 887 ++++++++++++++++++++++++
 arch/arm/mach-bcm2708/include/mach/vc_support.h |  69 ++
 arch/arm/mach-bcm2708/vc_support.c              | 319 +++++++++
 5 files changed, 1286 insertions(+)
 create mode 100755 arch/arm/mach-bcm2708/dmaer.c
 create mode 100755 arch/arm/mach-bcm2708/include/mach/vc_support.h
 create mode 100755 arch/arm/mach-bcm2708/vc_support.c

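The module below exposes a single character device whose ioctls walk a user-supplied chain of DmaControlBlocks (laid out to match the BCM2835 DMA controller's 32-byte control block), rewrite the source, dest and next pointers from user virtual addresses to bus addresses, and start the transfer on the channel claimed at load time. A minimal user-space sketch of driving it follows. It assumes a /dev/dmaer node has been created by hand from the dynamically allocated major printed at insmod time (the patch registers no device node itself), a 32-bit ARM build so the struct matches the hardware layout, and DEST_INC/SRC_INC at transfer-info bits 4 and 8 as given in the BCM2835 peripherals datasheet; the module itself leaves m_transferInfo entirely to the caller.

/* dmaer_test.c -- illustrative sketch, not part of the patch */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

struct DmaControlBlock {
        unsigned int m_transferInfo;
        void *m_pSourceAddr;
        void *m_pDestAddr;
        unsigned int m_xferLen;
        unsigned int m_tdStride;
        struct DmaControlBlock *m_pNext;
        unsigned int m_blank1, m_blank2;
};

#define DMA_MAGIC               0xdd
#define DMA_PREPARE_KICK_WAIT   _IOWR(DMA_MAGIC, 2, struct DmaControlBlock *)
#define DMA_GET_VERSION         _IO(DMA_MAGIC, 99)

int main(void)
{
        int fd = open("/dev/dmaer", O_RDWR);    /* node created by hand: mknod /dev/dmaer c <major> 0 */
        if (fd < 0) { perror("open"); return 1; }
        printf("dmaer version %ld\n", (long)ioctl(fd, DMA_GET_VERSION));

        /* map two pages from the device so the fault handler backs them
           with kernel-allocated pages */
        char *buf = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (buf == MAP_FAILED) { perror("mmap"); return 1; }
        memset(buf, 0xaa, 4096);

        /* a single-entry chain; DMA_PREPARE rewrites the pointers in place
           with bus addresses and flushes 32 bytes per CB, so keep the CB
           32-byte aligned.  Bits 4 and 8 are DEST_INC and SRC_INC per the
           BCM2835 datasheet -- an assumption, the module never fills these */
        struct DmaControlBlock *cb;
        if (posix_memalign((void **)&cb, 32, sizeof(*cb))) return 1;
        memset(cb, 0, sizeof(*cb));
        cb->m_transferInfo = (1 << 4) | (1 << 8);
        cb->m_pSourceAddr = buf;
        cb->m_pDestAddr = buf + 4096;
        cb->m_xferLen = 4096;

        if (ioctl(fd, DMA_PREPARE_KICK_WAIT, cb))
                perror("DMA_PREPARE_KICK_WAIT");
        printf("dest starts with %02x\n", (unsigned char)buf[4096]);
        return close(fd);
}
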
--- a/arch/arm/mach-bcm2708/Kconfig
+++ b/arch/arm/mach-bcm2708/Kconfig
@@ -38,4 +38,12 @@ config BCM2708_SPIDEV
         default y
         help
           Binds spidev driver to the SPI0 master
+
+config BCM2708_DMAER
+       tristate "BCM2708 DMA helper"
+       depends on MACH_BCM2708
+        default n
+        help
+          Enable DMA helper for accelerating X composition
+
 endmenu
--- a/arch/arm/mach-bcm2708/Makefile
+++ b/arch/arm/mach-bcm2708/Makefile
@@ -6,3 +6,6 @@ obj-$(CONFIG_MACH_BCM2708)      += clock.o b
 obj-$(CONFIG_BCM2708_GPIO)     += bcm2708_gpio.o
 obj-$(CONFIG_BCM2708_VCMEM)    += vc_mem.o
 
+obj-$(CONFIG_BCM2708_DMAER)    += dmaer_master.o
+dmaer_master-objs              := dmaer.o vc_support.o
+
--- /dev/null
+++ b/arch/arm/mach-bcm2708/dmaer.c
@@ -0,0 +1,887 @@
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/timex.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+#include <mach/dma.h>
+#include <mach/vc_support.h>
+
+#ifdef ECLIPSE_IGNORE
+
+#define __user
+#define __init
+#define __exit
+#define __iomem
+#define KERN_DEBUG
+#define KERN_ERR
+#define KERN_WARNING
+#define KERN_INFO
+#define _IOWR(a, b, c) b
+#define _IOW(a, b, c) b
+#define _IO(a, b) b
+
+#endif
+
+//#define inline
+
+#define PRINTK(args...) printk(args)
+//#define PRINTK_VERBOSE(args...) printk(args)
+//#define PRINTK(args...)
+#define PRINTK_VERBOSE(args...)
+
+/***** TYPES ****/
+#define PAGES_PER_LIST 500
+struct PageList
+{
+       struct page *m_pPages[PAGES_PER_LIST];
+       unsigned int m_used;
+       struct PageList *m_pNext;
+};
+
+struct VmaPageList
+{
+       //each vma has a linked list of pages associated with it
+       struct PageList *m_pPageHead;
+       struct PageList *m_pPageTail;
+       unsigned int m_refCount;
+};
+
+struct DmaControlBlock
+{
+       unsigned int m_transferInfo;
+       void __user *m_pSourceAddr;
+       void __user *m_pDestAddr;
+       unsigned int m_xferLen;
+       unsigned int m_tdStride;
+       struct DmaControlBlock *m_pNext;
+       unsigned int m_blank1, m_blank2;
+};
+
+/***** DEFINES ******/
+//magic number defining the module
+#define DMA_MAGIC              0xdd
+
+//do user virtual to physical translation of the CB chain
+#define DMA_PREPARE            _IOWR(DMA_MAGIC, 0, struct DmaControlBlock *)
+
+//kick the pre-prepared CB chain
+#define DMA_KICK               _IOW(DMA_MAGIC, 1, struct DmaControlBlock *)
+
+//prepare it, kick it, wait for it
+#define DMA_PREPARE_KICK_WAIT  _IOWR(DMA_MAGIC, 2, struct DmaControlBlock *)
+
+//prepare it, kick it, don't wait for it
+#define DMA_PREPARE_KICK       _IOWR(DMA_MAGIC, 3, struct DmaControlBlock *)
+
+//not currently implemented
+#define DMA_WAIT_ONE          _IOW(DMA_MAGIC, 4, struct DmaControlBlock *)
+
+//wait on all kicked CB chains
+#define DMA_WAIT_ALL           _IO(DMA_MAGIC, 5)
+
+//in order to discover the largest AXI burst that should be programmed into the transfer params
+#define DMA_MAX_BURST          _IO(DMA_MAGIC, 6)
+
+//set the address range within which a user address is assumed to already be a physical address
+#define DMA_SET_MIN_PHYS       _IOW(DMA_MAGIC, 7, unsigned long)
+#define DMA_SET_MAX_PHYS       _IOW(DMA_MAGIC, 8, unsigned long)
+#define DMA_SET_PHYS_OFFSET    _IOW(DMA_MAGIC, 9, unsigned long)
+
+//used to define the size for the CMA-based allocation *in pages*; can only be done once per open of the file
+#define DMA_CMA_SET_SIZE       _IOW(DMA_MAGIC, 10, unsigned long)
+
+//used to get the version of the module, to test for a capability
+#define DMA_GET_VERSION                _IO(DMA_MAGIC, 99)
+
+#define VERSION_NUMBER 1
+
+#define VIRT_TO_BUS_CACHE_SIZE 8
+
+/***** FILE OPS *****/
+static int Open(struct inode *pInode, struct file *pFile);
+static int Release(struct inode *pInode, struct file *pFile);
+static long Ioctl(struct file *pFile, unsigned int cmd, unsigned long arg);
+static ssize_t Read(struct file *pFile, char __user *pUser, size_t count, loff_t *offp);
+static int Mmap(struct file *pFile, struct vm_area_struct *pVma);
+
+/***** VMA OPS ****/
+static void VmaOpen4k(struct vm_area_struct *pVma);
+static void VmaClose4k(struct vm_area_struct *pVma);
+static int VmaFault4k(struct vm_area_struct *pVma, struct vm_fault *pVmf);
+
+/**** DMA PROTOTYPES */
+static struct DmaControlBlock __user *DmaPrepare(struct DmaControlBlock __user *pUserCB, int *pError);
+static int DmaKick(struct DmaControlBlock __user *pUserCB);
+static void DmaWaitAll(void);
+
+/**** GENERIC ****/
+static int __init dmaer_init(void);
+static void __exit dmaer_exit(void);
+
+/*** OPS ***/
+static struct vm_operations_struct g_vmOps4k = {
+       .open = VmaOpen4k,
+       .close = VmaClose4k,
+       .fault = VmaFault4k,
+};
+
+static struct file_operations g_fOps = {
+       .owner = THIS_MODULE,
+       .llseek = 0,
+       .read = Read,
+       .write = 0,
+       .unlocked_ioctl = Ioctl,
+       .open = Open,
+       .release = Release,
+       .mmap = Mmap,
+};
+
+/***** GLOBALS ******/
+static dev_t g_majorMinor;
+
+//tracking usage of the two files
+static atomic_t g_oneLock4k = ATOMIC_INIT(1);
+
+//device operations
+static struct cdev g_cDev;
+static int g_trackedPages = 0;
+
+//dma control
+static unsigned int *g_pDmaChanBase;
+static int g_dmaIrq;
+static int g_dmaChan;
+
+//cma allocation
+static int g_cmaHandle;
+
+//user virtual to bus address translation acceleration
+static unsigned long g_virtAddr[VIRT_TO_BUS_CACHE_SIZE];
+static unsigned long g_busAddr[VIRT_TO_BUS_CACHE_SIZE];
+static unsigned long g_cbVirtAddr;
+static unsigned long g_cbBusAddr;
+static int g_cacheInsertAt;
+static int g_cacheHit, g_cacheMiss;
+
+//off by default
+static void __user *g_pMinPhys;
+static void __user *g_pMaxPhys;
+static unsigned long g_physOffset;
+
+/****** CACHE OPERATIONS ********/
+static inline void FlushAddrCache(void)
+{
+       int count = 0;
+       for (count = 0; count < VIRT_TO_BUS_CACHE_SIZE; count++)
+               g_virtAddr[count] = 0xffffffff;                 //never going to match as we always chop the bottom bits anyway
+
+       g_cbVirtAddr = 0xffffffff;
+
+       g_cacheInsertAt = 0;
+}
+
+//translate from a user virtual address to a bus address by mapping the page
+//NB this won't lock the page in memory, so to avoid potential paging issues use kernel logical addresses
+static inline void __iomem *UserVirtualToBus(void __user *pUser)
+{
+       int mapped;
+       struct page *pPage;
+       void *phys;
+
+       //map it (requiring that the pointer points to something that does not hang off the page boundary)
+       mapped = get_user_pages(current, current->mm,
+               (unsigned long)pUser, 1,
+               1, 0,
+               &pPage,
+               0);
+
+       if (mapped <= 0)                //error
+               return 0;
+
+       PRINTK_VERBOSE(KERN_DEBUG "user virtual %p arm phys %p bus %p\n",
+                       pUser, page_address(pPage), (void __iomem *)__virt_to_bus(page_address(pPage)));
+
+       //get the arm physical address
+       phys = page_address(pPage) + offset_in_page(pUser);
+       page_cache_release(pPage);
+
+       //and now the bus address
+       return (void __iomem *)__virt_to_bus(phys);
+}
+
+static inline void __iomem *UserVirtualToBusViaCbCache(void __user *pUser)
+{
+       unsigned long virtual_page = (unsigned long)pUser & ~4095;
+       unsigned long page_offset = (unsigned long)pUser & 4095;
+       unsigned long bus_addr;
+
+       if (g_cbVirtAddr == virtual_page)
+       {
+               bus_addr = g_cbBusAddr + page_offset;
+               g_cacheHit++;
+               return (void __iomem *)bus_addr;
+       }
+       else
+       {
+               bus_addr = (unsigned long)UserVirtualToBus(pUser);
+
+               if (!bus_addr)
+                       return 0;
+
+               g_cbVirtAddr = virtual_page;
+               g_cbBusAddr = bus_addr & ~4095;
+               g_cacheMiss++;
+
+               return (void __iomem *)bus_addr;
+       }
+}
+
+//do the same as above, but first query our virt->bus cache
+static inline void __iomem *UserVirtualToBusViaCache(void __user *pUser)
+{
+       int count;
+       //get the page and its offset
+       unsigned long virtual_page = (unsigned long)pUser & ~4095;
+       unsigned long page_offset = (unsigned long)pUser & 4095;
+       unsigned long bus_addr;
+
+       if (pUser >= g_pMinPhys && pUser < g_pMaxPhys)
+       {
+               PRINTK_VERBOSE(KERN_DEBUG "user->phys passthrough on %p\n", pUser);
+               return (void __iomem *)((unsigned long)pUser + g_physOffset);
+       }
+
+       //check the cache for our entry
+       for (count = 0; count < VIRT_TO_BUS_CACHE_SIZE; count++)
+               if (g_virtAddr[count] == virtual_page)
+               {
+                       bus_addr = g_busAddr[count] + page_offset;
+                       g_cacheHit++;
+                       return (void __iomem *)bus_addr;
+               }
+
+       //not found, look up manually and then insert its page address
+       bus_addr = (unsigned long)UserVirtualToBus(pUser);
+
+       if (!bus_addr)
+               return 0;
+
+       g_virtAddr[g_cacheInsertAt] = virtual_page;
+       g_busAddr[g_cacheInsertAt] = bus_addr & ~4095;
+
+       //round robin
+       g_cacheInsertAt++;
+       if (g_cacheInsertAt == VIRT_TO_BUS_CACHE_SIZE)
+               g_cacheInsertAt = 0;
+
+       g_cacheMiss++;
+
+       return (void __iomem *)bus_addr;
+}
+
+/***** FILE OPERATIONS ****/
+static int Open(struct inode *pInode, struct file *pFile)
+{
+       PRINTK(KERN_DEBUG "file opening: %d/%d\n", imajor(pInode), iminor(pInode));
+
+       //check which device we are
+       if (iminor(pInode) == 0)                //4k
+       {
+               //only one at a time
+               if (!atomic_dec_and_test(&g_oneLock4k))
+               {
+                       atomic_inc(&g_oneLock4k);
+                       return -EBUSY;
+               }
+       }
+       else
+               return -EINVAL;
+
+       //todo there will be trouble if two different processes open the files
+
+       //reset after any file is opened
+       g_pMinPhys = (void __user *)-1;
+       g_pMaxPhys = (void __user *)0;
+       g_physOffset = 0;
+       g_cmaHandle = 0;
+
+       return 0;
+}
+
+static int Release(struct inode *pInode, struct file *pFile)
+{
+       PRINTK(KERN_DEBUG "file closing, %d pages tracked\n", g_trackedPages);
+       if (g_trackedPages)
+               PRINTK(KERN_ERR "we\'re leaking memory!\n");
+
+       //wait for any dmas to finish
+       DmaWaitAll();
+
+       //free this memory on the application closing the file or it crashing (implicitly closing the file)
+       if (g_cmaHandle)
+       {
+               PRINTK(KERN_DEBUG "unlocking vc memory\n");
+               if (UnlockVcMemory(g_cmaHandle))
+                       PRINTK(KERN_ERR "uh-oh, unable to unlock vc memory!\n");
+               PRINTK(KERN_DEBUG "releasing vc memory\n");
+               if (ReleaseVcMemory(g_cmaHandle))
+                       PRINTK(KERN_ERR "uh-oh, unable to release vc memory!\n");
+       }
+
+       if (iminor(pInode) == 0)
+               atomic_inc(&g_oneLock4k);
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
+static struct DmaControlBlock __user *DmaPrepare(struct DmaControlBlock __user *pUserCB, int *pError)
+{
+       struct DmaControlBlock kernCB;
+       struct DmaControlBlock __user *pUNext;
+       void __iomem *pSourceBus, __iomem *pDestBus;
+
+       //get the control block into kernel memory so we can work on it
+       if (copy_from_user(&kernCB, pUserCB, sizeof(struct DmaControlBlock)) != 0)
+       {
+               PRINTK(KERN_ERR "copy_from_user failed for user cb %p\n", pUserCB);
+               *pError = 1;
+               return 0;
+       }
+
+       if (kernCB.m_pSourceAddr == 0 || kernCB.m_pDestAddr == 0)
+       {
+               PRINTK(KERN_ERR "faulty source (%p) dest (%p) addresses for user cb %p\n",
+                       kernCB.m_pSourceAddr, kernCB.m_pDestAddr, pUserCB);
+               *pError = 1;
+               return 0;
+       }
+
+       pSourceBus = UserVirtualToBusViaCache(kernCB.m_pSourceAddr);
+       pDestBus = UserVirtualToBusViaCache(kernCB.m_pDestAddr);
+
+       if (!pSourceBus || !pDestBus)
+       {
+               PRINTK(KERN_ERR "virtual to bus translation failure for source/dest %p/%p->%p/%p\n",
+                               kernCB.m_pSourceAddr, kernCB.m_pDestAddr,
+                               pSourceBus, pDestBus);
+               *pError = 1;
+               return 0;
+       }
+
+       //update the user structure with the new bus addresses
+       kernCB.m_pSourceAddr = pSourceBus;
+       kernCB.m_pDestAddr = pDestBus;
+
+       PRINTK_VERBOSE(KERN_DEBUG "final source %p dest %p\n", kernCB.m_pSourceAddr, kernCB.m_pDestAddr);
+
+       //sort out the bus address for the next block
+       pUNext = kernCB.m_pNext;
+
+       if (kernCB.m_pNext)
+       {
+               void __iomem *pNextBus;
+               pNextBus = UserVirtualToBusViaCbCache(kernCB.m_pNext);
+
+               if (!pNextBus)
+               {
+                       PRINTK(KERN_ERR "virtual to bus translation failure for m_pNext\n");
+                       *pError = 1;
+                       return 0;
+               }
+
+               //update the pointer with the bus address
+               kernCB.m_pNext = pNextBus;
+       }
+
+       //write it back to user space
+       if (copy_to_user(pUserCB, &kernCB, sizeof(struct DmaControlBlock)) != 0)
+       {
+               PRINTK(KERN_ERR "copy_to_user failed for cb %p\n", pUserCB);
+               *pError = 1;
+               return 0;
+       }
+
+       __cpuc_flush_dcache_area(pUserCB, 32);
+
+       *pError = 0;
+       return pUNext;
+}
+
+static int DmaKick(struct DmaControlBlock __user *pUserCB)
+{
+       void __iomem *pBusCB;
+
+       pBusCB = UserVirtualToBusViaCbCache(pUserCB);
+       if (!pBusCB)
+       {
+               PRINTK(KERN_ERR "virtual to bus translation failure for cb\n");
+               return 1;
+       }
+
+       //flush_cache_all();
+
+       bcm_dma_start(g_pDmaChanBase, (dma_addr_t)pBusCB);
+
+       return 0;
+}
+
+static void DmaWaitAll(void)
+{
+       int counter = 0;
+       volatile int inner_count;
+       volatile unsigned int cs;
+       unsigned long time_before, time_after;
+
+       time_before = jiffies;
+       //bcm_dma_wait_idle(g_pDmaChanBase);
+       dsb();
+
+       cs = readl(g_pDmaChanBase);
+
+       while ((cs & 1) == 1)
+       {
+               cs = readl(g_pDmaChanBase);
+               counter++;
+
+               for (inner_count = 0; inner_count < 32; inner_count++);
+
+               asm volatile ("MCR p15,0,r0,c7,c0,4 \n");
+               //cpu_do_idle();
+               if (counter >= 1000000)
+               {
+                       PRINTK(KERN_WARNING "DMA failed to finish in a timely fashion\n");
+                       break;
+               }
+       }
+       time_after = jiffies;
+       PRINTK_VERBOSE(KERN_DEBUG "done, counter %d, cs %08x", counter, cs);
+       PRINTK_VERBOSE(KERN_DEBUG "took %ld jiffies, %d HZ\n", time_after - time_before, HZ);
+}
+
+static long Ioctl(struct file *pFile, unsigned int cmd, unsigned long arg)
+{
+       int error = 0;
+       PRINTK_VERBOSE(KERN_DEBUG "ioctl cmd %x arg %lx\n", cmd, arg);
+
+       switch (cmd)
+       {
+       case DMA_PREPARE:
+       case DMA_PREPARE_KICK:
+       case DMA_PREPARE_KICK_WAIT:
+               {
+                       struct DmaControlBlock __user *pUCB = (struct DmaControlBlock *)arg;
+                       int steps = 0;
+                       unsigned long start_time = jiffies;
+                       (void)start_time;
+
+                       //flush our address cache
+                       FlushAddrCache();
+
+                       PRINTK_VERBOSE(KERN_DEBUG "dma prepare\n");
+
+                       //do virtual to bus translation for each entry
+                       do
+                       {
+                               pUCB = DmaPrepare(pUCB, &error);
+                       } while (error == 0 && ++steps && pUCB);
+                       PRINTK_VERBOSE(KERN_DEBUG "prepare done in %d steps, %ld\n", steps, jiffies - start_time);
+
+                       //carry straight on if we want to kick too
+                       if (cmd == DMA_PREPARE || error)
+                       {
+                               PRINTK_VERBOSE(KERN_DEBUG "falling out\n");
+                               return error ? -EINVAL : 0;
+                       }
+               }
+       case DMA_KICK:
+               PRINTK_VERBOSE(KERN_DEBUG "dma begin\n");
+
+               if (cmd == DMA_KICK)
+                       FlushAddrCache();
+
+               DmaKick((struct DmaControlBlock __user *)arg);
+
+               if (cmd != DMA_PREPARE_KICK_WAIT)
+                       break;
+/*     case DMA_WAIT_ONE:
+               //PRINTK(KERN_DEBUG "dma wait one\n");
+               break;*/
+       case DMA_WAIT_ALL:
+               //PRINTK(KERN_DEBUG "dma wait all\n");
+               DmaWaitAll();
+               break;
+       case DMA_MAX_BURST:
+               if (g_dmaChan == 0)
+                       return 10;
+               else
+                       return 5;
+       case DMA_SET_MIN_PHYS:
+               g_pMinPhys = (void __user *)arg;
+               PRINTK(KERN_DEBUG "min/max user/phys bypass set to %p %p\n", g_pMinPhys, g_pMaxPhys);
+               break;
+       case DMA_SET_MAX_PHYS:
+               g_pMaxPhys = (void __user *)arg;
+               PRINTK(KERN_DEBUG "min/max user/phys bypass set to %p %p\n", g_pMinPhys, g_pMaxPhys);
+               break;
+       case DMA_SET_PHYS_OFFSET:
+               g_physOffset = arg;
+               PRINTK(KERN_DEBUG "user/phys bypass offset set to %ld\n", g_physOffset);
+               break;
+       case DMA_CMA_SET_SIZE:
+       {
+               unsigned int pBusAddr;
+
+               if (g_cmaHandle)
+               {
+                       PRINTK(KERN_ERR "memory has already been allocated (handle %d)\n", g_cmaHandle);
+                       return -EINVAL;
+               }
+
+               PRINTK(KERN_INFO "allocating %ld bytes of VC memory\n", arg * 4096);
+
+               //get the memory
+               if (AllocateVcMemory(&g_cmaHandle, arg * 4096, 4096, MEM_FLAG_L1_NONALLOCATING | MEM_FLAG_NO_INIT | MEM_FLAG_HINT_PERMALOCK))
+               {
+                       PRINTK(KERN_ERR "failed to allocate %ld bytes of VC memory\n", arg * 4096);
+                       g_cmaHandle = 0;
+                       return -EINVAL;
+               }
+
+               //get an address for it
+               PRINTK(KERN_INFO "trying to map VC memory\n");
+
+               if (LockVcMemory(&pBusAddr, g_cmaHandle))
+               {
+                       PRINTK(KERN_ERR "failed to map CMA handle %d, releasing memory\n", g_cmaHandle);
+                       ReleaseVcMemory(g_cmaHandle);
+                       g_cmaHandle = 0;
+               }
+
+               PRINTK(KERN_INFO "bus address for CMA memory is %x\n", pBusAddr);
+               return pBusAddr;
+       }
+       case DMA_GET_VERSION:
+               PRINTK(KERN_DEBUG "returning version number, %d\n", VERSION_NUMBER);
+               return VERSION_NUMBER;
+       default:
+               PRINTK(KERN_DEBUG "unknown ioctl: %d\n", cmd);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static ssize_t Read(struct file *pFile, char __user *pUser, size_t count, loff_t *offp)
+{
+       return -EIO;
+}
+
+static int Mmap(struct file *pFile, struct vm_area_struct *pVma)
+{
+       struct PageList *pPages;
+       struct VmaPageList *pVmaList;
+
+       PRINTK_VERBOSE(KERN_DEBUG "MMAP vma %p, length %ld (%s %d)\n",
+               pVma, pVma->vm_end - pVma->vm_start,
+               current->comm, current->pid);
+       PRINTK_VERBOSE(KERN_DEBUG "MMAP %p %d (tracked %d)\n", pVma, current->pid, g_trackedPages);
+
+       //make a new page list
+       pPages = (struct PageList *)kmalloc(sizeof(struct PageList), GFP_KERNEL);
+       if (!pPages)
+       {
+               PRINTK(KERN_ERR "couldn\'t allocate a new page list (%s %d)\n",
+                       current->comm, current->pid);
+               return -ENOMEM;
+       }
+
+       //clear the page list
+       pPages->m_used = 0;
+       pPages->m_pNext = 0;
+
+       //insert our vma and new page list somewhere
+       if (!pVma->vm_private_data)
+       {
+               struct VmaPageList *pList;
+
+               PRINTK_VERBOSE(KERN_DEBUG "new vma list, making new one (%s %d)\n",
+                       current->comm, current->pid);
+
+               //make a new vma list
+               pList = (struct VmaPageList *)kmalloc(sizeof(struct VmaPageList), GFP_KERNEL);
+               if (!pList)
+               {
+                       PRINTK(KERN_ERR "couldn\'t allocate vma page list (%s %d)\n",
+                               current->comm, current->pid);
+                       kfree(pPages);
+                       return -ENOMEM;
+               }
+
+               //clear this list
+               pVma->vm_private_data = (void *)pList;
+               pList->m_refCount = 0;
+       }
+
+       pVmaList = (struct VmaPageList *)pVma->vm_private_data;
+
+       //add it to the vma list
+       pVmaList->m_pPageHead = pPages;
+       pVmaList->m_pPageTail = pPages;
+
+       pVma->vm_ops = &g_vmOps4k;
+       pVma->vm_flags |= VM_IO;
+
+       VmaOpen4k(pVma);
+
+       return 0;
+}
+
+/****** VMA OPERATIONS ******/
+
+static void VmaOpen4k(struct vm_area_struct *pVma)
+{
+       struct VmaPageList *pVmaList;
+
+       PRINTK_VERBOSE(KERN_DEBUG "vma open %p private %p (%s %d), %d live pages\n", pVma, pVma->vm_private_data, current->comm, current->pid, g_trackedPages);
+       PRINTK_VERBOSE(KERN_DEBUG "OPEN %p %d %ld pages (tracked pages %d)\n",
+               pVma, current->pid, (pVma->vm_end - pVma->vm_start) >> 12,
+               g_trackedPages);
+
+       pVmaList = (struct VmaPageList *)pVma->vm_private_data;
+
+       if (pVmaList)
+       {
+               pVmaList->m_refCount++;
+               PRINTK_VERBOSE(KERN_DEBUG "ref count is now %d\n", pVmaList->m_refCount);
+       }
+       else
+       {
+               PRINTK_VERBOSE(KERN_DEBUG "err, open but no vma page list\n");
+       }
+}
+
+static void VmaClose4k(struct vm_area_struct *pVma)
+{
+       struct VmaPageList *pVmaList;
+       int freed = 0;
+
+       PRINTK_VERBOSE(KERN_DEBUG "vma close %p private %p (%s %d)\n", pVma, pVma->vm_private_data, current->comm, current->pid);
+
+       //wait for any dmas to finish
+       DmaWaitAll();
+
+       //find our vma in the list
+       pVmaList = (struct VmaPageList *)pVma->vm_private_data;
+
+       //may be a fork
+       if (pVmaList)
+       {
+               struct PageList *pPages;
+
+               pVmaList->m_refCount--;
+
+               if (pVmaList->m_refCount == 0)
+               {
+                       PRINTK_VERBOSE(KERN_DEBUG "found vma, freeing pages (%s %d)\n",
+                               current->comm, current->pid);
+
+                       pPages = pVmaList->m_pPageHead;
+
+                       if (!pPages)
+                       {
+                               PRINTK(KERN_ERR "no page list (%s %d)!\n",
+                                       current->comm, current->pid);
+                               return;
+                       }
+
+                       while (pPages)
+                       {
+                               struct PageList *next;
+                               int count;
+
+                               PRINTK_VERBOSE(KERN_DEBUG "page list (%s %d)\n",
+                                       current->comm, current->pid);
+
+                               next = pPages->m_pNext;
+                               for (count = 0; count < pPages->m_used; count++)
+                               {
+                                       PRINTK_VERBOSE(KERN_DEBUG "freeing page %p (%s %d)\n",
+                                               pPages->m_pPages[count],
+                                               current->comm, current->pid);
+                                       __free_pages(pPages->m_pPages[count], 0);
+                                       g_trackedPages--;
+                                       freed++;
+                               }
+
+                               PRINTK_VERBOSE(KERN_DEBUG "freeing page list (%s %d)\n",
+                                       current->comm, current->pid);
+                               kfree(pPages);
+                               pPages = next;
+                       }
+
+                       //remove our vma from the list
+                       kfree(pVmaList);
+                       pVma->vm_private_data = 0;
+               }
+               else
+               {
+                       PRINTK_VERBOSE(KERN_DEBUG "ref count is %d, not closing\n", pVmaList->m_refCount);
+               }
+       }
+       else
+       {
+               PRINTK_VERBOSE(KERN_ERR "uh-oh, vma %p not found (%s %d)!\n", pVma, current->comm, current->pid);
+               PRINTK_VERBOSE(KERN_ERR "CLOSE ERR\n");
+       }
+
+       PRINTK_VERBOSE(KERN_DEBUG "CLOSE %p %d %d pages (tracked pages %d)",
+               pVma, current->pid, freed, g_trackedPages);
+
+       PRINTK_VERBOSE(KERN_DEBUG "%d pages open\n", g_trackedPages);
+}
+
+static int VmaFault4k(struct vm_area_struct *pVma, struct vm_fault *pVmf)
+{
+       PRINTK_VERBOSE(KERN_DEBUG "vma fault for vma %p private %p at offset %ld (%s %d)\n", pVma, pVma->vm_private_data, pVmf->pgoff,
+               current->comm, current->pid);
+       PRINTK_VERBOSE(KERN_DEBUG "FAULT\n");
+       pVmf->page = alloc_page(GFP_KERNEL);
+
+       if (pVmf->page)
+       {
+               PRINTK_VERBOSE(KERN_DEBUG "alloc page virtual %p\n", page_address(pVmf->page));
+       }
+
+       if (!pVmf->page)
+       {
+               PRINTK(KERN_ERR "vma fault oom (%s %d)\n", current->comm, current->pid);
+               return VM_FAULT_OOM;
+       }
+       else
+       {
+               struct VmaPageList *pVmaList;
+
+               get_page(pVmf->page);
+               g_trackedPages++;
+
+               //find our vma in the list
+               pVmaList = (struct VmaPageList *)pVma->vm_private_data;
+
+               if (pVmaList)
+               {
+                       PRINTK_VERBOSE(KERN_DEBUG "vma found (%s %d)\n", current->comm, current->pid);
+
+                       if (pVmaList->m_pPageTail->m_used == PAGES_PER_LIST)
+                       {
+                               PRINTK_VERBOSE(KERN_DEBUG "making new page list (%s %d)\n", current->comm, current->pid);
+                               //making a new page list
+                               pVmaList->m_pPageTail->m_pNext = (struct PageList *)kmalloc(sizeof(struct PageList), GFP_KERNEL);
+                               if (!pVmaList->m_pPageTail->m_pNext)
+                                       return VM_FAULT_OOM;    //fault handlers return VM_FAULT_* codes, not -errno
+
+                               //update the tail pointer
+                               pVmaList->m_pPageTail = pVmaList->m_pPageTail->m_pNext;
+                               pVmaList->m_pPageTail->m_used = 0;
+                               pVmaList->m_pPageTail->m_pNext = 0;
+                       }
+
+                       PRINTK_VERBOSE(KERN_DEBUG "adding page to list (%s %d)\n", current->comm, current->pid);
+
+                       pVmaList->m_pPageTail->m_pPages[pVmaList->m_pPageTail->m_used] = pVmf->page;
+                       pVmaList->m_pPageTail->m_used++;
+               }
+               else
+                       PRINTK(KERN_ERR "returned page for vma we don\'t know %p (%s %d)\n", pVma, current->comm, current->pid);
+
+               return 0;
+       }
+}
+
+/****** GENERIC FUNCTIONS ******/
+static int __init dmaer_init(void)
+{
+       int result = alloc_chrdev_region(&g_majorMinor, 0, 1, "dmaer");
+       if (result < 0)
+       {
+               PRINTK(KERN_ERR "unable to get major device number\n");
+               return result;
+       }
+       else
+               PRINTK(KERN_DEBUG "major device number %d\n", MAJOR(g_majorMinor));
+
+       PRINTK(KERN_DEBUG "vma list size %zu, page list size %zu, page size %ld\n",
+               sizeof(struct VmaPageList), sizeof(struct PageList), PAGE_SIZE);
+
+       //get a dma channel to work with
+       result = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST, (void **)&g_pDmaChanBase, &g_dmaIrq);
+
+       //uncomment to force to channel 0
+       //result = 0;
+       //g_pDmaChanBase = 0xce808000;
+
+       if (result < 0)
+       {
+               PRINTK(KERN_ERR "failed to allocate dma channel\n");
+               unregister_chrdev_region(g_majorMinor, 1);
+               return result;          //the cdev is not registered yet, so only the region needs undoing
+       }
+
+       //reset the channel
+       PRINTK(KERN_DEBUG "allocated dma channel %d (%p), initial state %08x\n", result, g_pDmaChanBase, *g_pDmaChanBase);
+       *g_pDmaChanBase = 1 << 31;
+       PRINTK(KERN_DEBUG "post-reset %08x\n", *g_pDmaChanBase);
+
+       g_dmaChan = result;
+
+       //clear the cache stats
+       g_cacheHit = 0;
+       g_cacheMiss = 0;
+
+       //register our device - after this we are go go go
+       cdev_init(&g_cDev, &g_fOps);
+       g_cDev.owner = THIS_MODULE;
+       g_cDev.ops = &g_fOps;
+
+       result = cdev_add(&g_cDev, g_majorMinor, 1);
+       if (result < 0)
+       {
+               PRINTK(KERN_ERR "failed to add character device\n");
+               unregister_chrdev_region(g_majorMinor, 1);
+               bcm_dma_chan_free(g_dmaChan);
+               return result;
+       }
+
+       return 0;
+}
+
+static void __exit dmaer_exit(void)
+{
+       PRINTK(KERN_INFO "closing dmaer device, cache stats: %d hits %d misses\n", g_cacheHit, g_cacheMiss);
+       //unregister the device
+       cdev_del(&g_cDev);
+       unregister_chrdev_region(g_majorMinor, 1);
+       //free the dma channel
+       bcm_dma_chan_free(g_dmaChan);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Simon Hall");
+module_init(dmaer_init);
+module_exit(dmaer_exit);
+
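Two limitations of dmaer.c are worth noting here. UserVirtualToBus() translates exactly one page per address, so a transfer that crosses a page boundary needs either one control block per page or memory that is physically contiguous, which is what the DMA_SET_MIN_PHYS/DMA_SET_MAX_PHYS/DMA_SET_PHYS_OFFSET passthrough window and the DMA_CMA_SET_SIZE allocation exist for. And the module never interprets m_transferInfo, m_xferLen or m_tdStride; for the 2D (TD-mode) transfers that X composition wants, the caller encodes them itself. A hypothetical encoding, with the bit and field layout taken from the BCM2835 peripherals datasheet rather than from this patch, extending the sketch above:

/* copy a 256-byte-wide, 64-row rectangle between buffers with a
 * 1024-byte pitch; TDMODE = bit 1, YLENGTH:XLENGTH packed into
 * m_xferLen, D_STRIDE:S_STRIDE packed into m_tdStride -- datasheet
 * values, not defined anywhere in this module */
cb->m_transferInfo = (1 << 1)           /* TDMODE: 2D transfer */
                   | (1 << 4)           /* DEST_INC */
                   | (1 << 8);          /* SRC_INC */
cb->m_xferLen  = (64 << 16) | 256;      /* 64 rows of 256 bytes each */
cb->m_tdStride = ((1024 - 256) << 16)   /* added to dest address after each row */
               | (1024 - 256);          /* added to source address after each row */

Such a rectangle spans many pages, so in practice the addresses would be bus addresses inside the physically contiguous passthrough window, where the per-page translation caveat does not apply.
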
--- /dev/null
+++ b/arch/arm/mach-bcm2708/include/mach/vc_support.h
@@ -0,0 +1,69 @@
+#ifndef _VC_SUPPORT_H_
+#define _VC_SUPPORT_H_
+
+/*
+ * vc_support.h
+ *
+ *  Created on: 25 Nov 2012
+ *      Author: Simon
+ */
+
+enum {
+/*
+      If a MEM_HANDLE_T is discardable, the memory manager may resize it to size
+      0 at any time when it is not locked or retained.
+   */
+   MEM_FLAG_DISCARDABLE = 1 << 0,
+
+   /*
+      If a MEM_HANDLE_T is allocating (or normal), its block of memory will be
+      accessed in an allocating fashion through the cache.
+   */
+   MEM_FLAG_NORMAL = 0 << 2,
+   MEM_FLAG_ALLOCATING = MEM_FLAG_NORMAL,
+
+   /*
+      If a MEM_HANDLE_T is direct, its block of memory will be accessed
+      directly, bypassing the cache.
+   */
+   MEM_FLAG_DIRECT = 1 << 2,
+
+   /*
+      If a MEM_HANDLE_T is coherent, its block of memory will be accessed in a
+      non-allocating fashion through the cache.
+   */
+   MEM_FLAG_COHERENT = 2 << 2,
+
+   /*
+      If a MEM_HANDLE_T is L1-nonallocating, its block of memory will be accessed by
+      the VPU in a fashion which is allocating in L2, but only coherent in L1.
+   */
+   MEM_FLAG_L1_NONALLOCATING = (MEM_FLAG_DIRECT | MEM_FLAG_COHERENT),
+
+   /*
+      If a MEM_HANDLE_T is zero'd, its contents are set to 0 rather than
+      MEM_HANDLE_INVALID on allocation and resize up.
+   */
+   MEM_FLAG_ZERO = 1 << 4,
+
+   /*
+      If a MEM_HANDLE_T is uninitialised, it will not be reset to a defined value
+      (either zero, or all 1's) on allocation.
+    */
+   MEM_FLAG_NO_INIT = 1 << 5,
+
+   /*
+      Hints.
+   */
+   MEM_FLAG_HINT_PERMALOCK = 1 << 6, /* Likely to be locked for long periods of time. */
+};
+
+unsigned int AllocateVcMemory(unsigned int *pHandle, unsigned int size, unsigned int alignment, unsigned int flags);
+unsigned int ReleaseVcMemory(unsigned int handle);
+unsigned int LockVcMemory(unsigned int *pBusAddress, unsigned int handle);
+unsigned int UnlockVcMemory(unsigned int handle);
+
+unsigned int ExecuteVcCode(unsigned int code,
+               unsigned int r0, unsigned int r1, unsigned int r2, unsigned int r3, unsigned int r4, unsigned int r5);
+
+#endif
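For reference, the flag word that dmaer.c's DMA_CMA_SET_SIZE handler passes to AllocateVcMemory() is just arithmetic over this enum:

/* composition used by the DMA_CMA_SET_SIZE ioctl in dmaer.c */
unsigned int flags = MEM_FLAG_L1_NONALLOCATING  /* 0x0c = DIRECT | COHERENT */
                   | MEM_FLAG_NO_INIT           /* 0x20 */
                   | MEM_FLAG_HINT_PERMALOCK;   /* 0x40 */
/* flags == 0x6c: allocating in L2 but only coherent in L1, not cleared
   on allocation, and expected to stay locked long-term */
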
--- /dev/null
+++ b/arch/arm/mach-bcm2708/vc_support.c
@@ -0,0 +1,319 @@
+/*
+ * vc_support.c
+ *
+ *  Created on: 25 Nov 2012
+ *      Author: Simon
+ */
+
+#include <linux/module.h>
+#include <mach/vcio.h>
+
+#ifdef ECLIPSE_IGNORE
+
+#define __user
+#define __init
+#define __exit
+#define __iomem
+#define KERN_DEBUG
+#define KERN_ERR
+#define KERN_WARNING
+#define KERN_INFO
+#define _IOWR(a, b, c) b
+#define _IOW(a, b, c) b
+#define _IO(a, b) b
+
+#endif
+
+/****** VC MAILBOX FUNCTIONALITY ******/
+unsigned int AllocateVcMemory(unsigned int *pHandle, unsigned int size, unsigned int alignment, unsigned int flags)
+{
+       struct vc_msg
+       {
+               unsigned int m_msgSize;
+               unsigned int m_response;
+
+               struct vc_tag
+               {
+                       unsigned int m_tagId;
+                       unsigned int m_sendBufferSize;
+                       union {
+                               unsigned int m_sendDataSize;
+                               unsigned int m_recvDataSize;
+                       };
+
+                       struct args
+                       {
+                               union {
+                                       unsigned int m_size;
+                                       unsigned int m_handle;
+                               };
+                               unsigned int m_alignment;
+                               unsigned int m_flags;
+                       } m_args;
+               } m_tag;
+
+               unsigned int m_endTag;
+       } msg;
+       int s;
+
+       msg.m_msgSize = sizeof(msg);
+       msg.m_response = 0;
+       msg.m_endTag = 0;
+
+       //fill in the tag for the allocation command
+       msg.m_tag.m_tagId = 0x3000c;
+       msg.m_tag.m_sendBufferSize = 12;
+       msg.m_tag.m_sendDataSize = 12;
+
+       //fill in our args
+       msg.m_tag.m_args.m_size = size;
+       msg.m_tag.m_args.m_alignment = alignment;
+       msg.m_tag.m_args.m_flags = flags;
+
+       //run the command
+       s = bcm_mailbox_property(&msg, sizeof(msg));
+
+       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004)
+       {
+               *pHandle = msg.m_tag.m_args.m_handle;
+               return 0;
+       }
+       else
+       {
+               printk(KERN_ERR "failed to allocate vc memory: s=%d response=%08x recv data size=%08x\n",
+                               s, msg.m_response, msg.m_tag.m_recvDataSize);
+               return 1;
+       }
+}
+
+unsigned int ReleaseVcMemory(unsigned int handle)
+{
+       struct vc_msg
+       {
+               unsigned int m_msgSize;
+               unsigned int m_response;
+
+               struct vc_tag
+               {
+                       unsigned int m_tagId;
+                       unsigned int m_sendBufferSize;
+                       union {
+                               unsigned int m_sendDataSize;
+                               unsigned int m_recvDataSize;
+                       };
+
+                       struct args
+                       {
+                               union {
+                                       unsigned int m_handle;
+                                       unsigned int m_error;
+                               };
+                       } m_args;
+               } m_tag;
+
+               unsigned int m_endTag;
+       } msg;
+       int s;
+
+       msg.m_msgSize = sizeof(msg);
+       msg.m_response = 0;
+       msg.m_endTag = 0;
+
+       //fill in the tag for the release command
+       msg.m_tag.m_tagId = 0x3000f;
+       msg.m_tag.m_sendBufferSize = 4;
+       msg.m_tag.m_sendDataSize = 4;
+
+       //pass across the handle
+       msg.m_tag.m_args.m_handle = handle;
+
+       s = bcm_mailbox_property(&msg, sizeof(msg));
+
+       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004 && msg.m_tag.m_args.m_error == 0)
+               return 0;
+       else
+       {
+               printk(KERN_ERR "failed to release vc memory: s=%d response=%08x recv data size=%08x error=%08x\n",
+                               s, msg.m_response, msg.m_tag.m_recvDataSize, msg.m_tag.m_args.m_error);
+               return 1;
+       }
+}
+
+unsigned int LockVcMemory(unsigned int *pBusAddress, unsigned int handle)
+{
+       struct vc_msg
+       {
+               unsigned int m_msgSize;
+               unsigned int m_response;
+
+               struct vc_tag
+               {
+                       unsigned int m_tagId;
+                       unsigned int m_sendBufferSize;
+                       union {
+                               unsigned int m_sendDataSize;
+                               unsigned int m_recvDataSize;
+                       };
+
+                       struct args
+                       {
+                               union {
+                                       unsigned int m_handle;
+                                       unsigned int m_busAddress;
+                               };
+                       } m_args;
+               } m_tag;
+
+               unsigned int m_endTag;
+       } msg;
+       int s;
+
+       msg.m_msgSize = sizeof(msg);
+       msg.m_response = 0;
+       msg.m_endTag = 0;
+
+       //fill in the tag for the lock command
+       msg.m_tag.m_tagId = 0x3000d;
+       msg.m_tag.m_sendBufferSize = 4;
+       msg.m_tag.m_sendDataSize = 4;
+
+       //pass across the handle
+       msg.m_tag.m_args.m_handle = handle;
+
+       s = bcm_mailbox_property(&msg, sizeof(msg));
+
+       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004)
+       {
+               //pick out the bus address
+               *pBusAddress = msg.m_tag.m_args.m_busAddress;
+               return 0;
+       }
+       else
+       {
+               printk(KERN_ERR "failed to lock vc memory: s=%d response=%08x recv data size=%08x\n",
+                               s, msg.m_response, msg.m_tag.m_recvDataSize);
+               return 1;
+       }
+}
+
+unsigned int UnlockVcMemory(unsigned int handle)
+{
+       struct vc_msg
+       {
+               unsigned int m_msgSize;
+               unsigned int m_response;
+
+               struct vc_tag
+               {
+                       unsigned int m_tagId;
+                       unsigned int m_sendBufferSize;
+                       union {
+                               unsigned int m_sendDataSize;
+                               unsigned int m_recvDataSize;
+                       };
+
+                       struct args
+                       {
+                               union {
+                                       unsigned int m_handle;
+                                       unsigned int m_error;
+                               };
+                       } m_args;
+               } m_tag;
+
+               unsigned int m_endTag;
+       } msg;
+       int s;
+
+       msg.m_msgSize = sizeof(msg);
+       msg.m_response = 0;
+       msg.m_endTag = 0;
+
+       //fill in the tag for the unlock command
+       msg.m_tag.m_tagId = 0x3000e;
+       msg.m_tag.m_sendBufferSize = 4;
+       msg.m_tag.m_sendDataSize = 4;
+
+       //pass across the handle
+       msg.m_tag.m_args.m_handle = handle;
+
+       s = bcm_mailbox_property(&msg, sizeof(msg));
+
+       //check the error code too
+       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004 && msg.m_tag.m_args.m_error == 0)
+               return 0;
+       else
+       {
+               printk(KERN_ERR "failed to unlock vc memory: s=%d response=%08x recv data size=%08x error=%08x\n",
+                               s, msg.m_response, msg.m_tag.m_recvDataSize, msg.m_tag.m_args.m_error);
+               return 1;
+       }
+}
+
+unsigned int ExecuteVcCode(unsigned int code,
+               unsigned int r0, unsigned int r1, unsigned int r2, unsigned int r3, unsigned int r4, unsigned int r5)
+{
+       struct vc_msg
+       {
+               unsigned int m_msgSize;
+               unsigned int m_response;
+
+               struct vc_tag
+               {
+                       unsigned int m_tagId;
+                       unsigned int m_sendBufferSize;
+                       union {
+                               unsigned int m_sendDataSize;
+                               unsigned int m_recvDataSize;
+                       };
+
+                       struct args
+                       {
+                               union {
+                                       unsigned int m_pCode;
+                                       unsigned int m_return;
+                               };
+                               unsigned int m_r0;
+                               unsigned int m_r1;
+                               unsigned int m_r2;
+                               unsigned int m_r3;
+                               unsigned int m_r4;
+                               unsigned int m_r5;
+                       } m_args;
+               } m_tag;
+
+               unsigned int m_endTag;
+       } msg;
+       int s;
+
+       msg.m_msgSize = sizeof(msg);
+       msg.m_response = 0;
+       msg.m_endTag = 0;
+
+       //fill in the tag for the execute-code command
+       msg.m_tag.m_tagId = 0x30010;
+       msg.m_tag.m_sendBufferSize = 28;
+       msg.m_tag.m_sendDataSize = 28;
+
+       //pass across the code address and the six arguments
+       msg.m_tag.m_args.m_pCode = code;
+       msg.m_tag.m_args.m_r0 = r0;
+       msg.m_tag.m_args.m_r1 = r1;
+       msg.m_tag.m_args.m_r2 = r2;
+       msg.m_tag.m_args.m_r3 = r3;
+       msg.m_tag.m_args.m_r4 = r4;
+       msg.m_tag.m_args.m_r5 = r5;
+
+       s = bcm_mailbox_property(&msg, sizeof(msg));
+
+       //check the error code too
+       if (s == 0 && msg.m_response == 0x80000000 && msg.m_tag.m_recvDataSize == 0x80000004)
+               return msg.m_tag.m_args.m_return;
+       else
+       {
+               printk(KERN_ERR "failed to execute: s=%d response=%08x recv data size=%08x\n",
+                               s, msg.m_response, msg.m_tag.m_recvDataSize);
+               return 1;
+       }
+}
+
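The tag IDs used above are the firmware's property-mailbox memory interface (0x3000c allocate, 0x3000d lock, 0x3000e unlock, 0x3000f release, and 0x30010 to run code on the VPU). The four memory helpers form a strict lifecycle, which is exactly what the DMA_CMA_SET_SIZE ioctl and Release() in dmaer.c implement between them; a condensed kernel-side sketch:

/* fragment condensed from dmaer.c; 'pages' is the ioctl argument,
 * flags come from vc_support.h above */
unsigned int handle, busAddr;

if (AllocateVcMemory(&handle, pages * 4096, 4096,
                MEM_FLAG_L1_NONALLOCATING | MEM_FLAG_NO_INIT | MEM_FLAG_HINT_PERMALOCK))
        return -EINVAL;                 /* nothing to undo yet */

if (LockVcMemory(&busAddr, handle)) {   /* pins the block and yields its bus address */
        ReleaseVcMemory(handle);
        return -EINVAL;
}

/* ... point DMA control blocks at busAddr ... */

UnlockVcMemory(handle);                 /* must precede the release */
ReleaseVcMemory(handle);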