brcm2708-gpu-fw: update to a newer version
[openwrt.git] target/linux/brcm2708/patches-3.10/005-bcm2708-vchiq-driver.patch
1 --- /dev/null
2 +++ b/drivers/char/broadcom/Kconfig
3 @@ -0,0 +1,17 @@
4 +#
5 +# Broadcom char driver config
6 +#
7 +
8 +menuconfig BRCM_CHAR_DRIVERS
9 +       tristate "Broadcom Char Drivers"
10 +       depends on PROC_FS
11 +       help
12 +         Broadcom's char drivers
13 +
14 +config BCM_VC_CMA
15 +       bool "Videocore CMA"
16 +       depends on CMA
17 +       default n
18 +        help
19 +          Helper for videocore CMA access.
20 +
21 --- /dev/null
22 +++ b/drivers/char/broadcom/Makefile
23 @@ -0,0 +1,2 @@
24 +obj-$(CONFIG_BCM_VC_CMA)       += vc_cma/
25 +
26 --- /dev/null
27 +++ b/drivers/char/broadcom/vc_cma/Makefile
28 @@ -0,0 +1,15 @@
29 +EXTRA_CFLAGS  += -Wall -Wstrict-prototypes -Wno-trigraphs
30 +EXTRA_CFLAGS  += -Werror
31 +EXTRA_CFLAGS  += -I"include/linux/broadcom"
32 +EXTRA_CFLAGS  += -I"drivers/misc/vc04_services"
33 +EXTRA_CFLAGS  += -I"drivers/misc/vc04_services/interface/vchi"
34 +EXTRA_CFLAGS  += -I"drivers/misc/vc04_services/interface/vchiq_arm"
35 +
36 +EXTRA_CFLAGS  += -D__KERNEL__
37 +EXTRA_CFLAGS  += -D__linux__
38 +EXTRA_CFLAGS  += -Werror
39 +
40 +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o
41 +
42 +vc-cma-objs := vc_cma.o
43 +
44 --- /dev/null
45 +++ b/drivers/char/broadcom/vc_cma/vc_cma.c
46 @@ -0,0 +1,1145 @@
47 +/**
48 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
49 + *
50 + * Redistribution and use in source and binary forms, with or without
51 + * modification, are permitted provided that the following conditions
52 + * are met:
53 + * 1. Redistributions of source code must retain the above copyright
54 + *    notice, this list of conditions, and the following disclaimer,
55 + *    without modification.
56 + * 2. Redistributions in binary form must reproduce the above copyright
57 + *    notice, this list of conditions and the following disclaimer in the
58 + *    documentation and/or other materials provided with the distribution.
59 + * 3. The names of the above-listed copyright holders may not be used
60 + *    to endorse or promote products derived from this software without
61 + *    specific prior written permission.
62 + *
63 + * ALTERNATIVELY, this software may be distributed under the terms of the
64 + * GNU General Public License ("GPL") version 2, as published by the Free
65 + * Software Foundation.
66 + *
67 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
68 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
69 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
70 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
71 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
72 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
73 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
74 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
75 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
76 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
77 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
78 + */
79 +
80 +#include <linux/kernel.h>
81 +#include <linux/module.h>
82 +#include <linux/kthread.h>
83 +#include <linux/fs.h>
84 +#include <linux/device.h>
85 +#include <linux/cdev.h>
86 +#include <linux/mm.h>
87 +#include <linux/proc_fs.h>
88 +#include <linux/seq_file.h>
89 +#include <linux/dma-mapping.h>
90 +#include <linux/dma-contiguous.h>
91 +#include <linux/platform_device.h>
92 +#include <linux/uaccess.h>
93 +#include <asm/cacheflush.h>
94 +
95 +#include "vc_cma.h"
96 +
97 +#include "vchiq_util.h"
98 +#include "vchiq_connected.h"
99 +//#include "debug_sym.h"
100 +//#include "vc_mem.h"
101 +
102 +#define DRIVER_NAME  "vc-cma"
103 +
104 +#define LOG_DBG(fmt, ...) \
105 +       if (vc_cma_debug) \
106 +               printk(KERN_INFO fmt "\n", ##__VA_ARGS__)
107 +#define LOG_ERR(fmt, ...) \
108 +       printk(KERN_ERR fmt "\n", ##__VA_ARGS__)
109 +
110 +#define VC_CMA_FOURCC VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
111 +#define VC_CMA_VERSION 2
112 +
113 +#define VC_CMA_CHUNK_ORDER 6   /* 256K */
114 +#define VC_CMA_CHUNK_SIZE (4096 << VC_CMA_CHUNK_ORDER)
115 +#define VC_CMA_MAX_PARAMS_PER_MSG \
116 +       ((VCHIQ_MAX_MSG_SIZE - sizeof(unsigned short))/sizeof(unsigned short))
117 +#define VC_CMA_RESERVE_COUNT_MAX 16
118 +
119 +#define PAGES_PER_CHUNK (VC_CMA_CHUNK_SIZE / PAGE_SIZE)
120 +
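/* Illustration (not part of the patch): with the usual 4 KiB PAGE_SIZE,
 * VC_CMA_CHUNK_ORDER 6 gives 4096 << 6 = 262144 bytes -- the 256 KiB chunks
 * promised by the comment above -- and PAGES_PER_CHUNK = 64. A minimal
 * compile-time check of that arithmetic, assuming PAGE_SIZE == 4096: */
_Static_assert((4096 << VC_CMA_CHUNK_ORDER) == 256 * 1024, "chunk is 256 KiB");
_Static_assert((4096 << VC_CMA_CHUNK_ORDER) / 4096 == 64, "64 pages per 256 KiB chunk");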
121 +#define VCADDR_TO_PHYSADDR(vcaddr) (mm_vc_mem_phys_addr + vcaddr)
122 +
123 +#define loud_error(...) \
124 +       LOG_ERR("===== " __VA_ARGS__)
125 +
126 +enum {
127 +       VC_CMA_MSG_QUIT,
128 +       VC_CMA_MSG_OPEN,
129 +       VC_CMA_MSG_TICK,
130 +       VC_CMA_MSG_ALLOC,       /* chunk count */
131 +       VC_CMA_MSG_FREE,        /* chunk, chunk, ... */
132 +       VC_CMA_MSG_ALLOCATED,   /* chunk, chunk, ... */
133 +       VC_CMA_MSG_REQUEST_ALLOC,       /* chunk count */
134 +       VC_CMA_MSG_REQUEST_FREE,        /* chunk count */
135 +       VC_CMA_MSG_RESERVE,     /* bytes lo, bytes hi */
136 +       VC_CMA_MSG_UPDATE_RESERVE,
137 +       VC_CMA_MSG_MAX
138 +};
139 +
140 +struct cma_msg {
141 +       unsigned short type;
142 +       unsigned short params[VC_CMA_MAX_PARAMS_PER_MSG];
143 +};
144 +
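/* Illustration (not part of the patch): every value in this protocol travels
 * as a 16-bit param, so byte counts are split across two params exactly as
 * vc_cma_set_reserve() does further down for VC_CMA_MSG_RESERVE: */
static void example_pack_reserve(unsigned int reserve_total,
                                 unsigned short out[3])
{
        out[0] = VC_CMA_MSG_RESERVE;
        out[1] = (unsigned short)(reserve_total & 0xffff);      /* bytes lo */
        out[2] = (unsigned short)(reserve_total >> 16);         /* bytes hi */
}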
145 +struct vc_cma_reserve_user {
146 +       unsigned int pid;
147 +       unsigned int reserve;
148 +};
149 +
150 +/* Device (/dev) related variables */
151 +static dev_t vc_cma_devnum;
152 +static struct class *vc_cma_class;
153 +static struct cdev vc_cma_cdev;
154 +static int vc_cma_inited;
155 +static int vc_cma_debug;
156 +
157 +/* Proc entry */
158 +static struct proc_dir_entry *vc_cma_proc_entry;
159 +
160 +phys_addr_t vc_cma_base;
161 +struct page *vc_cma_base_page;
162 +unsigned int vc_cma_size;
163 +EXPORT_SYMBOL(vc_cma_size);
164 +unsigned int vc_cma_initial;
165 +unsigned int vc_cma_chunks;
166 +unsigned int vc_cma_chunks_used;
167 +unsigned int vc_cma_chunks_reserved;
168 +
169 +static int in_loud_error;
170 +
171 +unsigned int vc_cma_reserve_total;
172 +unsigned int vc_cma_reserve_count;
173 +struct vc_cma_reserve_user vc_cma_reserve_users[VC_CMA_RESERVE_COUNT_MAX];
174 +static DEFINE_SEMAPHORE(vc_cma_reserve_mutex);
175 +static DEFINE_SEMAPHORE(vc_cma_worker_queue_push_mutex);
176 +
177 +static u64 vc_cma_dma_mask = DMA_BIT_MASK(32);
178 +static struct platform_device vc_cma_device = {
179 +       .name = "vc-cma",
180 +       .id = 0,
181 +       .dev = {
182 +               .dma_mask = &vc_cma_dma_mask,
183 +               .coherent_dma_mask = DMA_BIT_MASK(32),
184 +               },
185 +};
186 +
187 +static VCHIQ_INSTANCE_T cma_instance;
188 +static VCHIQ_SERVICE_HANDLE_T cma_service;
189 +static VCHIU_QUEUE_T cma_msg_queue;
190 +static struct task_struct *cma_worker;
191 +
192 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid);
193 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply);
194 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
195 +                                          VCHIQ_HEADER_T * header,
196 +                                          VCHIQ_SERVICE_HANDLE_T service,
197 +                                          void *bulk_userdata);
198 +static void send_vc_msg(unsigned short type,
199 +                       unsigned short param1, unsigned short param2);
200 +static bool send_worker_msg(VCHIQ_HEADER_T * msg);
201 +
202 +static int early_vc_cma_mem(char *p)
203 +{
204 +       unsigned int new_size;
205 +       printk(KERN_NOTICE "early_vc_cma_mem(%s)", p);
206 +       vc_cma_size = memparse(p, &p);
207 +       vc_cma_initial = vc_cma_size;
208 +       if (*p == '/')
209 +               vc_cma_size = memparse(p + 1, &p);
210 +       if (*p == '@')
211 +               vc_cma_base = memparse(p + 1, &p);
212 +
213 +       new_size = (vc_cma_size - ((-vc_cma_base) & (VC_CMA_CHUNK_SIZE - 1)))
214 +           & ~(VC_CMA_CHUNK_SIZE - 1);
215 +       if (new_size > vc_cma_size)
216 +               vc_cma_size = 0;
217 +       vc_cma_initial = (vc_cma_initial + VC_CMA_CHUNK_SIZE - 1)
218 +           & ~(VC_CMA_CHUNK_SIZE - 1);
219 +       if (vc_cma_initial > vc_cma_size)
220 +               vc_cma_initial = vc_cma_size;
221 +       vc_cma_base = (vc_cma_base + VC_CMA_CHUNK_SIZE - 1)
222 +           & ~(VC_CMA_CHUNK_SIZE - 1);
223 +
224 +       printk(KERN_NOTICE " -> initial %x, size %x, base %x", vc_cma_initial,
225 +              vc_cma_size, (unsigned int)vc_cma_base);
226 +
227 +       return 0;
228 +}
229 +
230 +early_param("vc-cma-mem", early_vc_cma_mem);
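/* Illustration (not part of the patch): the parser above gives the
 * "vc-cma-mem=" kernel parameter the shape <initial>[/<size>][@<base>],
 * with memparse() accepting the usual K/M/G suffixes; base and initial are
 * then rounded up to whole 256 KiB chunks and the size is sanity-checked.
 * A hypothetical command line:
 *
 *     vc-cma-mem=16M/256M@256M
 *
 * parses to vc_cma_initial = 16 MiB, vc_cma_size = 256 MiB and
 * vc_cma_base = 0x10000000; vc_cma_reserve() would then see
 * 256 MiB / 256 KiB = 1024 chunks, of which vc_cma_init() claims
 * 16 MiB / 256 KiB = 64 up front.
 */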
231 +
232 +void vc_cma_early_init(void)
233 +{
234 +       LOG_DBG("vc_cma_early_init - vc_cma_chunks = %d", vc_cma_chunks);
235 +       if (vc_cma_size) {
236 +               int rc = platform_device_register(&vc_cma_device);
237 +               LOG_DBG("platform_device_register -> %d", rc);
238 +       }
239 +}
240 +
241 +void vc_cma_reserve(void)
242 +{
243 +       /* if vc_cma_size is set, then declare vc CMA area of the same
244 +        * size from the end of memory
245 +        */
246 +       if (vc_cma_size) {
247 +               if (dma_declare_contiguous(NULL /*&vc_cma_device.dev*/, vc_cma_size,
248 +                                          vc_cma_base, 0) == 0) {
249 +               } else {
250 +                       LOG_ERR("vc_cma: dma_declare_contiguous(%x,%x) failed",
251 +                               vc_cma_size, (unsigned int)vc_cma_base);
252 +                       vc_cma_size = 0;
253 +               }
254 +       }
255 +       vc_cma_chunks = vc_cma_size / VC_CMA_CHUNK_SIZE;
256 +}
257 +
258 +/****************************************************************************
259 +*
260 +*   vc_cma_open
261 +*
262 +***************************************************************************/
263 +
264 +static int vc_cma_open(struct inode *inode, struct file *file)
265 +{
266 +       (void)inode;
267 +       (void)file;
268 +
269 +       return 0;
270 +}
271 +
272 +/****************************************************************************
273 +*
274 +*   vc_cma_release
275 +*
276 +***************************************************************************/
277 +
278 +static int vc_cma_release(struct inode *inode, struct file *file)
279 +{
280 +       (void)inode;
281 +       (void)file;
282 +
283 +       vc_cma_set_reserve(0, current->tgid);
284 +
285 +       return 0;
286 +}
287 +
288 +/****************************************************************************
289 +*
290 +*   vc_cma_ioctl
291 +*
292 +***************************************************************************/
293 +
294 +static long vc_cma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
295 +{
296 +       int rc = 0;
297 +
298 +       (void)cmd;
299 +       (void)arg;
300 +
301 +       switch (cmd) {
302 +       case VC_CMA_IOC_RESERVE:
303 +               rc = vc_cma_set_reserve((unsigned int)arg, current->tgid);
304 +               if (rc >= 0)
305 +                       rc = 0;
306 +               break;
307 +       default:
308 +               LOG_ERR("vc-cma: Unknown ioctl %x", cmd);
309 +               return -ENOTTY;
310 +       }
311 +
312 +       return rc;
313 +}
314 +
315 +/****************************************************************************
316 +*
317 +*   File Operations for the driver.
318 +*
319 +***************************************************************************/
320 +
321 +static const struct file_operations vc_cma_fops = {
322 +       .owner = THIS_MODULE,
323 +       .open = vc_cma_open,
324 +       .release = vc_cma_release,
325 +       .unlocked_ioctl = vc_cma_ioctl,
326 +};
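/* Illustration (not part of the patch): the only ioctl handled above is
 * VC_CMA_IOC_RESERVE, whose byte count travels in 'arg' itself and is
 * accounted per tgid; vc_cma_release() drops the reservation when the file
 * is closed. A hedged userspace sketch -- VC_CMA_IOC_RESERVE comes from
 * vc_cma.h, which this hunk does not show, and /dev/vc-cma assumes udev has
 * created the node for the "vc-cma" device. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include "vc_cma.h"

static int example_reserve(unsigned int bytes)
{
        int fd = open("/dev/vc-cma", O_RDWR);

        if (fd < 0)
                return -1;
        /* keep fd open for as long as the reservation is needed */
        return ioctl(fd, VC_CMA_IOC_RESERVE, bytes);
}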
327 +
328 +/****************************************************************************
329 +*
330 +*   vc_cma_proc_open
331 +*
332 +***************************************************************************/
333 +
334 +static int vc_cma_show_info(struct seq_file *m, void *v)
335 +{
336 +       int i;
337 +
338 +       seq_printf(m, "Videocore CMA:\n");
339 +       seq_printf(m, "   Base       : %08x\n", (unsigned int)vc_cma_base);
340 +       seq_printf(m, "   Length     : %08x\n", vc_cma_size);
341 +       seq_printf(m, "   Initial    : %08x\n", vc_cma_initial);
342 +       seq_printf(m, "   Chunk size : %08x\n", VC_CMA_CHUNK_SIZE);
343 +       seq_printf(m, "   Chunks     : %4d (%d bytes)\n",
344 +                  (int)vc_cma_chunks,
345 +                  (int)(vc_cma_chunks * VC_CMA_CHUNK_SIZE));
346 +       seq_printf(m, "   Used       : %4d (%d bytes)\n",
347 +                  (int)vc_cma_chunks_used,
348 +                  (int)(vc_cma_chunks_used * VC_CMA_CHUNK_SIZE));
349 +       seq_printf(m, "   Reserved   : %4d (%d bytes)\n",
350 +                  (unsigned int)vc_cma_chunks_reserved,
351 +                  (int)(vc_cma_chunks_reserved * VC_CMA_CHUNK_SIZE));
352 +
353 +       for (i = 0; i < vc_cma_reserve_count; i++) {
354 +               struct vc_cma_reserve_user *user = &vc_cma_reserve_users[i];
355 +               seq_printf(m, "     PID %5d: %d bytes\n", user->pid,
356 +                          user->reserve);
357 +       }
358 +
359 +       seq_printf(m, "\n");
360 +
361 +       return 0;
362 +}
363 +
364 +static int vc_cma_proc_open(struct inode *inode, struct file *file)
365 +{
366 +       return single_open(file, vc_cma_show_info, NULL);
367 +}
368 +
369 +/****************************************************************************
370 +*
371 +*   vc_cma_proc_write
372 +*
373 +***************************************************************************/
374 +
375 +static int vc_cma_proc_write(struct file *file,
376 +                            const char __user *buffer,
377 +                            size_t size, loff_t *ppos)
378 +{
379 +       int rc = -EFAULT;
380 +       char input_str[20];
381 +
382 +       memset(input_str, 0, sizeof(input_str));
383 +
384 +       if (size > sizeof(input_str)) {
385 +               LOG_ERR("%s: input string length too long", __func__);
386 +               goto out;
387 +       }
388 +
389 +       if (copy_from_user(input_str, buffer, size - 1)) {
390 +               LOG_ERR("%s: failed to get input string", __func__);
391 +               goto out;
392 +       }
393 +#define ALLOC_STR "alloc"
394 +#define FREE_STR "free"
395 +#define DEBUG_STR "debug"
396 +#define RESERVE_STR "reserve"
397 +       if (strncmp(input_str, ALLOC_STR, strlen(ALLOC_STR)) == 0) {
398 +               int size;
399 +               char *p = input_str + strlen(ALLOC_STR);
400 +
401 +               while (*p == ' ')
402 +                       p++;
403 +               size = memparse(p, NULL);
404 +               LOG_ERR("/proc/vc-cma: alloc %d", size);
405 +               if (size)
406 +                       send_vc_msg(VC_CMA_MSG_REQUEST_FREE,
407 +                                   size / VC_CMA_CHUNK_SIZE, 0);
408 +               else
409 +                       LOG_ERR("invalid size '%s'", p);
410 +               rc = size;
411 +       } else if (strncmp(input_str, FREE_STR, strlen(FREE_STR)) == 0) {
412 +               int size;
413 +               char *p = input_str + strlen(FREE_STR);
414 +
415 +               while (*p == ' ')
416 +                       p++;
417 +               size = memparse(p, NULL);
418 +               LOG_ERR("/proc/vc-cma: free %d", size);
419 +               if (size)
420 +                       send_vc_msg(VC_CMA_MSG_REQUEST_ALLOC,
421 +                                   size / VC_CMA_CHUNK_SIZE, 0);
422 +               else
423 +                       LOG_ERR("invalid size '%s'", p);
424 +               rc = size;
425 +       } else if (strncmp(input_str, DEBUG_STR, strlen(DEBUG_STR)) == 0) {
426 +               char *p = input_str + strlen(DEBUG_STR);
427 +               while (*p == ' ')
428 +                       p++;
429 +               if ((strcmp(p, "on") == 0) || (strcmp(p, "1") == 0))
430 +                       vc_cma_debug = 1;
431 +               else if ((strcmp(p, "off") == 0) || (strcmp(p, "0") == 0))
432 +                       vc_cma_debug = 0;
433 +               LOG_ERR("/proc/vc-cma: debug %s", vc_cma_debug ? "on" : "off");
434 +               rc = size;
435 +       } else if (strncmp(input_str, RESERVE_STR, strlen(RESERVE_STR)) == 0) {
436 +               int size;
437 +               int reserved;
438 +               char *p = input_str + strlen(RESERVE_STR);
439 +               while (*p == ' ')
440 +                       p++;
441 +               size = memparse(p, NULL);
442 +
443 +               reserved = vc_cma_set_reserve(size, current->tgid);
444 +               rc = (reserved >= 0) ? size : reserved;
445 +       }
446 +
447 +out:
448 +       return rc;
449 +}
450 +
451 +/****************************************************************************
452 +*
453 +*   File Operations for /proc interface.
454 +*
455 +***************************************************************************/
456 +
457 +static const struct file_operations vc_cma_proc_fops = {
458 +       .open = vc_cma_proc_open,
459 +       .read = seq_read,
460 +       .write = vc_cma_proc_write,
461 +       .llseek = seq_lseek,
462 +       .release = single_release
463 +};
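/* Illustration (not part of the patch): vc_cma_proc_write() above accepts
 * "alloc <size>", "free <size>", "reserve <size>" (memparse sizes, so "8M"
 * works) and "debug on|off". Note that it drops the final byte of the write,
 * so a trailing newline is expected. A small hypothetical userspace snippet
 * that turns the driver's debug logging on: */
#include <stdio.h>

static int example_vc_cma_debug_on(void)
{
        FILE *f = fopen("/proc/vc-cma", "w");

        if (!f)
                return -1;
        fputs("debug on\n", f);         /* parsed by vc_cma_proc_write() */
        return fclose(f);
}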
464 +
465 +static int vc_cma_set_reserve(unsigned int reserve, unsigned int pid)
466 +{
467 +       struct vc_cma_reserve_user *user = NULL;
468 +       int delta = 0;
469 +       int i;
470 +
471 +       if (down_interruptible(&vc_cma_reserve_mutex))
472 +               return -ERESTARTSYS;
473 +
474 +       for (i = 0; i < vc_cma_reserve_count; i++) {
475 +               if (pid == vc_cma_reserve_users[i].pid) {
476 +                       user = &vc_cma_reserve_users[i];
477 +                       delta = reserve - user->reserve;
478 +                       if (reserve)
479 +                               user->reserve = reserve;
480 +                       else {
481 +                               /* Remove this entry by copying downwards */
482 +                               while ((i + 1) < vc_cma_reserve_count) {
483 +                                       user[0].pid = user[1].pid;
484 +                                       user[0].reserve = user[1].reserve;
485 +                                       user++;
486 +                                       i++;
487 +                               }
488 +                               vc_cma_reserve_count--;
489 +                               user = NULL;
490 +                       }
491 +                       break;
492 +               }
493 +       }
494 +
495 +       if (reserve && !user) {
496 +               if (vc_cma_reserve_count == VC_CMA_RESERVE_COUNT_MAX) {
497 +                       LOG_ERR("vc-cma: Too many reservations - "
498 +                               "increase CMA_RESERVE_COUNT_MAX");
499 +                       up(&vc_cma_reserve_mutex);
500 +                       return -EBUSY;
501 +               }
502 +               user = &vc_cma_reserve_users[vc_cma_reserve_count];
503 +               user->pid = pid;
504 +               user->reserve = reserve;
505 +               delta = reserve;
506 +               vc_cma_reserve_count++;
507 +       }
508 +
509 +       vc_cma_reserve_total += delta;
510 +
511 +       send_vc_msg(VC_CMA_MSG_RESERVE,
512 +                   vc_cma_reserve_total & 0xffff, vc_cma_reserve_total >> 16);
513 +
514 +       send_worker_msg((VCHIQ_HEADER_T *) VC_CMA_MSG_UPDATE_RESERVE);
515 +
516 +       LOG_DBG("/proc/vc-cma: reserve %d (PID %d) - total %u",
517 +               reserve, pid, vc_cma_reserve_total);
518 +
519 +       up(&vc_cma_reserve_mutex);
520 +
521 +       return vc_cma_reserve_total;
522 +}
523 +
524 +static VCHIQ_STATUS_T cma_service_callback(VCHIQ_REASON_T reason,
525 +                                          VCHIQ_HEADER_T * header,
526 +                                          VCHIQ_SERVICE_HANDLE_T service,
527 +                                          void *bulk_userdata)
528 +{
529 +       switch (reason) {
530 +       case VCHIQ_MESSAGE_AVAILABLE:
531 +               if (!send_worker_msg(header))
532 +                       return VCHIQ_RETRY;
533 +               break;
534 +       case VCHIQ_SERVICE_CLOSED:
535 +               LOG_DBG("CMA service closed");
536 +               break;
537 +       default:
538 +               LOG_ERR("Unexpected CMA callback reason %d", reason);
539 +               break;
540 +       }
541 +       return VCHIQ_SUCCESS;
542 +}
543 +
544 +static void send_vc_msg(unsigned short type,
545 +                       unsigned short param1, unsigned short param2)
546 +{
547 +       unsigned short msg[] = { type, param1, param2 };
548 +       VCHIQ_ELEMENT_T elem = { &msg, sizeof(msg) };
549 +       VCHIQ_STATUS_T ret;
550 +       vchiq_use_service(cma_service);
551 +       ret = vchiq_queue_message(cma_service, &elem, 1);
552 +       vchiq_release_service(cma_service);
553 +       if (ret != VCHIQ_SUCCESS)
554 +               LOG_ERR("vchiq_queue_message returned %x", ret);
555 +}
556 +
557 +static bool send_worker_msg(VCHIQ_HEADER_T * msg)
558 +{
559 +       if (down_interruptible(&vc_cma_worker_queue_push_mutex))
560 +               return false;
561 +       vchiu_queue_push(&cma_msg_queue, msg);
562 +       up(&vc_cma_worker_queue_push_mutex);
563 +       return true;
564 +}
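/* Illustration (not part of the patch): the worker queue carries two kinds of
 * entry -- real VCHIQ_HEADER_T pointers pushed from the service callback, and
 * bare command values such as VC_CMA_MSG_UPDATE_RESERVE cast to a pointer.
 * cma_worker_proc() below tells them apart by range, since no valid header
 * pointer can be numerically below VC_CMA_MSG_MAX:
 *
 *     if ((unsigned int)msg >= VC_CMA_MSG_MAX)
 *             ... real message: copy the payload, then release it ...
 *     else
 *             ... in-band command, nothing to release ...
 */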
565 +
566 +static int vc_cma_alloc_chunks(int num_chunks, struct cma_msg *reply)
567 +{
568 +       int i;
569 +       for (i = 0; i < num_chunks; i++) {
570 +               struct page *chunk;
571 +               unsigned int chunk_num;
572 +               uint8_t *chunk_addr;
573 +               size_t chunk_size = PAGES_PER_CHUNK << PAGE_SHIFT;
574 +
575 +               chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
576 +                                                 PAGES_PER_CHUNK,
577 +                                                 VC_CMA_CHUNK_ORDER);
578 +               if (!chunk)
579 +                       break;
580 +
581 +               chunk_addr = page_address(chunk);
582 +               dmac_flush_range(chunk_addr, chunk_addr + chunk_size);
583 +               outer_inv_range(__pa(chunk_addr), __pa(chunk_addr) +
584 +                       chunk_size);
585 +
586 +               chunk_num =
587 +                   (page_to_phys(chunk) - vc_cma_base) / VC_CMA_CHUNK_SIZE;
588 +               BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
589 +                       VC_CMA_CHUNK_SIZE) != 0);
590 +               if (chunk_num >= vc_cma_chunks) {
591 +                       LOG_ERR("%s: ===============================",
592 +                               __func__);
593 +                       LOG_ERR("%s: chunk phys %x, vc_cma %x-%x - "
594 +                               "bad SPARSEMEM configuration?",
595 +                               __func__, (unsigned int)page_to_phys(chunk),
596 +                               vc_cma_base, vc_cma_base + vc_cma_size - 1);
597 +                       LOG_ERR("%s: dev->cma_area = %p\n", __func__,
598 +                               vc_cma_device.dev.cma_area);
599 +                       LOG_ERR("%s: ===============================",
600 +                               __func__);
601 +                       break;
602 +               }
603 +               reply->params[i] = chunk_num;
604 +               vc_cma_chunks_used++;
605 +       }
606 +
607 +       if (i < num_chunks) {
608 +               LOG_ERR("%s: dma_alloc_from_contiguous failed "
609 +                       "for %x bytes (alloc %d of %d, %d free)",
610 +                       __func__, VC_CMA_CHUNK_SIZE, i,
611 +                       num_chunks, vc_cma_chunks - vc_cma_chunks_used);
612 +               num_chunks = i;
613 +       }
614 +
615 +       LOG_DBG("CMA allocated %d chunks -> %d used",
616 +               num_chunks, vc_cma_chunks_used);
617 +       reply->type = VC_CMA_MSG_ALLOCATED;
618 +
619 +       {
620 +               VCHIQ_ELEMENT_T elem = {
621 +                       reply,
622 +                       offsetof(struct cma_msg, params[0]) +
623 +                           num_chunks * sizeof(reply->params[0])
624 +               };
625 +               VCHIQ_STATUS_T ret;
626 +               vchiq_use_service(cma_service);
627 +               ret = vchiq_queue_message(cma_service, &elem, 1);
628 +               vchiq_release_service(cma_service);
629 +               if (ret != VCHIQ_SUCCESS)
630 +                       LOG_ERR("vchiq_queue_message return " "%x", ret);
631 +       }
632 +
633 +       return num_chunks;
634 +}
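/* Illustration (not part of the patch), with hypothetical addresses: the chunk
 * numbers reported to the VPU are plain offsets into the CMA region, which is
 * why the BUG_ON above insists every allocation sits on a chunk boundary.
 * With vc_cma_base = 0x1f000000 and a chunk allocated at phys 0x1f0c0000:
 *
 *     chunk_num = (0x1f0c0000 - 0x1f000000) / VC_CMA_CHUNK_SIZE
 *               = 0x000c0000 / 0x40000 = 3
 *
 * VC_CMA_MSG_FREE reverses this in the worker below:
 * page = vc_cma_base_page + 3 * PAGES_PER_CHUNK.
 */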
635 +
636 +static int cma_worker_proc(void *param)
637 +{
638 +       static struct cma_msg reply;
639 +       (void)param;
640 +
641 +       while (1) {
642 +               VCHIQ_HEADER_T *msg;
643 +               static struct cma_msg msg_copy;
644 +               struct cma_msg *cma_msg = &msg_copy;
645 +               int type, msg_size;
646 +
647 +               msg = vchiu_queue_pop(&cma_msg_queue);
648 +               if ((unsigned int)msg >= VC_CMA_MSG_MAX) {
649 +                       msg_size = msg->size;
650 +                       memcpy(&msg_copy, msg->data, msg_size);
651 +                       type = cma_msg->type;
652 +                       vchiq_release_message(cma_service, msg);
653 +               } else {
654 +                       msg_size = 0;
655 +                       type = (int)msg;
656 +                       if (type == VC_CMA_MSG_QUIT)
657 +                               break;
658 +                       else if (type == VC_CMA_MSG_UPDATE_RESERVE) {
659 +                               msg = NULL;
660 +                               cma_msg = NULL;
661 +                       } else {
662 +                               BUG();
663 +                               continue;
664 +                       }
665 +               }
666 +
667 +               switch (type) {
668 +               case VC_CMA_MSG_ALLOC:{
669 +                               int num_chunks, free_chunks;
670 +                               num_chunks = cma_msg->params[0];
671 +                               free_chunks =
672 +                                   vc_cma_chunks - vc_cma_chunks_used;
673 +                               LOG_DBG("CMA_MSG_ALLOC(%d chunks)", num_chunks);
674 +                               if (num_chunks > VC_CMA_MAX_PARAMS_PER_MSG) {
675 +                                       LOG_ERR
676 +                                           ("CMA_MSG_ALLOC - chunk count (%d) "
677 +                                            "exceeds VC_CMA_MAX_PARAMS_PER_MSG (%d)",
678 +                                            num_chunks,
679 +                                            VC_CMA_MAX_PARAMS_PER_MSG);
680 +                                       num_chunks = VC_CMA_MAX_PARAMS_PER_MSG;
681 +                               }
682 +
683 +                               if (num_chunks > free_chunks) {
684 +                                       LOG_ERR
685 +                                           ("CMA_MSG_ALLOC - chunk count (%d) "
686 +                                            "exceeds free chunks (%d)",
687 +                                            num_chunks, free_chunks);
688 +                                       num_chunks = free_chunks;
689 +                               }
690 +
691 +                               vc_cma_alloc_chunks(num_chunks, &reply);
692 +                       }
693 +                       break;
694 +
695 +               case VC_CMA_MSG_FREE:{
696 +                               int chunk_count =
697 +                                   (msg_size -
698 +                                    offsetof(struct cma_msg,
699 +                                             params)) /
700 +                                   sizeof(cma_msg->params[0]);
701 +                               int i;
702 +                               BUG_ON(chunk_count <= 0);
703 +
704 +                               LOG_DBG("CMA_MSG_FREE(%d chunks - %x, ...)",
705 +                                       chunk_count, cma_msg->params[0]);
706 +                               for (i = 0; i < chunk_count; i++) {
707 +                                       int chunk_num = cma_msg->params[i];
708 +                                       struct page *page = vc_cma_base_page +
709 +                                           chunk_num * PAGES_PER_CHUNK;
710 +                                       if (chunk_num >= vc_cma_chunks) {
711 +                                               LOG_ERR
712 +                                                   ("CMA_MSG_FREE - chunk %d of %d"
713 +                                                    " (value %x) exceeds maximum "
714 +                                                    "(%x)", i, chunk_count,
715 +                                                    chunk_num,
716 +                                                    vc_cma_chunks - 1);
717 +                                               break;
718 +                                       }
719 +
720 +                                       if (!dma_release_from_contiguous
721 +                                           (NULL /*&vc_cma_device.dev*/, page,
722 +                                            PAGES_PER_CHUNK)) {
723 +                                               LOG_ERR
724 +                                                   ("CMA_MSG_FREE - failed to "
725 +                                                    "release chunk %d (phys %x, "
726 +                                                    "page %x)", chunk_num,
727 +                                                    page_to_phys(page),
728 +                                                    (unsigned int)page);
729 +                                       }
730 +                                       vc_cma_chunks_used--;
731 +                               }
732 +                               LOG_DBG("CMA released %d chunks -> %d used",
733 +                                       i, vc_cma_chunks_used);
734 +                       }
735 +                       break;
736 +
737 +               case VC_CMA_MSG_UPDATE_RESERVE:{
738 +                               int chunks_needed =
739 +                                   ((vc_cma_reserve_total + VC_CMA_CHUNK_SIZE -
740 +                                     1)
741 +                                    / VC_CMA_CHUNK_SIZE) -
742 +                                   vc_cma_chunks_reserved;
743 +
744 +                               LOG_DBG
745 +                                   ("CMA_MSG_UPDATE_RESERVE(%d chunks needed)",
746 +                                    chunks_needed);
747 +
748 +                               /* Cap the reservations to what is available */
749 +                               if (chunks_needed > 0) {
750 +                                       if (chunks_needed >
751 +                                           (vc_cma_chunks -
752 +                                            vc_cma_chunks_used))
753 +                                               chunks_needed =
754 +                                                   (vc_cma_chunks -
755 +                                                    vc_cma_chunks_used);
756 +
757 +                                       chunks_needed =
758 +                                           vc_cma_alloc_chunks(chunks_needed,
759 +                                                               &reply);
760 +                               }
761 +
762 +                               LOG_DBG
763 +                                   ("CMA_MSG_UPDATE_RESERVE(%d chunks allocated)",
764 +                                    chunks_needed);
765 +                               vc_cma_chunks_reserved += chunks_needed;
766 +                       }
767 +                       break;
768 +
769 +               default:
770 +                       LOG_ERR("unexpected msg type %d", type);
771 +                       break;
772 +               }
773 +       }
774 +
775 +       LOG_DBG("quitting...");
776 +       return 0;
777 +}
778 +
779 +/****************************************************************************
780 +*
781 +*   vc_cma_connected_init
782 +*
783 +*   This function is called once the videocore has been connected.
784 +*
785 +***************************************************************************/
786 +
787 +static void vc_cma_connected_init(void)
788 +{
789 +       VCHIQ_SERVICE_PARAMS_T service_params;
790 +
791 +       LOG_DBG("vc_cma_connected_init");
792 +
793 +       if (!vchiu_queue_init(&cma_msg_queue, 16)) {
794 +               LOG_ERR("could not create CMA msg queue");
795 +               goto fail_queue;
796 +       }
797 +
798 +       if (vchiq_initialise(&cma_instance) != VCHIQ_SUCCESS)
799 +               goto fail_vchiq_init;
800 +
801 +       vchiq_connect(cma_instance);
802 +
803 +       service_params.fourcc = VC_CMA_FOURCC;
804 +       service_params.callback = cma_service_callback;
805 +       service_params.userdata = NULL;
806 +       service_params.version = VC_CMA_VERSION;
807 +       service_params.version_min = VC_CMA_VERSION;
808 +
809 +       if (vchiq_open_service(cma_instance, &service_params,
810 +                              &cma_service) != VCHIQ_SUCCESS) {
811 +               LOG_ERR("failed to open service - already in use?");
812 +               goto fail_vchiq_open;
813 +       }
814 +
815 +       vchiq_release_service(cma_service);
816 +
817 +       cma_worker = kthread_create(cma_worker_proc, NULL, "cma_worker");
818 +       if (!cma_worker) {
819 +               LOG_ERR("could not create CMA worker thread");
820 +               goto fail_worker;
821 +       }
822 +       set_user_nice(cma_worker, -20);
823 +       wake_up_process(cma_worker);
824 +
825 +       return;
826 +
827 +fail_worker:
828 +       vchiq_close_service(cma_service);
829 +fail_vchiq_open:
830 +       vchiq_shutdown(cma_instance);
831 +fail_vchiq_init:
832 +       vchiu_queue_delete(&cma_msg_queue);
833 +fail_queue:
834 +       return;
835 +}
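/* Illustration (not part of the patch): the service opened above is addressed
 * by the four-character code built from VCHIQ_MAKE_FOURCC('C', 'M', 'A', ' ')
 * near the top of this file, plus the version pair. The real macro lives in
 * the vchiq headers, outside this hunk; assuming the conventional packing, an
 * equivalent helper would be: */
static inline unsigned int example_make_fourcc(char a, char b, char c, char d)
{
        return ((unsigned int)a << 24) | ((unsigned int)b << 16) |
               ((unsigned int)c << 8)  |  (unsigned int)d;
}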
836 +
837 +void
838 +loud_error_header(void)
839 +{
840 +       if (in_loud_error)
841 +               return;
842 +
843 +       LOG_ERR("============================================================"
844 +               "================");
845 +       LOG_ERR("============================================================"
846 +               "================");
847 +       LOG_ERR("=====");
848 +
849 +       in_loud_error = 1;
850 +}
851 +
852 +void
853 +loud_error_footer(void)
854 +{
855 +       if (!in_loud_error)
856 +               return;
857 +
858 +       LOG_ERR("=====");
859 +       LOG_ERR("============================================================"
860 +               "================");
861 +       LOG_ERR("============================================================"
862 +               "================");
863 +
864 +       in_loud_error = 0;
865 +}
866 +
867 +#if 1
868 +static int check_cma_config(void) { return 1; }
869 +#else
870 +static int
871 +read_vc_debug_var(VC_MEM_ACCESS_HANDLE_T handle,
872 +       const char *symbol,
873 +       void *buf, size_t bufsize)
874 +{
875 +       VC_MEM_ADDR_T vcMemAddr;
876 +       size_t vcMemSize;
877 +       uint8_t *mapAddr;
878 +       off_t  vcMapAddr;
879 +
880 +       if (!LookupVideoCoreSymbol(handle, symbol,
881 +               &vcMemAddr,
882 +               &vcMemSize)) {
883 +               loud_error_header();
884 +               loud_error(
885 +                       "failed to find VC symbol \"%s\".",
886 +                       symbol);
887 +               loud_error_footer();
888 +               return 0;
889 +       }
890 +
891 +       if (vcMemSize != bufsize) {
892 +               loud_error_header();
893 +               loud_error(
894 +                       "VC symbol \"%s\" is the wrong size.",
895 +                       symbol);
896 +               loud_error_footer();
897 +               return 0;
898 +       }
899 +
900 +       vcMapAddr = (off_t)vcMemAddr & VC_MEM_TO_ARM_ADDR_MASK;
901 +       vcMapAddr += mm_vc_mem_phys_addr;
902 +       mapAddr = ioremap_nocache(vcMapAddr, vcMemSize);
903 +       if (mapAddr == 0) {
904 +               loud_error_header();
905 +               loud_error(
906 +                       "failed to ioremap \"%s\" @ 0x%x "
907 +                       "(phys: 0x%x, size: %u).",
908 +                       symbol,
909 +                       (unsigned int)vcMapAddr,
910 +                       (unsigned int)vcMemAddr,
911 +                       (unsigned int)vcMemSize);
912 +               loud_error_footer();
913 +               return 0;
914 +       }
915 +
916 +       memcpy(buf, mapAddr, bufsize);
917 +       iounmap(mapAddr);
918 +
919 +       return 1;
920 +}
921 +
922 +
923 +static int
924 +check_cma_config(void)
925 +{
926 +       VC_MEM_ACCESS_HANDLE_T mem_hndl;
927 +       VC_MEM_ADDR_T mempool_start;
928 +       VC_MEM_ADDR_T mempool_end;
929 +       VC_MEM_ADDR_T mempool_offline_start;
930 +       VC_MEM_ADDR_T mempool_offline_end;
931 +       VC_MEM_ADDR_T cam_alloc_base;
932 +       VC_MEM_ADDR_T cam_alloc_size;
933 +       VC_MEM_ADDR_T cam_alloc_end;
934 +       int success = 0;
935 +
936 +       if (OpenVideoCoreMemory(&mem_hndl) != 0)
937 +               goto out;
938 +
939 +       /* Read the relevant VideoCore variables */
940 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_START",
941 +               &mempool_start,
942 +               sizeof(mempool_start)))
943 +               goto close;
944 +
945 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_END",
946 +               &mempool_end,
947 +               sizeof(mempool_end)))
948 +               goto close;
949 +
950 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_START",
951 +               &mempool_offline_start,
952 +               sizeof(mempool_offline_start)))
953 +               goto close;
954 +
955 +       if (!read_vc_debug_var(mem_hndl, "__MEMPOOL_OFFLINE_END",
956 +               &mempool_offline_end,
957 +               sizeof(mempool_offline_end)))
958 +               goto close;
959 +
960 +       if (!read_vc_debug_var(mem_hndl, "cam_alloc_base",
961 +               &cam_alloc_base,
962 +               sizeof(cam_alloc_base)))
963 +               goto close;
964 +
965 +       if (!read_vc_debug_var(mem_hndl, "cam_alloc_size",
966 +               &cam_alloc_size,
967 +               sizeof(cam_alloc_size)))
968 +               goto close;
969 +
970 +       cam_alloc_end = cam_alloc_base + cam_alloc_size;
971 +
972 +       success = 1;
973 +
974 +       /* Now the sanity checks */
975 +       if (!mempool_offline_start)
976 +               mempool_offline_start = mempool_start;
977 +       if (!mempool_offline_end)
978 +               mempool_offline_end = mempool_end;
979 +
980 +       if (VCADDR_TO_PHYSADDR(mempool_offline_start) != vc_cma_base) {
981 +               loud_error_header();
982 +               loud_error(
983 +                       "__MEMPOOL_OFFLINE_START(%x -> %lx) doesn't match "
984 +                       "vc_cma_base(%x)",
985 +                       mempool_offline_start,
986 +                       VCADDR_TO_PHYSADDR(mempool_offline_start),
987 +                       vc_cma_base);
988 +               success = 0;
989 +       }
990 +
991 +       if (VCADDR_TO_PHYSADDR(mempool_offline_end) !=
992 +               (vc_cma_base + vc_cma_size)) {
993 +               loud_error_header();
994 +               loud_error(
995 +                       "__MEMPOOL_OFFLINE_END(%x -> %lx) doesn't match "
996 +                       "vc_cma_base(%x) + vc_cma_size(%x) = %x",
997 +                       mempool_offline_start,
998 +                       VCADDR_TO_PHYSADDR(mempool_offline_end),
999 +                       vc_cma_base, vc_cma_size, vc_cma_base + vc_cma_size);
1000 +               success = 0;
1001 +       }
1002 +
1003 +       if (mempool_end < mempool_start) {
1004 +               loud_error_header();
1005 +               loud_error(
1006 +                       "__MEMPOOL_END(%x) must not be before "
1007 +                       "__MEMPOOL_START(%x)",
1008 +                       mempool_end,
1009 +                       mempool_start);
1010 +               success = 0;
1011 +       }
1012 +
1013 +       if (mempool_offline_end < mempool_offline_start) {
1014 +               loud_error_header();
1015 +               loud_error(
1016 +                       "__MEMPOOL_OFFLINE_END(%x) must not be before "
1017 +                       "__MEMPOOL_OFFLINE_START(%x)",
1018 +                       mempool_offline_end,
1019 +                       mempool_offline_start);
1020 +               success = 0;
1021 +       }
1022 +
1023 +       if (mempool_offline_start < mempool_start) {
1024 +               loud_error_header();
1025 +               loud_error(
1026 +                       "__MEMPOOL_OFFLINE_START(%x) must not be before "
1027 +                       "__MEMPOOL_START(%x)",
1028 +                       mempool_offline_start,
1029 +                       mempool_start);
1030 +               success = 0;
1031 +       }
1032 +
1033 +       if (mempool_offline_end > mempool_end) {
1034 +               loud_error_header();
1035 +               loud_error(
1036 +                       "__MEMPOOL_OFFLINE_END(%x) must not be after "
1037 +                       "__MEMPOOL_END(%x)",
1038 +                       mempool_offline_end,
1039 +                       mempool_end);
1040 +               success = 0;
1041 +       }
1042 +
1043 +       if ((cam_alloc_base < mempool_end) &&
1044 +               (cam_alloc_end > mempool_start)) {
1045 +               loud_error_header();
1046 +               loud_error(
1047 +                       "cam_alloc pool(%x-%x) overlaps "
1048 +                       "mempool(%x-%x)",
1049 +                       cam_alloc_base, cam_alloc_end,
1050 +                       mempool_start, mempool_end);
1051 +               success = 0;
1052 +       }
1053 +
1054 +       loud_error_footer();
1055 +
1056 +close:
1057 +       CloseVideoCoreMemory(mem_hndl);
1058 +
1059 +out:
1060 +       return success;
1061 +}
1062 +#endif
1063 +
1064 +static int vc_cma_init(void)
1065 +{
1066 +       int rc = -EFAULT;
1067 +       struct device *dev;
1068 +
1069 +       if (!check_cma_config())
1070 +               goto out_release;
1071 +
1072 +       printk(KERN_INFO "vc-cma: Videocore CMA driver\n");
1073 +       printk(KERN_INFO "vc-cma: vc_cma_base      = 0x%08x\n", vc_cma_base);
1074 +       printk(KERN_INFO "vc-cma: vc_cma_size      = 0x%08x (%u MiB)\n",
1075 +              vc_cma_size, vc_cma_size / (1024 * 1024));
1076 +       printk(KERN_INFO "vc-cma: vc_cma_initial   = 0x%08x (%u MiB)\n",
1077 +              vc_cma_initial, vc_cma_initial / (1024 * 1024));
1078 +
1079 +       vc_cma_base_page = phys_to_page(vc_cma_base);
1080 +
1081 +       if (vc_cma_chunks) {
1082 +               int chunks_needed = vc_cma_initial / VC_CMA_CHUNK_SIZE;
1083 +
1084 +               for (vc_cma_chunks_used = 0;
1085 +                    vc_cma_chunks_used < chunks_needed; vc_cma_chunks_used++) {
1086 +                       struct page *chunk;
1087 +                       chunk = dma_alloc_from_contiguous(NULL /*&vc_cma_device.dev*/,
1088 +                                                         PAGES_PER_CHUNK,
1089 +                                                         VC_CMA_CHUNK_ORDER);
1090 +                       if (!chunk)
1091 +                               break;
1092 +                       BUG_ON(((page_to_phys(chunk) - vc_cma_base) %
1093 +                               VC_CMA_CHUNK_SIZE) != 0);
1094 +               }
1095 +               if (vc_cma_chunks_used != chunks_needed) {
1096 +                       LOG_ERR("%s: dma_alloc_from_contiguous failed (%d "
1097 +                               "bytes, allocation %d of %d)",
1098 +                               __func__, VC_CMA_CHUNK_SIZE,
1099 +                               vc_cma_chunks_used, chunks_needed);
1100 +                       goto out_release;
1101 +               }
1102 +
1103 +               vchiq_add_connected_callback(vc_cma_connected_init);
1104 +       }
1105 +
1106 +       rc = alloc_chrdev_region(&vc_cma_devnum, 0, 1, DRIVER_NAME);
1107 +       if (rc < 0) {
1108 +               LOG_ERR("%s: alloc_chrdev_region failed (rc=%d)", __func__, rc);
1109 +               goto out_release;
1110 +       }
1111 +
1112 +       cdev_init(&vc_cma_cdev, &vc_cma_fops);
1113 +       rc = cdev_add(&vc_cma_cdev, vc_cma_devnum, 1);
1114 +       if (rc != 0) {
1115 +               LOG_ERR("%s: cdev_add failed (rc=%d)", __func__, rc);
1116 +               goto out_unregister;
1117 +       }
1118 +
1119 +       vc_cma_class = class_create(THIS_MODULE, DRIVER_NAME);
1120 +       if (IS_ERR(vc_cma_class)) {
1121 +               rc = PTR_ERR(vc_cma_class);
1122 +               LOG_ERR("%s: class_create failed (rc=%d)", __func__, rc);
1123 +               goto out_cdev_del;
1124 +       }
1125 +
1126 +       dev = device_create(vc_cma_class, NULL, vc_cma_devnum, NULL,
1127 +                           DRIVER_NAME);
1128 +       if (IS_ERR(dev)) {
1129 +               rc = PTR_ERR(dev);
1130 +               LOG_ERR("%s: device_create failed (rc=%d)", __func__, rc);
1131 +               goto out_class_destroy;
1132 +       }
1133 +
1134 +       vc_cma_proc_entry = create_proc_entry(DRIVER_NAME, 0444, NULL);
1135 +       if (vc_cma_proc_entry == NULL) {
1136 +               rc = -EFAULT;
1137 +               LOG_ERR("%s: create_proc_entry failed", __func__);
1138 +               goto out_device_destroy;
1139 +       }
1140 +
1141 +       vc_cma_proc_entry->proc_fops = &vc_cma_proc_fops;
1142 +
1143 +       vc_cma_inited = 1;
1144 +       return 0;
1145 +
1146 +out_device_destroy:
1147 +       device_destroy(vc_cma_class, vc_cma_devnum);
1148 +
1149 +out_class_destroy:
1150 +       class_destroy(vc_cma_class);
1151 +       vc_cma_class = NULL;
1152 +
1153 +out_cdev_del:
1154 +       cdev_del(&vc_cma_cdev);
1155 +
1156 +out_unregister:
1157 +       unregister_chrdev_region(vc_cma_devnum, 1);
1158 +
1159 +out_release:
1160 +       /* It is tempting to try to clean up by calling
1161 +          dma_release_from_contiguous for all allocated chunks, but it isn't
1162 +          a very safe thing to do. If vc_cma_initial is non-zero it is because
1163 +          VideoCore is already using that memory, so giving it back to Linux
1164 +          is likely to be fatal.
1165 +        */
1166 +       return -1;
1167 +}
1168 +
1169 +/****************************************************************************
1170 +*
1171 +*   vc_cma_exit
1172 +*
1173 +***************************************************************************/
1174 +
1175 +static void __exit vc_cma_exit(void)
1176 +{
1177 +       LOG_DBG("%s: called", __func__);
1178 +
1179 +       if (vc_cma_inited) {
1180 +               remove_proc_entry(vc_cma_proc_entry->name, NULL);
1181 +               device_destroy(vc_cma_class, vc_cma_devnum);
1182 +               class_destroy(vc_cma_class);
1183 +               cdev_del(&vc_cma_cdev);
1184 +               unregister_chrdev_region(vc_cma_devnum, 1);
1185 +       }
1186 +}
1187 +
1188 +module_init(vc_cma_init);
1189 +module_exit(vc_cma_exit);
1190 +MODULE_LICENSE("GPL");
1191 +MODULE_AUTHOR("Broadcom Corporation");
1192 --- a/drivers/char/Makefile
1193 +++ b/drivers/char/Makefile
1194 @@ -62,3 +62,6 @@ obj-$(CONFIG_JS_RTC)          += js-rtc.o
1195  js-rtc-y = rtc.o
1196  
1197  obj-$(CONFIG_TILE_SROM)                += tile-srom.o
1198 +
1199 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
1200 +
1201 --- a/drivers/char/Kconfig
1202 +++ b/drivers/char/Kconfig
1203 @@ -586,6 +586,8 @@ config DEVPORT
1204  
1205  source "drivers/s390/char/Kconfig"
1206  
1207 +source "drivers/char/broadcom/Kconfig"
1208 +
1209  config MSM_SMD_PKT
1210         bool "Enable device interface for some SMD packet ports"
1211         default n
1212 --- a/drivers/misc/Kconfig
1213 +++ b/drivers/misc/Kconfig
1214 @@ -536,4 +536,6 @@ source "drivers/misc/carma/Kconfig"
1215  source "drivers/misc/altera-stapl/Kconfig"
1216  source "drivers/misc/mei/Kconfig"
1217  source "drivers/misc/vmw_vmci/Kconfig"
1218 +source "drivers/misc/vc04_services/Kconfig"
1219  endmenu
1220 +
1221 --- a/drivers/misc/Makefile
1222 +++ b/drivers/misc/Makefile
1223 @@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI)               += mei/
1224  obj-$(CONFIG_VMWARE_VMCI)      += vmw_vmci/
1225  obj-$(CONFIG_LATTICE_ECP3_CONFIG)      += lattice-ecp3-config.o
1226  obj-$(CONFIG_SRAM)             += sram.o
1227 +obj-$(CONFIG_BCM2708_VCHIQ)    += vc04_services/
1228 --- /dev/null
1229 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
1230 @@ -0,0 +1,328 @@
1231 +/**
1232 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1233 + *
1234 + * Redistribution and use in source and binary forms, with or without
1235 + * modification, are permitted provided that the following conditions
1236 + * are met:
1237 + * 1. Redistributions of source code must retain the above copyright
1238 + *    notice, this list of conditions, and the following disclaimer,
1239 + *    without modification.
1240 + * 2. Redistributions in binary form must reproduce the above copyright
1241 + *    notice, this list of conditions and the following disclaimer in the
1242 + *    documentation and/or other materials provided with the distribution.
1243 + * 3. The names of the above-listed copyright holders may not be used
1244 + *    to endorse or promote products derived from this software without
1245 + *    specific prior written permission.
1246 + *
1247 + * ALTERNATIVELY, this software may be distributed under the terms of the
1248 + * GNU General Public License ("GPL") version 2, as published by the Free
1249 + * Software Foundation.
1250 + *
1251 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1252 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1253 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1254 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1255 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1256 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1257 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1258 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1259 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1260 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1261 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1262 + */
1263 +
1264 +#ifndef CONNECTION_H_
1265 +#define CONNECTION_H_
1266 +
1267 +#include <linux/kernel.h>
1268 +#include <linux/types.h>
1269 +#include <linux/semaphore.h>
1270 +
1271 +#include "interface/vchi/vchi_cfg_internal.h"
1272 +#include "interface/vchi/vchi_common.h"
1273 +#include "interface/vchi/message_drivers/message.h"
1274 +
1275 +/******************************************************************************
1276 + Global defs
1277 + *****************************************************************************/
1278 +
1279 +// Opaque handle for a connection / service pair
1280 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
1281 +
1282 +// opaque handle to the connection state information
1283 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
1284 +
1285 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
1286 +
1287 +
1288 +/******************************************************************************
1289 + API
1290 + *****************************************************************************/
1291 +
1292 +// Routine to init a connection with a particular low level driver
1293 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
1294 +                                                             const VCHI_MESSAGE_DRIVER_T * driver );
1295 +
1296 +// Routine to control CRC enabling at a connection level
1297 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
1298 +                                                  VCHI_CRC_CONTROL_T control );
1299 +
1300 +// Routine to create a service
1301 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
1302 +                                                      int32_t service_id,
1303 +                                                      uint32_t rx_fifo_size,
1304 +                                                      uint32_t tx_fifo_size,
1305 +                                                      int server,
1306 +                                                      VCHI_CALLBACK_T callback,
1307 +                                                      void *callback_param,
1308 +                                                      int32_t want_crc,
1309 +                                                      int32_t want_unaligned_bulk_rx,
1310 +                                                      int32_t want_unaligned_bulk_tx,
1311 +                                                      VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
1312 +
1313 +// Routine to close a service
1314 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
1315 +
1316 +// Routine to queue a message
1317 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1318 +                                                            const void *data,
1319 +                                                            uint32_t data_size,
1320 +                                                            VCHI_FLAGS_T flags,
1321 +                                                            void *msg_handle );
1322 +
1323 +// scatter-gather (vector) message queueing
1324 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1325 +                                                             VCHI_MSG_VECTOR_T *vector,
1326 +                                                             uint32_t count,
1327 +                                                             VCHI_FLAGS_T flags,
1328 +                                                             void *msg_handle );
1329 +
1330 +// Routine to dequeue a message
1331 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1332 +                                                              void *data,
1333 +                                                              uint32_t max_data_size_to_read,
1334 +                                                              uint32_t *actual_msg_size,
1335 +                                                              VCHI_FLAGS_T flags );
1336 +
1337 +// Routine to peek at a message
1338 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1339 +                                                           void **data,
1340 +                                                           uint32_t *msg_size,
1341 +                                                           VCHI_FLAGS_T flags );
1342 +
1343 +// Routine to hold a message
1344 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1345 +                                                           void **data,
1346 +                                                           uint32_t *msg_size,
1347 +                                                           VCHI_FLAGS_T flags,
1348 +                                                           void **message_handle );
1349 +
1350 +// Routine to initialise a received message iterator
1351 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1352 +                                                                VCHI_MSG_ITER_T *iter,
1353 +                                                                VCHI_FLAGS_T flags );
1354 +
1355 +// Routine to release a held message
1356 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1357 +                                                       void *message_handle );
1358 +
1359 +// Routine to get info on a held message
1360 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1361 +                                                    void *message_handle,
1362 +                                                    void **data,
1363 +                                                    int32_t *msg_size,
1364 +                                                    uint32_t *tx_timestamp,
1365 +                                                    uint32_t *rx_timestamp );
1366 +
1367 +// Routine to check whether the iterator has a next message
1368 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1369 +                                                       const VCHI_MSG_ITER_T *iter );
1370 +
1371 +// Routine to advance the iterator
1372 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1373 +                                                    VCHI_MSG_ITER_T *iter,
1374 +                                                    void **data,
1375 +                                                    uint32_t *msg_size );
1376 +
1377 +// Routine to remove the last message returned by the iterator
1378 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1379 +                                                      VCHI_MSG_ITER_T *iter );
1380 +
1381 +// Routine to hold the last message returned by the iterator
1382 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
1383 +                                                    VCHI_MSG_ITER_T *iter,
1384 +                                                    void **msg_handle );
1385 +
1386 +// Routine to transmit bulk data
1387 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1388 +                                                          const void *data_src,
1389 +                                                          uint32_t data_size,
1390 +                                                          VCHI_FLAGS_T flags,
1391 +                                                          void *bulk_handle );
1392 +
1393 +// Routine to receive data
1394 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
1395 +                                                         void *data_dst,
1396 +                                                         uint32_t data_size,
1397 +                                                         VCHI_FLAGS_T flags,
1398 +                                                         void *bulk_handle );
1399 +
1400 +// Routine to report if a server is available
1401 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
1402 +
1403 +// Routine to report the number of RX slots available
1404 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
1405 +
1406 +// Routine to report the RX slot size
1407 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
1408 +
1409 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
1410 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
1411 +                                                     int32_t service,
1412 +                                                     uint32_t length,
1413 +                                                     MESSAGE_TX_CHANNEL_T channel,
1414 +                                                     uint32_t channel_params,
1415 +                                                     uint32_t data_length,
1416 +                                                     uint32_t data_offset);
1417 +
1418 +// Callback to inform a service that a Xon or Xoff message has been received
1419 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
1420 +
1421 +// Callback to inform a service that a server available reply message has been received
1422 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
1423 +
1424 +// Callback to indicate that bulk auxiliary messages have arrived
1425 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
1426 +
1427 +// Callback to indicate that a bulk auxiliary message has been transmitted
1428 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
1429 +
1430 +// Callback with all the connection info you require
1431 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
1432 +
1433 +// Callback to inform of a disconnect
1434 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
1435 +
1436 +// Callback to inform of a power control request
1437 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
1438 +
1439 +// allocate memory suitably aligned for this connection
1440 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
1441 +
1442 +// free memory allocated by buffer_allocate
1443 +typedef void   (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
1444 +
1445 +
1446 +/******************************************************************************
1447 + System driver struct
1448 + *****************************************************************************/
1449 +
1450 +struct opaque_vchi_connection_api_t
1451 +{
1452 +   // Routine to init the connection
1453 +   VCHI_CONNECTION_INIT_T                      init;
1454 +
1455 +   // Connection-level CRC control
1456 +   VCHI_CONNECTION_CRC_CONTROL_T               crc_control;
1457 +
1458 +   // Routine to connect to or create service
1459 +   VCHI_CONNECTION_SERVICE_CONNECT_T           service_connect;
1460 +
1461 +   // Routine to disconnect from a service
1462 +   VCHI_CONNECTION_SERVICE_DISCONNECT_T        service_disconnect;
1463 +
1464 +   // Routine to queue a message
1465 +   VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T     service_queue_msg;
1466 +
1467 +   // scatter-gather (vector) message queue
1468 +   VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T    service_queue_msgv;
1469 +
1470 +   // Routine to dequeue a message
1471 +   VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T   service_dequeue_msg;
1472 +
1473 +   // Routine to peek at a message
1474 +   VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T      service_peek_msg;
1475 +
1476 +   // Routine to hold a message
1477 +   VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T      service_hold_msg;
1478 +
1479 +   // Routine to initialise a received message iterator
1480 +   VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
1481 +
1482 +   // Routine to release a message
1483 +   VCHI_CONNECTION_HELD_MSG_RELEASE_T          held_msg_release;
1484 +
1485 +   // Routine to get information on a held message
1486 +   VCHI_CONNECTION_HELD_MSG_INFO_T             held_msg_info;
1487 +
1488 +   // Routine to check for next message on iterator
1489 +   VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T         msg_iter_has_next;
1490 +
1491 +   // Routine to get next message on iterator
1492 +   VCHI_CONNECTION_MSG_ITER_NEXT_T             msg_iter_next;
1493 +
1494 +   // Routine to remove the last message returned by iterator
1495 +   VCHI_CONNECTION_MSG_ITER_REMOVE_T           msg_iter_remove;
1496 +
1497 +   // Routine to hold the last message returned by iterator
1498 +   VCHI_CONNECTION_MSG_ITER_HOLD_T             msg_iter_hold;
1499 +
1500 +   // Routine to transmit bulk data
1501 +   VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T       bulk_queue_transmit;
1502 +
1503 +   // Routine to receive data
1504 +   VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T        bulk_queue_receive;
1505 +
1506 +   // Routine to report the available servers
1507 +   VCHI_CONNECTION_SERVER_PRESENT              server_present;
1508 +
1509 +   // Routine to report the number of RX slots available
1510 +   VCHI_CONNECTION_RX_SLOTS_AVAILABLE          connection_rx_slots_available;
1511 +
1512 +   // Routine to report the RX slot size
1513 +   VCHI_CONNECTION_RX_SLOT_SIZE                connection_rx_slot_size;
1514 +
1515 +   // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
1516 +   VCHI_CONNECTION_RX_BULK_BUFFER_ADDED        rx_bulk_buffer_added;
1517 +
1518 +   // Callback to inform a service that a Xon or Xoff message has been received
1519 +   VCHI_CONNECTION_FLOW_CONTROL                flow_control;
1520 +
1521 +   // Callback to inform a service that a server available reply message has been received
1522 +   VCHI_CONNECTION_SERVER_AVAILABLE_REPLY      server_available_reply;
1523 +
1524 +   // Callback to indicate that bulk auxiliary messages have arrived
1525 +   VCHI_CONNECTION_BULK_AUX_RECEIVED           bulk_aux_received;
1526 +
1527 +   // Callback to indicate that a bulk auxiliary message has been transmitted
1528 +   VCHI_CONNECTION_BULK_AUX_TRANSMITTED        bulk_aux_transmitted;
1529 +
1530 +   // Callback to provide information about the connection
1531 +   VCHI_CONNECTION_INFO                        connection_info;
1532 +
1533 +   // Callback to notify that peer has requested disconnect
1534 +   VCHI_CONNECTION_DISCONNECT                  disconnect;
1535 +
1536 +   // Callback to notify that peer has requested power change
1537 +   VCHI_CONNECTION_POWER_CONTROL               power_control;
1538 +
1539 +   // allocate memory suitably aligned for this connection
1540 +   VCHI_BUFFER_ALLOCATE                        buffer_allocate;
1541 +
1542 +   // free memory allocated by buffer_allocate
1543 +   VCHI_BUFFER_FREE                            buffer_free;
1544 +
1545 +};
1546 +
1547 +struct vchi_connection_t {
1548 +   const VCHI_CONNECTION_API_T *api;
1549 +   VCHI_CONNECTION_STATE_T     *state;
1550 +#ifdef VCHI_COARSE_LOCKING
1551 +   struct semaphore             sem;
1552 +#endif
1553 +};
1554 +
1555 +
1556 +#endif /* CONNECTION_H_ */
1557 +
1558 +/****************************** End of file **********************************/
1559 --- /dev/null
1560 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
1561 @@ -0,0 +1,204 @@
1562 +/**
1563 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1564 + *
1565 + * Redistribution and use in source and binary forms, with or without
1566 + * modification, are permitted provided that the following conditions
1567 + * are met:
1568 + * 1. Redistributions of source code must retain the above copyright
1569 + *    notice, this list of conditions, and the following disclaimer,
1570 + *    without modification.
1571 + * 2. Redistributions in binary form must reproduce the above copyright
1572 + *    notice, this list of conditions and the following disclaimer in the
1573 + *    documentation and/or other materials provided with the distribution.
1574 + * 3. The names of the above-listed copyright holders may not be used
1575 + *    to endorse or promote products derived from this software without
1576 + *    specific prior written permission.
1577 + *
1578 + * ALTERNATIVELY, this software may be distributed under the terms of the
1579 + * GNU General Public License ("GPL") version 2, as published by the Free
1580 + * Software Foundation.
1581 + *
1582 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1583 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1584 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1585 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1586 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1587 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1588 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1589 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1590 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1591 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1592 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1593 + */
1594 +
1595 +#ifndef _VCHI_MESSAGE_H_
1596 +#define _VCHI_MESSAGE_H_
1597 +
1598 +#include <linux/kernel.h>
1599 +#include <linux/types.h>
1600 +#include <linux/semaphore.h>
1601 +
1602 +#include "interface/vchi/vchi_cfg_internal.h"
1603 +#include "interface/vchi/vchi_common.h"
1604 +
1605 +
1606 +typedef enum message_event_type {
1607 +   MESSAGE_EVENT_NONE,
1608 +   MESSAGE_EVENT_NOP,
1609 +   MESSAGE_EVENT_MESSAGE,
1610 +   MESSAGE_EVENT_SLOT_COMPLETE,
1611 +   MESSAGE_EVENT_RX_BULK_PAUSED,
1612 +   MESSAGE_EVENT_RX_BULK_COMPLETE,
1613 +   MESSAGE_EVENT_TX_COMPLETE,
1614 +   MESSAGE_EVENT_MSG_DISCARDED
1615 +} MESSAGE_EVENT_TYPE_T;
1616 +
1617 +typedef enum vchi_msg_flags
1618 +{
1619 +   VCHI_MSG_FLAGS_NONE                  = 0x0,
1620 +   VCHI_MSG_FLAGS_TERMINATE_DMA         = 0x1
1621 +} VCHI_MSG_FLAGS_T;
1622 +
1623 +typedef enum message_tx_channel
1624 +{
1625 +   MESSAGE_TX_CHANNEL_MESSAGE           = 0,
1626 +   MESSAGE_TX_CHANNEL_BULK              = 1 // drivers may provide multiple bulk channels, from 1 upwards
1627 +} MESSAGE_TX_CHANNEL_T;
1628 +
1629 +// Macros used for cycling through bulk channels
1630 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
1631 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
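+// For example, with the default VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION of 9 (see vchi_cfg.h),
+// these macros cycle through MESSAGE_TX_CHANNEL_BULK+0 .. MESSAGE_TX_CHANNEL_BULK+8, so:
+//   MESSAGE_TX_CHANNEL_BULK_NEXT(MESSAGE_TX_CHANNEL_BULK + 8) == MESSAGE_TX_CHANNEL_BULK
+//   MESSAGE_TX_CHANNEL_BULK_PREV(MESSAGE_TX_CHANNEL_BULK)     == MESSAGE_TX_CHANNEL_BULK + 8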
1632 +
1633 +typedef enum message_rx_channel
1634 +{
1635 +   MESSAGE_RX_CHANNEL_MESSAGE           = 0,
1636 +   MESSAGE_RX_CHANNEL_BULK              = 1 // drivers may provide multiple bulk channels, from 1 upwards
1637 +} MESSAGE_RX_CHANNEL_T;
1638 +
1639 +// Message receive slot information
1640 +typedef struct rx_msg_slot_info {
1641 +
1642 +   struct rx_msg_slot_info *next;
1643 +   //struct slot_info *prev;
1644 +#if !defined VCHI_COARSE_LOCKING
1645 +   struct semaphore   sem;
1646 +#endif
1647 +
1648 +   uint8_t           *addr;               // base address of slot
1649 +   uint32_t           len;                // length of slot in bytes
1650 +
1651 +   uint32_t           write_ptr;          // hardware causes this to advance
1652 +   uint32_t           read_ptr;           // this module does the reading
1653 +   int                active;             // is this slot in the hardware dma fifo?
1654 +   uint32_t           msgs_parsed;        // count how many messages are in this slot
1655 +   uint32_t           msgs_released;      // how many messages have been released
1656 +   void              *state;              // connection state information
1657 +   uint8_t            ref_count[VCHI_MAX_SERVICES_PER_CONNECTION];          // reference count for slots held by services
1658 +} RX_MSG_SLOTINFO_T;
1659 +
1660 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
1661 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
1662 +// driver will be tasked with sending the aligned core section.
1663 +typedef struct rx_bulk_slotinfo_t {
1664 +   struct rx_bulk_slotinfo_t *next;
1665 +
1666 +   struct semaphore *blocking;
1667 +
1668 +   // needed by DMA
1669 +   void        *addr;
1670 +   uint32_t     len;
1671 +
1672 +   // needed for the callback
1673 +   void        *service;
1674 +   void        *handle;
1675 +   VCHI_FLAGS_T flags;
1676 +} RX_BULK_SLOTINFO_T;
1677 +
1678 +
1679 +/* ----------------------------------------------------------------------
1680 + * each connection driver will have a pool of the following struct.
1681 + *
1682 + * the pool will be managed by vchi_qman_*
1683 + * this means there will be multiple queues (single linked lists)
1684 + * a given struct message_info will be on exactly one of these queues
1685 + * at any one time
1686 + * -------------------------------------------------------------------- */
1687 +typedef struct rx_message_info {
1688 +
1689 +   struct message_info *next;
1690 +   //struct message_info *prev;
1691 +
1692 +   uint8_t    *addr;
1693 +   uint32_t   len;
1694 +   RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
1695 +   uint32_t   tx_timestamp;
1696 +   uint32_t   rx_timestamp;
1697 +
1698 +} RX_MESSAGE_INFO_T;
1699 +
1700 +typedef struct {
1701 +   MESSAGE_EVENT_TYPE_T type;
1702 +
1703 +   struct {
1704 +      // for messages
1705 +      void    *addr;           // address of message
1706 +      uint16_t slot_delta;     // whether this message indicated slot delta
1707 +      uint32_t len;            // length of message
1708 +      RX_MSG_SLOTINFO_T *slot; // slot this message is in
1709 +      int32_t  service;   // service id this message is destined for
1710 +      uint32_t tx_timestamp;   // timestamp from the header
1711 +      uint32_t rx_timestamp;   // timestamp when we parsed it
1712 +   } message;
1713 +
1714 +   // FIXME: cleanup slot reporting...
1715 +   RX_MSG_SLOTINFO_T *rx_msg;
1716 +   RX_BULK_SLOTINFO_T *rx_bulk;
1717 +   void *tx_handle;
1718 +   MESSAGE_TX_CHANNEL_T tx_channel;
1719 +
1720 +} MESSAGE_EVENT_T;
1721 +
1722 +
1723 +// callbacks
1724 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
1725 +
1726 +typedef struct {
1727 +   VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
1728 +} VCHI_MESSAGE_DRIVER_OPEN_T;
1729 +
1730 +
1731 +// handle to this instance of message driver (as returned by ->open)
1732 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
1733 +
1734 +struct opaque_vchi_message_driver_t {
1735 +   VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
1736 +   int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
1737 +   int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
1738 +   int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
1739 +   int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot );      // rx message
1740 +   int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot );  // rx data (bulk)
1741 +   int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle );      // tx (message & bulk)
1742 +   void    (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event );     // get the next event from message_driver
1743 +   int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
1744 +   int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
1745 +                            *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
1746 +
1747 +   int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
1748 +   int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
1749 +   void *  (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
1750 +   void    (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
1751 +   int     (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
1752 +   int     (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
1753 +
1754 +   int32_t  (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1755 +   uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1756 +   int     (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
1757 +   int     (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
1758 +   void    (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
1759 +   void    (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
1760 +};
1761 +
1762 +
1763 +#endif // _VCHI_MESSAGE_H_
1764 +
1765 +/****************************** End of file ***********************************/
1766 --- /dev/null
1767 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1768 @@ -0,0 +1,224 @@
1769 +/**
1770 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1771 + *
1772 + * Redistribution and use in source and binary forms, with or without
1773 + * modification, are permitted provided that the following conditions
1774 + * are met:
1775 + * 1. Redistributions of source code must retain the above copyright
1776 + *    notice, this list of conditions, and the following disclaimer,
1777 + *    without modification.
1778 + * 2. Redistributions in binary form must reproduce the above copyright
1779 + *    notice, this list of conditions and the following disclaimer in the
1780 + *    documentation and/or other materials provided with the distribution.
1781 + * 3. The names of the above-listed copyright holders may not be used
1782 + *    to endorse or promote products derived from this software without
1783 + *    specific prior written permission.
1784 + *
1785 + * ALTERNATIVELY, this software may be distributed under the terms of the
1786 + * GNU General Public License ("GPL") version 2, as published by the Free
1787 + * Software Foundation.
1788 + *
1789 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1790 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1791 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1792 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1793 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1794 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1795 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1796 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1797 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1798 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1799 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1800 + */
1801 +
1802 +#ifndef VCHI_CFG_H_
1803 +#define VCHI_CFG_H_
1804 +
1805 +/****************************************************************************************
1806 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1807 + * services.
1808 + ***************************************************************************************/
1809 +
1810 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1811 +/* Really determined by the message driver, and should be available from a run-time call. */
1812 +#ifndef VCHI_BULK_ALIGN
1813 +#   if __VCCOREVER__ >= 0x04000000
1814 +#       define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1815 +#   else
1816 +#       define VCHI_BULK_ALIGN 16
1817 +#   endif
1818 +#endif
1819 +
1820 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1821 +/* May be less than or greater than VCHI_BULK_ALIGN */
1822 +/* Really determined by the message driver, and should be available from a run-time call. */
1823 +#ifndef VCHI_BULK_GRANULARITY
1824 +#   if __VCCOREVER__ >= 0x04000000
1825 +#       define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1826 +#   else
1827 +#       define VCHI_BULK_GRANULARITY 16
1828 +#   endif
1829 +#endif
1830 +
1831 +/* The largest possible message to be queued with vchi_msg_queue. */
1832 +#ifndef VCHI_MAX_MSG_SIZE
1833 +#   if defined VCHI_LOCAL_HOST_PORT
1834 +#       define VCHI_MAX_MSG_SIZE     16384         // makes file transfers fast, but should they be using bulk?
1835 +#   else
1836 +#       define VCHI_MAX_MSG_SIZE      4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1837 +#   endif
1838 +#endif
1839 +
1840 +/******************************************************************************************
1841 + * Defines below are system configuration options, and should not be used by VCHI services.
1842 + *****************************************************************************************/
1843 +
1844 +/* How many connections can we support? A localhost implementation uses 2 connections,
1845 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1846 + * driver. */
1847 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1848 +#   define VCHI_MAX_NUM_CONNECTIONS 3
1849 +#endif
1850 +
1851 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1852 + * amount of static memory. */
1853 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1854 +#  define VCHI_MAX_SERVICES_PER_CONNECTION 36
1855 +#endif
1856 +
1857 +/* Adjust if using a message driver that supports more logical TX channels */
1858 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1859 +#   define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1860 +#endif
1861 +
1862 +/* Adjust if using a message driver that supports more logical RX channels */
1863 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1864 +#   define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1865 +#endif
1866 +
1867 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1868 + * receive queue space, less message headers. */
1869 +#ifndef VCHI_NUM_READ_SLOTS
1870 +#  if defined(VCHI_LOCAL_HOST_PORT)
1871 +#     define VCHI_NUM_READ_SLOTS 4
1872 +#  else
1873 +#     define VCHI_NUM_READ_SLOTS 48
1874 +#  endif
1875 +#endif
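+/* For example, with the non-localhost defaults above (48 slots of VCHI_MAX_MSG_SIZE = 4096
+ * bytes each) that is 48 * 4096 = 196608 bytes (192 KiB) of receive queue space, less headers. */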
1876 +
1877 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1878 + * performance. Only define on VideoCore end, talking to host.
1879 + */
1880 +//#define VCHI_MSG_RX_OVERRUN
1881 +
1882 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1883 + * underneath VCHI will usually have its own buffering. */
1884 +#ifndef VCHI_NUM_WRITE_SLOTS
1885 +#  define VCHI_NUM_WRITE_SLOTS 4
1886 +#endif
1887 +
1888 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1889 + * then it's taking up too much buffer space, and the peer service will be told to stop
1890 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1891 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1892 + * is too high. */
1893 +#ifndef VCHI_XOFF_THRESHOLD
1894 +#  define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1895 +#endif
1896 +
1897 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1898 + * service has dequeued/released enough messages that it's now occupying
1899 + * VCHI_XON_THRESHOLD slots or fewer. */
1900 +#ifndef VCHI_XON_THRESHOLD
1901 +#  define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1902 +#endif
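+/* Worked example with the non-localhost defaults: VCHI_NUM_READ_SLOTS is 48, so a service
+ * occupying 24 or more slots causes an XOFF to be sent, and an XON follows once it has
+ * dropped back to 12 slots or fewer. */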
1903 +
1904 +/* A size below which a bulk transfer omits the handshake completely and always goes
1905 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1906 + * can guarantee this by enabling unaligned transmits).
1907 + * Not API. */
1908 +#ifndef VCHI_MIN_BULK_SIZE
1909 +#  define VCHI_MIN_BULK_SIZE    ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1910 +#endif
1911 +
1912 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1913 + * speed and latency; the smaller the chunk size the better chance of messages and other
1914 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1915 + * break transmissions into chunks.
1916 + */
1917 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1918 +#  define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1919 +#endif
1920 +
1921 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1922 + * with multiple-line frames. Only use if the receiver can cope. */
1923 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1924 +#  define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1925 +#endif
1926 +
1927 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1928 + * vchi_msg_queue will be blocked. */
1929 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1930 +#  define VCHI_TX_MSG_QUEUE_SIZE           256
1931 +#endif
1932 +
1933 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1934 + * will be suspended until older messages are dequeued/released. */
1935 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1936 +#  define VCHI_RX_MSG_QUEUE_SIZE           256
1937 +#endif
1938 +
1939 +/* Really should be able to cope if we run out of received message descriptors, by
1940 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1941 + * under the carpet. */
1942 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1943 +#  undef VCHI_RX_MSG_QUEUE_SIZE
1944 +#  define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1945 +#endif
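+/* For example, with the defaults (VCHI_MAX_MSG_SIZE 4096, VCHI_NUM_READ_SLOTS 48) this check
+ * raises VCHI_RX_MSG_QUEUE_SIZE from 256 to (4096/16 + 1) * 48 = 12336 message descriptors. */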
1946 +
1947 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1948 + * will be blocked. */
1949 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1950 +#  define VCHI_TX_BULK_QUEUE_SIZE           64
1951 +#endif
1952 +
1953 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1954 + * will be blocked. */
1955 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1956 +#  define VCHI_RX_BULK_QUEUE_SIZE           64
1957 +#endif
1958 +
1959 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1960 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1961 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1962 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1963 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1964 +#  define VCHI_MAX_PEER_BULK_REQUESTS       32
1965 +#endif
1966 +
1967 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1968 + * transmitter on and off.
1969 + */
1970 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1971 +
1972 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1973 +
1974 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1975 + * negative for no IDLE.
1976 + */
1977 +#  ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1978 +#    define VCHI_CCP2TX_IDLE_TIMEOUT        5
1979 +#  endif
1980 +
1981 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1982 + * negative for no OFF.
1983 + */
1984 +#  ifndef VCHI_CCP2TX_OFF_TIMEOUT
1985 +#    define VCHI_CCP2TX_OFF_TIMEOUT         1000
1986 +#  endif
1987 +
1988 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1989 +
1990 +#endif /* VCHI_CFG_H_ */
1991 +
1992 +/****************************** End of file **********************************/
1993 --- /dev/null
1994 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1995 @@ -0,0 +1,71 @@
1996 +/**
1997 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1998 + *
1999 + * Redistribution and use in source and binary forms, with or without
2000 + * modification, are permitted provided that the following conditions
2001 + * are met:
2002 + * 1. Redistributions of source code must retain the above copyright
2003 + *    notice, this list of conditions, and the following disclaimer,
2004 + *    without modification.
2005 + * 2. Redistributions in binary form must reproduce the above copyright
2006 + *    notice, this list of conditions and the following disclaimer in the
2007 + *    documentation and/or other materials provided with the distribution.
2008 + * 3. The names of the above-listed copyright holders may not be used
2009 + *    to endorse or promote products derived from this software without
2010 + *    specific prior written permission.
2011 + *
2012 + * ALTERNATIVELY, this software may be distributed under the terms of the
2013 + * GNU General Public License ("GPL") version 2, as published by the Free
2014 + * Software Foundation.
2015 + *
2016 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2017 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2018 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2019 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2020 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2021 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2022 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2023 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2024 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2025 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2026 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2027 + */
2028 +
2029 +#ifndef VCHI_CFG_INTERNAL_H_
2030 +#define VCHI_CFG_INTERNAL_H_
2031 +
2032 +/****************************************************************************************
2033 + * Control optimisation attempts.
2034 + ***************************************************************************************/
2035 +
2036 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
2037 +#define VCHI_COARSE_LOCKING
2038 +
2039 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
2040 +// (only relevant if VCHI_COARSE_LOCKING)
2041 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
2042 +
2043 +// Avoid lock on non-blocking peek
2044 +// (only relevant if VCHI_COARSE_LOCKING)
2045 +#define VCHI_AVOID_PEEK_LOCK
2046 +
2047 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
2048 +#define VCHI_MULTIPLE_HANDLER_THREADS
2049 +
2050 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
2051 +// our way through the pool of descriptors.
2052 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
2053 +
2054 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
2055 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
2056 +
2057 +// Don't use message descriptors for TX messages that don't need them
2058 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
2059 +
2060 +// Nano-locks for multiqueue
2061 +//#define VCHI_MQUEUE_NANOLOCKS
2062 +
2063 +// Lock-free(er) dequeuing
2064 +//#define VCHI_RX_NANOLOCKS
2065 +
2066 +#endif /*VCHI_CFG_INTERNAL_H_*/
2067 --- /dev/null
2068 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
2069 @@ -0,0 +1,163 @@
2070 +/**
2071 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2072 + *
2073 + * Redistribution and use in source and binary forms, with or without
2074 + * modification, are permitted provided that the following conditions
2075 + * are met:
2076 + * 1. Redistributions of source code must retain the above copyright
2077 + *    notice, this list of conditions, and the following disclaimer,
2078 + *    without modification.
2079 + * 2. Redistributions in binary form must reproduce the above copyright
2080 + *    notice, this list of conditions and the following disclaimer in the
2081 + *    documentation and/or other materials provided with the distribution.
2082 + * 3. The names of the above-listed copyright holders may not be used
2083 + *    to endorse or promote products derived from this software without
2084 + *    specific prior written permission.
2085 + *
2086 + * ALTERNATIVELY, this software may be distributed under the terms of the
2087 + * GNU General Public License ("GPL") version 2, as published by the Free
2088 + * Software Foundation.
2089 + *
2090 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2091 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2092 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2093 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2094 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2095 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2096 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2097 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2098 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2099 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2100 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2101 + */
2102 +
2103 +#ifndef VCHI_COMMON_H_
2104 +#define VCHI_COMMON_H_
2105 +
2106 +
2107 +//flags used when sending messages (must be bitmapped)
2108 +typedef enum
2109 +{
2110 +   VCHI_FLAGS_NONE                      = 0x0,
2111 +   VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE   = 0x1,   // waits for message to be received, or sent (NB. not the same as being seen on other side)
2112 +   VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2,   // run a callback when message sent
2113 +   VCHI_FLAGS_BLOCK_UNTIL_QUEUED        = 0x4,   // return once the transfer is in a queue ready to go
2114 +   VCHI_FLAGS_ALLOW_PARTIAL             = 0x8,
2115 +   VCHI_FLAGS_BLOCK_UNTIL_DATA_READ     = 0x10,
2116 +   VCHI_FLAGS_CALLBACK_WHEN_DATA_READ   = 0x20,
2117 +
2118 +   VCHI_FLAGS_ALIGN_SLOT            = 0x000080,  // internal use only
2119 +   VCHI_FLAGS_BULK_AUX_QUEUED       = 0x010000,  // internal use only
2120 +   VCHI_FLAGS_BULK_AUX_COMPLETE     = 0x020000,  // internal use only
2121 +   VCHI_FLAGS_BULK_DATA_QUEUED      = 0x040000,  // internal use only
2122 +   VCHI_FLAGS_BULK_DATA_COMPLETE    = 0x080000,  // internal use only
2123 +   VCHI_FLAGS_INTERNAL              = 0xFF0000
2124 +} VCHI_FLAGS_T;
2125 +
2126 +// constants for vchi_crc_control()
2127 +typedef enum {
2128 +   VCHI_CRC_NOTHING = -1,
2129 +   VCHI_CRC_PER_SERVICE = 0,
2130 +   VCHI_CRC_EVERYTHING = 1,
2131 +} VCHI_CRC_CONTROL_T;
2132 +
2133 +//callback reasons when an event occurs on a service
2134 +typedef enum
2135 +{
2136 +   VCHI_CALLBACK_REASON_MIN,
2137 +
2138 +   //This indicates that there is data available
2139 +   //handle is the msg id that was transmitted with the data
2140 +   //    When a message is received and there was no FULL message available previously, send callback
2141 +   //    Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
2142 +   VCHI_CALLBACK_MSG_AVAILABLE,
2143 +   VCHI_CALLBACK_MSG_SENT,
2144 +   VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
2145 +
2146 +   // This indicates that a transfer from the other side has completed
2147 +   VCHI_CALLBACK_BULK_RECEIVED,
2148 +   //This indicates that data queued up to be sent has now gone
2149 +   //handle is the msg id that was used when sending the data
2150 +   VCHI_CALLBACK_BULK_SENT,
2151 +   VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
2152 +   VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
2153 +
2154 +   VCHI_CALLBACK_SERVICE_CLOSED,
2155 +
2156 +   // this side has sent XOFF to peer due to lack of data consumption by service
2157 +   // (suggests the service may need to take some recovery action if it has
2158 +   // been deliberately holding off consuming data)
2159 +   VCHI_CALLBACK_SENT_XOFF,
2160 +   VCHI_CALLBACK_SENT_XON,
2161 +
2162 +   // indicates that a bulk transfer has finished reading the source buffer
2163 +   VCHI_CALLBACK_BULK_DATA_READ,
2164 +
2165 +   // power notification events (currently host side only)
2166 +   VCHI_CALLBACK_PEER_OFF,
2167 +   VCHI_CALLBACK_PEER_SUSPENDED,
2168 +   VCHI_CALLBACK_PEER_ON,
2169 +   VCHI_CALLBACK_PEER_RESUMED,
2170 +   VCHI_CALLBACK_FORCED_POWER_OFF,
2171 +
2172 +#ifdef USE_VCHIQ_ARM
2173 +   // some extra notifications provided by vchiq_arm
2174 +   VCHI_CALLBACK_SERVICE_OPENED,
2175 +   VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
2176 +   VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
2177 +#endif
2178 +
2179 +   VCHI_CALLBACK_REASON_MAX
2180 +} VCHI_CALLBACK_REASON_T;
2181 +
2182 +//Callback used by all services / bulk transfers
2183 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
2184 +                                 VCHI_CALLBACK_REASON_T reason,
2185 +                                 void *handle ); //for transmitting msgs only
2186 +
2187 +
2188 +
2189 +/*
2190 + * Define vector struct for scatter-gather (vector) operations
2191 + * Vectors can be nested - if a vector element has negative length, then
2192 + * the data pointer is treated as pointing to another vector array, with
2193 + * '-vec_len' elements. Thus to append a header onto an existing vector,
2194 + * you can do this:
2195 + *
2196 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
2197 + * {
2198 + *    VCHI_MSG_VECTOR_T nv[2];
2199 + *    nv[0].vec_base = my_header;
2200 + *    nv[0].vec_len = sizeof my_header;
2201 + *    nv[1].vec_base = v;
2202 + *    nv[1].vec_len = -n;
2203 + *    ...
2204 + *
2205 + */
2206 +typedef struct vchi_msg_vector {
2207 +   const void *vec_base;
2208 +   int32_t vec_len;
2209 +} VCHI_MSG_VECTOR_T;
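+// Illustrative sketch: the two-element vector 'nv' built in the comment above could then be
+// queued with the legacy vector call declared in vchi.h, for instance:
+//
+//    vchi_msg_queuev( handle, nv, 2, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL );
+//
+// where 'handle' is an already-open VCHI_SERVICE_HANDLE_T.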
2210 +
2211 +// Opaque type for a connection API
2212 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
2213 +
2214 +// Opaque type for a message driver
2215 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
2216 +
2217 +
2218 +// Iterator structure for reading ahead through received message queue. Allocated by client,
2219 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
2220 +// Iterates over messages in the queue at the instant of the call to vchi_msg_look_ahead -
2221 +// will not proceed to messages received since. Behaviour is undefined if an iterator
2222 +// is used again after messages for that service are removed/dequeued by any
2223 +// means other than vchi_msg_iter_... calls on the iterator itself.
2224 +typedef struct {
2225 +   struct opaque_vchi_service_t *service;
2226 +   void *last;
2227 +   void *next;
2228 +   void *remove;
2229 +} VCHI_MSG_ITER_T;
2230 +
2231 +
2232 +#endif // VCHI_COMMON_H_
2233 --- /dev/null
2234 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
2235 @@ -0,0 +1,373 @@
2236 +/**
2237 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2238 + *
2239 + * Redistribution and use in source and binary forms, with or without
2240 + * modification, are permitted provided that the following conditions
2241 + * are met:
2242 + * 1. Redistributions of source code must retain the above copyright
2243 + *    notice, this list of conditions, and the following disclaimer,
2244 + *    without modification.
2245 + * 2. Redistributions in binary form must reproduce the above copyright
2246 + *    notice, this list of conditions and the following disclaimer in the
2247 + *    documentation and/or other materials provided with the distribution.
2248 + * 3. The names of the above-listed copyright holders may not be used
2249 + *    to endorse or promote products derived from this software without
2250 + *    specific prior written permission.
2251 + *
2252 + * ALTERNATIVELY, this software may be distributed under the terms of the
2253 + * GNU General Public License ("GPL") version 2, as published by the Free
2254 + * Software Foundation.
2255 + *
2256 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2257 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2258 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2259 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2260 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2261 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2262 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2263 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2264 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2265 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2266 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2267 + */
2268 +
2269 +#ifndef VCHI_H_
2270 +#define VCHI_H_
2271 +
2272 +#include "interface/vchi/vchi_cfg.h"
2273 +#include "interface/vchi/vchi_common.h"
2274 +#include "interface/vchi/connections/connection.h"
2275 +#include "vchi_mh.h"
2276 +
2277 +
2278 +/******************************************************************************
2279 + Global defs
2280 + *****************************************************************************/
2281 +
2282 +#define VCHI_BULK_ROUND_UP(x)     ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
2283 +#define VCHI_BULK_ROUND_DOWN(x)   (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
2284 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
2285 +
2286 +#ifdef USE_VCHIQ_ARM
2287 +#define VCHI_BULK_ALIGNED(x)      1
2288 +#else
2289 +#define VCHI_BULK_ALIGNED(x)      (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
2290 +#endif
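+// Example, assuming VCHI_BULK_ALIGN == 16 and USE_VCHIQ_ARM undefined:
+//   VCHI_BULK_ROUND_UP(0x1001)     == 0x1010
+//   VCHI_BULK_ROUND_DOWN(0x1001)   == 0x1000
+//   VCHI_BULK_ALIGN_NBYTES(0x1001) == 15   // bytes needed to reach the next aligned address
+// With USE_VCHIQ_ARM defined, VCHI_BULK_ALIGNED() is always true, so ALIGN_NBYTES yields 0.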
2291 +
2292 +struct vchi_version {
2293 +       uint32_t version;
2294 +       uint32_t version_min;
2295 +};
2296 +#define VCHI_VERSION(v_) { v_, v_ }
2297 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
2298 +
2299 +typedef enum
2300 +{
2301 +   VCHI_VEC_POINTER,
2302 +   VCHI_VEC_HANDLE,
2303 +   VCHI_VEC_LIST
2304 +} VCHI_MSG_VECTOR_TYPE_T;
2305 +
2306 +typedef struct vchi_msg_vector_ex {
2307 +
2308 +   VCHI_MSG_VECTOR_TYPE_T type;
2309 +   union
2310 +   {
2311 +      // a memory handle
2312 +      struct
2313 +      {
2314 +         VCHI_MEM_HANDLE_T handle;
2315 +         uint32_t offset;
2316 +         int32_t vec_len;
2317 +      } handle;
2318 +
2319 +      // an ordinary data pointer
2320 +      struct
2321 +      {
2322 +         const void *vec_base;
2323 +         int32_t vec_len;
2324 +      } ptr;
2325 +
2326 +      // a nested vector list
2327 +      struct
2328 +      {
2329 +         struct vchi_msg_vector_ex *vec;
2330 +         uint32_t vec_len;
2331 +      } list;
2332 +   } u;
2333 +} VCHI_MSG_VECTOR_EX_T;
2334 +
2335 +
2336 +// Construct an entry in a msg vector for a pointer (p) of length (l)
2337 +#define VCHI_VEC_POINTER(p,l)  VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
2338 +
2339 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
2340 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE,  { { (h), (o), (l) } }
2341 +
2342 +// Macros to manipulate 'FOURCC' values
2343 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
2344 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
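+// Example: MAKE_FOURCC("AUDS") evaluates to 0x41554453 ('A'<<24 | 'U'<<16 | 'D'<<8 | 'S'),
+// and FOURCC_TO_CHAR(0x41554453) expands to the four comma-separated values 'A','U','D','S',
+// suitable as arguments to a printf("%c%c%c%c") style format.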
2345 +
2346 +
2347 +// Opaque service information
2348 +struct opaque_vchi_service_t;
2349 +
2350 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
2351 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
2352 +typedef struct
2353 +{
2354 +   struct opaque_vchi_service_t *service;
2355 +   void *message;
2356 +} VCHI_HELD_MSG_T;
2357 +
2358 +
2359 +
2360 +// structure used to provide the information needed to open a server or a client
2361 +typedef struct {
2362 +       struct vchi_version version;
2363 +       int32_t service_id;
2364 +       VCHI_CONNECTION_T *connection;
2365 +       uint32_t rx_fifo_size;
2366 +       uint32_t tx_fifo_size;
2367 +       VCHI_CALLBACK_T callback;
2368 +       void *callback_param;
2369 +       /* client intends to receive bulk transfers of
2370 +               odd lengths or into unaligned buffers */
2371 +       int32_t want_unaligned_bulk_rx;
2372 +       /* client intends to transmit bulk transfers of
2373 +               odd lengths or out of unaligned buffers */
2374 +       int32_t want_unaligned_bulk_tx;
2375 +       /* client wants to check CRCs on (bulk) xfers.
2376 +               Only needs to be set at 1 end - will do both directions. */
2377 +       int32_t want_crc;
2378 +} SERVICE_CREATION_T;
2379 +
2380 +// Opaque handle for a VCHI instance
2381 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
2382 +
2383 +// Opaque handle for a server or client
2384 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
2385 +
2386 +// Service registration & startup
2387 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
2388 +
2389 +typedef struct service_info_tag {
2390 +   const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
2391 +   VCHI_SERVICE_INIT init;          /* Service initialisation function */
2392 +   void *vll_handle;                /* VLL handle; NULL when unloaded or a "static VLL" in build */
2393 +} SERVICE_INFO_T;
2394 +
2395 +/******************************************************************************
2396 + Global funcs - implementation is specific to which side you are on (local / remote)
2397 + *****************************************************************************/
2398 +
2399 +#ifdef __cplusplus
2400 +extern "C" {
2401 +#endif
2402 +
2403 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
2404 +                                                   const VCHI_MESSAGE_DRIVER_T * low_level);
2405 +
2406 +
2407 +// Routine used to initialise the vchi on both local + remote connections
2408 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
2409 +
2410 +extern int32_t vchi_exit( void );
2411 +
2412 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
2413 +                             const uint32_t num_connections,
2414 +                             VCHI_INSTANCE_T instance_handle );
2415 +
2416 +//When this is called, ensure that all services have no data pending.
2417 +//Bulk transfers can remain 'queued'
2418 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
2419 +
2420 +// Global control over bulk CRC checking
2421 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
2422 +                                 VCHI_CRC_CONTROL_T control );
2423 +
2424 +// helper functions
2425 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
2426 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
2427 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
2428 +
2429 +
2430 +/******************************************************************************
2431 + Global service API
2432 + *****************************************************************************/
2433 +// Routine to create a named service
2434 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
2435 +                                    SERVICE_CREATION_T *setup,
2436 +                                    VCHI_SERVICE_HANDLE_T *handle );
2437 +
2438 +// Routine to destroy a service
2439 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
2440 +
2441 +// Routine to open a named service
2442 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
2443 +                                  SERVICE_CREATION_T *setup,
2444 +                                  VCHI_SERVICE_HANDLE_T *handle);
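+// Minimal client-side open, as an illustrative sketch only; the "DEMO" fourcc, the callback
+// and the connection/instance variables are placeholders rather than names from this header:
+//
+//    SERVICE_CREATION_T setup = {
+//       VCHI_VERSION( 1 ),             // version
+//       MAKE_FOURCC("DEMO"),           // service_id (hypothetical service)
+//       connection,                    // VCHI_CONNECTION_T * obtained via vchi_connect()
+//       0, 0,                          // rx_fifo_size, tx_fifo_size (placeholder values)
+//       my_callback, my_callback_ctx,  // VCHI_CALLBACK_T and its callback_param
+//       0, 0,                          // want_unaligned_bulk_rx, want_unaligned_bulk_tx
+//       0                              // want_crc
+//    };
+//    VCHI_SERVICE_HANDLE_T service;
+//    int32_t status = vchi_service_open( vchi_instance, &setup, &service );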
2445 +
2446 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
2447 +                                      short *peer_version );
2448 +
2449 +// Routine to close a named service
2450 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
2451 +
2452 +// Routine to increment ref count on a named service
2453 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
2454 +
2455 +// Routine to decrement ref count on a named service
2456 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
2457 +
2458 +// Routine to send a message across a service
2459 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
2460 +                               const void *data,
2461 +                               uint32_t data_size,
2462 +                               VCHI_FLAGS_T flags,
2463 +                               void *msg_handle );
2464 +
2465 +// scatter-gather (vector) and send message
2466 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
2467 +                            VCHI_MSG_VECTOR_EX_T *vector,
2468 +                            uint32_t count,
2469 +                            VCHI_FLAGS_T flags,
2470 +                            void *msg_handle );
2471 +
2472 +// legacy scatter-gather (vector) and send message, only handles pointers
2473 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
2474 +                         VCHI_MSG_VECTOR_T *vector,
2475 +                         uint32_t count,
2476 +                         VCHI_FLAGS_T flags,
2477 +                         void *msg_handle );
2478 +
2479 +// Routine to receive a msg from a service
2480 +// Dequeue is equivalent to hold, copy into client buffer, release
2481 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
2482 +                                 void *data,
2483 +                                 uint32_t max_data_size_to_read,
2484 +                                 uint32_t *actual_msg_size,
2485 +                                 VCHI_FLAGS_T flags );
2486 +
2487 +// Routine to look at a message in place.
2488 +// The message is not dequeued, so a subsequent call to peek or dequeue
2489 +// will return the same message.
2490 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
2491 +                              void **data,
2492 +                              uint32_t *msg_size,
2493 +                              VCHI_FLAGS_T flags );
2494 +
2495 +// Routine to remove a message after it has been read in place with peek
2496 +// The first message on the queue is dequeued.
2497 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
2498 +
2499 +// Routine to look at a message in place.
2500 +// The message is dequeued, so the caller is left holding it; the descriptor is
2501 +// filled in and must be released when the user has finished with the message.
2502 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
2503 +                              void **data,        // } may be NULL, as info can be
2504 +                              uint32_t *msg_size, // } obtained from HELD_MSG_T
2505 +                              VCHI_FLAGS_T flags,
2506 +                              VCHI_HELD_MSG_T *message_descriptor );
2507 +
2508 +// Initialise an iterator to look through messages in place
2509 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
2510 +                                    VCHI_MSG_ITER_T *iter,
2511 +                                    VCHI_FLAGS_T flags );
2512 +
2513 +/******************************************************************************
2514 + Global service support API - operations on held messages and message iterators
2515 + *****************************************************************************/
2516 +
2517 +// Routine to get the address of a held message
2518 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
2519 +
2520 +// Routine to get the size of a held message
2521 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
2522 +
2523 +// Routine to get the transmit timestamp as written into the header by the peer
2524 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
2525 +
2526 +// Routine to get the reception timestamp, written as we parsed the header
2527 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
2528 +
2529 +// Routine to release a held message after it has been processed
2530 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
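+// Typical zero-copy pattern (sketch; assumes the usual 0-on-success return convention):
+// hold a message in place, inspect it, then release it so its slot can be recycled.
+//
+//    VCHI_HELD_MSG_T held;
+//    void *data;
+//    uint32_t size;
+//    if (vchi_msg_hold( service, &data, &size, VCHI_FLAGS_NONE, &held ) == 0) {
+//       /* parse 'data'/'size', or use vchi_held_msg_ptr()/vchi_held_msg_size() */
+//       vchi_held_msg_release( &held );
+//    }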
2531 +
2532 +// Indicates whether the iterator has a next message.
2533 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
2534 +
2535 +// Return the pointer and length for the next message and advance the iterator.
2536 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
2537 +                                   void **data,
2538 +                                   uint32_t *msg_size );
2539 +
2540 +// Remove the last message returned by vchi_msg_iter_next.
2541 +// Can only be called once after each call to vchi_msg_iter_next.
2542 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
2543 +
2544 +// Hold the last message returned by vchi_msg_iter_next.
2545 +// Can only be called once after each call to vchi_msg_iter_next.
2546 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
2547 +                                   VCHI_HELD_MSG_T *message );
2548 +
2549 +// Return information for the next message, and hold it, advancing the iterator.
2550 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
2551 +                                        void **data,        // } may be NULL
2552 +                                        uint32_t *msg_size, // }
2553 +                                        VCHI_HELD_MSG_T *message );
2554 +
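Putting the iterator calls together, a sketch of walking the queued messages in place (vchi_msg_look_ahead is declared earlier in this header; flags assumed as before, illustrative only):

static int32_t example_iterate_messages(VCHI_SERVICE_HANDLE_T handle)
{
        VCHI_MSG_ITER_T iter;

        if (vchi_msg_look_ahead(handle, &iter, VCHI_FLAGS_NONE) != 0)
                return -1;

        while (vchi_msg_iter_has_next(&iter)) {
                void *data = NULL;
                uint32_t size = 0;

                if (vchi_msg_iter_next(&iter, &data, &size) != 0)
                        break;
                /* inspect size bytes at data in place; call
                 * vchi_msg_iter_remove(&iter) here to consume it */
        }
        return 0;
}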
2555 +
2556 +/******************************************************************************
2557 + Global bulk API
2558 + *****************************************************************************/
2559 +
2560 +// Routine to prepare interface for a transfer from the other side
2561 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
2562 +                                        void *data_dst,
2563 +                                        uint32_t data_size,
2564 +                                        VCHI_FLAGS_T flags,
2565 +                                        void *transfer_handle );
2566 +
2567 +
2568 +// Prepare interface for a transfer from the other side into relocatable memory.
2569 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
2570 +                                       VCHI_MEM_HANDLE_T h_dst,
2571 +                                       uint32_t offset,
2572 +                                       uint32_t data_size,
2573 +                                       const VCHI_FLAGS_T flags,
2574 +                                       void * const bulk_handle );
2575 +
2576 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
2577 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
2578 +                                         const void *data_src,
2579 +                                         uint32_t data_size,
2580 +                                         VCHI_FLAGS_T flags,
2581 +                                         void *transfer_handle );
2582 +
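A sketch of pre-posting a bulk receive with the call declared above; the blocking flag name is an assumption from vchi_common.h and is not defined in this hunk.

static int32_t example_bulk_receive(VCHI_SERVICE_HANDLE_T handle,
                                    void *dst, uint32_t size)
{
        /* Post the buffer before the peer transmits; with the assumed flag
         * the call returns only once the peer's bulk transmit has landed. */
        return vchi_bulk_queue_receive(handle, dst, size,
                                       VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE,
                                       NULL /* transfer_handle */);
}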
2583 +
2584 +/******************************************************************************
2585 + Configuration plumbing
2586 + *****************************************************************************/
2587 +
2588 +// function prototypes for the different mid layers (the state info gives the different physical connections)
2589 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
2590 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
2591 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
2592 +
2593 +// declare all message drivers here
2594 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
2595 +
2596 +#ifdef __cplusplus
2597 +}
2598 +#endif
2599 +
2600 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
2601 +                                               VCHI_MEM_HANDLE_T h_src,
2602 +                                               uint32_t offset,
2603 +                                               uint32_t data_size,
2604 +                                               VCHI_FLAGS_T flags,
2605 +                                               void *transfer_handle );
2606 +#endif /* VCHI_H_ */
2607 +
2608 +/****************************** End of file **********************************/
2609 --- /dev/null
2610 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
2611 @@ -0,0 +1,42 @@
2612 +/**
2613 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2614 + *
2615 + * Redistribution and use in source and binary forms, with or without
2616 + * modification, are permitted provided that the following conditions
2617 + * are met:
2618 + * 1. Redistributions of source code must retain the above copyright
2619 + *    notice, this list of conditions, and the following disclaimer,
2620 + *    without modification.
2621 + * 2. Redistributions in binary form must reproduce the above copyright
2622 + *    notice, this list of conditions and the following disclaimer in the
2623 + *    documentation and/or other materials provided with the distribution.
2624 + * 3. The names of the above-listed copyright holders may not be used
2625 + *    to endorse or promote products derived from this software without
2626 + *    specific prior written permission.
2627 + *
2628 + * ALTERNATIVELY, this software may be distributed under the terms of the
2629 + * GNU General Public License ("GPL") version 2, as published by the Free
2630 + * Software Foundation.
2631 + *
2632 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2633 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2634 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2635 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2636 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2637 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2638 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2639 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2640 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2641 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2642 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2643 + */
2644 +
2645 +#ifndef VCHI_MH_H_
2646 +#define VCHI_MH_H_
2647 +
2648 +#include <linux/types.h>
2649 +
2650 +typedef int32_t VCHI_MEM_HANDLE_T;
2651 +#define VCHI_MEM_HANDLE_INVALID 0
2652 +
2653 +#endif
2654 --- /dev/null
2655 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
2656 @@ -0,0 +1,538 @@
2657 +/**
2658 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2659 + *
2660 + * Redistribution and use in source and binary forms, with or without
2661 + * modification, are permitted provided that the following conditions
2662 + * are met:
2663 + * 1. Redistributions of source code must retain the above copyright
2664 + *    notice, this list of conditions, and the following disclaimer,
2665 + *    without modification.
2666 + * 2. Redistributions in binary form must reproduce the above copyright
2667 + *    notice, this list of conditions and the following disclaimer in the
2668 + *    documentation and/or other materials provided with the distribution.
2669 + * 3. The names of the above-listed copyright holders may not be used
2670 + *    to endorse or promote products derived from this software without
2671 + *    specific prior written permission.
2672 + *
2673 + * ALTERNATIVELY, this software may be distributed under the terms of the
2674 + * GNU General Public License ("GPL") version 2, as published by the Free
2675 + * Software Foundation.
2676 + *
2677 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2678 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2679 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2680 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2681 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2682 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2683 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2684 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2685 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2686 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2687 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2688 + */
2689 +
2690 +#include <linux/kernel.h>
2691 +#include <linux/types.h>
2692 +#include <linux/errno.h>
2693 +#include <linux/interrupt.h>
2694 +#include <linux/irq.h>
2695 +#include <linux/pagemap.h>
2696 +#include <linux/dma-mapping.h>
2697 +#include <linux/version.h>
2698 +#include <linux/io.h>
2699 +#include <linux/uaccess.h>
2700 +#include <asm/pgtable.h>
2701 +
2702 +#include <mach/irqs.h>
2703 +
2704 +#include <mach/platform.h>
2705 +#include <mach/vcio.h>
2706 +
2707 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
2708 +
2709 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
2710 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
2711 +
2712 +#include "vchiq_arm.h"
2713 +#include "vchiq_2835.h"
2714 +#include "vchiq_connected.h"
2715 +
2716 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
2717 +
2718 +typedef struct vchiq_2835_state_struct {
2719 +   int inited;
2720 +   VCHIQ_ARM_STATE_T arm_state;
2721 +} VCHIQ_2835_ARM_STATE_T;
2722 +
2723 +static char *g_slot_mem;
2724 +static int g_slot_mem_size;
2725 +dma_addr_t g_slot_phys;
2726 +static FRAGMENTS_T *g_fragments_base;
2727 +static FRAGMENTS_T *g_free_fragments;
2728 +struct semaphore g_free_fragments_sema;
2729 +
2730 +extern int vchiq_arm_log_level;
2731 +
2732 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
2733 +
2734 +static irqreturn_t
2735 +vchiq_doorbell_irq(int irq, void *dev_id);
2736 +
2737 +static int
2738 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2739 +                struct task_struct *task, PAGELIST_T ** ppagelist);
2740 +
2741 +static void
2742 +free_pagelist(PAGELIST_T *pagelist, int actual);
2743 +
2744 +int __init
2745 +vchiq_platform_init(VCHIQ_STATE_T *state)
2746 +{
2747 +       VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
2748 +       int frag_mem_size;
2749 +       int err;
2750 +       int i;
2751 +
2752 +       /* Allocate space for the channels in coherent memory */
2753 +       g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
2754 +       frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
2755 +
2756 +       g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
2757 +               &g_slot_phys, GFP_ATOMIC);
2758 +
2759 +       if (!g_slot_mem) {
2760 +               vchiq_log_error(vchiq_arm_log_level,
2761 +                       "Unable to allocate channel memory");
2762 +               err = -ENOMEM;
2763 +               goto failed_alloc;
2764 +       }
2765 +
2766 +       WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
2767 +
2768 +       vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
2769 +       if (!vchiq_slot_zero) {
2770 +               err = -EINVAL;
2771 +               goto failed_init_slots;
2772 +       }
2773 +
2774 +       vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
2775 +               (int)g_slot_phys + g_slot_mem_size;
2776 +       vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
2777 +               MAX_FRAGMENTS;
2778 +
2779 +       g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
2780 +       g_slot_mem_size += frag_mem_size;
2781 +
2782 +       g_free_fragments = g_fragments_base;
2783 +       for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
2784 +               *(FRAGMENTS_T **)&g_fragments_base[i] =
2785 +                       &g_fragments_base[i + 1];
2786 +       }
2787 +       *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
2788 +       sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
2789 +
2790 +       if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
2791 +               VCHIQ_SUCCESS) {
2792 +               err = -EINVAL;
2793 +               goto failed_vchiq_init;
2794 +       }
2795 +
2796 +       err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
2797 +               IRQF_IRQPOLL, "VCHIQ doorbell",
2798 +               state);
2799 +       if (err < 0) {
2800 +               vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
2801 +                       "irq=%d err=%d", __func__,
2802 +                       VCHIQ_DOORBELL_IRQ, err);
2803 +               goto failed_request_irq;
2804 +       }
2805 +
2806 +       /* Send the base address of the slots to VideoCore */
2807 +
2808 +       dsb(); /* Ensure all writes have completed */
2809 +
2810 +       bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
2811 +
2812 +       vchiq_log_info(vchiq_arm_log_level,
2813 +               "vchiq_init - done (slots %x, phys %x)",
2814 +               (unsigned int)vchiq_slot_zero, g_slot_phys);
2815 +
2816 +   vchiq_call_connected_callbacks();
2817 +
2818 +   return 0;
2819 +
2820 +failed_request_irq:
2821 +failed_vchiq_init:
2822 +failed_init_slots:
2823 +   dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
2824 +
2825 +failed_alloc:
2826 +   return err;
2827 +}
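The fragment pool initialised above is an intrusive free list: while a FRAGMENTS_T is free, its first word doubles as the next-free pointer, so no separate bookkeeping array is needed. A standalone sketch of the same pattern, with hypothetical names and without the locking (the driver guards the real list with g_free_fragments_mutex and a counting semaphore):

struct example_block {
        char payload[64];               /* stands in for FRAGMENTS_T */
};

static struct example_block *example_free_list;

static void example_pool_init(struct example_block *pool, int count)
{
        int i;

        for (i = 0; i < count - 1; i++)         /* chain block i -> i + 1 */
                *(struct example_block **)&pool[i] = &pool[i + 1];
        *(struct example_block **)&pool[count - 1] = NULL;
        example_free_list = pool;
}

static struct example_block *example_pool_get(void)
{
        struct example_block *blk = example_free_list;

        if (blk)                                /* unlink the head block */
                example_free_list = *(struct example_block **)blk;
        return blk;
}

static void example_pool_put(struct example_block *blk)
{
        *(struct example_block **)blk = example_free_list;
        example_free_list = blk;
}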
2828 +
2829 +void __exit
2830 +vchiq_platform_exit(VCHIQ_STATE_T *state)
2831 +{
2832 +   free_irq(VCHIQ_DOORBELL_IRQ, state);
2833 +   dma_free_coherent(NULL, g_slot_mem_size,
2834 +                     g_slot_mem, g_slot_phys);
2835 +}
2836 +
2837 +
2838 +VCHIQ_STATUS_T
2839 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
2840 +{
2841 +   VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2842 +   state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
2843 +   ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
2844 +   status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
2845 +   if(status != VCHIQ_SUCCESS)
2846 +   {
2847 +      ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
2848 +   }
2849 +   return status;
2850 +}
2851 +
2852 +VCHIQ_ARM_STATE_T*
2853 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
2854 +{
2855 +   if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
2856 +   {
2857 +      BUG();
2858 +   }
2859 +   return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
2860 +}
2861 +
2862 +void
2863 +remote_event_signal(REMOTE_EVENT_T *event)
2864 +{
2865 +       wmb();
2866 +
2867 +       event->fired = 1;
2868 +
2869 +       dsb();         /* data barrier operation */
2870 +
2871 +       if (event->armed) {
2872 +               /* trigger vc interrupt */
2873 +
2874 +               writel(0, __io_address(ARM_0_BELL2));
2875 +       }
2876 +}
2877 +
2878 +int
2879 +vchiq_copy_from_user(void *dst, const void *src, int size)
2880 +{
2881 +       if ((uint32_t)src < TASK_SIZE) {
2882 +               return copy_from_user(dst, src, size);
2883 +       } else {
2884 +               memcpy(dst, src, size);
2885 +               return 0;
2886 +       }
2887 +}
2888 +
2889 +VCHIQ_STATUS_T
2890 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
2891 +       void *offset, int size, int dir)
2892 +{
2893 +       PAGELIST_T *pagelist;
2894 +       int ret;
2895 +
2896 +       WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
2897 +
2898 +       ret = create_pagelist((char __user *)offset, size,
2899 +                       (dir == VCHIQ_BULK_RECEIVE)
2900 +                       ? PAGELIST_READ
2901 +                       : PAGELIST_WRITE,
2902 +                       current,
2903 +                       &pagelist);
2904 +       if (ret != 0)
2905 +               return VCHIQ_ERROR;
2906 +
2907 +       bulk->handle = memhandle;
2908 +       bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
2909 +
2910 +       /* Store the pagelist address in remote_data, which isn't used by the
2911 +          slave. */
2912 +       bulk->remote_data = pagelist;
2913 +
2914 +       return VCHIQ_SUCCESS;
2915 +}
2916 +
2917 +void
2918 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
2919 +{
2920 +       if (bulk && bulk->remote_data && bulk->actual)
2921 +               free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
2922 +}
2923 +
2924 +void
2925 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
2926 +{
2927 +       /*
2928 +        * This should only be called on the master (VideoCore) side, but
2929 +        * provide an implementation to avoid the need for ifdefery.
2930 +        */
2931 +       BUG();
2932 +}
2933 +
2934 +void
2935 +vchiq_dump_platform_state(void *dump_context)
2936 +{
2937 +       char buf[80];
2938 +       int len;
2939 +       len = snprintf(buf, sizeof(buf),
2940 +               "  Platform: 2835 (VC master)");
2941 +       vchiq_dump(dump_context, buf, len + 1);
2942 +}
2943 +
2944 +VCHIQ_STATUS_T
2945 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
2946 +{
2947 +   return VCHIQ_ERROR;
2948 +}
2949 +
2950 +VCHIQ_STATUS_T
2951 +vchiq_platform_resume(VCHIQ_STATE_T *state)
2952 +{
2953 +   return VCHIQ_SUCCESS;
2954 +}
2955 +
2956 +void
2957 +vchiq_platform_paused(VCHIQ_STATE_T *state)
2958 +{
2959 +}
2960 +
2961 +void
2962 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
2963 +{
2964 +}
2965 +
2966 +int
2967 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
2968 +{
2969 +   return 1; // autosuspend not supported - videocore always wanted
2970 +}
2971 +
2972 +int
2973 +vchiq_platform_use_suspend_timer(void)
2974 +{
2975 +   return 0;
2976 +}
2977 +void
2978 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
2979 +{
2980 +       vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
2981 +}
2982 +void
2983 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
2984 +{
2985 +       (void)state;
2986 +}
2987 +/*
2988 + * Local functions
2989 + */
2990 +
2991 +static irqreturn_t
2992 +vchiq_doorbell_irq(int irq, void *dev_id)
2993 +{
2994 +       VCHIQ_STATE_T *state = dev_id;
2995 +       irqreturn_t ret = IRQ_NONE;
2996 +       unsigned int status;
2997 +
2998 +       /* Read (and clear) the doorbell */
2999 +       status = readl(__io_address(ARM_0_BELL0));
3000 +
3001 +       if (status & 0x4) {  /* Was the doorbell rung? */
3002 +               remote_event_pollall(state);
3003 +               ret = IRQ_HANDLED;
3004 +       }
3005 +
3006 +       return ret;
3007 +}
3008 +
3009 +/* There is a potential problem with partial cache lines (pages?)
3010 +** at the ends of the block when reading. If the CPU accessed anything in
3011 +** the same line (page?) then it may have pulled old data into the cache,
3012 +** obscuring the new data underneath. We can solve this by transferring the
3013 +** partial cache lines separately, and allowing the ARM to copy into the
3014 +** cached area.
3015 +
3016 +** N.B. This implementation plays slightly fast and loose with the Linux
3017 +** driver programming rules, e.g. its use of __virt_to_bus instead of
3018 +** dma_map_single, but it isn't a multi-platform driver and it benefits
3019 +** from increased speed as a result.
3020 +*/
3021 +
3022 +static int
3023 +create_pagelist(char __user *buf, size_t count, unsigned short type,
3024 +       struct task_struct *task, PAGELIST_T ** ppagelist)
3025 +{
3026 +       PAGELIST_T *pagelist;
3027 +       struct page **pages;
3028 +       struct page *page;
3029 +       unsigned long *addrs;
3030 +       unsigned int num_pages, offset, i;
3031 +       char *addr, *base_addr, *next_addr;
3032 +       int run, addridx, actual_pages;
3033 +
3034 +       offset = (unsigned int)buf & (PAGE_SIZE - 1);
3035 +       num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
3036 +
3037 +       *ppagelist = NULL;
3038 +
3039 +       /* Allocate enough storage to hold the page pointers and the page
3040 +       ** list
3041 +       */
3042 +       pagelist = kmalloc(sizeof(PAGELIST_T) +
3043 +               (num_pages * sizeof(unsigned long)) +
3044 +               (num_pages * sizeof(pages[0])),
3045 +               GFP_KERNEL);
3046 +
3047 +       vchiq_log_trace(vchiq_arm_log_level,
3048 +               "create_pagelist - %x", (unsigned int)pagelist);
3049 +       if (!pagelist)
3050 +               return -ENOMEM;
3051 +
3052 +       addrs = pagelist->addrs;
3053 +       pages = (struct page **)(addrs + num_pages);
3054 +
3055 +       down_read(&task->mm->mmap_sem);
3056 +       actual_pages = get_user_pages(task, task->mm,
3057 +               (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
3058 +               (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
3059 +               pages, NULL /*vmas */);
3060 +       up_read(&task->mm->mmap_sem);
3061 +
3062 +   if (actual_pages != num_pages)
3063 +   {
3064 +      /* This is probably due to the process being killed */
3065 +      while (actual_pages > 0)
3066 +      {
3067 +         actual_pages--;
3068 +         page_cache_release(pages[actual_pages]);
3069 +      }
3070 +      kfree(pagelist);
3071 +      if (actual_pages == 0)
3072 +         actual_pages = -ENOMEM;
3073 +      return actual_pages;
3074 +   }
3075 +
3076 +       pagelist->length = count;
3077 +       pagelist->type = type;
3078 +       pagelist->offset = offset;
3079 +
3080 +       /* Group the pages into runs of contiguous pages */
3081 +
3082 +       base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
3083 +       next_addr = base_addr + PAGE_SIZE;
3084 +       addridx = 0;
3085 +       run = 0;
3086 +
3087 +       for (i = 1; i < num_pages; i++) {
3088 +               addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
3089 +               if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
3090 +                       next_addr += PAGE_SIZE;
3091 +                       run++;
3092 +               } else {
3093 +                       addrs[addridx] = (unsigned long)base_addr + run;
3094 +                       addridx++;
3095 +                       base_addr = addr;
3096 +                       next_addr = addr + PAGE_SIZE;
3097 +                       run = 0;
3098 +               }
3099 +       }
3100 +
3101 +       addrs[addridx] = (unsigned long)base_addr + run;
3102 +       addridx++;
3103 +
3104 +       /* Partial cache lines (fragments) require special measures */
3105 +       if ((type == PAGELIST_READ) &&
3106 +               ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
3107 +               ((pagelist->offset + pagelist->length) &
3108 +               (CACHE_LINE_SIZE - 1)))) {
3109 +               FRAGMENTS_T *fragments;
3110 +
3111 +               if (down_interruptible(&g_free_fragments_sema) != 0) {
3112 +                       kfree(pagelist);
3113 +                       return -EINTR;
3114 +               }
3115 +
3116 +               WARN_ON(g_free_fragments == NULL);
3117 +
3118 +               down(&g_free_fragments_mutex);
3119 +               fragments = (FRAGMENTS_T *) g_free_fragments;
3120 +               WARN_ON(fragments == NULL);
3121 +               g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
3122 +               up(&g_free_fragments_mutex);
3123 +               pagelist->type =
3124 +                        PAGELIST_READ_WITH_FRAGMENTS + (fragments -
3125 +                                                        g_fragments_base);
3126 +       }
3127 +
3128 +       for (page = virt_to_page(pagelist);
3129 +               page <= virt_to_page(addrs + num_pages - 1); page++) {
3130 +               flush_dcache_page(page);
3131 +       }
3132 +
3133 +       *ppagelist = pagelist;
3134 +
3135 +       return 0;
3136 +}
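A worked example of the run-length encoding built by create_pagelist() above, assuming PAGE_SIZE is 0x1000 (the addresses are made up for illustration):

/*
 *   page bus addresses: 0x1000, 0x2000, 0x3000, 0x8000
 *
 *   0x1000..0x3000 are contiguous, so they collapse into a single entry
 *   with base 0x1000 and run 2 (two extra pages):  addrs[0] = 0x1000 + 2
 *   the gap before 0x8000 starts a new entry:      addrs[1] = 0x8000 + 0
 *
 * Packing the run count into the low bits works because page-aligned bus
 * addresses have their bottom 12 bits clear, and run never exceeds
 * PAGE_SIZE - 1 thanks to the (run < (PAGE_SIZE - 1)) check above.
 */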
3137 +
3138 +static void
3139 +free_pagelist(PAGELIST_T *pagelist, int actual)
3140 +{
3141 +       struct page **pages;
3142 +       unsigned int num_pages, i;
3143 +
3144 +       vchiq_log_trace(vchiq_arm_log_level,
3145 +               "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
3146 +
3147 +       num_pages =
3148 +               (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
3149 +               PAGE_SIZE;
3150 +
3151 +       pages = (struct page **)(pagelist->addrs + num_pages);
3152 +
3153 +       /* Deal with any partial cache lines (fragments) */
3154 +       if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
3155 +               FRAGMENTS_T *fragments = g_fragments_base +
3156 +                       (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
3157 +               int head_bytes, tail_bytes;
3158 +               head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
3159 +                       (CACHE_LINE_SIZE - 1);
3160 +               tail_bytes = (pagelist->offset + actual) &
3161 +                       (CACHE_LINE_SIZE - 1);
3162 +
3163 +               if ((actual >= 0) && (head_bytes != 0)) {
3164 +                       if (head_bytes > actual)
3165 +                               head_bytes = actual;
3166 +
3167 +                       memcpy((char *)page_address(pages[0]) +
3168 +                               pagelist->offset,
3169 +                               fragments->headbuf,
3170 +                               head_bytes);
3171 +               }
3172 +               if ((actual >= 0) && (head_bytes < actual) &&
3173 +                       (tail_bytes != 0)) {
3174 +                       memcpy((char *)page_address(pages[num_pages - 1]) +
3175 +                               ((pagelist->offset + actual) &
3176 +                               (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
3177 +                               fragments->tailbuf, tail_bytes);
3178 +               }
3179 +
3180 +               down(&g_free_fragments_mutex);
3181 +               *(FRAGMENTS_T **) fragments = g_free_fragments;
3182 +               g_free_fragments = fragments;
3183 +               up(&g_free_fragments_mutex);
3184 +               up(&g_free_fragments_sema);
3185 +       }
3186 +
3187 +       for (i = 0; i < num_pages; i++) {
3188 +               if (pagelist->type != PAGELIST_WRITE)
3189 +                       set_page_dirty(pages[i]);
3190 +               page_cache_release(pages[i]);
3191 +       }
3192 +
3193 +       kfree(pagelist);
3194 +}
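A worked example of the fragment copy-back arithmetic in free_pagelist() above, with an assumed CACHE_LINE_SIZE of 32, offset of 10 and actual of 100:

/*
 *   head_bytes = (32 - 10) & 31 = 22   -> bytes 10..31 come from headbuf
 *   tail_bytes = (10 + 100) & 31 = 14  -> bytes 96..109 come from tailbuf
 *
 * The middle 64 bytes (32..95) occupy whole cache lines and were written by
 * the transfer directly, so only the two partial lines at the ends need the
 * memcpy() fix-up from the fragment buffers.
 */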
3195 --- /dev/null
3196 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
3197 @@ -0,0 +1,42 @@
3198 +/**
3199 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3200 + *
3201 + * Redistribution and use in source and binary forms, with or without
3202 + * modification, are permitted provided that the following conditions
3203 + * are met:
3204 + * 1. Redistributions of source code must retain the above copyright
3205 + *    notice, this list of conditions, and the following disclaimer,
3206 + *    without modification.
3207 + * 2. Redistributions in binary form must reproduce the above copyright
3208 + *    notice, this list of conditions and the following disclaimer in the
3209 + *    documentation and/or other materials provided with the distribution.
3210 + * 3. The names of the above-listed copyright holders may not be used
3211 + *    to endorse or promote products derived from this software without
3212 + *    specific prior written permission.
3213 + *
3214 + * ALTERNATIVELY, this software may be distributed under the terms of the
3215 + * GNU General Public License ("GPL") version 2, as published by the Free
3216 + * Software Foundation.
3217 + *
3218 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
3219 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
3220 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
3221 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
3222 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
3223 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
3224 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
3225 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
3226 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
3227 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3228 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3229 + */
3230 +
3231 +#ifndef VCHIQ_2835_H
3232 +#define VCHIQ_2835_H
3233 +
3234 +#include "vchiq_pagelist.h"
3235 +
3236 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
3237 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
3238 +
3239 +#endif /* VCHIQ_2835_H */
3240 --- /dev/null
3241 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
3242 @@ -0,0 +1,2806 @@
3243 +/**
3244 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3245 + *
3246 + * Redistribution and use in source and binary forms, with or without
3247 + * modification, are permitted provided that the following conditions
3248 + * are met:
3249 + * 1. Redistributions of source code must retain the above copyright
3250 + *    notice, this list of conditions, and the following disclaimer,
3251 + *    without modification.
3252 + * 2. Redistributions in binary form must reproduce the above copyright
3253 + *    notice, this list of conditions and the following disclaimer in the
3254 + *    documentation and/or other materials provided with the distribution.
3255 + * 3. The names of the above-listed copyright holders may not be used
3256 + *    to endorse or promote products derived from this software without
3257 + *    specific prior written permission.
3258 + *
3259 + * ALTERNATIVELY, this software may be distributed under the terms of the
3260 + * GNU General Public License ("GPL") version 2, as published by the Free
3261 + * Software Foundation.
3262 + *
3263 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
3264 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
3265 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
3266 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
3267 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
3268 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
3269 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
3270 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
3271 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
3272 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3273 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3274 + */
3275 +
3276 +#include <linux/kernel.h>
3277 +#include <linux/module.h>
3278 +#include <linux/types.h>
3279 +#include <linux/errno.h>
3280 +#include <linux/cdev.h>
3281 +#include <linux/fs.h>
3282 +#include <linux/device.h>
3283 +#include <linux/mm.h>
3284 +#include <linux/highmem.h>
3285 +#include <linux/pagemap.h>
3286 +#include <linux/bug.h>
3287 +#include <linux/semaphore.h>
3288 +#include <linux/list.h>
3289 +#include <linux/proc_fs.h>
3290 +
3291 +#include "vchiq_core.h"
3292 +#include "vchiq_ioctl.h"
3293 +#include "vchiq_arm.h"
3294 +
3295 +#define DEVICE_NAME "vchiq"
3296 +
3297 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
3298 +#undef MODULE_PARAM_PREFIX
3299 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
3300 +
3301 +#define VCHIQ_MINOR 0
3302 +
3303 +/* Some per-instance constants */
3304 +#define MAX_COMPLETIONS 16
3305 +#define MAX_SERVICES 64
3306 +#define MAX_ELEMENTS 8
3307 +#define MSG_QUEUE_SIZE 64
3308 +
3309 +#define KEEPALIVE_VER 1
3310 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
3311 +
3312 +/* Run time control of log level, based on KERN_XXX level. */
3313 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
3314 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
3315 +
3316 +#define SUSPEND_TIMER_TIMEOUT_MS 100
3317 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
3318 +
3319 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
3320 +static const char *const suspend_state_names[] = {
3321 +       "VC_SUSPEND_FORCE_CANCELED",
3322 +       "VC_SUSPEND_REJECTED",
3323 +       "VC_SUSPEND_FAILED",
3324 +       "VC_SUSPEND_IDLE",
3325 +       "VC_SUSPEND_REQUESTED",
3326 +       "VC_SUSPEND_IN_PROGRESS",
3327 +       "VC_SUSPEND_SUSPENDED"
3328 +};
3329 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
3330 +static const char *const resume_state_names[] = {
3331 +       "VC_RESUME_FAILED",
3332 +       "VC_RESUME_IDLE",
3333 +       "VC_RESUME_REQUESTED",
3334 +       "VC_RESUME_IN_PROGRESS",
3335 +       "VC_RESUME_RESUMED"
3336 +       "VC_RESUME_RESUMED"
+};
3337 +/* The number of times we allow force suspend to timeout before actually
3338 +** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
3339 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
3340 +*/
3341 +#define FORCE_SUSPEND_FAIL_MAX 8
3342 +
3343 +/* The time in ms allowed for videocore to go idle when force suspend has been
3344 + * requested */
3345 +#define FORCE_SUSPEND_TIMEOUT_MS 200
3346 +
3347 +
3348 +static void suspend_timer_callback(unsigned long context);
3349 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
3350 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
3351 +
3352 +
3353 +typedef struct user_service_struct {
3354 +       VCHIQ_SERVICE_T *service;
3355 +       void *userdata;
3356 +       VCHIQ_INSTANCE_T instance;
3357 +       int is_vchi;
3358 +       int dequeue_pending;
3359 +       int message_available_pos;
3360 +       int msg_insert;
3361 +       int msg_remove;
3362 +       struct semaphore insert_event;
3363 +       struct semaphore remove_event;
3364 +       VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
3365 +} USER_SERVICE_T;
3366 +
3367 +struct bulk_waiter_node {
3368 +       struct bulk_waiter bulk_waiter;
3369 +       int pid;
3370 +       struct list_head list;
3371 +};
3372 +
3373 +struct vchiq_instance_struct {
3374 +       VCHIQ_STATE_T *state;
3375 +       VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
3376 +       int completion_insert;
3377 +       int completion_remove;
3378 +       struct semaphore insert_event;
3379 +       struct semaphore remove_event;
3380 +       struct mutex completion_mutex;
3381 +
3382 +       int connected;
3383 +       int closing;
3384 +       int pid;
3385 +       int mark;
3386 +
3387 +       struct list_head bulk_waiter_list;
3388 +       struct mutex bulk_waiter_list_mutex;
3389 +
3390 +       struct proc_dir_entry *proc_entry;
3391 +};
3392 +
3393 +typedef struct dump_context_struct {
3394 +       char __user *buf;
3395 +       size_t actual;
3396 +       size_t space;
3397 +       loff_t offset;
3398 +} DUMP_CONTEXT_T;
3399 +
3400 +static struct cdev    vchiq_cdev;
3401 +static dev_t          vchiq_devid;
3402 +static VCHIQ_STATE_T g_state;
3403 +static struct class  *vchiq_class;
3404 +static struct device *vchiq_dev;
3405 +static DEFINE_SPINLOCK(msg_queue_spinlock);
3406 +
3407 +static const char *const ioctl_names[] = {
3408 +       "CONNECT",
3409 +       "SHUTDOWN",
3410 +       "CREATE_SERVICE",
3411 +       "REMOVE_SERVICE",
3412 +       "QUEUE_MESSAGE",
3413 +       "QUEUE_BULK_TRANSMIT",
3414 +       "QUEUE_BULK_RECEIVE",
3415 +       "AWAIT_COMPLETION",
3416 +       "DEQUEUE_MESSAGE",
3417 +       "GET_CLIENT_ID",
3418 +       "GET_CONFIG",
3419 +       "CLOSE_SERVICE",
3420 +       "USE_SERVICE",
3421 +       "RELEASE_SERVICE",
3422 +       "SET_SERVICE_OPTION",
3423 +       "DUMP_PHYS_MEM"
3424 +};
3425 +
3426 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
3427 +       (VCHIQ_IOC_MAX + 1));
3428 +
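vchiq_static_assert() pins ioctl_names[] to VCHIQ_IOC_MAX at compile time; its definition lives in the vchiq headers rather than this hunk, but a generic pre-C11 equivalent of such a compile-time assertion (hypothetical names) might look like:

#define EXAMPLE_STATIC_ASSERT(expr, name) \
        typedef char example_assert_##name[(expr) ? 1 : -1]

/* e.g. EXAMPLE_STATIC_ASSERT(sizeof(ioctl_names)/sizeof(ioctl_names[0])
 *                            == VCHIQ_IOC_MAX + 1, ioctl_names_in_sync); */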
3429 +static void
3430 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
3431 +
3432 +/****************************************************************************
3433 +*
3434 +*   add_completion
3435 +*
3436 +***************************************************************************/
3437 +
3438 +static VCHIQ_STATUS_T
3439 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
3440 +       VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
3441 +       void *bulk_userdata)
3442 +{
3443 +       VCHIQ_COMPLETION_DATA_T *completion;
3444 +       DEBUG_INITIALISE(g_state.local)
3445 +
3446 +       while (instance->completion_insert ==
3447 +               (instance->completion_remove + MAX_COMPLETIONS)) {
3448 +               /* Out of space - wait for the client */
3449 +               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3450 +               vchiq_log_trace(vchiq_arm_log_level,
3451 +                       "add_completion - completion queue full");
3452 +               DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
3453 +               if (down_interruptible(&instance->remove_event) != 0) {
3454 +                       vchiq_log_info(vchiq_arm_log_level,
3455 +                               "service_callback interrupted");
3456 +                       return VCHIQ_RETRY;
3457 +               } else if (instance->closing) {
3458 +                       vchiq_log_info(vchiq_arm_log_level,
3459 +                               "service_callback closing");
3460 +                       return VCHIQ_ERROR;
3461 +               }
3462 +               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3463 +       }
3464 +
3465 +       completion =
3466 +                &instance->completions[instance->completion_insert &
3467 +                (MAX_COMPLETIONS - 1)];
3468 +
3469 +       completion->header = header;
3470 +       completion->reason = reason;
3471 +       /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
3472 +       completion->service_userdata = user_service->service;
3473 +       completion->bulk_userdata = bulk_userdata;
3474 +
3475 +       if (reason == VCHIQ_SERVICE_CLOSED)
3476 +               /* Take an extra reference, to be held until
3477 +                  this CLOSED notification is delivered. */
3478 +               lock_service(user_service->service);
3479 +
3480 +       /* A write barrier is needed here to ensure that the entire completion
3481 +               record is written out before the insert point. */
3482 +       wmb();
3483 +
3484 +       if (reason == VCHIQ_MESSAGE_AVAILABLE)
3485 +               user_service->message_available_pos =
3486 +                       instance->completion_insert;
3487 +       instance->completion_insert++;
3488 +
3489 +       up(&instance->insert_event);
3490 +
3491 +       return VCHIQ_SUCCESS;
3492 +}
3493 +
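The completion queue filled by add_completion() above is a power-of-two ring indexed by free-running counters: the insert and remove counts are never wrapped, fullness is detected with insert == remove + SIZE, and the slot is selected with index & (SIZE - 1). A standalone sketch of the same idiom with hypothetical names (the driver additionally orders the slot write with wmb() and signals the consumer with a semaphore):

#define EXAMPLE_RING_SIZE 16            /* must be a power of two */

struct example_ring {
        int items[EXAMPLE_RING_SIZE];
        unsigned int insert;            /* free-running producer counter */
        unsigned int remove;            /* free-running consumer counter */
};

static int example_ring_push(struct example_ring *r, int item)
{
        if (r->insert == r->remove + EXAMPLE_RING_SIZE)
                return -1;                              /* full */
        r->items[r->insert & (EXAMPLE_RING_SIZE - 1)] = item;
        r->insert++;                                    /* publish after write */
        return 0;
}

static int example_ring_pop(struct example_ring *r, int *item)
{
        if (r->remove == r->insert)
                return -1;                              /* empty */
        *item = r->items[r->remove & (EXAMPLE_RING_SIZE - 1)];
        r->remove++;
        return 0;
}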
3494 +/****************************************************************************
3495 +*
3496 +*   service_callback
3497 +*
3498 +***************************************************************************/
3499 +
3500 +static VCHIQ_STATUS_T
3501 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
3502 +       VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
3503 +{
3504 +       /* How do we ensure the callback goes to the right client?
3505 +       ** The service userdata points to a USER_SERVICE_T record containing
3506 +       ** the original callback and the user state structure, which contains a
3507 +       ** circular buffer for completion records.
3508 +       */
3509 +       USER_SERVICE_T *user_service;
3510 +       VCHIQ_SERVICE_T *service;
3511 +       VCHIQ_INSTANCE_T instance;
3512 +       DEBUG_INITIALISE(g_state.local)
3513 +
3514 +       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3515 +
3516 +       service = handle_to_service(handle);
3517 +       BUG_ON(!service);
3518 +       user_service = (USER_SERVICE_T *)service->base.userdata;
3519 +       instance = user_service->instance;
3520 +
3521 +       if (!instance || instance->closing)
3522 +               return VCHIQ_SUCCESS;
3523 +
3524 +       vchiq_log_trace(vchiq_arm_log_level,
3525 +               "service_callback - service %lx(%d), reason %d, header %lx, "
3526 +               "instance %lx, bulk_userdata %lx",
3527 +               (unsigned long)user_service,
3528 +               service->localport,
3529 +               reason, (unsigned long)header,
3530 +               (unsigned long)instance, (unsigned long)bulk_userdata);
3531 +
3532 +       if (header && user_service->is_vchi) {
3533 +               spin_lock(&msg_queue_spinlock);
3534 +               while (user_service->msg_insert ==
3535 +                       (user_service->msg_remove + MSG_QUEUE_SIZE)) {
3536 +                       spin_unlock(&msg_queue_spinlock);
3537 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3538 +                       DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
3539 +                       vchiq_log_trace(vchiq_arm_log_level,
3540 +                               "service_callback - msg queue full");
3541 +                       /* If there is no MESSAGE_AVAILABLE in the completion
3542 +                       ** queue, add one
3543 +                       */
3544 +                       if ((user_service->message_available_pos -
3545 +                               instance->completion_remove) < 0) {
3546 +                               VCHIQ_STATUS_T status;
3547 +                               vchiq_log_info(vchiq_arm_log_level,
3548 +                                       "Inserting extra MESSAGE_AVAILABLE");
3549 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3550 +                               status = add_completion(instance, reason,
3551 +                                       NULL, user_service, bulk_userdata);
3552 +                               if (status != VCHIQ_SUCCESS) {
3553 +                                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3554 +                                       return status;
3555 +                               }
3556 +                       }
3557 +
3558 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3559 +                       if (down_interruptible(&user_service->remove_event)
3560 +                               != 0) {
3561 +                               vchiq_log_info(vchiq_arm_log_level,
3562 +                                       "service_callback interrupted");
3563 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3564 +                               return VCHIQ_RETRY;
3565 +                       } else if (instance->closing) {
3566 +                               vchiq_log_info(vchiq_arm_log_level,
3567 +                                       "service_callback closing");
3568 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3569 +                               return VCHIQ_ERROR;
3570 +                       }
3571 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3572 +                       spin_lock(&msg_queue_spinlock);
3573 +               }
3574 +
3575 +               user_service->msg_queue[user_service->msg_insert &
3576 +                       (MSG_QUEUE_SIZE - 1)] = header;
3577 +               user_service->msg_insert++;
3578 +               spin_unlock(&msg_queue_spinlock);
3579 +
3580 +               up(&user_service->insert_event);
3581 +
3582 +               /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
3583 +               ** there is a MESSAGE_AVAILABLE in the completion queue then
3584 +               ** bypass the completion queue.
3585 +               */
3586 +               if (((user_service->message_available_pos -
3587 +                       instance->completion_remove) >= 0) ||
3588 +                       user_service->dequeue_pending) {
3589 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3590 +                       user_service->dequeue_pending = 0;
3591 +                       return VCHIQ_SUCCESS;
3592 +               }
3593 +
3594 +               header = NULL;
3595 +       }
3596 +       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
3597 +
3598 +       return add_completion(instance, reason, header, user_service,
3599 +               bulk_userdata);
3600 +}
3601 +
3602 +/****************************************************************************
3603 +*
3604 +*   vchiq_ioctl
3605 +*
3606 +***************************************************************************/
3607 +
3608 +static long
3609 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3610 +{
3611 +       VCHIQ_INSTANCE_T instance = file->private_data;
3612 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3613 +       VCHIQ_SERVICE_T *service = NULL;
3614 +       long ret = 0;
3615 +       int i, rc;
3616 +       DEBUG_INITIALISE(g_state.local)
3617 +
3618 +       vchiq_log_trace(vchiq_arm_log_level,
3619 +                "vchiq_ioctl - instance %x, cmd %s, arg %lx",
3620 +               (unsigned int)instance,
3621 +               ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
3622 +               (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
3623 +               ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
3624 +
3625 +       switch (cmd) {
3626 +       case VCHIQ_IOC_SHUTDOWN:
3627 +               if (!instance->connected)
3628 +                       break;
3629 +
3630 +               /* Remove all services */
3631 +               i = 0;
3632 +               while ((service = next_service_by_instance(instance->state,
3633 +                       instance, &i)) != NULL) {
3634 +                       status = vchiq_remove_service(service->handle);
3635 +                       unlock_service(service);
3636 +                       if (status != VCHIQ_SUCCESS)
3637 +                               break;
3638 +               }
3639 +               service = NULL;
3640 +
3641 +               if (status == VCHIQ_SUCCESS) {
3642 +                       /* Wake the completion thread and ask it to exit */
3643 +                       instance->closing = 1;
3644 +                       up(&instance->insert_event);
3645 +               }
3646 +
3647 +               break;
3648 +
3649 +       case VCHIQ_IOC_CONNECT:
3650 +               if (instance->connected) {
3651 +                       ret = -EINVAL;
3652 +                       break;
3653 +               }
3654 +               rc = mutex_lock_interruptible(&instance->state->mutex);
3655 +               if (rc != 0) {
3656 +                       vchiq_log_error(vchiq_arm_log_level,
3657 +                               "vchiq: connect: could not lock mutex for "
3658 +                               "state %d: %d",
3659 +                               instance->state->id, rc);
3660 +                       ret = -EINTR;
3661 +                       break;
3662 +               }
3663 +               status = vchiq_connect_internal(instance->state, instance);
3664 +               mutex_unlock(&instance->state->mutex);
3665 +
3666 +               if (status == VCHIQ_SUCCESS)
3667 +                       instance->connected = 1;
3668 +               else
3669 +                       vchiq_log_error(vchiq_arm_log_level,
3670 +                               "vchiq: could not connect: %d", status);
3671 +               break;
3672 +
3673 +       case VCHIQ_IOC_CREATE_SERVICE: {
3674 +               VCHIQ_CREATE_SERVICE_T args;
3675 +               USER_SERVICE_T *user_service = NULL;
3676 +               void *userdata;
3677 +               int srvstate;
3678 +
3679 +               if (copy_from_user
3680 +                        (&args, (const void __user *)arg,
3681 +                         sizeof(args)) != 0) {
3682 +                       ret = -EFAULT;
3683 +                       break;
3684 +               }
3685 +
3686 +               user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
3687 +               if (!user_service) {
3688 +                       ret = -ENOMEM;
3689 +                       break;
3690 +               }
3691 +
3692 +               if (args.is_open) {
3693 +                       if (!instance->connected) {
3694 +                               ret = -ENOTCONN;
3695 +                               kfree(user_service);
3696 +                               break;
3697 +                       }
3698 +                       srvstate = VCHIQ_SRVSTATE_OPENING;
3699 +               } else {
3700 +                       srvstate =
3701 +                                instance->connected ?
3702 +                                VCHIQ_SRVSTATE_LISTENING :
3703 +                                VCHIQ_SRVSTATE_HIDDEN;
3704 +               }
3705 +
3706 +               userdata = args.params.userdata;
3707 +               args.params.callback = service_callback;
3708 +               args.params.userdata = user_service;
3709 +               service = vchiq_add_service_internal(
3710 +                               instance->state,
3711 +                               &args.params, srvstate,
3712 +                               instance);
3713 +
3714 +               if (service != NULL) {
3715 +                       user_service->service = service;
3716 +                       user_service->userdata = userdata;
3717 +                       user_service->instance = instance;
3718 +                       user_service->is_vchi = args.is_vchi;
3719 +                       user_service->dequeue_pending = 0;
3720 +                       user_service->message_available_pos =
3721 +                               instance->completion_remove - 1;
3722 +                       user_service->msg_insert = 0;
3723 +                       user_service->msg_remove = 0;
3724 +                       sema_init(&user_service->insert_event, 0);
3725 +                       sema_init(&user_service->remove_event, 0);
3726 +
3727 +                       if (args.is_open) {
3728 +                               status = vchiq_open_service_internal
3729 +                                       (service, instance->pid);
3730 +                               if (status != VCHIQ_SUCCESS) {
3731 +                                       vchiq_remove_service(service->handle);
3732 +                                       service = NULL;
3733 +                                       ret = (status == VCHIQ_RETRY) ?
3734 +                                               -EINTR : -EIO;
3735 +                                       user_service->service = NULL;
3736 +                                       user_service->instance = NULL;
3737 +                                       break;
3738 +                               }
3739 +                       }
3740 +
3741 +                       if (copy_to_user((void __user *)
3742 +                               &(((VCHIQ_CREATE_SERVICE_T __user *)
3743 +                                       arg)->handle),
3744 +                               (const void *)&service->handle,
3745 +                               sizeof(service->handle)) != 0) {
3746 +                               ret = -EFAULT;
3747 +                               vchiq_remove_service(service->handle);
3748 +                               kfree(user_service);
3749 +                       }
3750 +
3751 +                       service = NULL;
3752 +               } else {
3753 +                       ret = -EEXIST;
3754 +                       kfree(user_service);
3755 +               }
3756 +       } break;
3757 +
3758 +       case VCHIQ_IOC_CLOSE_SERVICE: {
3759 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3760 +
3761 +               service = find_service_for_instance(instance, handle);
3762 +               if (service != NULL)
3763 +                       status = vchiq_close_service(service->handle);
3764 +               else
3765 +                       ret = -EINVAL;
3766 +       } break;
3767 +
3768 +       case VCHIQ_IOC_REMOVE_SERVICE: {
3769 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3770 +
3771 +               service = find_service_for_instance(instance, handle);
3772 +               if (service != NULL)
3773 +                       status = vchiq_remove_service(service->handle);
3774 +               else
3775 +                       ret = -EINVAL;
3776 +       } break;
3777 +
3778 +       case VCHIQ_IOC_USE_SERVICE:
3779 +       case VCHIQ_IOC_RELEASE_SERVICE: {
3780 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3781 +
3782 +               service = find_service_for_instance(instance, handle);
3783 +               if (service != NULL) {
3784 +                       status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
3785 +                               vchiq_use_service_internal(service) :
3786 +                               vchiq_release_service_internal(service);
3787 +                       if (status != VCHIQ_SUCCESS) {
3788 +                               vchiq_log_error(vchiq_susp_log_level,
3789 +                                       "%s: cmd %s returned error %d for "
3790 +                                       "service %c%c%c%c:%03d",
3791 +                                       __func__,
3792 +                                       (cmd == VCHIQ_IOC_USE_SERVICE) ?
3793 +                                               "VCHIQ_IOC_USE_SERVICE" :
3794 +                                               "VCHIQ_IOC_RELEASE_SERVICE",
3795 +                                       status,
3796 +                                       VCHIQ_FOURCC_AS_4CHARS(
3797 +                                               service->base.fourcc),
3798 +                                       service->client_id);
3799 +                               ret = -EINVAL;
3800 +                       }
3801 +               } else
3802 +                       ret = -EINVAL;
3803 +       } break;
3804 +
3805 +       case VCHIQ_IOC_QUEUE_MESSAGE: {
3806 +               VCHIQ_QUEUE_MESSAGE_T args;
3807 +               if (copy_from_user
3808 +                        (&args, (const void __user *)arg,
3809 +                         sizeof(args)) != 0) {
3810 +                       ret = -EFAULT;
3811 +                       break;
3812 +               }
3813 +
3814 +               service = find_service_for_instance(instance, args.handle);
3815 +
3816 +               if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
3817 +                       /* Copy elements into kernel space */
3818 +                       VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
3819 +                       if (copy_from_user(elements, args.elements,
3820 +                               args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
3821 +                               status = vchiq_queue_message
3822 +                                       (args.handle,
3823 +                                       elements, args.count);
3824 +                       else
3825 +                               ret = -EFAULT;
3826 +               } else {
3827 +                       ret = -EINVAL;
3828 +               }
3829 +       } break;
3830 +
3831 +       case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
3832 +       case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
3833 +               VCHIQ_QUEUE_BULK_TRANSFER_T args;
3834 +               struct bulk_waiter_node *waiter = NULL;
3835 +               VCHIQ_BULK_DIR_T dir =
3836 +                       (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
3837 +                       VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
3838 +
3839 +               if (copy_from_user
3840 +                       (&args, (const void __user *)arg,
3841 +                       sizeof(args)) != 0) {
3842 +                       ret = -EFAULT;
3843 +                       break;
3844 +               }
3845 +
3846 +               service = find_service_for_instance(instance, args.handle);
3847 +               if (!service) {
3848 +                       ret = -EINVAL;
3849 +                       break;
3850 +               }
3851 +
3852 +               if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
3853 +                       waiter = kzalloc(sizeof(struct bulk_waiter_node),
3854 +                               GFP_KERNEL);
3855 +                       if (!waiter) {
3856 +                               ret = -ENOMEM;
3857 +                               break;
3858 +                       }
3859 +                       args.userdata = &waiter->bulk_waiter;
3860 +               } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
3861 +                       struct list_head *pos;
3862 +                       mutex_lock(&instance->bulk_waiter_list_mutex);
3863 +                       list_for_each(pos, &instance->bulk_waiter_list) {
3864 +                               if (list_entry(pos, struct bulk_waiter_node,
3865 +                                       list)->pid == current->pid) {
3866 +                                       waiter = list_entry(pos,
3867 +                                               struct bulk_waiter_node,
3868 +                                               list);
3869 +                                       list_del(pos);
3870 +                                       break;
3871 +                               }
3872 +
3873 +                       }
3874 +                       mutex_unlock(&instance->bulk_waiter_list_mutex);
3875 +                       if (!waiter) {
3876 +                               vchiq_log_error(vchiq_arm_log_level,
3877 +                                       "no bulk_waiter found for pid %d",
3878 +                                       current->pid);
3879 +                               ret = -ESRCH;
3880 +                               break;
3881 +                       }
3882 +                       vchiq_log_info(vchiq_arm_log_level,
3883 +                               "found bulk_waiter %x for pid %d",
3884 +                               (unsigned int)waiter, current->pid);
3885 +                       args.userdata = &waiter->bulk_waiter;
3886 +               }
3887 +               status = vchiq_bulk_transfer
3888 +                       (args.handle,
3889 +                        VCHI_MEM_HANDLE_INVALID,
3890 +                        args.data, args.size,
3891 +                        args.userdata, args.mode,
3892 +                        dir);
3893 +               if (!waiter)
3894 +                       break;
3895 +               if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
3896 +                       !waiter->bulk_waiter.bulk) {
3897 +                       if (waiter->bulk_waiter.bulk) {
3898 +                               /* Cancel the signal when the transfer
3899 +                               ** completes. */
3900 +                               spin_lock(&bulk_waiter_spinlock);
3901 +                               waiter->bulk_waiter.bulk->userdata = NULL;
3902 +                               spin_unlock(&bulk_waiter_spinlock);
3903 +                       }
3904 +                       kfree(waiter);
3905 +               } else {
3906 +                       const VCHIQ_BULK_MODE_T mode_waiting =
3907 +                               VCHIQ_BULK_MODE_WAITING;
3908 +                       waiter->pid = current->pid;
3909 +                       mutex_lock(&instance->bulk_waiter_list_mutex);
3910 +                       list_add(&waiter->list, &instance->bulk_waiter_list);
3911 +                       mutex_unlock(&instance->bulk_waiter_list_mutex);
3912 +                       vchiq_log_info(vchiq_arm_log_level,
3913 +                               "saved bulk_waiter %x for pid %d",
3914 +                               (unsigned int)waiter, current->pid);
3915 +
3916 +                       if (copy_to_user((void __user *)
3917 +                               &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
3918 +                                       arg)->mode),
3919 +                               (const void *)&mode_waiting,
3920 +                               sizeof(mode_waiting)) != 0)
3921 +                               ret = -EFAULT;
3922 +               }
3923 +       } break;
3924 +
3925 +       case VCHIQ_IOC_AWAIT_COMPLETION: {
3926 +               VCHIQ_AWAIT_COMPLETION_T args;
3927 +
3928 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3929 +               if (!instance->connected) {
3930 +                       ret = -ENOTCONN;
3931 +                       break;
3932 +               }
3933 +
3934 +               if (copy_from_user(&args, (const void __user *)arg,
3935 +                       sizeof(args)) != 0) {
3936 +                       ret = -EFAULT;
3937 +                       break;
3938 +               }
3939 +
3940 +               mutex_lock(&instance->completion_mutex);
3941 +
3942 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3943 +               while ((instance->completion_remove ==
3944 +                       instance->completion_insert)
3945 +                       && !instance->closing) {
3946 +                       int rc;
3947 +                       DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3948 +                       mutex_unlock(&instance->completion_mutex);
3949 +                       rc = down_interruptible(&instance->insert_event);
3950 +                       mutex_lock(&instance->completion_mutex);
3951 +                       if (rc != 0) {
3952 +                               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3953 +                               vchiq_log_info(vchiq_arm_log_level,
3954 +                                       "AWAIT_COMPLETION interrupted");
3955 +                               ret = -EINTR;
3956 +                               break;
3957 +                       }
3958 +               }
3959 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3960 +
3961 +               /* A read memory barrier is needed to stop prefetch of a stale
3962 +               ** completion record
3963 +               */
3964 +               rmb();
3965 +
3966 +               if (ret == 0) {
3967 +                       int msgbufcount = args.msgbufcount;
3968 +                       for (ret = 0; ret < args.count; ret++) {
3969 +                               VCHIQ_COMPLETION_DATA_T *completion;
3970 +                               VCHIQ_SERVICE_T *service;
3971 +                               USER_SERVICE_T *user_service;
3972 +                               VCHIQ_HEADER_T *header;
3973 +                               if (instance->completion_remove ==
3974 +                                       instance->completion_insert)
3975 +                                       break;
3976 +                               completion = &instance->completions[
3977 +                                       instance->completion_remove &
3978 +                                       (MAX_COMPLETIONS - 1)];
3979 +
3980 +                               service = completion->service_userdata;
3981 +                               user_service = service->base.userdata;
3982 +                               completion->service_userdata =
3983 +                                       user_service->userdata;
3984 +
3985 +                               header = completion->header;
3986 +                               if (header) {
3987 +                                       void __user *msgbuf;
3988 +                                       int msglen;
3989 +
3990 +                                       msglen = header->size +
3991 +                                               sizeof(VCHIQ_HEADER_T);
3992 +                                       /* This must be a VCHIQ-style service */
3993 +                                       if (args.msgbufsize < msglen) {
3994 +                                               vchiq_log_error(
3995 +                                                       vchiq_arm_log_level,
3996 +                                                       "header %x: msgbufsize"
3997 +                                                       " %x < msglen %x",
3998 +                                                       (unsigned int)header,
3999 +                                                       args.msgbufsize,
4000 +                                                       msglen);
4001 +                                               WARN(1, "invalid message "
4002 +                                                       "size\n");
4003 +                                               if (ret == 0)
4004 +                                                       ret = -EMSGSIZE;
4005 +                                               break;
4006 +                                       }
4007 +                                       if (msgbufcount <= 0)
4008 +                                               /* Stall here for lack of a
4009 +                                               ** buffer for the message. */
4010 +                                               break;
4011 +                                       /* Get the pointer from user space */
4012 +                                       msgbufcount--;
4013 +                                       if (copy_from_user(&msgbuf,
4014 +                                               (const void __user *)
4015 +                                               &args.msgbufs[msgbufcount],
4016 +                                               sizeof(msgbuf)) != 0) {
4017 +                                               if (ret == 0)
4018 +                                                       ret = -EFAULT;
4019 +                                               break;
4020 +                                       }
4021 +
4022 +                                       /* Copy the message to user space */
4023 +                                       if (copy_to_user(msgbuf, header,
4024 +                                               msglen) != 0) {
4025 +                                               if (ret == 0)
4026 +                                                       ret = -EFAULT;
4027 +                                               break;
4028 +                                       }
4029 +
4030 +                                       /* Now it has been copied, the message
4031 +                                       ** can be released. */
4032 +                                       vchiq_release_message(service->handle,
4033 +                                               header);
4034 +
4035 +                                       /* The completion must point to the
4036 +                                       ** msgbuf. */
4037 +                                       completion->header = msgbuf;
4038 +                               }
4039 +
4040 +                               if (completion->reason ==
4041 +                                       VCHIQ_SERVICE_CLOSED) {
4042 +                                       unlock_service(service);
4043 +                                       kfree(user_service);
4044 +                               }
4045 +
4046 +                               if (copy_to_user((void __user *)(
4047 +                                       (size_t)args.buf +
4048 +                                       ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
4049 +                                       completion,
4050 +                                       sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
4051 +                                       if (ret == 0)
4052 +                                               ret = -EFAULT;
4053 +                                       break;
4054 +                               }
4055 +
4056 +                               instance->completion_remove++;
4057 +                       }
4058 +
4059 +                       if (msgbufcount != args.msgbufcount) {
4060 +                               if (copy_to_user((void __user *)
4061 +                                       &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
4062 +                                               msgbufcount,
4063 +                                       &msgbufcount,
4064 +                                       sizeof(msgbufcount)) != 0) {
4065 +                                       ret = -EFAULT;
4066 +                               }
4067 +                       }
4068 +               }
4069 +
4070 +               if (ret != 0)
4071 +                       up(&instance->remove_event);
4072 +               mutex_unlock(&instance->completion_mutex);
4073 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
4074 +       } break;
4075 +
4076 +       case VCHIQ_IOC_DEQUEUE_MESSAGE: {
4077 +               VCHIQ_DEQUEUE_MESSAGE_T args;
4078 +               USER_SERVICE_T *user_service;
4079 +               VCHIQ_HEADER_T *header;
4080 +
4081 +               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4082 +               if (copy_from_user
4083 +                        (&args, (const void __user *)arg,
4084 +                         sizeof(args)) != 0) {
4085 +                       ret = -EFAULT;
4086 +                       break;
4087 +               }
4088 +               service = find_service_for_instance(instance, args.handle);
4089 +               if (!service) {
4090 +                       ret = -EINVAL;
4091 +                       break;
4092 +               }
4093 +               user_service = (USER_SERVICE_T *)service->base.userdata;
4094 +               if (user_service->is_vchi == 0) {
4095 +                       ret = -EINVAL;
4096 +                       break;
4097 +               }
4098 +
4099 +               spin_lock(&msg_queue_spinlock);
4100 +               if (user_service->msg_remove == user_service->msg_insert) {
4101 +                       if (!args.blocking) {
4102 +                               spin_unlock(&msg_queue_spinlock);
4103 +                               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4104 +                               ret = -EWOULDBLOCK;
4105 +                               break;
4106 +                       }
4107 +                       user_service->dequeue_pending = 1;
4108 +                       do {
4109 +                               spin_unlock(&msg_queue_spinlock);
4110 +                               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4111 +                               if (down_interruptible(
4112 +                                       &user_service->insert_event) != 0) {
4113 +                                       vchiq_log_info(vchiq_arm_log_level,
4114 +                                               "DEQUEUE_MESSAGE interrupted");
4115 +                                       ret = -EINTR;
4116 +                                       break;
4117 +                               }
4118 +                               spin_lock(&msg_queue_spinlock);
4119 +                       } while (user_service->msg_remove ==
4120 +                               user_service->msg_insert);
4121 +
4122 +                       if (ret)
4123 +                               break;
4124 +               }
4125 +
4126 +               BUG_ON((int)(user_service->msg_insert -
4127 +                       user_service->msg_remove) < 0);
4128 +
4129 +               header = user_service->msg_queue[user_service->msg_remove &
4130 +                       (MSG_QUEUE_SIZE - 1)];
4131 +               user_service->msg_remove++;
4132 +               spin_unlock(&msg_queue_spinlock);
4133 +
4134 +               up(&user_service->remove_event);
4135 +               if (header == NULL)
4136 +                       ret = -ENOTCONN;
4137 +               else if (header->size <= args.bufsize) {
4138 +                       /* Copy to user space if msgbuf is not NULL */
4139 +                       if ((args.buf == NULL) ||
4140 +                               (copy_to_user((void __user *)args.buf,
4141 +                               header->data,
4142 +                               header->size) == 0)) {
4143 +                               ret = header->size;
4144 +                               vchiq_release_message(
4145 +                                       service->handle,
4146 +                                       header);
4147 +                       } else
4148 +                               ret = -EFAULT;
4149 +               } else {
4150 +                       vchiq_log_error(vchiq_arm_log_level,
4151 +                               "header %x: bufsize %x < size %x",
4152 +                               (unsigned int)header, args.bufsize,
4153 +                               header->size);
4154 +                       WARN(1, "invalid size\n");
4155 +                       ret = -EMSGSIZE;
4156 +               }
4157 +               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
4158 +       } break;
4159 +
4160 +       case VCHIQ_IOC_GET_CLIENT_ID: {
4161 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
4162 +
4163 +               ret = vchiq_get_client_id(handle);
4164 +       } break;
4165 +
4166 +       case VCHIQ_IOC_GET_CONFIG: {
4167 +               VCHIQ_GET_CONFIG_T args;
4168 +               VCHIQ_CONFIG_T config;
4169 +
4170 +               if (copy_from_user(&args, (const void __user *)arg,
4171 +                       sizeof(args)) != 0) {
4172 +                       ret = -EFAULT;
4173 +                       break;
4174 +               }
4175 +               if (args.config_size > sizeof(config)) {
4176 +                       ret = -EINVAL;
4177 +                       break;
4178 +               }
4179 +               status = vchiq_get_config(instance, args.config_size, &config);
4180 +               if (status == VCHIQ_SUCCESS) {
4181 +                       if (copy_to_user((void __user *)args.pconfig,
4182 +                                   &config, args.config_size) != 0) {
4183 +                               ret = -EFAULT;
4184 +                               break;
4185 +                       }
4186 +               }
4187 +       } break;
4188 +
4189 +       case VCHIQ_IOC_SET_SERVICE_OPTION: {
4190 +               VCHIQ_SET_SERVICE_OPTION_T args;
4191 +
4192 +               if (copy_from_user(
4193 +                       &args, (const void __user *)arg,
4194 +                       sizeof(args)) != 0) {
4195 +                       ret = -EFAULT;
4196 +                       break;
4197 +               }
4198 +
4199 +               service = find_service_for_instance(instance, args.handle);
4200 +               if (!service) {
4201 +                       ret = -EINVAL;
4202 +                       break;
4203 +               }
4204 +
4205 +               status = vchiq_set_service_option(
4206 +                               args.handle, args.option, args.value);
4207 +       } break;
4208 +
4209 +       case VCHIQ_IOC_DUMP_PHYS_MEM: {
4210 +               VCHIQ_DUMP_MEM_T  args;
4211 +
4212 +               if (copy_from_user
4213 +                        (&args, (const void __user *)arg,
4214 +                         sizeof(args)) != 0) {
4215 +                       ret = -EFAULT;
4216 +                       break;
4217 +               }
4218 +               dump_phys_mem(args.virt_addr, args.num_bytes);
4219 +       } break;
4220 +
4221 +       default:
4222 +               ret = -ENOTTY;
4223 +               break;
4224 +       }
4225 +
4226 +       if (service)
4227 +               unlock_service(service);
4228 +
4229 +       if (ret == 0) {
4230 +               if (status == VCHIQ_ERROR)
4231 +                       ret = -EIO;
4232 +               else if (status == VCHIQ_RETRY)
4233 +                       ret = -EINTR;
4234 +       }
4235 +
4236 +       if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
4237 +               (ret != -EWOULDBLOCK))
4238 +               vchiq_log_info(vchiq_arm_log_level,
4239 +                       "  ioctl instance %lx, cmd %s -> status %d, %ld",
4240 +                       (unsigned long)instance,
4241 +                       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
4242 +                               ioctl_names[_IOC_NR(cmd)] :
4243 +                               "<invalid>",
4244 +                       status, ret);
4245 +       else
4246 +               vchiq_log_trace(vchiq_arm_log_level,
4247 +                       "  ioctl instance %lx, cmd %s -> status %d, %ld",
4248 +                       (unsigned long)instance,
4249 +                       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
4250 +                               ioctl_names[_IOC_NR(cmd)] :
4251 +                               "<invalid>",
4252 +                       status, ret);
4253 +
4254 +       return ret;
4255 +}
4256 +
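+/*
+ * A minimal userspace sketch of exercising the ioctl handler above
+ * (illustrative only - the /dev/vchiq node name and the header that exports
+ * the VCHIQ_IOC_* codes and argument structs to userspace are assumptions,
+ * not something this hunk establishes):
+ *
+ *     VCHIQ_CONFIG_T config;
+ *     VCHIQ_GET_CONFIG_T args = {
+ *             .config_size = sizeof(config),
+ *             .pconfig     = &config,
+ *     };
+ *     int fd = open("/dev/vchiq", O_RDWR);
+ *
+ *     if (fd >= 0 && ioctl(fd, VCHIQ_IOC_GET_CONFIG, &args) == 0)
+ *             ;  // config was filled in by vchiq_get_config() above
+ *
+ * Service-level requests such as VCHIQ_IOC_QUEUE_MESSAGE follow the same
+ * pattern, passing the handle obtained when the service was created.
+ */
+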
4257 +/****************************************************************************
4258 +*
4259 +*   vchiq_open
4260 +*
4261 +***************************************************************************/
4262 +
4263 +static int
4264 +vchiq_open(struct inode *inode, struct file *file)
4265 +{
4266 +       int dev = iminor(inode) & 0x0f;
4267 +       vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
4268 +       switch (dev) {
4269 +       case VCHIQ_MINOR: {
4270 +               int ret;
4271 +               VCHIQ_STATE_T *state = vchiq_get_state();
4272 +               VCHIQ_INSTANCE_T instance;
4273 +
4274 +               if (!state) {
4275 +                       vchiq_log_error(vchiq_arm_log_level,
4276 +                               "vchiq has no connection to VideoCore");
4277 +                       return -ENOTCONN;
4278 +               }
4279 +
4280 +               instance = kzalloc(sizeof(*instance), GFP_KERNEL);
4281 +               if (!instance)
4282 +                       return -ENOMEM;
4283 +
4284 +               instance->state = state;
4285 +               instance->pid = current->tgid;
4286 +
4287 +               ret = vchiq_proc_add_instance(instance);
4288 +               if (ret != 0) {
4289 +                       kfree(instance);
4290 +                       return ret;
4291 +               }
4292 +
4293 +               sema_init(&instance->insert_event, 0);
4294 +               sema_init(&instance->remove_event, 0);
4295 +               mutex_init(&instance->completion_mutex);
4296 +               mutex_init(&instance->bulk_waiter_list_mutex);
4297 +               INIT_LIST_HEAD(&instance->bulk_waiter_list);
4298 +
4299 +               file->private_data = instance;
4300 +       } break;
4301 +
4302 +       default:
4303 +               vchiq_log_error(vchiq_arm_log_level,
4304 +                       "Unknown minor device: %d", dev);
4305 +               return -ENXIO;
4306 +       }
4307 +
4308 +       return 0;
4309 +}
4310 +
4311 +/****************************************************************************
4312 +*
4313 +*   vchiq_release
4314 +*
4315 +***************************************************************************/
4316 +
4317 +static int
4318 +vchiq_release(struct inode *inode, struct file *file)
4319 +{
4320 +       int dev = iminor(inode) & 0x0f;
4321 +       int ret = 0;
4322 +       switch (dev) {
4323 +       case VCHIQ_MINOR: {
4324 +               VCHIQ_INSTANCE_T instance = file->private_data;
4325 +               VCHIQ_STATE_T *state = vchiq_get_state();
4326 +               VCHIQ_SERVICE_T *service;
4327 +               int i;
4328 +
4329 +               vchiq_log_info(vchiq_arm_log_level,
4330 +                       "vchiq_release: instance=%lx",
4331 +                       (unsigned long)instance);
4332 +
4333 +               if (!state) {
4334 +                       ret = -EPERM;
4335 +                       goto out;
4336 +               }
4337 +
4338 +               /* Ensure videocore is awake to allow termination. */
4339 +               vchiq_use_internal(instance->state, NULL,
4340 +                               USE_TYPE_VCHIQ);
4341 +
4342 +               mutex_lock(&instance->completion_mutex);
4343 +
4344 +               /* Wake the completion thread and ask it to exit */
4345 +               instance->closing = 1;
4346 +               up(&instance->insert_event);
4347 +
4348 +               mutex_unlock(&instance->completion_mutex);
4349 +
4350 +               /* Wake the slot handler if the completion queue is full. */
4351 +               up(&instance->remove_event);
4352 +
4353 +               /* Mark all services for termination... */
4354 +               i = 0;
4355 +               while ((service = next_service_by_instance(state, instance,
4356 +                       &i)) != NULL) {
4357 +                       USER_SERVICE_T *user_service = service->base.userdata;
4358 +
4359 +                       /* Wake the slot handler if the msg queue is full. */
4360 +                       up(&user_service->remove_event);
4361 +
4362 +                       vchiq_terminate_service_internal(service);
4363 +                       unlock_service(service);
4364 +               }
4365 +
4366 +               /* ...and wait for them to die */
4367 +               i = 0;
4368 +               while ((service = next_service_by_instance(state, instance, &i))
4369 +                       != NULL) {
4370 +                       USER_SERVICE_T *user_service = service->base.userdata;
4371 +
4372 +                       down(&service->remove_event);
4373 +
4374 +                       BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
4375 +
4376 +                       spin_lock(&msg_queue_spinlock);
4377 +
4378 +                       while (user_service->msg_remove !=
4379 +                               user_service->msg_insert) {
4380 +                               VCHIQ_HEADER_T *header = user_service->
4381 +                                       msg_queue[user_service->msg_remove &
4382 +                                               (MSG_QUEUE_SIZE - 1)];
4383 +                               user_service->msg_remove++;
4384 +                               spin_unlock(&msg_queue_spinlock);
4385 +
4386 +                               if (header)
4387 +                                       vchiq_release_message(
4388 +                                               service->handle,
4389 +                                               header);
4390 +                               spin_lock(&msg_queue_spinlock);
4391 +                       }
4392 +
4393 +                       spin_unlock(&msg_queue_spinlock);
4394 +
4395 +                       unlock_service(service);
4396 +                       kfree(user_service);
4397 +               }
4398 +
4399 +               /* Release any closed services */
4400 +               while (instance->completion_remove !=
4401 +                       instance->completion_insert) {
4402 +                       VCHIQ_COMPLETION_DATA_T *completion;
4403 +                       VCHIQ_SERVICE_T *service;
4404 +                       completion = &instance->completions[
4405 +                               instance->completion_remove &
4406 +                               (MAX_COMPLETIONS - 1)];
4407 +                       service = completion->service_userdata;
4408 +                       if (completion->reason == VCHIQ_SERVICE_CLOSED)
4409 +                               unlock_service(service);
4410 +                       instance->completion_remove++;
4411 +               }
4412 +
4413 +               /* Release the PEER service count. */
4414 +               vchiq_release_internal(instance->state, NULL);
4415 +
4416 +               {
4417 +                       struct list_head *pos, *next;
4418 +                       list_for_each_safe(pos, next,
4419 +                               &instance->bulk_waiter_list) {
4420 +                               struct bulk_waiter_node *waiter;
4421 +                               waiter = list_entry(pos,
4422 +                                       struct bulk_waiter_node,
4423 +                                       list);
4424 +                               list_del(pos);
4425 +                               vchiq_log_info(vchiq_arm_log_level,
4426 +                                       "bulk_waiter - cleaned up %x "
4427 +                                       "for pid %d",
4428 +                                       (unsigned int)waiter, waiter->pid);
4429 +                               kfree(waiter);
4430 +                       }
4431 +               }
4432 +
4433 +               vchiq_proc_remove_instance(instance);
4434 +
4435 +               kfree(instance);
4436 +               file->private_data = NULL;
4437 +       } break;
4438 +
4439 +       default:
4440 +               vchiq_log_error(vchiq_arm_log_level,
4441 +                       "Unknown minor device: %d", dev);
4442 +               ret = -ENXIO;
4443 +       }
4444 +
4445 +out:
4446 +       return ret;
4447 +}
4448 +
4449 +/****************************************************************************
4450 +*
4451 +*   vchiq_dump
4452 +*
4453 +***************************************************************************/
4454 +
4455 +void
4456 +vchiq_dump(void *dump_context, const char *str, int len)
4457 +{
4458 +       DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
4459 +
4460 +       if (context->actual < context->space) {
4461 +               int copy_bytes;
4462 +               if (context->offset > 0) {
4463 +                       int skip_bytes = min(len, (int)context->offset);
4464 +                       str += skip_bytes;
4465 +                       len -= skip_bytes;
4466 +                       context->offset -= skip_bytes;
4467 +                       if (context->offset > 0)
4468 +                               return;
4469 +               }
4470 +               copy_bytes = min(len, (int)(context->space - context->actual));
4471 +               if (copy_bytes == 0)
4472 +                       return;
4473 +               if (copy_to_user(context->buf + context->actual, str,
4474 +                       copy_bytes))
4475 +                       context->actual = -EFAULT;
4476 +               context->actual += copy_bytes;
4477 +               len -= copy_bytes;
4478 +
4479 +               /* If the terminating NUL is included in the length, then it
4480 +               ** marks the end of a line and should be replaced with a
4481 +               ** newline. */
4482 +               if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
4483 +                       char cr = '\n';
4484 +                       if (copy_to_user(context->buf + context->actual - 1,
4485 +                               &cr, 1))
4486 +                               context->actual = -EFAULT;
4487 +               }
4488 +       }
4489 +}
4490 +
4491 +/****************************************************************************
4492 +*
4493 +*   vchiq_dump_platform_instances
4494 +*
4495 +***************************************************************************/
4496 +
4497 +void
4498 +vchiq_dump_platform_instances(void *dump_context)
4499 +{
4500 +       VCHIQ_STATE_T *state = vchiq_get_state();
4501 +       char buf[80];
4502 +       int len;
4503 +       int i;
4504 +
4505 +       /* There is no list of instances, so instead scan all services,
4506 +               marking those that have been dumped. */
4507 +
4508 +       for (i = 0; i < state->unused_service; i++) {
4509 +               VCHIQ_SERVICE_T *service = state->services[i];
4510 +               VCHIQ_INSTANCE_T instance;
4511 +
4512 +               if (service && (service->base.callback == service_callback)) {
4513 +                       instance = service->instance;
4514 +                       if (instance)
4515 +                               instance->mark = 0;
4516 +               }
4517 +       }
4518 +
4519 +       for (i = 0; i < state->unused_service; i++) {
4520 +               VCHIQ_SERVICE_T *service = state->services[i];
4521 +               VCHIQ_INSTANCE_T instance;
4522 +
4523 +               if (service && (service->base.callback == service_callback)) {
4524 +                       instance = service->instance;
4525 +                       if (instance && !instance->mark) {
4526 +                               len = snprintf(buf, sizeof(buf),
4527 +                                       "Instance %x: pid %d,%s completions "
4528 +                                               "%d/%d",
4529 +                                       (unsigned int)instance, instance->pid,
4530 +                                       instance->connected ? " connected, " :
4531 +                                               "",
4532 +                                       instance->completion_insert -
4533 +                                               instance->completion_remove,
4534 +                                       MAX_COMPLETIONS);
4535 +
4536 +                               vchiq_dump(dump_context, buf, len + 1);
4537 +
4538 +                               instance->mark = 1;
4539 +                       }
4540 +               }
4541 +       }
4542 +}
4543 +
4544 +/****************************************************************************
4545 +*
4546 +*   vchiq_dump_platform_service_state
4547 +*
4548 +***************************************************************************/
4549 +
4550 +void
4551 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
4552 +{
4553 +       USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
4554 +       char buf[80];
4555 +       int len;
4556 +
4557 +       len = snprintf(buf, sizeof(buf), "  instance %x",
4558 +               (unsigned int)service->instance);
4559 +
4560 +       if ((service->base.callback == service_callback) &&
4561 +               user_service->is_vchi) {
4562 +               len += snprintf(buf + len, sizeof(buf) - len,
4563 +                       ", %d/%d messages",
4564 +                       user_service->msg_insert - user_service->msg_remove,
4565 +                       MSG_QUEUE_SIZE);
4566 +
4567 +               if (user_service->dequeue_pending)
4568 +                       len += snprintf(buf + len, sizeof(buf) - len,
4569 +                               " (dequeue pending)");
4570 +       }
4571 +
4572 +       vchiq_dump(dump_context, buf, len + 1);
4573 +}
4574 +
4575 +/****************************************************************************
4576 +*
4577 +*   dump_phys_mem
4578 +*
4579 +***************************************************************************/
4580 +
4581 +static void
4582 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
4583 +{
4584 +       int            rc;
4585 +       uint8_t       *end_virt_addr = virt_addr + num_bytes;
4586 +       int            num_pages;
4587 +       int            offset;
4588 +       int            end_offset;
4589 +       int            page_idx;
4590 +       int            prev_idx;
4591 +       struct page   *page;
4592 +       struct page  **pages;
4593 +       uint8_t       *kmapped_virt_ptr;
4594 +
4595 +       /* Align virt_addr and end_virt_addr to 16 byte boundaries. */
4596 +
4597 +       virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
4598 +       end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
4599 +               ~0x0fuL);
4600 +
4601 +       offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
4602 +       end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
4603 +
4604 +       num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
4605 +
4606 +       pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
4607 +       if (pages == NULL) {
4608 +               vchiq_log_error(vchiq_arm_log_level,
4609 +                       "Unable to allocate memory for %d pages\n",
4610 +                       num_pages);
4611 +               return;
4612 +       }
4613 +
4614 +       down_read(&current->mm->mmap_sem);
4615 +       rc = get_user_pages(current,      /* task */
4616 +               current->mm,              /* mm */
4617 +               (unsigned long)virt_addr, /* start */
4618 +               num_pages,                /* len */
4619 +               0,                        /* write */
4620 +               0,                        /* force */
4621 +               pages,                    /* pages (array of page pointers) */
4622 +               NULL);                    /* vmas */
4623 +       up_read(&current->mm->mmap_sem);
4624 +
4625 +       prev_idx = -1;
4626 +       page = NULL;
4627 +
4628 +       while (offset < end_offset) {
4629 +
4630 +               int page_offset = offset % PAGE_SIZE;
4631 +               page_idx = offset / PAGE_SIZE;
4632 +
4633 +               if (page_idx != prev_idx) {
4634 +
4635 +                       if (page != NULL)
4636 +                               kunmap(page);
4637 +                       page = pages[page_idx];
4638 +                       kmapped_virt_ptr = kmap(page);
4639 +
4640 +                       prev_idx = page_idx;
4641 +               }
4642 +
4643 +               if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
4644 +                       vchiq_log_dump_mem("ph",
4645 +                               (uint32_t)(unsigned long)&kmapped_virt_ptr[
4646 +                                       page_offset],
4647 +                               &kmapped_virt_ptr[page_offset], 16);
4648 +
4649 +               offset += 16;
4650 +       }
4651 +       if (page != NULL)
4652 +               kunmap(page);
4653 +
4654 +       for (page_idx = 0; page_idx < num_pages; page_idx++)
4655 +               page_cache_release(pages[page_idx]);
4656 +
4657 +       kfree(pages);
4658 +}
4659 +
4660 +/****************************************************************************
4661 +*
4662 +*   vchiq_read
4663 +*
4664 +***************************************************************************/
4665 +
4666 +static ssize_t
4667 +vchiq_read(struct file *file, char __user *buf,
4668 +       size_t count, loff_t *ppos)
4669 +{
4670 +       DUMP_CONTEXT_T context;
4671 +       context.buf = buf;
4672 +       context.actual = 0;
4673 +       context.space = count;
4674 +       context.offset = *ppos;
4675 +
4676 +       vchiq_dump_state(&context, &g_state);
4677 +
4678 +       *ppos += context.actual;
4679 +
4680 +       return context.actual;
4681 +}
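+
+/*
+ * Reading the character device therefore returns the textual state dump that
+ * vchiq_dump_state() assembles through vchiq_dump() above. A userspace
+ * sketch (again assuming the conventional /dev/vchiq node name):
+ *
+ *     char buf[4096];
+ *     ssize_t n;
+ *     int fd = open("/dev/vchiq", O_RDONLY);
+ *
+ *     while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0)
+ *             fwrite(buf, 1, n, stdout);
+ *
+ * Each read() advances *ppos by the number of bytes produced, so the loop
+ * ends once the whole dump has been consumed.
+ */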
4682 +
4683 +VCHIQ_STATE_T *
4684 +vchiq_get_state(void)
4685 +{
4686 +
4687 +       if (g_state.remote == NULL)
4688 +               printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
4689 +       else if (g_state.remote->initialised != 1)
4690 +               printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
4691 +                       __func__, g_state.remote->initialised);
4692 +
4693 +       return ((g_state.remote != NULL) &&
4694 +               (g_state.remote->initialised == 1)) ? &g_state : NULL;
4695 +}
4696 +
4697 +static const struct file_operations
4698 +vchiq_fops = {
4699 +       .owner = THIS_MODULE,
4700 +       .unlocked_ioctl = vchiq_ioctl,
4701 +       .open = vchiq_open,
4702 +       .release = vchiq_release,
4703 +       .read = vchiq_read
4704 +};
4705 +
4706 +/*
4707 + * Autosuspend related functionality
4708 + */
4709 +
4710 +int
4711 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
4712 +{
4713 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4714 +       if (!arm_state)
4715 +               /* autosuspend not supported - always return wanted */
4716 +               return 1;
4717 +       else if (arm_state->blocked_count)
4718 +               return 1;
4719 +       else if (!arm_state->videocore_use_count)
4720 +               /* usage count zero - check for override unless we're forcing */
4721 +               if (arm_state->resume_blocked)
4722 +                       return 0;
4723 +               else
4724 +                       return vchiq_platform_videocore_wanted(state);
4725 +       else
4726 +               /* non-zero usage count - videocore still required */
4727 +               return 1;
4728 +}
4729 +
4730 +static VCHIQ_STATUS_T
4731 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
4732 +       VCHIQ_HEADER_T *header,
4733 +       VCHIQ_SERVICE_HANDLE_T service_user,
4734 +       void *bulk_user)
4735 +{
4736 +       vchiq_log_error(vchiq_susp_log_level,
4737 +               "%s callback reason %d", __func__, reason);
4738 +       return 0;
4739 +}
4740 +
4741 +static int
4742 +vchiq_keepalive_thread_func(void *v)
4743 +{
4744 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
4745 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4746 +
4747 +       VCHIQ_STATUS_T status;
4748 +       VCHIQ_INSTANCE_T instance;
4749 +       VCHIQ_SERVICE_HANDLE_T ka_handle;
4750 +
4751 +       VCHIQ_SERVICE_PARAMS_T params = {
4752 +               .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
4753 +               .callback    = vchiq_keepalive_vchiq_callback,
4754 +               .version     = KEEPALIVE_VER,
4755 +               .version_min = KEEPALIVE_VER_MIN
4756 +       };
4757 +
4758 +       status = vchiq_initialise(&instance);
4759 +       if (status != VCHIQ_SUCCESS) {
4760 +               vchiq_log_error(vchiq_susp_log_level,
4761 +                       "%s vchiq_initialise failed %d", __func__, status);
4762 +               goto exit;
4763 +       }
4764 +
4765 +       status = vchiq_connect(instance);
4766 +       if (status != VCHIQ_SUCCESS) {
4767 +               vchiq_log_error(vchiq_susp_log_level,
4768 +                       "%s vchiq_connect failed %d", __func__, status);
4769 +               goto shutdown;
4770 +       }
4771 +
4772 +       status = vchiq_add_service(instance, &params, &ka_handle);
4773 +       if (status != VCHIQ_SUCCESS) {
4774 +               vchiq_log_error(vchiq_susp_log_level,
4775 +                       "%s vchiq_open_service failed %d", __func__, status);
4776 +               goto shutdown;
4777 +       }
4778 +
4779 +       while (1) {
4780 +               long rc = 0, uc = 0;
4781 +               if (wait_for_completion_interruptible(&arm_state->ka_evt)
4782 +                               != 0) {
4783 +                       vchiq_log_error(vchiq_susp_log_level,
4784 +                               "%s interrupted", __func__);
4785 +                       flush_signals(current);
4786 +                       continue;
4787 +               }
4788 +
4789 +               /* read and clear counters.  Do release_count then use_count to
4790 +                * prevent getting more releases than uses */
4791 +               rc = atomic_xchg(&arm_state->ka_release_count, 0);
4792 +               uc = atomic_xchg(&arm_state->ka_use_count, 0);
4793 +
4794 +               /* Call use/release service the requisite number of times.
4795 +                * Process use before release so use counts don't go negative */
4796 +               while (uc--) {
4797 +                       atomic_inc(&arm_state->ka_use_ack_count);
4798 +                       status = vchiq_use_service(ka_handle);
4799 +                       if (status != VCHIQ_SUCCESS) {
4800 +                               vchiq_log_error(vchiq_susp_log_level,
4801 +                                       "%s vchiq_use_service error %d",
4802 +                                       __func__, status);
4803 +                       }
4804 +               }
4805 +               while (rc--) {
4806 +                       status = vchiq_release_service(ka_handle);
4807 +                       if (status != VCHIQ_SUCCESS) {
4808 +                               vchiq_log_error(vchiq_susp_log_level,
4809 +                                       "%s vchiq_release_service error %d",
4810 +                                       __func__, status);
4811 +                       }
4812 +               }
4813 +       }
4814 +
4815 +shutdown:
4816 +       vchiq_shutdown(instance);
4817 +exit:
4818 +       return 0;
4819 +}
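+
+/*
+ * A note on the counter handling in the keepalive thread above (reasoning
+ * sketch only; the producer side shown here is a conceptual summary, not
+ * code added by this hunk). The producers effectively do
+ *
+ *     atomic_inc(&arm_state->ka_use_count);    // or ka_release_count
+ *     complete(&arm_state->ka_evt);
+ *
+ * while the thread drains both counters with atomic_xchg(..., 0). Zeroing
+ * ka_release_count before ka_use_count means that a use/release pair landing
+ * between the two reads contributes its use in this pass and its release in
+ * the next one - never the reverse - so the balance handed to
+ * vchiq_use_service()/vchiq_release_service() cannot go negative.
+ */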
4820 +
4821 +
4822 +
4823 +VCHIQ_STATUS_T
4824 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
4825 +{
4826 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4827 +
4828 +       if (arm_state) {
4829 +               rwlock_init(&arm_state->susp_res_lock);
4830 +
4831 +               init_completion(&arm_state->ka_evt);
4832 +               atomic_set(&arm_state->ka_use_count, 0);
4833 +               atomic_set(&arm_state->ka_use_ack_count, 0);
4834 +               atomic_set(&arm_state->ka_release_count, 0);
4835 +
4836 +               init_completion(&arm_state->vc_suspend_complete);
4837 +
4838 +               init_completion(&arm_state->vc_resume_complete);
4839 +               /* Initialise to 'done' state.  We only want to block on resume
4840 +                * completion while videocore is suspended. */
4841 +               set_resume_state(arm_state, VC_RESUME_RESUMED);
4842 +
4843 +               init_completion(&arm_state->resume_blocker);
4844 +               /* Initialise to 'done' state.  We only want to block on this
4845 +                * completion while resume is blocked */
4846 +               complete_all(&arm_state->resume_blocker);
4847 +
4848 +               init_completion(&arm_state->blocked_blocker);
4849 +               /* Initialise to 'done' state.  We only want to block on this
4850 +                * completion while things are waiting on the resume blocker */
4851 +               complete_all(&arm_state->blocked_blocker);
4852 +
4853 +               arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
4854 +               arm_state->suspend_timer_running = 0;
4855 +               init_timer(&arm_state->suspend_timer);
4856 +               arm_state->suspend_timer.data = (unsigned long)(state);
4857 +               arm_state->suspend_timer.function = suspend_timer_callback;
4858 +
4859 +               arm_state->first_connect = 0;
4860 +
4861 +       }
4862 +       return status;
4863 +}
4864 +
4865 +/*
4866 +** Functions to modify the state variables;
4867 +**     set_suspend_state
4868 +**     set_resume_state
4869 +**
4870 +** There are more state variables than we might like, so ensure they remain in
4871 +** step.  Suspend and resume state are maintained separately, since most of
4872 +** these state machines can operate independently.  However, there are a few
4873 +** states where state transitions in one state machine cause a reset to the
4874 +** other state machine.  In addition, there are some completion events which
4875 +** need to occur on state machine reset and end-state(s), so these are also
4876 +** dealt with in these functions.
4877 +**
4878 +** In all states we set the state variable according to the input, but in some
4879 +** cases we perform additional steps outlined below;
4880 +**
4881 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
4882 +**                     The suspend completion is completed after any suspend
4883 +**                     attempt.  When we reset the state machine we also reset
4884 +**                     the completion.  This reset occurs when videocore is
4885 +**                     resumed, and also if we initiate suspend after a suspend
4886 +**                     failure.
4887 +**
4888 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
4889 +**                     suspend - ie from this point on we must try to suspend
4890 +**                     before resuming can occur.  We therefore also reset the
4891 +**                     resume state machine to VC_RESUME_IDLE in this state.
4892 +**
4893 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
4894 +**                     complete_all on the suspend completion to notify
4895 +**                     anything waiting for suspend to happen.
4896 +**
4897 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
4898 +**                     initiate resume, so no need to alter resume state.
4899 +**                     We call complete_all on the suspend completion to notify
4900 +**                     of suspend rejection.
4901 +**
4902 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
4903 +**                     suspend completion and reset the resume state machine.
4904 +**
4905 +** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
4906 +**                     resume completion is in its 'done' state whenever
4907 +**                     videocore is running.  Therefore, the VC_RESUME_IDLE state
4908 +**                     implies that videocore is suspended.
4909 +**                     Hence, any thread which needs to wait until videocore is
4910 +**                     running can wait on this completion - it will only block
4911 +**                     if videocore is suspended.
4912 +**
4913 +** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
4914 +**                     Call complete_all on the resume completion to unblock
4915 +**                     any threads waiting for resume.  Also reset the suspend
4916 +**                     state machine to its idle state.
4917 +**
4918 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
4919 +*/
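+
+/* For example (an idiom implied by the comment above, not a call site added
+** by this patch): a thread that must not talk to videocore while it is
+** suspended can simply block on the resume completion,
+**
+**     wait_for_completion(&arm_state->vc_resume_complete);
+**
+** which returns immediately while videocore is running (VC_RESUME_RESUMED
+** leaves the completion completed) and only blocks once VC_RESUME_IDLE has
+** re-initialised it.
+*/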
4920 +
4921 +inline void
4922 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
4923 +       enum vc_suspend_status new_state)
4924 +{
4925 +       /* set the state in all cases */
4926 +       arm_state->vc_suspend_state = new_state;
4927 +
4928 +       /* state specific additional actions */
4929 +       switch (new_state) {
4930 +       case VC_SUSPEND_FORCE_CANCELED:
4931 +               complete_all(&arm_state->vc_suspend_complete);
4932 +               break;
4933 +       case VC_SUSPEND_REJECTED:
4934 +               complete_all(&arm_state->vc_suspend_complete);
4935 +               break;
4936 +       case VC_SUSPEND_FAILED:
4937 +               complete_all(&arm_state->vc_suspend_complete);
4938 +               arm_state->vc_resume_state = VC_RESUME_RESUMED;
4939 +               complete_all(&arm_state->vc_resume_complete);
4940 +               break;
4941 +       case VC_SUSPEND_IDLE:
4942 +               INIT_COMPLETION(arm_state->vc_suspend_complete);
4943 +               break;
4944 +       case VC_SUSPEND_REQUESTED:
4945 +               break;
4946 +       case VC_SUSPEND_IN_PROGRESS:
4947 +               set_resume_state(arm_state, VC_RESUME_IDLE);
4948 +               break;
4949 +       case VC_SUSPEND_SUSPENDED:
4950 +               complete_all(&arm_state->vc_suspend_complete);
4951 +               break;
4952 +       default:
4953 +               BUG();
4954 +               break;
4955 +       }
4956 +}
4957 +
4958 +inline void
4959 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
4960 +       enum vc_resume_status new_state)
4961 +{
4962 +       /* set the state in all cases */
4963 +       arm_state->vc_resume_state = new_state;
4964 +
4965 +       /* state specific additional actions */
4966 +       switch (new_state) {
4967 +       case VC_RESUME_FAILED:
4968 +               break;
4969 +       case VC_RESUME_IDLE:
4970 +               INIT_COMPLETION(arm_state->vc_resume_complete);
4971 +               break;
4972 +       case VC_RESUME_REQUESTED:
4973 +               break;
4974 +       case VC_RESUME_IN_PROGRESS:
4975 +               break;
4976 +       case VC_RESUME_RESUMED:
4977 +               complete_all(&arm_state->vc_resume_complete);
4978 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4979 +               break;
4980 +       default:
4981 +               BUG();
4982 +               break;
4983 +       }
4984 +}
4985 +
4986 +
4987 +/* should be called with the write lock held */
4988 +inline void
4989 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
4990 +{
4991 +       del_timer(&arm_state->suspend_timer);
4992 +       arm_state->suspend_timer.expires = jiffies +
4993 +               msecs_to_jiffies(arm_state->
4994 +                       suspend_timer_timeout);
4995 +       add_timer(&arm_state->suspend_timer);
4996 +       arm_state->suspend_timer_running = 1;
4997 +}
4998 +
4999 +/* should be called with the write lock held */
5000 +static inline void
5001 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
5002 +{
5003 +       if (arm_state->suspend_timer_running) {
5004 +               del_timer(&arm_state->suspend_timer);
5005 +               arm_state->suspend_timer_running = 0;
5006 +       }
5007 +}
5008 +
5009 +static inline int
5010 +need_resume(VCHIQ_STATE_T *state)
5011 +{
5012 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5013 +       return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
5014 +                       (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
5015 +                       vchiq_videocore_wanted(state);
5016 +}
5017 +
5018 +static int
5019 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
5020 +{
5021 +       int status = VCHIQ_SUCCESS;
5022 +       const unsigned long timeout_val =
5023 +                               msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
5024 +       int resume_count = 0;
5025 +
5026 +       /* Allow any threads which were blocked by the last force suspend to
5027 +        * complete if they haven't already.  Only give this one shot; if
5028 +        * blocked_count is incremented after blocked_blocker is completed
5029 +        * (which only happens when blocked_count hits 0) then those threads
5030 +        * will have to wait until next time around */
5031 +       if (arm_state->blocked_count) {
5032 +               INIT_COMPLETION(arm_state->blocked_blocker);
5033 +               write_unlock_bh(&arm_state->susp_res_lock);
5034 +               vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
5035 +                       "blocked clients", __func__);
5036 +               if (wait_for_completion_interruptible_timeout(
5037 +                               &arm_state->blocked_blocker, timeout_val)
5038 +                                       <= 0) {
5039 +                       vchiq_log_error(vchiq_susp_log_level, "%s wait for "
5040 +                               "previously blocked clients failed" , __func__);
5041 +                       status = VCHIQ_ERROR;
5042 +                       write_lock_bh(&arm_state->susp_res_lock);
5043 +                       goto out;
5044 +               }
5045 +               vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
5046 +                       "clients resumed", __func__);
5047 +               write_lock_bh(&arm_state->susp_res_lock);
5048 +       }
5049 +
5050 +       /* We need to wait for resume to complete if it's in progress */
5051 +       while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
5052 +                       arm_state->vc_resume_state > VC_RESUME_IDLE) {
5053 +               if (resume_count > 1) {
5054 +                       status = VCHIQ_ERROR;
5055 +                       vchiq_log_error(vchiq_susp_log_level, "%s waited too "
5056 +                               "many times for resume" , __func__);
5057 +                       goto out;
5058 +               }
5059 +               write_unlock_bh(&arm_state->susp_res_lock);
5060 +               vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
5061 +                       __func__);
5062 +               if (wait_for_completion_interruptible_timeout(
5063 +                               &arm_state->vc_resume_complete, timeout_val)
5064 +                                       <= 0) {
5065 +                       vchiq_log_error(vchiq_susp_log_level, "%s wait for "
5066 +                               "resume failed (%s)", __func__,
5067 +                               resume_state_names[arm_state->vc_resume_state +
5068 +                                                       VC_RESUME_NUM_OFFSET]);
5069 +                       status = VCHIQ_ERROR;
5070 +                       write_lock_bh(&arm_state->susp_res_lock);
5071 +                       goto out;
5072 +               }
5073 +               vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
5074 +               write_lock_bh(&arm_state->susp_res_lock);
5075 +               resume_count++;
5076 +       }
5077 +       INIT_COMPLETION(arm_state->resume_blocker);
5078 +       arm_state->resume_blocked = 1;
5079 +
5080 +out:
5081 +       return status;
5082 +}
5083 +
5084 +static inline void
5085 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
5086 +{
5087 +       complete_all(&arm_state->resume_blocker);
5088 +       arm_state->resume_blocked = 0;
5089 +}
5090 +
5091 +/* Initiate suspend via slot handler. Should be called with the write lock
5092 + * held */
5093 +VCHIQ_STATUS_T
5094 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
5095 +{
5096 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
5097 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5098 +
5099 +       if (!arm_state)
5100 +               goto out;
5101 +
5102 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5103 +       status = VCHIQ_SUCCESS;
5104 +
5105 +
5106 +       switch (arm_state->vc_suspend_state) {
5107 +       case VC_SUSPEND_REQUESTED:
5108 +               vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
5109 +                       "requested", __func__);
5110 +               break;
5111 +       case VC_SUSPEND_IN_PROGRESS:
5112 +               vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
5113 +                       "progress", __func__);
5114 +               break;
5115 +
5116 +       default:
5117 +               /* We don't expect to be in other states, so log but continue
5118 +                * anyway */
5119 +               vchiq_log_error(vchiq_susp_log_level,
5120 +                       "%s unexpected suspend state %s", __func__,
5121 +                       suspend_state_names[arm_state->vc_suspend_state +
5122 +                                               VC_SUSPEND_NUM_OFFSET]);
5123 +               /* fall through */
5124 +       case VC_SUSPEND_REJECTED:
5125 +       case VC_SUSPEND_FAILED:
5126 +               /* Ensure any idle state actions have been run */
5127 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5128 +               /* fall through */
5129 +       case VC_SUSPEND_IDLE:
5130 +               vchiq_log_info(vchiq_susp_log_level,
5131 +                       "%s: suspending", __func__);
5132 +               set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
5133 +               /* kick the slot handler thread to initiate suspend */
5134 +               request_poll(state, NULL, 0);
5135 +               break;
5136 +       }
5137 +
5138 +out:
5139 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
5140 +       return status;
5141 +}
5142 +
5143 +void
5144 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
5145 +{
5146 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5147 +       int susp = 0;
5148 +
5149 +       if (!arm_state)
5150 +               goto out;
5151 +
5152 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5153 +
5154 +       write_lock_bh(&arm_state->susp_res_lock);
5155 +       if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
5156 +                       arm_state->vc_resume_state == VC_RESUME_RESUMED) {
5157 +               set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
5158 +               susp = 1;
5159 +       }
5160 +       write_unlock_bh(&arm_state->susp_res_lock);
5161 +
5162 +       if (susp)
5163 +               vchiq_platform_suspend(state);
5164 +
5165 +out:
5166 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5167 +       return;
5168 +}
5169 +
5170 +
5171 +static void
5172 +output_timeout_error(VCHIQ_STATE_T *state)
5173 +{
5174 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5175 +       char service_err[50] = "";
5176 +       int vc_use_count = arm_state->videocore_use_count;
5177 +       int active_services = state->unused_service;
5178 +       int i;
5179 +
5180 +       if (!arm_state->videocore_use_count) {
5181 +               snprintf(service_err, 50, " Videocore usecount is 0");
5182 +               goto output_msg;
5183 +       }
5184 +       for (i = 0; i < active_services; i++) {
5185 +               VCHIQ_SERVICE_T *service_ptr = state->services[i];
5186 +               if (service_ptr && service_ptr->service_use_count &&
5187 +                       (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
5188 +                       snprintf(service_err, 50, " %c%c%c%c(%d) service has "
5189 +                               "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
5190 +                                       service_ptr->base.fourcc),
5191 +                                service_ptr->client_id,
5192 +                                service_ptr->service_use_count,
5193 +                                service_ptr->service_use_count ==
5194 +                                        vc_use_count ? "" : " (+ more)");
5195 +                       break;
5196 +               }
5197 +       }
5198 +
5199 +output_msg:
5200 +       vchiq_log_error(vchiq_susp_log_level,
5201 +               "timed out waiting for vc suspend (%d).%s",
5202 +                arm_state->autosuspend_override, service_err);
5203 +
5204 +}
5205 +
5206 +/* Try to get videocore into suspended state, regardless of autosuspend state.
5207 +** We don't actually force suspend, since videocore may get into a bad state
5208 +** if we force suspend at a bad time.  Instead, we wait for autosuspend to
5209 +** determine a good point to suspend.  If this doesn't happen within 100ms we
5210 +** report failure.
5211 +**
5212 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
5213 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
5214 +*/
5215 +VCHIQ_STATUS_T
5216 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
5217 +{
5218 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5219 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
5220 +       long rc = 0;
5221 +       int repeat = -1;
5222 +
5223 +       if (!arm_state)
5224 +               goto out;
5225 +
5226 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5227 +
5228 +       write_lock_bh(&arm_state->susp_res_lock);
5229 +
5230 +       status = block_resume(arm_state);
5231 +       if (status != VCHIQ_SUCCESS)
5232 +               goto unlock;
5233 +       if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
5234 +               /* Already suspended - just block resume and exit */
5235 +               vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
5236 +                       __func__);
5237 +               status = VCHIQ_SUCCESS;
5238 +               goto unlock;
5239 +       } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
5240 +               /* initiate suspend immediately in the case that we're waiting
5241 +                * for the timeout */
5242 +               stop_suspend_timer(arm_state);
5243 +               if (!vchiq_videocore_wanted(state)) {
5244 +                       vchiq_log_info(vchiq_susp_log_level, "%s videocore "
5245 +                               "idle, initiating suspend", __func__);
5246 +                       status = vchiq_arm_vcsuspend(state);
5247 +               } else if (arm_state->autosuspend_override <
5248 +                                               FORCE_SUSPEND_FAIL_MAX) {
5249 +                       vchiq_log_info(vchiq_susp_log_level, "%s letting "
5250 +                               "videocore go idle", __func__);
5251 +                       status = VCHIQ_SUCCESS;
5252 +               } else {
5253 +                       vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
5254 +                               "many times - attempting suspend", __func__);
5255 +                       status = vchiq_arm_vcsuspend(state);
5256 +               }
5257 +       } else {
5258 +               vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
5259 +                       "in progress - wait for completion", __func__);
5260 +               status = VCHIQ_SUCCESS;
5261 +       }
5262 +
5263 +       /* Wait for suspend to happen due to system idle (not forced) */
5264 +       if (status != VCHIQ_SUCCESS)
5265 +               goto unblock_resume;
5266 +
5267 +       do {
5268 +               write_unlock_bh(&arm_state->susp_res_lock);
5269 +
5270 +               rc = wait_for_completion_interruptible_timeout(
5271 +                               &arm_state->vc_suspend_complete,
5272 +                               msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
5273 +
5274 +               write_lock_bh(&arm_state->susp_res_lock);
5275 +               if (rc < 0) {
5276 +                       vchiq_log_warning(vchiq_susp_log_level, "%s "
5277 +                               "interrupted waiting for suspend", __func__);
5278 +                       status = VCHIQ_ERROR;
5279 +                       goto unblock_resume;
5280 +               } else if (rc == 0) {
5281 +                       if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
5282 +                               /* Repeat timeout once if in progress */
5283 +                               if (repeat < 0) {
5284 +                                       repeat = 1;
5285 +                                       continue;
5286 +                               }
5287 +                       }
5288 +                       arm_state->autosuspend_override++;
5289 +                       output_timeout_error(state);
5290 +
5291 +                       status = VCHIQ_RETRY;
5292 +                       goto unblock_resume;
5293 +               }
5294 +       } while (0 < (repeat--));
5295 +
5296 +       /* Check and report state in case we need to abort ARM suspend */
5297 +       if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
5298 +               status = VCHIQ_RETRY;
5299 +               vchiq_log_error(vchiq_susp_log_level,
5300 +                       "%s videocore suspend failed (state %s)", __func__,
5301 +                       suspend_state_names[arm_state->vc_suspend_state +
5302 +                                               VC_SUSPEND_NUM_OFFSET]);
5303 +               /* Reset the state only if it's still in an error state.
5304 +                * Something could have already initiated another suspend. */
5305 +               if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
5306 +                       set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5307 +
5308 +               goto unblock_resume;
5309 +       }
5310 +
5311 +       /* successfully suspended - unlock and exit */
5312 +       goto unlock;
5313 +
5314 +unblock_resume:
5315 +       /* all error states need to unblock resume before exit */
5316 +       unblock_resume(arm_state);
5317 +
5318 +unlock:
5319 +       write_unlock_bh(&arm_state->susp_res_lock);
5320 +
5321 +out:
5322 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
5323 +       return status;
5324 +}
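
A minimal sketch of how a platform's ARM power-management hooks might drive the two calls above; the hook names and the dev_pm_ops wiring are assumptions for illustration, not part of this patch. The return-code mapping follows the comment block above vchiq_arm_force_suspend().

#include <linux/errno.h>
#include <linux/pm.h>
#include "vchiq_arm.h"

static int example_vchiq_pm_suspend(struct device *dev)
{
	VCHIQ_STATE_T *state = vchiq_get_state();
	VCHIQ_STATUS_T status;

	if (!state)
		return -ENODEV;

	status = vchiq_arm_force_suspend(state);
	if (status == VCHIQ_SUCCESS)
		return 0;		/* videocore suspended, resume blocked */
	if (status == VCHIQ_RETRY)
		return -EAGAIN;		/* didn't idle in time - abort ARM suspend */
	return -EINTR;			/* interrupted while waiting */
}

static int example_vchiq_pm_resume(struct device *dev)
{
	VCHIQ_STATE_T *state = vchiq_get_state();

	if (!state)
		return -ENODEV;

	/* Unblocks resume and waits; returns 0 once videocore is running,
	 * non-zero if it stayed suspended or the wait was interrupted. */
	return vchiq_arm_allow_resume(state);
}

static const struct dev_pm_ops example_vchiq_pm_ops = {
	.suspend = example_vchiq_pm_suspend,
	.resume  = example_vchiq_pm_resume,
};
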
5325 +
5326 +void
5327 +vchiq_check_suspend(VCHIQ_STATE_T *state)
5328 +{
5329 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5330 +
5331 +       if (!arm_state)
5332 +               goto out;
5333 +
5334 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5335 +
5336 +       write_lock_bh(&arm_state->susp_res_lock);
5337 +       if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
5338 +                       arm_state->first_connect &&
5339 +                       !vchiq_videocore_wanted(state)) {
5340 +               vchiq_arm_vcsuspend(state);
5341 +       }
5342 +       write_unlock_bh(&arm_state->susp_res_lock);
5343 +
5344 +out:
5345 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5346 +       return;
5347 +}
5348 +
5349 +
5350 +int
5351 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
5352 +{
5353 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5354 +       int resume = 0;
5355 +       int ret = -1;
5356 +
5357 +       if (!arm_state)
5358 +               goto out;
5359 +
5360 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5361 +
5362 +       write_lock_bh(&arm_state->susp_res_lock);
5363 +       unblock_resume(arm_state);
5364 +       resume = vchiq_check_resume(state);
5365 +       write_unlock_bh(&arm_state->susp_res_lock);
5366 +
5367 +       if (resume) {
5368 +               if (wait_for_completion_interruptible(
5369 +                       &arm_state->vc_resume_complete) < 0) {
5370 +                       vchiq_log_error(vchiq_susp_log_level,
5371 +                               "%s interrupted", __func__);
5372 +                       /* failed, cannot accurately derive suspend
5373 +                        * state, so exit early. */
5374 +                       goto out;
5375 +               }
5376 +       }
5377 +
5378 +       read_lock_bh(&arm_state->susp_res_lock);
5379 +       if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
5380 +               vchiq_log_info(vchiq_susp_log_level,
5381 +                               "%s: Videocore remains suspended", __func__);
5382 +       } else {
5383 +               vchiq_log_info(vchiq_susp_log_level,
5384 +                               "%s: Videocore resumed", __func__);
5385 +               ret = 0;
5386 +       }
5387 +       read_unlock_bh(&arm_state->susp_res_lock);
5388 +out:
5389 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5390 +       return ret;
5391 +}
5392 +
5393 +/* This function should be called with the write lock held */
5394 +int
5395 +vchiq_check_resume(VCHIQ_STATE_T *state)
5396 +{
5397 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5398 +       int resume = 0;
5399 +
5400 +       if (!arm_state)
5401 +               goto out;
5402 +
5403 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5404 +
5405 +       if (need_resume(state)) {
5406 +               set_resume_state(arm_state, VC_RESUME_REQUESTED);
5407 +               request_poll(state, NULL, 0);
5408 +               resume = 1;
5409 +       }
5410 +
5411 +out:
5412 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5413 +       return resume;
5414 +}
5415 +
5416 +void
5417 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
5418 +{
5419 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5420 +       int res = 0;
5421 +
5422 +       if (!arm_state)
5423 +               goto out;
5424 +
5425 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5426 +
5427 +       write_lock_bh(&arm_state->susp_res_lock);
5428 +       if (arm_state->wake_address == 0) {
5429 +               vchiq_log_info(vchiq_susp_log_level,
5430 +                                       "%s: already awake", __func__);
5431 +               goto unlock;
5432 +       }
5433 +       if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
5434 +               vchiq_log_info(vchiq_susp_log_level,
5435 +                                       "%s: already resuming", __func__);
5436 +               goto unlock;
5437 +       }
5438 +
5439 +       if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
5440 +               set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
5441 +               res = 1;
5442 +       } else
5443 +               vchiq_log_trace(vchiq_susp_log_level,
5444 +                               "%s: not resuming (resume state %s)", __func__,
5445 +                               resume_state_names[arm_state->vc_resume_state +
5446 +                                                       VC_RESUME_NUM_OFFSET]);
5447 +
5448 +unlock:
5449 +       write_unlock_bh(&arm_state->susp_res_lock);
5450 +
5451 +       if (res)
5452 +               vchiq_platform_resume(state);
5453 +
5454 +out:
5455 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
5456 +       return;
5457 +
5458 +}
5459 +
5460 +
5461 +
5462 +VCHIQ_STATUS_T
5463 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5464 +               enum USE_TYPE_E use_type)
5465 +{
5466 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5467 +       VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
5468 +       char entity[16];
5469 +       int *entity_uc;
5470 +       int local_uc, local_entity_uc;
5471 +
5472 +       if (!arm_state)
5473 +               goto out;
5474 +
5475 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5476 +
5477 +       if (use_type == USE_TYPE_VCHIQ) {
5478 +               sprintf(entity, "VCHIQ:   ");
5479 +               entity_uc = &arm_state->peer_use_count;
5480 +       } else if (service) {
5481 +               sprintf(entity, "%c%c%c%c:%03d",
5482 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5483 +                       service->client_id);
5484 +               entity_uc = &service->service_use_count;
5485 +       } else {
5486 +               vchiq_log_error(vchiq_susp_log_level, "%s null service "
5487 +                               "ptr", __func__);
5488 +               ret = VCHIQ_ERROR;
5489 +               goto out;
5490 +       }
5491 +
5492 +       write_lock_bh(&arm_state->susp_res_lock);
5493 +       while (arm_state->resume_blocked) {
5494 +               /* If we call 'use' while force suspend is waiting for suspend,
5495 +                * then we're about to block the thread which the force is
5496 +                * waiting to complete, so we're bound to just time out. In this
5497 +                * case, set the suspend state such that the wait will be
5498 +                * canceled, so we can complete as quickly as possible. */
5499 +               if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
5500 +                               VC_SUSPEND_IDLE) {
5501 +                       set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
5502 +                       break;
5503 +               }
5504 +               /* If suspend is already in progress then we need to block */
5505 +               if (!try_wait_for_completion(&arm_state->resume_blocker)) {
5506 +                       /* Indicate that there are threads waiting on the resume
5507 +                        * blocker.  These need to be allowed to complete before
5508 +                        * a _second_ call to force suspend can complete,
5509 +                        * otherwise low priority threads might never actually
5510 +                        * continue */
5511 +                       arm_state->blocked_count++;
5512 +                       write_unlock_bh(&arm_state->susp_res_lock);
5513 +                       vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
5514 +                               "blocked - waiting...", __func__, entity);
5515 +                       if (wait_for_completion_killable(
5516 +                                       &arm_state->resume_blocker) != 0) {
5517 +                               vchiq_log_error(vchiq_susp_log_level, "%s %s "
5518 +                                       "wait for resume blocker interrupted",
5519 +                                       __func__, entity);
5520 +                               ret = VCHIQ_ERROR;
5521 +                               write_lock_bh(&arm_state->susp_res_lock);
5522 +                               arm_state->blocked_count--;
5523 +                               write_unlock_bh(&arm_state->susp_res_lock);
5524 +                               goto out;
5525 +                       }
5526 +                       vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
5527 +                               "unblocked", __func__, entity);
5528 +                       write_lock_bh(&arm_state->susp_res_lock);
5529 +                       if (--arm_state->blocked_count == 0)
5530 +                               complete_all(&arm_state->blocked_blocker);
5531 +               }
5532 +       }
5533 +
5534 +       stop_suspend_timer(arm_state);
5535 +
5536 +       local_uc = ++arm_state->videocore_use_count;
5537 +       local_entity_uc = ++(*entity_uc);
5538 +
5539 +       /* If there's a pending request which hasn't yet been serviced then
5540 +        * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
5541 +        * vc_resume_complete will block until we either resume or fail to
5542 +        * suspend */
5543 +       if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
5544 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
5545 +
5546 +       if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
5547 +               set_resume_state(arm_state, VC_RESUME_REQUESTED);
5548 +               vchiq_log_info(vchiq_susp_log_level,
5549 +                       "%s %s count %d, state count %d",
5550 +                       __func__, entity, local_entity_uc, local_uc);
5551 +               request_poll(state, NULL, 0);
5552 +       } else
5553 +               vchiq_log_trace(vchiq_susp_log_level,
5554 +                       "%s %s count %d, state count %d",
5555 +                       __func__, entity, *entity_uc, local_uc);
5556 +
5557 +
5558 +       write_unlock_bh(&arm_state->susp_res_lock);
5559 +
5560 +       /* Completion is in a done state when we're not suspended, so this won't
5561 +        * block for the non-suspended case. */
5562 +       if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
5563 +               vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
5564 +                       __func__, entity);
5565 +               if (wait_for_completion_killable(
5566 +                               &arm_state->vc_resume_complete) != 0) {
5567 +                       vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
5568 +                               "resume interrupted", __func__, entity);
5569 +                       ret = VCHIQ_ERROR;
5570 +                       goto out;
5571 +               }
5572 +               vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
5573 +                       entity);
5574 +       }
5575 +
5576 +       if (ret == VCHIQ_SUCCESS) {
5577 +               VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
5578 +               long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
5579 +               while (ack_cnt && (status == VCHIQ_SUCCESS)) {
5580 +                       /* Send the use notify to videocore */
5581 +                       status = vchiq_send_remote_use_active(state);
5582 +                       if (status == VCHIQ_SUCCESS)
5583 +                               ack_cnt--;
5584 +                       else
5585 +                               atomic_add(ack_cnt,
5586 +                                       &arm_state->ka_use_ack_count);
5587 +               }
5588 +       }
5589 +
5590 +out:
5591 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5592 +       return ret;
5593 +}
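
The resume_blocker/blocked_blocker pair used above follows a reusable idiom: a completion left in the completed state acts as an open gate, is closed with INIT_COMPLETION() and reopened for every waiter with complete_all(). Below is a self-contained illustration of that idiom under the 3.10-era completion API this patch targets; the example_ names are illustrative only.

#include <linux/completion.h>

struct example_gate {
	struct completion gate;
};

static void example_gate_init(struct example_gate *g)
{
	init_completion(&g->gate);
	complete_all(&g->gate);		/* start open: waiters pass straight through */
}

static void example_gate_close(struct example_gate *g)
{
	INIT_COMPLETION(g->gate);	/* later waiters will block */
}

static void example_gate_open(struct example_gate *g)
{
	complete_all(&g->gate);		/* release all current and future waiters */
}

static int example_gate_pass(struct example_gate *g)
{
	if (try_wait_for_completion(&g->gate))
		return 0;		/* gate already open - no sleep needed */
	return wait_for_completion_killable(&g->gate);
}
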
5594 +
5595 +VCHIQ_STATUS_T
5596 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
5597 +{
5598 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5599 +       VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
5600 +       char entity[16];
5601 +       int *entity_uc;
5602 +       int local_uc, local_entity_uc;
5603 +
5604 +       if (!arm_state)
5605 +               goto out;
5606 +
5607 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5608 +
5609 +       if (service) {
5610 +               sprintf(entity, "%c%c%c%c:%03d",
5611 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5612 +                       service->client_id);
5613 +               entity_uc = &service->service_use_count;
5614 +       } else {
5615 +               sprintf(entity, "PEER:   ");
5616 +               entity_uc = &arm_state->peer_use_count;
5617 +       }
5618 +
5619 +       write_lock_bh(&arm_state->susp_res_lock);
5620 +       if (!arm_state->videocore_use_count || !(*entity_uc)) {
5621 +               /* Don't use BUG_ON - don't allow user thread to crash kernel */
5622 +               WARN_ON(!arm_state->videocore_use_count);
5623 +               WARN_ON(!(*entity_uc));
5624 +               ret = VCHIQ_ERROR;
5625 +               goto unlock;
5626 +       }
5627 +       local_uc = --arm_state->videocore_use_count;
5628 +       local_entity_uc = --(*entity_uc);
5629 +
5630 +       if (!vchiq_videocore_wanted(state)) {
5631 +               if (vchiq_platform_use_suspend_timer() &&
5632 +                               !arm_state->resume_blocked) {
5633 +                       /* Only use the timer if we're not trying to force
5634 +                        * suspend (=> resume_blocked) */
5635 +                       start_suspend_timer(arm_state);
5636 +               } else {
5637 +                       vchiq_log_info(vchiq_susp_log_level,
5638 +                               "%s %s count %d, state count %d - suspending",
5639 +                               __func__, entity, *entity_uc,
5640 +                               arm_state->videocore_use_count);
5641 +                       vchiq_arm_vcsuspend(state);
5642 +               }
5643 +       } else
5644 +               vchiq_log_trace(vchiq_susp_log_level,
5645 +                       "%s %s count %d, state count %d",
5646 +                       __func__, entity, *entity_uc,
5647 +                       arm_state->videocore_use_count);
5648 +
5649 +unlock:
5650 +       write_unlock_bh(&arm_state->susp_res_lock);
5651 +
5652 +out:
5653 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
5654 +       return ret;
5655 +}
5656 +
5657 +void
5658 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
5659 +{
5660 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5661 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5662 +       atomic_inc(&arm_state->ka_use_count);
5663 +       complete(&arm_state->ka_evt);
5664 +}
5665 +
5666 +void
5667 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
5668 +{
5669 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5670 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5671 +       atomic_inc(&arm_state->ka_release_count);
5672 +       complete(&arm_state->ka_evt);
5673 +}
5674 +
5675 +VCHIQ_STATUS_T
5676 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
5677 +{
5678 +       return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
5679 +}
5680 +
5681 +VCHIQ_STATUS_T
5682 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
5683 +{
5684 +       return vchiq_release_internal(service->state, service);
5685 +}
5686 +
5687 +static void suspend_timer_callback(unsigned long context)
5688 +{
5689 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
5690 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5691 +       if (!arm_state)
5692 +               goto out;
5693 +       vchiq_log_info(vchiq_susp_log_level,
5694 +               "%s - suspend timer expired - check suspend", __func__);
5695 +       vchiq_check_suspend(state);
5696 +out:
5697 +       return;
5698 +}
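
The start_suspend_timer()/stop_suspend_timer() helpers used earlier are defined elsewhere in this file; as a rough sketch of the pattern they follow, the callback above is simply wired to a one-shot kernel timer using the 3.10-era timer API. Field names come from vchiq_arm.h, while the init helper name and the 100 ms value are assumptions.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_init_suspend_timer(VCHIQ_STATE_T *state,
	VCHIQ_ARM_STATE_T *arm_state)
{
	setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
		(unsigned long)state);
	arm_state->suspend_timer_timeout = 100;	/* ms, illustrative value */
	arm_state->suspend_timer_running = 0;
}

static void example_start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	/* mod_timer() (re)arms the timer whether or not it is already pending */
	mod_timer(&arm_state->suspend_timer, jiffies +
		msecs_to_jiffies(arm_state->suspend_timer_timeout));
	arm_state->suspend_timer_running = 1;
}

static void example_stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	if (arm_state->suspend_timer_running) {
		del_timer(&arm_state->suspend_timer);
		arm_state->suspend_timer_running = 0;
	}
}
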
5699 +
5700 +VCHIQ_STATUS_T
5701 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
5702 +{
5703 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5704 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5705 +       if (service) {
5706 +               ret = vchiq_use_internal(service->state, service,
5707 +                               USE_TYPE_SERVICE_NO_RESUME);
5708 +               unlock_service(service);
5709 +       }
5710 +       return ret;
5711 +}
5712 +
5713 +VCHIQ_STATUS_T
5714 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
5715 +{
5716 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5717 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5718 +       if (service) {
5719 +               ret = vchiq_use_internal(service->state, service,
5720 +                               USE_TYPE_SERVICE);
5721 +               unlock_service(service);
5722 +       }
5723 +       return ret;
5724 +}
5725 +
5726 +VCHIQ_STATUS_T
5727 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
5728 +{
5729 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5730 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5731 +       if (service) {
5732 +               ret = vchiq_release_internal(service->state, service);
5733 +               unlock_service(service);
5734 +       }
5735 +       return ret;
5736 +}
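
A hypothetical client-side usage of the two exported wrappers above: the use count is held across an operation so autosuspend cannot power the videocore down mid-transaction. The work helper is a stand-in, not a real vchiq call.

#include "vchiq_arm.h"

/* Stand-in for whatever work the client does on the service. */
static VCHIQ_STATUS_T example_do_work(VCHIQ_SERVICE_HANDLE_T handle)
{
	(void)handle;
	return VCHIQ_SUCCESS;
}

static VCHIQ_STATUS_T example_transaction(VCHIQ_SERVICE_HANDLE_T handle)
{
	VCHIQ_STATUS_T status;

	/* Bumps the service and global use counts, resuming videocore
	 * (and blocking until it is up) if it was suspended. */
	status = vchiq_use_service(handle);
	if (status != VCHIQ_SUCCESS)
		return status;

	status = example_do_work(handle);

	/* Dropping the last use count either starts the suspend timer or,
	 * if the platform does not use the timer, suspends immediately. */
	vchiq_release_service(handle);
	return status;
}
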
5737 +
5738 +void
5739 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
5740 +{
5741 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5742 +       int i, j = 0;
5743 +       /* Only dump 64 services */
5744 +       static const int local_max_services = 64;
5745 +       /* If there are more than 64 services, only dump ones with
5746 +        * non-zero counts */
5747 +       int only_nonzero = 0;
5748 +       static const char *nz = "<-- preventing suspend";
5749 +
5750 +       enum vc_suspend_status vc_suspend_state;
5751 +       enum vc_resume_status  vc_resume_state;
5752 +       int peer_count;
5753 +       int vc_use_count;
5754 +       int active_services;
5755 +       struct service_data_struct {
5756 +               int fourcc;
5757 +               int clientid;
5758 +               int use_count;
5759 +       } service_data[local_max_services];
5760 +
5761 +       if (!arm_state)
5762 +               return;
5763 +
5764 +       read_lock_bh(&arm_state->susp_res_lock);
5765 +       vc_suspend_state = arm_state->vc_suspend_state;
5766 +       vc_resume_state  = arm_state->vc_resume_state;
5767 +       peer_count = arm_state->peer_use_count;
5768 +       vc_use_count = arm_state->videocore_use_count;
5769 +       active_services = state->unused_service;
5770 +       if (active_services > local_max_services)
5771 +               only_nonzero = 1;
5772 +
5773 +       for (i = 0; (i < active_services) && (j < local_max_services); i++) {
5774 +               VCHIQ_SERVICE_T *service_ptr = state->services[i];
5775 +               if (!service_ptr)
5776 +                       continue;
5777 +
5778 +               if (only_nonzero && !service_ptr->service_use_count)
5779 +                       continue;
5780 +
5781 +               if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
5782 +                       service_data[j].fourcc = service_ptr->base.fourcc;
5783 +                       service_data[j].clientid = service_ptr->client_id;
5784 +                       service_data[j++].use_count = service_ptr->
5785 +                                                       service_use_count;
5786 +               }
5787 +       }
5788 +
5789 +       read_unlock_bh(&arm_state->susp_res_lock);
5790 +
5791 +       vchiq_log_warning(vchiq_susp_log_level,
5792 +               "-- Videocore suspend state: %s --",
5793 +               suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
5794 +       vchiq_log_warning(vchiq_susp_log_level,
5795 +               "-- Videocore resume state: %s --",
5796 +               resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
5797 +
5798 +       if (only_nonzero)
5799 +               vchiq_log_warning(vchiq_susp_log_level, "Too many active "
5800 +                       "services (%d).  Only dumping up to first %d services "
5801 +                       "with non-zero use-count", active_services,
5802 +                       local_max_services);
5803 +
5804 +       for (i = 0; i < j; i++) {
5805 +               vchiq_log_warning(vchiq_susp_log_level,
5806 +                       "----- %c%c%c%c:%d service count %d %s",
5807 +                       VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
5808 +                       service_data[i].clientid,
5809 +                       service_data[i].use_count,
5810 +                       service_data[i].use_count ? nz : "");
5811 +       }
5812 +       vchiq_log_warning(vchiq_susp_log_level,
5813 +               "----- VCHIQ use count %d", peer_count);
5814 +       vchiq_log_warning(vchiq_susp_log_level,
5815 +               "--- Overall vchiq instance use count %d", vc_use_count);
5816 +
5817 +       vchiq_dump_platform_use_state(state);
5818 +}
5819 +
5820 +VCHIQ_STATUS_T
5821 +vchiq_check_service(VCHIQ_SERVICE_T *service)
5822 +{
5823 +       VCHIQ_ARM_STATE_T *arm_state;
5824 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
5825 +
5826 +       if (!service || !service->state)
5827 +               goto out;
5828 +
5829 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
5830 +
5831 +       arm_state = vchiq_platform_get_arm_state(service->state);
5832 +
5833 +       read_lock_bh(&arm_state->susp_res_lock);
5834 +       if (service->service_use_count)
5835 +               ret = VCHIQ_SUCCESS;
5836 +       read_unlock_bh(&arm_state->susp_res_lock);
5837 +
5838 +       if (ret == VCHIQ_ERROR) {
5839 +               vchiq_log_error(vchiq_susp_log_level,
5840 +                       "%s ERROR - %c%c%c%c:%d service count %d, "
5841 +                       "state count %d, videocore suspend state %s", __func__,
5842 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
5843 +                       service->client_id, service->service_use_count,
5844 +                       arm_state->videocore_use_count,
5845 +                       suspend_state_names[arm_state->vc_suspend_state +
5846 +                                               VC_SUSPEND_NUM_OFFSET]);
5847 +               vchiq_dump_service_use_state(service->state);
5848 +       }
5849 +out:
5850 +       return ret;
5851 +}
5852 +
5853 +/* stub functions */
5854 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
5855 +{
5856 +       (void)state;
5857 +}
5858 +
5859 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
5860 +       VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
5861 +{
5862 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
5863 +       vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
5864 +               get_conn_state_name(oldstate), get_conn_state_name(newstate));
5865 +       if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
5866 +               write_lock_bh(&arm_state->susp_res_lock);
5867 +               if (!arm_state->first_connect) {
5868 +                       char threadname[10];
5869 +                       arm_state->first_connect = 1;
5870 +                       write_unlock_bh(&arm_state->susp_res_lock);
5871 +                       snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
5872 +                               state->id);
5873 +                       arm_state->ka_thread = kthread_create(
5874 +                               &vchiq_keepalive_thread_func,
5875 +                               (void *)state,
5876 +                               threadname);
5877 +                       if (arm_state->ka_thread == NULL) {
5878 +                               vchiq_log_error(vchiq_susp_log_level,
5879 +                                       "vchiq: FATAL: couldn't create thread %s",
5880 +                                       threadname);
5881 +                       } else {
5882 +                               wake_up_process(arm_state->ka_thread);
5883 +                       }
5884 +               } else
5885 +                       write_unlock_bh(&arm_state->susp_res_lock);
5886 +       }
5887 +}
5888 +
5889 +
5890 +/****************************************************************************
5891 +*
5892 +*   vchiq_init - called when the module is loaded.
5893 +*
5894 +***************************************************************************/
5895 +
5896 +static int __init
5897 +vchiq_init(void)
5898 +{
5899 +       int err;
5900 +       void *ptr_err;
5901 +
5902 +       /* create proc entries */
5903 +       err = vchiq_proc_init();
5904 +       if (err != 0)
5905 +               goto failed_proc_init;
5906 +
5907 +       err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
5908 +       if (err != 0) {
5909 +               vchiq_log_error(vchiq_arm_log_level,
5910 +                       "Unable to allocate device number");
5911 +               goto failed_alloc_chrdev;
5912 +       }
5913 +       cdev_init(&vchiq_cdev, &vchiq_fops);
5914 +       vchiq_cdev.owner = THIS_MODULE;
5915 +       err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
5916 +       if (err != 0) {
5917 +               vchiq_log_error(vchiq_arm_log_level,
5918 +                       "Unable to register device");
5919 +               goto failed_cdev_add;
5920 +       }
5921 +
5922 +       /* create sysfs entries */
5923 +       vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
5924 +       ptr_err = vchiq_class;
5925 +       if (IS_ERR(ptr_err))
5926 +               goto failed_class_create;
5927 +
5928 +       vchiq_dev = device_create(vchiq_class, NULL,
5929 +               vchiq_devid, NULL, "vchiq");
5930 +       ptr_err = vchiq_dev;
5931 +       if (IS_ERR(ptr_err))
5932 +               goto failed_device_create;
5933 +
5934 +       err = vchiq_platform_init(&g_state);
5935 +       if (err != 0)
5936 +               goto failed_platform_init;
5937 +
5938 +       vchiq_log_info(vchiq_arm_log_level,
5939 +               "vchiq: initialised - version %d (min %d), device %d.%d",
5940 +               VCHIQ_VERSION, VCHIQ_VERSION_MIN,
5941 +               MAJOR(vchiq_devid), MINOR(vchiq_devid));
5942 +
5943 +       return 0;
5944 +
5945 +failed_platform_init:
5946 +       device_destroy(vchiq_class, vchiq_devid);
5947 +failed_device_create:
5948 +       class_destroy(vchiq_class);
5949 +failed_class_create:
5950 +       cdev_del(&vchiq_cdev);
5951 +       err = PTR_ERR(ptr_err);
5952 +failed_cdev_add:
5953 +       unregister_chrdev_region(vchiq_devid, 1);
5954 +failed_alloc_chrdev:
5955 +       vchiq_proc_deinit();
5956 +failed_proc_init:
5957 +       vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
5958 +       return err;
5959 +}
5960 +
5961 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
5962 +{
5963 +       VCHIQ_SERVICE_T *service;
5964 +       int use_count = 0, i;
5965 +       i = 0;
5966 +       while ((service = next_service_by_instance(instance->state,
5967 +               instance, &i)) != NULL) {
5968 +               use_count += service->service_use_count;
5969 +               unlock_service(service);
5970 +       }
5971 +       return use_count;
5972 +}
5973 +
5974 +/* read the per-process use-count */
5975 +static int proc_read_use_count(char *page, char **start,
5976 +                              off_t off, int count,
5977 +                              int *eof, void *data)
5978 +{
5979 +       VCHIQ_INSTANCE_T instance = data;
5980 +       int len, use_count;
5981 +
5982 +       use_count = vchiq_instance_get_use_count(instance);
5983 +       len = snprintf(page+off, count, "%d\n", use_count);
5984 +
5985 +       return len;
5986 +}
5987 +
5988 +/* add an instance (process) to the proc entries */
5989 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
5990 +{
5991 +       char pidstr[32];
5992 +       struct proc_dir_entry *top, *use_count;
5993 +       struct proc_dir_entry *clients = vchiq_clients_top();
5994 +       int pid = instance->pid;
5995 +
5996 +       snprintf(pidstr, sizeof(pidstr), "%d", pid);
5997 +       top = proc_mkdir(pidstr, clients);
5998 +       if (!top)
5999 +               goto fail_top;
6000 +#if 0
6001 +       use_count = create_proc_read_entry("use_count",
6002 +                                          0444, top,
6003 +                                          proc_read_use_count,
6004 +                                          instance);
6005 +       if (!use_count)
6006 +               goto fail_use_count;
6007 +
6008 +       instance->proc_entry = top;
6009 +#endif
6010 +       return 0;
6011 +
6012 +fail_use_count:
6013 +#if 0
6014 +       remove_proc_entry(top->name, clients);
6015 +#endif
6016 +fail_top:
6017 +       return -ENOMEM;
6018 +}
6019 +
6020 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
6021 +{
6022 +#if 0
6023 +       struct proc_dir_entry *clients = vchiq_clients_top();
6024 +       remove_proc_entry("use_count", instance->proc_entry);
6025 +       remove_proc_entry(instance->proc_entry->name, clients);
6026 +#endif
6027 +}
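
The #if 0 blocks above appear to exist because create_proc_read_entry() is gone in 3.10. A sketch of the seq_file-based equivalent for the per-instance use_count entry is shown below, reusing vchiq_instance_get_use_count(); the example_ names are assumptions and this is not part of the patch.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_use_count_show(struct seq_file *m, void *v)
{
	VCHIQ_INSTANCE_T instance = m->private;

	seq_printf(m, "%d\n", vchiq_instance_get_use_count(instance));
	return 0;
}

static int example_use_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_use_count_show, PDE_DATA(inode));
}

static const struct file_operations example_use_count_fops = {
	.owner   = THIS_MODULE,
	.open    = example_use_count_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* Usage sketch:
 *   proc_create_data("use_count", 0444, top,
 *                    &example_use_count_fops, instance);
 */
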
6028 +
6029 +/****************************************************************************
6030 +*
6031 +*   vchiq_exit - called when the module is unloaded.
6032 +*
6033 +***************************************************************************/
6034 +
6035 +static void __exit
6036 +vchiq_exit(void)
6037 +{
6038 +       vchiq_platform_exit(&g_state);
6039 +       device_destroy(vchiq_class, vchiq_devid);
6040 +       class_destroy(vchiq_class);
6041 +       cdev_del(&vchiq_cdev);
6042 +       unregister_chrdev_region(vchiq_devid, 1);
6043 +}
6044 +
6045 +module_init(vchiq_init);
6046 +module_exit(vchiq_exit);
6047 +MODULE_LICENSE("GPL");
6048 +MODULE_AUTHOR("Broadcom Corporation");
6049 --- /dev/null
6050 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
6051 @@ -0,0 +1,212 @@
6052 +/**
6053 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6054 + *
6055 + * Redistribution and use in source and binary forms, with or without
6056 + * modification, are permitted provided that the following conditions
6057 + * are met:
6058 + * 1. Redistributions of source code must retain the above copyright
6059 + *    notice, this list of conditions, and the following disclaimer,
6060 + *    without modification.
6061 + * 2. Redistributions in binary form must reproduce the above copyright
6062 + *    notice, this list of conditions and the following disclaimer in the
6063 + *    documentation and/or other materials provided with the distribution.
6064 + * 3. The names of the above-listed copyright holders may not be used
6065 + *    to endorse or promote products derived from this software without
6066 + *    specific prior written permission.
6067 + *
6068 + * ALTERNATIVELY, this software may be distributed under the terms of the
6069 + * GNU General Public License ("GPL") version 2, as published by the Free
6070 + * Software Foundation.
6071 + *
6072 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6073 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6074 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6075 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6076 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6077 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6078 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6079 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6080 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6081 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6082 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6083 + */
6084 +
6085 +#ifndef VCHIQ_ARM_H
6086 +#define VCHIQ_ARM_H
6087 +
6088 +#include <linux/mutex.h>
6089 +#include <linux/semaphore.h>
6090 +#include <linux/atomic.h>
6091 +#include "vchiq_core.h"
6092 +
6093 +
6094 +enum vc_suspend_status {
6095 +       VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
6096 +       VC_SUSPEND_REJECTED = -2,  /* Videocore rejected suspend request */
6097 +       VC_SUSPEND_FAILED = -1,    /* Videocore suspend failed */
6098 +       VC_SUSPEND_IDLE = 0,       /* VC active, no suspend actions */
6099 +       VC_SUSPEND_REQUESTED,      /* User has requested suspend */
6100 +       VC_SUSPEND_IN_PROGRESS,    /* Slot handler has recvd suspend request */
6101 +       VC_SUSPEND_SUSPENDED       /* Videocore suspend succeeded */
6102 +};
6103 +
6104 +enum vc_resume_status {
6105 +       VC_RESUME_FAILED = -1, /* Videocore resume failed */
6106 +       VC_RESUME_IDLE = 0,    /* VC suspended, no resume actions */
6107 +       VC_RESUME_REQUESTED,   /* User has requested resume */
6108 +       VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
6109 +       VC_RESUME_RESUMED      /* Videocore resumed successfully (active) */
6110 +};
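
Because both enums start at negative values, the logging code in vchiq_arm.c indexes its state-name tables with an offset (VC_SUSPEND_NUM_OFFSET / VC_RESUME_NUM_OFFSET, defined alongside the tables in vchiq_arm.c). A sketch of how the suspend side lines up; the example_ names and table are illustrative, not the patch's own definitions.

#define EXAMPLE_VC_SUSPEND_NUM_OFFSET 3	/* i.e. -VC_SUSPEND_FORCE_CANCELED */

static const char *const example_suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",	/* -3 */
	"VC_SUSPEND_REJECTED",		/* -2 */
	"VC_SUSPEND_FAILED",		/* -1 */
	"VC_SUSPEND_IDLE",		/*  0 */
	"VC_SUSPEND_REQUESTED",		/*  1 */
	"VC_SUSPEND_IN_PROGRESS",	/*  2 */
	"VC_SUSPEND_SUSPENDED"		/*  3 */
};

static inline const char *
example_suspend_state_name(enum vc_suspend_status s)
{
	return example_suspend_state_names[s + EXAMPLE_VC_SUSPEND_NUM_OFFSET];
}
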
6111 +
6112 +
6113 +enum USE_TYPE_E {
6114 +       USE_TYPE_SERVICE,
6115 +       USE_TYPE_SERVICE_NO_RESUME,
6116 +       USE_TYPE_VCHIQ
6117 +};
6118 +
6119 +
6120 +
6121 +typedef struct vchiq_arm_state_struct {
6122 +       /* Keepalive-related data */
6123 +       struct task_struct *ka_thread;
6124 +       struct completion ka_evt;
6125 +       atomic_t ka_use_count;
6126 +       atomic_t ka_use_ack_count;
6127 +       atomic_t ka_release_count;
6128 +
6129 +       struct completion vc_suspend_complete;
6130 +       struct completion vc_resume_complete;
6131 +
6132 +       rwlock_t susp_res_lock;
6133 +       enum vc_suspend_status vc_suspend_state;
6134 +       enum vc_resume_status vc_resume_state;
6135 +
6136 +       unsigned int wake_address;
6137 +
6138 +       struct timer_list suspend_timer;
6139 +       int suspend_timer_timeout;
6140 +       int suspend_timer_running;
6141 +
6142 +       /* Global use count for videocore.
6143 +       ** This is equal to the sum of the use counts for all services.  When
6144 +       ** this hits zero the videocore suspend procedure will be initiated.
6145 +       */
6146 +       int videocore_use_count;
6147 +
6148 +       /* Use count to track requests from videocore peer.
6149 +       ** This use count is not associated with a service, so needs to be
6150 +       ** tracked separately with the state.
6151 +       */
6152 +       int peer_use_count;
6153 +
6154 +       /* Flag to indicate whether resume is blocked.  This happens when the
6155 +       ** ARM is suspending
6156 +       */
6157 +       struct completion resume_blocker;
6158 +       int resume_blocked;
6159 +       struct completion blocked_blocker;
6160 +       int blocked_count;
6161 +
6162 +       int autosuspend_override;
6163 +
6164 +       /* Flag to indicate that the first vchiq connect has made it through.
6165 +       ** This means that both sides should be fully ready, and we should
6166 +       ** be able to suspend after this point.
6167 +       */
6168 +       int first_connect;
6169 +
6170 +       unsigned long long suspend_start_time;
6171 +       unsigned long long sleep_start_time;
6172 +       unsigned long long resume_start_time;
6173 +       unsigned long long last_wake_time;
6174 +
6175 +} VCHIQ_ARM_STATE_T;
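
The structure is initialised by vchiq_arm_init_state() (declared below, defined in vchiq_arm.c). As a hedged sketch of the minimum such set-up has to establish for the suspend/resume code to work, given how the completions are used there (vc_resume_complete and resume_blocker start in the completed, "open" state), the helper name here is hypothetical:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static void example_arm_state_init(VCHIQ_ARM_STATE_T *arm_state)
{
	memset(arm_state, 0, sizeof(*arm_state));

	rwlock_init(&arm_state->susp_res_lock);

	init_completion(&arm_state->ka_evt);
	init_completion(&arm_state->vc_suspend_complete);
	init_completion(&arm_state->vc_resume_complete);
	complete_all(&arm_state->vc_resume_complete);	/* not suspended yet */

	init_completion(&arm_state->resume_blocker);
	complete_all(&arm_state->resume_blocker);	/* resume not blocked */
	init_completion(&arm_state->blocked_blocker);
	complete_all(&arm_state->blocked_blocker);

	arm_state->vc_suspend_state = VC_SUSPEND_IDLE;
	arm_state->vc_resume_state = VC_RESUME_RESUMED;
}
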
6176 +
6177 +extern int vchiq_arm_log_level;
6178 +extern int vchiq_susp_log_level;
6179 +
6180 +extern int __init
6181 +vchiq_platform_init(VCHIQ_STATE_T *state);
6182 +
6183 +extern void __exit
6184 +vchiq_platform_exit(VCHIQ_STATE_T *state);
6185 +
6186 +extern VCHIQ_STATE_T *
6187 +vchiq_get_state(void);
6188 +
6189 +extern VCHIQ_STATUS_T
6190 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
6191 +
6192 +extern VCHIQ_STATUS_T
6193 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
6194 +
6195 +extern int
6196 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
6197 +
6198 +extern VCHIQ_STATUS_T
6199 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
6200 +
6201 +extern VCHIQ_STATUS_T
6202 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
6203 +
6204 +extern int
6205 +vchiq_check_resume(VCHIQ_STATE_T *state);
6206 +
6207 +extern void
6208 +vchiq_check_suspend(VCHIQ_STATE_T *state);
6209 +
6210 +extern VCHIQ_STATUS_T
6211 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
6212 +
6213 +extern VCHIQ_STATUS_T
6214 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
6215 +
6216 +extern VCHIQ_STATUS_T
6217 +vchiq_check_service(VCHIQ_SERVICE_T *service);
6218 +
6219 +extern VCHIQ_STATUS_T
6220 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
6221 +
6222 +extern int
6223 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
6224 +
6225 +extern int
6226 +vchiq_platform_use_suspend_timer(void);
6227 +
6228 +extern void
6229 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
6230 +
6231 +extern void
6232 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
6233 +
6234 +extern VCHIQ_ARM_STATE_T*
6235 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
6236 +
6237 +extern int
6238 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
6239 +
6240 +extern VCHIQ_STATUS_T
6241 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6242 +               enum USE_TYPE_E use_type);
6243 +extern VCHIQ_STATUS_T
6244 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
6245 +
6246 +void
6247 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
6248 +       enum vc_suspend_status new_state);
6249 +
6250 +void
6251 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
6252 +       enum vc_resume_status new_state);
6253 +
6254 +void
6255 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
6256 +
6257 +extern int vchiq_proc_init(void);
6258 +extern void vchiq_proc_deinit(void);
6259 +extern struct proc_dir_entry *vchiq_proc_top(void);
6260 +extern struct proc_dir_entry *vchiq_clients_top(void);
6261 +
6262 +
6263 +#endif /* VCHIQ_ARM_H */
6264 --- /dev/null
6265 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
6266 @@ -0,0 +1,37 @@
6267 +/**
6268 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6269 + *
6270 + * Redistribution and use in source and binary forms, with or without
6271 + * modification, are permitted provided that the following conditions
6272 + * are met:
6273 + * 1. Redistributions of source code must retain the above copyright
6274 + *    notice, this list of conditions, and the following disclaimer,
6275 + *    without modification.
6276 + * 2. Redistributions in binary form must reproduce the above copyright
6277 + *    notice, this list of conditions and the following disclaimer in the
6278 + *    documentation and/or other materials provided with the distribution.
6279 + * 3. The names of the above-listed copyright holders may not be used
6280 + *    to endorse or promote products derived from this software without
6281 + *    specific prior written permission.
6282 + *
6283 + * ALTERNATIVELY, this software may be distributed under the terms of the
6284 + * GNU General Public License ("GPL") version 2, as published by the Free
6285 + * Software Foundation.
6286 + *
6287 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6288 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6289 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6290 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6291 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6292 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6293 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6294 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6295 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6296 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6297 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6298 + */
6299 +
6300 +const char *vchiq_get_build_hostname(void);
6301 +const char *vchiq_get_build_version(void);
6302 +const char *vchiq_get_build_time(void);
6303 +const char *vchiq_get_build_date(void);
6304 --- /dev/null
6305 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
6306 @@ -0,0 +1,60 @@
6307 +/**
6308 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6309 + *
6310 + * Redistribution and use in source and binary forms, with or without
6311 + * modification, are permitted provided that the following conditions
6312 + * are met:
6313 + * 1. Redistributions of source code must retain the above copyright
6314 + *    notice, this list of conditions, and the following disclaimer,
6315 + *    without modification.
6316 + * 2. Redistributions in binary form must reproduce the above copyright
6317 + *    notice, this list of conditions and the following disclaimer in the
6318 + *    documentation and/or other materials provided with the distribution.
6319 + * 3. The names of the above-listed copyright holders may not be used
6320 + *    to endorse or promote products derived from this software without
6321 + *    specific prior written permission.
6322 + *
6323 + * ALTERNATIVELY, this software may be distributed under the terms of the
6324 + * GNU General Public License ("GPL") version 2, as published by the Free
6325 + * Software Foundation.
6326 + *
6327 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6328 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6329 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6330 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6331 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6332 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6333 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6334 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6335 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6336 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6337 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6338 + */
6339 +
6340 +#ifndef VCHIQ_CFG_H
6341 +#define VCHIQ_CFG_H
6342 +
6343 +#define VCHIQ_MAGIC              VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
6344 +/* The version of VCHIQ - change with any non-trivial change */
6345 +#define VCHIQ_VERSION            6
6346 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
6347 +** incompatible change */
6348 +#define VCHIQ_VERSION_MIN        3
6349 +
6350 +#define VCHIQ_MAX_STATES         1
6351 +#define VCHIQ_MAX_SERVICES       4096
6352 +#define VCHIQ_MAX_SLOTS          128
6353 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
6354 +
6355 +#define VCHIQ_NUM_CURRENT_BULKS        32
6356 +#define VCHIQ_NUM_SERVICE_BULKS        4
6357 +
6358 +#ifndef VCHIQ_ENABLE_DEBUG
6359 +#define VCHIQ_ENABLE_DEBUG             1
6360 +#endif
6361 +
6362 +#ifndef VCHIQ_ENABLE_STATS
6363 +#define VCHIQ_ENABLE_STATS             1
6364 +#endif
6365 +
6366 +#endif /* VCHIQ_CFG_H */
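
One way to read the rule the version comments above describe: a peer is acceptable when the version it reports is no older than the oldest version we still understand. A tiny sketch of that check (the helper name is hypothetical; the real test presumably happens during the connect handshake in vchiq_core.c):

#include "vchiq_cfg.h"

static inline int example_version_compatible(int peer_version)
{
	/* Reject peers older than our declared minimum. */
	return peer_version >= VCHIQ_VERSION_MIN;
}
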
6367 --- /dev/null
6368 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
6369 @@ -0,0 +1,119 @@
6370 +/**
6371 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6372 + *
6373 + * Redistribution and use in source and binary forms, with or without
6374 + * modification, are permitted provided that the following conditions
6375 + * are met:
6376 + * 1. Redistributions of source code must retain the above copyright
6377 + *    notice, this list of conditions, and the following disclaimer,
6378 + *    without modification.
6379 + * 2. Redistributions in binary form must reproduce the above copyright
6380 + *    notice, this list of conditions and the following disclaimer in the
6381 + *    documentation and/or other materials provided with the distribution.
6382 + * 3. The names of the above-listed copyright holders may not be used
6383 + *    to endorse or promote products derived from this software without
6384 + *    specific prior written permission.
6385 + *
6386 + * ALTERNATIVELY, this software may be distributed under the terms of the
6387 + * GNU General Public License ("GPL") version 2, as published by the Free
6388 + * Software Foundation.
6389 + *
6390 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6391 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6392 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6393 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6394 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6395 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6396 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6397 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6398 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6399 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6400 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6401 + */
6402 +
6403 +#include "vchiq_connected.h"
6404 +#include "vchiq_core.h"
6405 +#include <linux/module.h>
6406 +#include <linux/mutex.h>
6407 +
6408 +#define  MAX_CALLBACKS  10
6409 +
6410 +static   int                        g_connected;
6411 +static   int                        g_num_deferred_callbacks;
6412 +static   VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
6413 +static   int                        g_once_init;
6414 +static   struct mutex               g_connected_mutex;
6415 +
6416 +/****************************************************************************
6417 +*
6418 +* Function to initialize our lock.
6419 +*
6420 +***************************************************************************/
6421 +
6422 +static void connected_init(void)
6423 +{
6424 +       if (!g_once_init) {
6425 +               mutex_init(&g_connected_mutex);
6426 +               g_once_init = 1;
6427 +       }
6428 +}
6429 +
6430 +/****************************************************************************
6431 +*
6432 +* This function is used to defer a callback until the vchiq stack has been
6433 +* connected. If the stack is already connected, the callback is made
6434 +* immediately; otherwise it is deferred until
6435 +* vchiq_call_connected_callbacks is called.
6436 +*
6437 +***************************************************************************/
6438 +
6439 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
6440 +{
6441 +       connected_init();
6442 +
6443 +       if (mutex_lock_interruptible(&g_connected_mutex) != 0)
6444 +               return;
6445 +
6446 +       if (g_connected)
6447 +               /* We're already connected. Call the callback immediately. */
6448 +
6449 +               callback();
6450 +       else {
6451 +               if (g_num_deferred_callbacks >= MAX_CALLBACKS)
6452 +                       vchiq_log_error(vchiq_core_log_level,
6453 +                               "There are already %d callbacks registered - "
6454 +                               "please increase MAX_CALLBACKS",
6455 +                               g_num_deferred_callbacks);
6456 +               else {
6457 +                       g_deferred_callback[g_num_deferred_callbacks] =
6458 +                               callback;
6459 +                       g_num_deferred_callbacks++;
6460 +               }
6461 +       }
6462 +       mutex_unlock(&g_connected_mutex);
6463 +}
6464 +
6465 +/****************************************************************************
6466 +*
6467 +* This function is called by the vchiq stack once it has been connected to
6468 +* the videocore and clients can start to use the stack.
6469 +*
6470 +***************************************************************************/
6471 +
6472 +void vchiq_call_connected_callbacks(void)
6473 +{
6474 +       int i;
6475 +
6476 +       connected_init();
6477 +
6478 +       if (mutex_lock_interruptible(&g_connected_mutex) != 0)
6479 +               return;
6480 +
6481 +       for (i = 0; i <  g_num_deferred_callbacks; i++)
6482 +               g_deferred_callback[i]();
6483 +
6484 +       g_num_deferred_callbacks = 0;
6485 +       g_connected = 1;
6486 +       mutex_unlock(&g_connected_mutex);
6487 +}
6488 +EXPORT_SYMBOL(vchiq_add_connected_callback);
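The two functions above form a small rendezvous API: clients register a callback, and the stack flushes the deferred list once the VideoCore connection comes up. A minimal usage sketch from a dependent module (the module and function names below are hypothetical; only vchiq_add_connected_callback() and MAX_CALLBACKS come from the code above):

#include <linux/module.h>
#include "vchiq_connected.h"

/* Hypothetical client that must not touch VCHIQ services before the
 * stack is connected. */
static void my_client_start(void)
{
        /* Runs immediately if the stack is already connected, otherwise
         * once vchiq_call_connected_callbacks() fires on connection. */
}

static int __init my_client_init(void)
{
        /* Deferred registrations are capped at MAX_CALLBACKS (10). */
        vchiq_add_connected_callback(my_client_start);
        return 0;
}
module_init(my_client_init);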
6489 --- /dev/null
6490 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
6491 @@ -0,0 +1,51 @@
6492 +/**
6493 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6494 + *
6495 + * Redistribution and use in source and binary forms, with or without
6496 + * modification, are permitted provided that the following conditions
6497 + * are met:
6498 + * 1. Redistributions of source code must retain the above copyright
6499 + *    notice, this list of conditions, and the following disclaimer,
6500 + *    without modification.
6501 + * 2. Redistributions in binary form must reproduce the above copyright
6502 + *    notice, this list of conditions and the following disclaimer in the
6503 + *    documentation and/or other materials provided with the distribution.
6504 + * 3. The names of the above-listed copyright holders may not be used
6505 + *    to endorse or promote products derived from this software without
6506 + *    specific prior written permission.
6507 + *
6508 + * ALTERNATIVELY, this software may be distributed under the terms of the
6509 + * GNU General Public License ("GPL") version 2, as published by the Free
6510 + * Software Foundation.
6511 + *
6512 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6513 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6514 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6515 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6516 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6517 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6518 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6519 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6520 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6521 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6522 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6523 + */
6524 +
6525 +#ifndef VCHIQ_CONNECTED_H
6526 +#define VCHIQ_CONNECTED_H
6527 +
6528 +/* ---- Include Files ----------------------------------------------------- */
6529 +
6530 +/* ---- Constants and Types ---------------------------------------------- */
6531 +
6532 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
6533 +
6534 +/* ---- Variable Externs ------------------------------------------------- */
6535 +
6536 +/* ---- Function Prototypes ---------------------------------------------- */
6537 +
6538 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
6539 +void vchiq_call_connected_callbacks(void);
6540 +
6541 +#endif /* VCHIQ_CONNECTED_H */
6542 +
6543 --- /dev/null
6544 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
6545 @@ -0,0 +1,3818 @@
6546 +/**
6547 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
6548 + *
6549 + * Redistribution and use in source and binary forms, with or without
6550 + * modification, are permitted provided that the following conditions
6551 + * are met:
6552 + * 1. Redistributions of source code must retain the above copyright
6553 + *    notice, this list of conditions, and the following disclaimer,
6554 + *    without modification.
6555 + * 2. Redistributions in binary form must reproduce the above copyright
6556 + *    notice, this list of conditions and the following disclaimer in the
6557 + *    documentation and/or other materials provided with the distribution.
6558 + * 3. The names of the above-listed copyright holders may not be used
6559 + *    to endorse or promote products derived from this software without
6560 + *    specific prior written permission.
6561 + *
6562 + * ALTERNATIVELY, this software may be distributed under the terms of the
6563 + * GNU General Public License ("GPL") version 2, as published by the Free
6564 + * Software Foundation.
6565 + *
6566 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
6567 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
6568 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
6569 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
6570 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
6571 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6572 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
6573 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
6574 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
6575 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6576 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6577 + */
6578 +
6579 +#include "vchiq_core.h"
6580 +
6581 +#define VCHIQ_SLOT_HANDLER_STACK 8192
6582 +
6583 +#define HANDLE_STATE_SHIFT 12
6584 +
6585 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
6586 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
6587 +#define SLOT_INDEX_FROM_DATA(state, data) \
6588 +       (((unsigned int)((char *)data - (char *)state->slot_data)) / \
6589 +       VCHIQ_SLOT_SIZE)
6590 +#define SLOT_INDEX_FROM_INFO(state, info) \
6591 +       ((unsigned int)(info - state->slot_info))
6592 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
6593 +       ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
6594 +
6595 +
6596 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
6597 +
6598 +
6599 +struct vchiq_open_payload {
6600 +       int fourcc;
6601 +       int client_id;
6602 +       short version;
6603 +       short version_min;
6604 +};
6605 +
6606 +struct vchiq_openack_payload {
6607 +       short version;
6608 +};
6609 +
6610 +/* we require this for consistency between endpoints */
6611 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
6612 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
6613 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
6614 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
6615 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
6616 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
6617 +
6618 +/* Run time control of log level, based on KERN_XXX level. */
6619 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
6620 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
6621 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
6622 +
6623 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
6624 +
6625 +static DEFINE_SPINLOCK(service_spinlock);
6626 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
6627 +DEFINE_SPINLOCK(quota_spinlock);
6628 +
6629 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
6630 +static unsigned int handle_seq;
6631 +
6632 +static const char *const srvstate_names[] = {
6633 +       "FREE",
6634 +       "HIDDEN",
6635 +       "LISTENING",
6636 +       "OPENING",
6637 +       "OPEN",
6638 +       "OPENSYNC",
6639 +       "CLOSESENT",
6640 +       "CLOSERECVD",
6641 +       "CLOSEWAIT",
6642 +       "CLOSED"
6643 +};
6644 +
6645 +static const char *const reason_names[] = {
6646 +       "SERVICE_OPENED",
6647 +       "SERVICE_CLOSED",
6648 +       "MESSAGE_AVAILABLE",
6649 +       "BULK_TRANSMIT_DONE",
6650 +       "BULK_RECEIVE_DONE",
6651 +       "BULK_TRANSMIT_ABORTED",
6652 +       "BULK_RECEIVE_ABORTED"
6653 +};
6654 +
6655 +static const char *const conn_state_names[] = {
6656 +       "DISCONNECTED",
6657 +       "CONNECTING",
6658 +       "CONNECTED",
6659 +       "PAUSING",
6660 +       "PAUSE_SENT",
6661 +       "PAUSED",
6662 +       "RESUMING",
6663 +       "PAUSE_TIMEOUT",
6664 +       "RESUME_TIMEOUT"
6665 +};
6666 +
6667 +
6668 +static void
6669 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
6670 +
6671 +static const char *msg_type_str(unsigned int msg_type)
6672 +{
6673 +       switch (msg_type) {
6674 +       case VCHIQ_MSG_PADDING:       return "PADDING";
6675 +       case VCHIQ_MSG_CONNECT:       return "CONNECT";
6676 +       case VCHIQ_MSG_OPEN:          return "OPEN";
6677 +       case VCHIQ_MSG_OPENACK:       return "OPENACK";
6678 +       case VCHIQ_MSG_CLOSE:         return "CLOSE";
6679 +       case VCHIQ_MSG_DATA:          return "DATA";
6680 +       case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
6681 +       case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
6682 +       case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
6683 +       case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
6684 +       case VCHIQ_MSG_PAUSE:         return "PAUSE";
6685 +       case VCHIQ_MSG_RESUME:        return "RESUME";
6686 +       case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
6687 +       case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
6688 +       case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
6689 +       }
6690 +       return "???";
6691 +}
6692 +
6693 +static inline void
6694 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
6695 +{
6696 +       vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
6697 +               service->state->id, service->localport,
6698 +               srvstate_names[service->srvstate],
6699 +               srvstate_names[newstate]);
6700 +       service->srvstate = newstate;
6701 +}
6702 +
6703 +VCHIQ_SERVICE_T *
6704 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
6705 +{
6706 +       VCHIQ_SERVICE_T *service;
6707 +
6708 +       spin_lock(&service_spinlock);
6709 +       service = handle_to_service(handle);
6710 +       if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6711 +               (service->handle == handle)) {
6712 +               BUG_ON(service->ref_count == 0);
6713 +               service->ref_count++;
6714 +       } else
6715 +               service = NULL;
6716 +       spin_unlock(&service_spinlock);
6717 +
6718 +       if (!service)
6719 +               vchiq_log_info(vchiq_core_log_level,
6720 +                       "Invalid service handle 0x%x", handle);
6721 +
6722 +       return service;
6723 +}
6724 +
6725 +VCHIQ_SERVICE_T *
6726 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
6727 +{
6728 +       VCHIQ_SERVICE_T *service = NULL;
6729 +       if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
6730 +               spin_lock(&service_spinlock);
6731 +               service = state->services[localport];
6732 +               if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
6733 +                       BUG_ON(service->ref_count == 0);
6734 +                       service->ref_count++;
6735 +               } else
6736 +                       service = NULL;
6737 +               spin_unlock(&service_spinlock);
6738 +       }
6739 +
6740 +       if (!service)
6741 +               vchiq_log_info(vchiq_core_log_level,
6742 +                       "Invalid port %d", localport);
6743 +
6744 +       return service;
6745 +}
6746 +
6747 +VCHIQ_SERVICE_T *
6748 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
6749 +       VCHIQ_SERVICE_HANDLE_T handle) {
6750 +       VCHIQ_SERVICE_T *service;
6751 +
6752 +       spin_lock(&service_spinlock);
6753 +       service = handle_to_service(handle);
6754 +       if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
6755 +               (service->handle == handle) &&
6756 +               (service->instance == instance)) {
6757 +               BUG_ON(service->ref_count == 0);
6758 +               service->ref_count++;
6759 +       } else
6760 +               service = NULL;
6761 +       spin_unlock(&service_spinlock);
6762 +
6763 +       if (!service)
6764 +               vchiq_log_info(vchiq_core_log_level,
6765 +                       "Invalid service handle 0x%x", handle);
6766 +
6767 +       return service;
6768 +}
6769 +
6770 +VCHIQ_SERVICE_T *
6771 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
6772 +       int *pidx)
6773 +{
6774 +       VCHIQ_SERVICE_T *service = NULL;
6775 +       int idx = *pidx;
6776 +
6777 +       spin_lock(&service_spinlock);
6778 +       while (idx < state->unused_service) {
6779 +               VCHIQ_SERVICE_T *srv = state->services[idx++];
6780 +               if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
6781 +                       (srv->instance == instance)) {
6782 +                       service = srv;
6783 +                       BUG_ON(service->ref_count == 0);
6784 +                       service->ref_count++;
6785 +                       break;
6786 +               }
6787 +       }
6788 +       spin_unlock(&service_spinlock);
6789 +
6790 +       *pidx = idx;
6791 +
6792 +       return service;
6793 +}
6794 +
6795 +void
6796 +lock_service(VCHIQ_SERVICE_T *service)
6797 +{
6798 +       spin_lock(&service_spinlock);
6799 +       BUG_ON(!service || (service->ref_count == 0));
6800 +       if (service)
6801 +               service->ref_count++;
6802 +       spin_unlock(&service_spinlock);
6803 +}
6804 +
6805 +void
6806 +unlock_service(VCHIQ_SERVICE_T *service)
6807 +{
6808 +       VCHIQ_STATE_T *state = service->state;
6809 +       spin_lock(&service_spinlock);
6810 +       BUG_ON(!service || (service->ref_count == 0));
6811 +       if (service && service->ref_count) {
6812 +               service->ref_count--;
6813 +               if (!service->ref_count) {
6814 +                       BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
6815 +                       state->services[service->localport] = NULL;
6816 +               } else
6817 +                       service = NULL;
6818 +       }
6819 +       spin_unlock(&service_spinlock);
6820 +
6821 +       kfree(service);
6822 +}
6823 +
6824 +int
6825 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
6826 +{
6827 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
6828 +       int id;
6829 +
6830 +       id = service ? service->client_id : 0;
6831 +       if (service)
6832 +               unlock_service(service);
6833 +
6834 +       return id;
6835 +}
6836 +
6837 +void *
6838 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
6839 +{
6840 +       VCHIQ_SERVICE_T *service = handle_to_service(handle);
6841 +
6842 +       return service ? service->base.userdata : NULL;
6843 +}
6844 +
6845 +int
6846 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
6847 +{
6848 +       VCHIQ_SERVICE_T *service = handle_to_service(handle);
6849 +
6850 +       return service ? service->base.fourcc : 0;
6851 +}
6852 +
6853 +static void
6854 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
6855 +{
6856 +       VCHIQ_STATE_T *state = service->state;
6857 +       VCHIQ_SERVICE_QUOTA_T *service_quota;
6858 +
6859 +       service->closing = 1;
6860 +
6861 +       /* Synchronise with other threads. */
6862 +       mutex_lock(&state->recycle_mutex);
6863 +       mutex_unlock(&state->recycle_mutex);
6864 +       if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
6865 +               /* If we're pausing then the slot_mutex is held until resume
6866 +                * by the slot handler.  Therefore don't try to acquire this
6867 +                * mutex if we're the slot handler and in the pause sent state.
6868 +                * We don't need to in this case anyway. */
6869 +               mutex_lock(&state->slot_mutex);
6870 +               mutex_unlock(&state->slot_mutex);
6871 +       }
6872 +
6873 +       /* Unblock any sending thread. */
6874 +       service_quota = &state->service_quotas[service->localport];
6875 +       up(&service_quota->quota_event);
6876 +}
6877 +
6878 +static void
6879 +mark_service_closing(VCHIQ_SERVICE_T *service)
6880 +{
6881 +       mark_service_closing_internal(service, 0);
6882 +}
6883 +
6884 +static inline VCHIQ_STATUS_T
6885 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
6886 +       VCHIQ_HEADER_T *header, void *bulk_userdata)
6887 +{
6888 +       VCHIQ_STATUS_T status;
6889 +       vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
6890 +               service->state->id, service->localport, reason_names[reason],
6891 +               (unsigned int)header, (unsigned int)bulk_userdata);
6892 +       status = service->base.callback(reason, header, service->handle,
6893 +               bulk_userdata);
6894 +       if (status == VCHIQ_ERROR) {
6895 +               vchiq_log_warning(vchiq_core_log_level,
6896 +                       "%d: ignoring ERROR from callback to service %x",
6897 +                       service->state->id, service->handle);
6898 +               status = VCHIQ_SUCCESS;
6899 +       }
6900 +       return status;
6901 +}
6902 +
6903 +inline void
6904 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
6905 +{
6906 +       VCHIQ_CONNSTATE_T oldstate = state->conn_state;
6907 +       vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
6908 +               conn_state_names[oldstate],
6909 +               conn_state_names[newstate]);
6910 +       state->conn_state = newstate;
6911 +       vchiq_platform_conn_state_changed(state, oldstate, newstate);
6912 +}
6913 +
6914 +static inline void
6915 +remote_event_create(REMOTE_EVENT_T *event)
6916 +{
6917 +       event->armed = 0;
6918 +       /* Don't clear the 'fired' flag because it may already have been set
6919 +       ** by the other side. */
6920 +       sema_init(event->event, 0);
6921 +}
6922 +
6923 +static inline void
6924 +remote_event_destroy(REMOTE_EVENT_T *event)
6925 +{
6926 +       (void)event;
6927 +}
6928 +
6929 +static inline int
6930 +remote_event_wait(REMOTE_EVENT_T *event)
6931 +{
6932 +       if (!event->fired) {
6933 +               event->armed = 1;
6934 +               dsb();
6935 +               if (!event->fired) {
6936 +                       if (down_interruptible(event->event) != 0) {
6937 +                               event->armed = 0;
6938 +                               return 0;
6939 +                       }
6940 +               }
6941 +               event->armed = 0;
6942 +               wmb();
6943 +       }
6944 +
6945 +       event->fired = 0;
6946 +       return 1;
6947 +}
6948 +
6949 +static inline void
6950 +remote_event_signal_local(REMOTE_EVENT_T *event)
6951 +{
6952 +       event->armed = 0;
6953 +       up(event->event);
6954 +}
6955 +
6956 +static inline void
6957 +remote_event_poll(REMOTE_EVENT_T *event)
6958 +{
6959 +       if (event->fired && event->armed)
6960 +               remote_event_signal_local(event);
6961 +}
6962 +
6963 +void
6964 +remote_event_pollall(VCHIQ_STATE_T *state)
6965 +{
6966 +       remote_event_poll(&state->local->sync_trigger);
6967 +       remote_event_poll(&state->local->sync_release);
6968 +       remote_event_poll(&state->local->trigger);
6969 +       remote_event_poll(&state->local->recycle);
6970 +}
6971 +
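remote_event_wait() above is the receiving half of a cross-processor doorbell: "fired" records that an event happened even if nobody was waiting, and "armed" tells the sender whether a wake-up is actually needed. The sending half lives in the platform-specific part of the driver; a rough sketch of its expected shape, with the doorbell write itself only indicated by a comment:

/* Sketch of the signalling side that pairs with remote_event_wait().
 * The real remote_event_signal() is implemented elsewhere in the
 * driver; this only illustrates the fired/armed handshake. */
static void remote_event_signal_sketch(REMOTE_EVENT_T *event)
{
        wmb();              /* publish message data before the flag */
        event->fired = 1;   /* deliberately never cleared here - see the
                               comment in remote_event_create() above */
        dsb();
        if (event->armed) {
                /* platform-specific: write the doorbell/mailbox register
                 * to interrupt the other processor */
        }
}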
6972 +/* Round up message sizes so that any space at the end of a slot is always big
6973 +** enough for a header. This relies on header size being a power of two, which
6974 +** has been verified earlier by a static assertion. */
6975 +
6976 +static inline unsigned int
6977 +calc_stride(unsigned int size)
6978 +{
6979 +       /* Allow room for the header */
6980 +       size += sizeof(VCHIQ_HEADER_T);
6981 +
6982 +       /* Round up */
6983 +       return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
6984 +               - 1);
6985 +}
6986 +
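A few worked values make the rounding concrete (using the 8-byte VCHIQ_HEADER_T size asserted near the top of this file):

/*
 * calc_stride(0)  = (0  + 8 + 7) & ~7 =  8   (bare/padding header)
 * calc_stride(1)  = (1  + 8 + 7) & ~7 = 16
 * calc_stride(24) = (24 + 8 + 7) & ~7 = 32
 *
 * Every stride is a multiple of sizeof(VCHIQ_HEADER_T), so the space
 * left at the end of a slot can always hold at least a padding header.
 */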
6987 +/* Called by the slot handler thread */
6988 +static VCHIQ_SERVICE_T *
6989 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
6990 +{
6991 +       int i;
6992 +
6993 +       WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
6994 +
6995 +       for (i = 0; i < state->unused_service; i++) {
6996 +               VCHIQ_SERVICE_T *service = state->services[i];
6997 +               if (service &&
6998 +                       (service->public_fourcc == fourcc) &&
6999 +                       ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
7000 +                       ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
7001 +                       (service->remoteport == VCHIQ_PORT_FREE)))) {
7002 +                       lock_service(service);
7003 +                       return service;
7004 +               }
7005 +       }
7006 +
7007 +       return NULL;
7008 +}
7009 +
7010 +/* Called by the slot handler thread */
7011 +static VCHIQ_SERVICE_T *
7012 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
7013 +{
7014 +       int i;
7015 +       for (i = 0; i < state->unused_service; i++) {
7016 +               VCHIQ_SERVICE_T *service = state->services[i];
7017 +               if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
7018 +                       && (service->remoteport == port)) {
7019 +                       lock_service(service);
7020 +                       return service;
7021 +               }
7022 +       }
7023 +       return NULL;
7024 +}
7025 +
7026 +inline void
7027 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
7028 +{
7029 +       uint32_t value;
7030 +
7031 +       if (service) {
7032 +               do {
7033 +                       value = atomic_read(&service->poll_flags);
7034 +               } while (atomic_cmpxchg(&service->poll_flags, value,
7035 +                       value | (1 << poll_type)) != value);
7036 +
7037 +               do {
7038 +                       value = atomic_read(&state->poll_services[
7039 +                               service->localport>>5]);
7040 +               } while (atomic_cmpxchg(
7041 +                       &state->poll_services[service->localport>>5],
7042 +                       value, value | (1 << (service->localport & 0x1f)))
7043 +                       != value);
7044 +       }
7045 +
7046 +       state->poll_needed = 1;
7047 +       wmb();
7048 +
7049 +       /* ... and ensure the slot handler runs. */
7050 +       remote_event_signal_local(&state->local->trigger);
7051 +}
7052 +
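Both compare-and-exchange loops in request_poll() above are lock-free "OR a bit into an atomic_t" operations; the pattern in isolation looks like this:

/* Lock-free atomic OR of a single bit, equivalent in effect to the two
 * cmpxchg loops in request_poll() (one on service->poll_flags, one on
 * the per-group state->poll_services word). */
static inline void atomic_or_bit(atomic_t *flags, int bit)
{
        uint32_t value;

        do {
                value = atomic_read(flags);
        } while (atomic_cmpxchg(flags, value, value | (1 << bit)) != value);
}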
7053 +/* Called from queue_message, by the slot handler and application threads,
7054 +** with slot_mutex held */
7055 +static VCHIQ_HEADER_T *
7056 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
7057 +{
7058 +       VCHIQ_SHARED_STATE_T *local = state->local;
7059 +       int tx_pos = state->local_tx_pos;
7060 +       int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
7061 +
7062 +       if (space > slot_space) {
7063 +               VCHIQ_HEADER_T *header;
7064 +               /* Fill the remaining space with padding */
7065 +               WARN_ON(state->tx_data == NULL);
7066 +               header = (VCHIQ_HEADER_T *)
7067 +                       (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
7068 +               header->msgid = VCHIQ_MSGID_PADDING;
7069 +               header->size = slot_space - sizeof(VCHIQ_HEADER_T);
7070 +
7071 +               tx_pos += slot_space;
7072 +       }
7073 +
7074 +       /* If necessary, get the next slot. */
7075 +       if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
7076 +               int slot_index;
7077 +
7078 +               /* If there is no free slot... */
7079 +
7080 +               if (down_trylock(&state->slot_available_event) != 0) {
7081 +                       /* ...wait for one. */
7082 +
7083 +                       VCHIQ_STATS_INC(state, slot_stalls);
7084 +
7085 +                       /* But first, flush through the last slot. */
7086 +                       state->local_tx_pos = tx_pos;
7087 +                       local->tx_pos = tx_pos;
7088 +                       remote_event_signal(&state->remote->trigger);
7089 +
7090 +                       if (!is_blocking ||
7091 +                               (down_interruptible(
7092 +                               &state->slot_available_event) != 0))
7093 +                               return NULL; /* No space available */
7094 +               }
7095 +
7096 +               BUG_ON(tx_pos ==
7097 +                       (state->slot_queue_available * VCHIQ_SLOT_SIZE));
7098 +
7099 +               slot_index = local->slot_queue[
7100 +                       SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
7101 +                       VCHIQ_SLOT_QUEUE_MASK];
7102 +               state->tx_data =
7103 +                       (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
7104 +       }
7105 +
7106 +       state->local_tx_pos = tx_pos + space;
7107 +
7108 +       return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
7109 +}
7110 +
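To make the padding path concrete, assume the usual 4096-byte VCHIQ_SLOT_SIZE (defined in vchiq_core.h) and a transmit position 32 bytes short of a slot boundary:

/*
 * Example (assuming VCHIQ_SLOT_SIZE == 4096):
 *   tx_pos = 4064, requested space = 40
 *   slot_space = 4096 - (4064 & 4095) = 32 < 40, so a PADDING header
 *   of size 32 - 8 = 24 is written at offset 4064 and tx_pos advances
 *   to 4096; the slot boundary then causes the next free slot to be
 *   claimed, the 40-byte reservation starts at offset 0 of that slot,
 *   and local_tx_pos becomes 4136.
 */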
7111 +/* Called by the recycle thread. */
7112 +static void
7113 +process_free_queue(VCHIQ_STATE_T *state)
7114 +{
7115 +       VCHIQ_SHARED_STATE_T *local = state->local;
7116 +       BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
7117 +       int slot_queue_available;
7118 +
7119 +       /* Use a read memory barrier to ensure that any state that may have
7120 +       ** been modified by another thread is not masked by stale prefetched
7121 +       ** values. */
7122 +       rmb();
7123 +
7124 +       /* Find slots which have been freed by the other side, and return them
7125 +       ** to the available queue. */
7126 +       slot_queue_available = state->slot_queue_available;
7127 +
7128 +       while (slot_queue_available != local->slot_queue_recycle) {
7129 +               unsigned int pos;
7130 +               int slot_index = local->slot_queue[slot_queue_available++ &
7131 +                       VCHIQ_SLOT_QUEUE_MASK];
7132 +               char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
7133 +               int data_found = 0;
7134 +
7135 +               vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
7136 +                       state->id, slot_index, (unsigned int)data,
7137 +                       local->slot_queue_recycle, slot_queue_available);
7138 +
7139 +               /* Initialise the bitmask for services which have used this
7140 +               ** slot */
7141 +               BITSET_ZERO(service_found);
7142 +
7143 +               pos = 0;
7144 +
7145 +               while (pos < VCHIQ_SLOT_SIZE) {
7146 +                       VCHIQ_HEADER_T *header =
7147 +                               (VCHIQ_HEADER_T *)(data + pos);
7148 +                       int msgid = header->msgid;
7149 +                       if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
7150 +                               int port = VCHIQ_MSG_SRCPORT(msgid);
7151 +                               VCHIQ_SERVICE_QUOTA_T *service_quota =
7152 +                                       &state->service_quotas[port];
7153 +                               int count;
7154 +                               spin_lock(&quota_spinlock);
7155 +                               count = service_quota->message_use_count;
7156 +                               if (count > 0)
7157 +                                       service_quota->message_use_count =
7158 +                                               count - 1;
7159 +                               spin_unlock(&quota_spinlock);
7160 +
7161 +                               if (count == service_quota->message_quota)
7162 +                                       /* Signal the service that it
7163 +                                       ** has dropped below its quota
7164 +                                       */
7165 +                                       up(&service_quota->quota_event);
7166 +                               else if (count == 0) {
7167 +                                       vchiq_log_error(vchiq_core_log_level,
7168 +                                               "service %d "
7169 +                                               "message_use_count=%d "
7170 +                                               "(header %x, msgid %x, "
7171 +                                               "header->msgid %x, "
7172 +                                               "header->size %x)",
7173 +                                               port,
7174 +                                               service_quota->
7175 +                                                       message_use_count,
7176 +                                               (unsigned int)header, msgid,
7177 +                                               header->msgid,
7178 +                                               header->size);
7179 +                                       WARN(1, "invalid message use count\n");
7180 +                               }
7181 +                               if (!BITSET_IS_SET(service_found, port)) {
7182 +                                       /* Set the found bit for this service */
7183 +                                       BITSET_SET(service_found, port);
7184 +
7185 +                                       spin_lock(&quota_spinlock);
7186 +                                       count = service_quota->slot_use_count;
7187 +                                       if (count > 0)
7188 +                                               service_quota->slot_use_count =
7189 +                                                       count - 1;
7190 +                                       spin_unlock(&quota_spinlock);
7191 +
7192 +                                       if (count > 0) {
7193 +                                               /* Signal the service in case
7194 +                                               ** it has dropped below its
7195 +                                               ** quota */
7196 +                                               up(&service_quota->quota_event);
7197 +                                               vchiq_log_trace(
7198 +                                                       vchiq_core_log_level,
7199 +                                                       "%d: pfq:%d %x@%x - "
7200 +                                                       "slot_use->%d",
7201 +                                                       state->id, port,
7202 +                                                       header->size,
7203 +                                                       (unsigned int)header,
7204 +                                                       count - 1);
7205 +                                       } else {
7206 +                                               vchiq_log_error(
7207 +                                                       vchiq_core_log_level,
7208 +                                                               "service %d "
7209 +                                                               "slot_use_count"
7210 +                                                               "=%d (header %x"
7211 +                                                               ", msgid %x, "
7212 +                                                               "header->msgid"
7213 +                                                               " %x, header->"
7214 +                                                               "size %x)",
7215 +                                                       port, count,
7216 +                                                       (unsigned int)header,
7217 +                                                       msgid,
7218 +                                                       header->msgid,
7219 +                                                       header->size);
7220 +                                               WARN(1, "bad slot use count\n");
7221 +                                       }
7222 +                               }
7223 +
7224 +                               data_found = 1;
7225 +                       }
7226 +
7227 +                       pos += calc_stride(header->size);
7228 +                       if (pos > VCHIQ_SLOT_SIZE) {
7229 +                               vchiq_log_error(vchiq_core_log_level,
7230 +                                       "pfq - pos %x: header %x, msgid %x, "
7231 +                                       "header->msgid %x, header->size %x",
7232 +                                       pos, (unsigned int)header, msgid,
7233 +                                       header->msgid, header->size);
7234 +                               WARN(1, "invalid slot position\n");
7235 +                       }
7236 +               }
7237 +
7238 +               if (data_found) {
7239 +                       int count;
7240 +                       spin_lock(&quota_spinlock);
7241 +                       count = state->data_use_count;
7242 +                       if (count > 0)
7243 +                               state->data_use_count =
7244 +                                       count - 1;
7245 +                       spin_unlock(&quota_spinlock);
7246 +                       if (count == state->data_quota)
7247 +                               up(&state->data_quota_event);
7248 +               }
7249 +
7250 +               state->slot_queue_available = slot_queue_available;
7251 +               up(&state->slot_available_event);
7252 +       }
7253 +}
7254 +
7255 +/* Called by the slot handler and application threads */
7256 +static VCHIQ_STATUS_T
7257 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
7258 +       int msgid, const VCHIQ_ELEMENT_T *elements,
7259 +       int count, int size, int is_blocking)
7260 +{
7261 +       VCHIQ_SHARED_STATE_T *local;
7262 +       VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
7263 +       VCHIQ_HEADER_T *header;
7264 +       int type = VCHIQ_MSG_TYPE(msgid);
7265 +
7266 +       unsigned int stride;
7267 +
7268 +       local = state->local;
7269 +
7270 +       stride = calc_stride(size);
7271 +
7272 +       WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
7273 +
7274 +       if ((type != VCHIQ_MSG_RESUME) &&
7275 +               (mutex_lock_interruptible(&state->slot_mutex) != 0))
7276 +               return VCHIQ_RETRY;
7277 +
7278 +       if (type == VCHIQ_MSG_DATA) {
7279 +               int tx_end_index;
7280 +
7281 +               BUG_ON(!service);
7282 +
7283 +               if (service->closing) {
7284 +                       /* The service has been closed */
7285 +                       mutex_unlock(&state->slot_mutex);
7286 +                       return VCHIQ_ERROR;
7287 +               }
7288 +
7289 +               service_quota = &state->service_quotas[service->localport];
7290 +
7291 +               spin_lock(&quota_spinlock);
7292 +
7293 +               /* Ensure this service doesn't use more than its quota of
7294 +               ** messages or slots */
7295 +               tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7296 +                       state->local_tx_pos + stride - 1);
7297 +
7298 +               /* Ensure data messages don't use more than their quota of
7299 +               ** slots */
7300 +               while ((tx_end_index != state->previous_data_index) &&
7301 +                       (state->data_use_count == state->data_quota)) {
7302 +                       VCHIQ_STATS_INC(state, data_stalls);
7303 +                       spin_unlock(&quota_spinlock);
7304 +                       mutex_unlock(&state->slot_mutex);
7305 +
7306 +                       if (down_interruptible(&state->data_quota_event)
7307 +                               != 0)
7308 +                               return VCHIQ_RETRY;
7309 +
7310 +                       mutex_lock(&state->slot_mutex);
7311 +                       spin_lock(&quota_spinlock);
7312 +                       tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7313 +                               state->local_tx_pos + stride - 1);
7314 +                       if ((tx_end_index == state->previous_data_index) ||
7315 +                               (state->data_use_count < state->data_quota)) {
7316 +                               /* Pass the signal on to other waiters */
7317 +                               up(&state->data_quota_event);
7318 +                               break;
7319 +                       }
7320 +               }
7321 +
7322 +               while ((service_quota->message_use_count ==
7323 +                               service_quota->message_quota) ||
7324 +                       ((tx_end_index != service_quota->previous_tx_index) &&
7325 +                       (service_quota->slot_use_count ==
7326 +                               service_quota->slot_quota))) {
7327 +                       spin_unlock(&quota_spinlock);
7328 +                       vchiq_log_trace(vchiq_core_log_level,
7329 +                               "%d: qm:%d %s,%x - quota stall "
7330 +                               "(msg %d, slot %d)",
7331 +                               state->id, service->localport,
7332 +                               msg_type_str(type), size,
7333 +                               service_quota->message_use_count,
7334 +                               service_quota->slot_use_count);
7335 +                       VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
7336 +                       mutex_unlock(&state->slot_mutex);
7337 +                       if (down_interruptible(&service_quota->quota_event)
7338 +                               != 0)
7339 +                               return VCHIQ_RETRY;
7340 +                       if (service->closing)
7341 +                               return VCHIQ_ERROR;
7342 +                       if (mutex_lock_interruptible(&state->slot_mutex) != 0)
7343 +                               return VCHIQ_RETRY;
7344 +                       if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
7345 +                               /* The service has been closed */
7346 +                               mutex_unlock(&state->slot_mutex);
7347 +                               return VCHIQ_ERROR;
7348 +                       }
7349 +                       spin_lock(&quota_spinlock);
7350 +                       tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
7351 +                               state->local_tx_pos + stride - 1);
7352 +               }
7353 +
7354 +               spin_unlock(&quota_spinlock);
7355 +       }
7356 +
7357 +       header = reserve_space(state, stride, is_blocking);
7358 +
7359 +       if (!header) {
7360 +               if (service)
7361 +                       VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
7362 +               mutex_unlock(&state->slot_mutex);
7363 +               return VCHIQ_RETRY;
7364 +       }
7365 +
7366 +       if (type == VCHIQ_MSG_DATA) {
7367 +               int i, pos;
7368 +               int tx_end_index;
7369 +               int slot_use_count;
7370 +
7371 +               vchiq_log_info(vchiq_core_log_level,
7372 +                       "%d: qm %s@%x,%x (%d->%d)",
7373 +                       state->id,
7374 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7375 +                       (unsigned int)header, size,
7376 +                       VCHIQ_MSG_SRCPORT(msgid),
7377 +                       VCHIQ_MSG_DSTPORT(msgid));
7378 +
7379 +               BUG_ON(!service);
7380 +
7381 +               for (i = 0, pos = 0; i < (unsigned int)count;
7382 +                       pos += elements[i++].size)
7383 +                       if (elements[i].size) {
7384 +                               if (vchiq_copy_from_user
7385 +                                       (header->data + pos, elements[i].data,
7386 +                                       (size_t) elements[i].size) !=
7387 +                                       VCHIQ_SUCCESS) {
7388 +                                       mutex_unlock(&state->slot_mutex);
7389 +                                       VCHIQ_SERVICE_STATS_INC(service,
7390 +                                               error_count);
7391 +                                       return VCHIQ_ERROR;
7392 +                               }
7393 +                               if (i == 0) {
7394 +                                       if (vchiq_core_msg_log_level >=
7395 +                                               VCHIQ_LOG_INFO)
7396 +                                               vchiq_log_dump_mem("Sent", 0,
7397 +                                                       header->data + pos,
7398 +                                                       min(64u,
7399 +                                                       elements[0].size));
7400 +                               }
7401 +                       }
7402 +
7403 +               spin_lock(&quota_spinlock);
7404 +               service_quota->message_use_count++;
7405 +
7406 +               tx_end_index =
7407 +                       SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
7408 +
7409 +               /* If this transmission can't fit in the last slot used by any
7410 +               ** service, the data_use_count must be increased. */
7411 +               if (tx_end_index != state->previous_data_index) {
7412 +                       state->previous_data_index = tx_end_index;
7413 +                       state->data_use_count++;
7414 +               }
7415 +
7416 +               /* If this isn't the same slot last used by this service,
7417 +               ** the service's slot_use_count must be increased. */
7418 +               if (tx_end_index != service_quota->previous_tx_index) {
7419 +                       service_quota->previous_tx_index = tx_end_index;
7420 +                       slot_use_count = ++service_quota->slot_use_count;
7421 +               } else {
7422 +                       slot_use_count = 0;
7423 +               }
7424 +
7425 +               spin_unlock(&quota_spinlock);
7426 +
7427 +               if (slot_use_count)
7428 +                       vchiq_log_trace(vchiq_core_log_level,
7429 +                               "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
7430 +                               state->id, service->localport,
7431 +                               msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
7432 +                               slot_use_count, header);
7433 +
7434 +               VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
7435 +               VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
7436 +       } else {
7437 +               vchiq_log_info(vchiq_core_log_level,
7438 +                       "%d: qm %s@%x,%x (%d->%d)", state->id,
7439 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7440 +                       (unsigned int)header, size,
7441 +                       VCHIQ_MSG_SRCPORT(msgid),
7442 +                       VCHIQ_MSG_DSTPORT(msgid));
7443 +               if (size != 0) {
7444 +                       WARN_ON(!((count == 1) && (size == elements[0].size)));
7445 +                       memcpy(header->data, elements[0].data,
7446 +                               elements[0].size);
7447 +               }
7448 +               VCHIQ_STATS_INC(state, ctrl_tx_count);
7449 +       }
7450 +
7451 +       header->msgid = msgid;
7452 +       header->size = size;
7453 +
7454 +       {
7455 +               int svc_fourcc;
7456 +
7457 +               svc_fourcc = service
7458 +                       ? service->base.fourcc
7459 +                       : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7460 +
7461 +               vchiq_log_info(vchiq_core_msg_log_level,
7462 +                       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
7463 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7464 +                       VCHIQ_MSG_TYPE(msgid),
7465 +                       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7466 +                       VCHIQ_MSG_SRCPORT(msgid),
7467 +                       VCHIQ_MSG_DSTPORT(msgid),
7468 +                       size);
7469 +       }
7470 +
7471 +       /* Make sure the new header is visible to the peer. */
7472 +       wmb();
7473 +
7474 +       /* Make the new tx_pos visible to the peer. */
7475 +       local->tx_pos = state->local_tx_pos;
7476 +       wmb();
7477 +
7478 +       if (service && (type == VCHIQ_MSG_CLOSE))
7479 +               vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
7480 +
7481 +       if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
7482 +               mutex_unlock(&state->slot_mutex);
7483 +
7484 +       remote_event_signal(&state->remote->trigger);
7485 +
7486 +       return VCHIQ_SUCCESS;
7487 +}
7488 +
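For reference, a minimal (hypothetical) caller that queues a single-element data message on an open service; VCHIQ_ELEMENT_T and VCHIQ_MAKE_MSG are the same helpers used elsewhere in this file, and the payload is purely illustrative:

/* Hypothetical example of queueing one data message. A VCHIQ_RETRY
 * return means the caller was interrupted, or (when not blocking) no
 * slot space was available, and it should try again later. */
static VCHIQ_STATUS_T send_ping(VCHIQ_SERVICE_T *service)
{
        static const char payload[] = "ping";
        VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };

        return queue_message(service->state, service,
                VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
                        service->localport, service->remoteport),
                &element, 1, sizeof(payload), 1/*is_blocking*/);
}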
7489 +/* Called by the slot handler and application threads */
7490 +static VCHIQ_STATUS_T
7491 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
7492 +       int msgid, const VCHIQ_ELEMENT_T *elements,
7493 +       int count, int size, int is_blocking)
7494 +{
7495 +       VCHIQ_SHARED_STATE_T *local;
7496 +       VCHIQ_HEADER_T *header;
7497 +
7498 +       local = state->local;
7499 +
7500 +       if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
7501 +               (mutex_lock_interruptible(&state->sync_mutex) != 0))
7502 +               return VCHIQ_RETRY;
7503 +
7504 +       remote_event_wait(&local->sync_release);
7505 +
7506 +       rmb();
7507 +
7508 +       header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7509 +               local->slot_sync);
7510 +
7511 +       {
7512 +               int oldmsgid = header->msgid;
7513 +               if (oldmsgid != VCHIQ_MSGID_PADDING)
7514 +                       vchiq_log_error(vchiq_core_log_level,
7515 +                               "%d: qms - msgid %x, not PADDING",
7516 +                               state->id, oldmsgid);
7517 +       }
7518 +
7519 +       if (service) {
7520 +               int i, pos;
7521 +
7522 +               vchiq_log_info(vchiq_sync_log_level,
7523 +                       "%d: qms %s@%x,%x (%d->%d)", state->id,
7524 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7525 +                       (unsigned int)header, size,
7526 +                       VCHIQ_MSG_SRCPORT(msgid),
7527 +                       VCHIQ_MSG_DSTPORT(msgid));
7528 +
7529 +               for (i = 0, pos = 0; i < (unsigned int)count;
7530 +                       pos += elements[i++].size)
7531 +                       if (elements[i].size) {
7532 +                               if (vchiq_copy_from_user
7533 +                                       (header->data + pos, elements[i].data,
7534 +                                       (size_t) elements[i].size) !=
7535 +                                       VCHIQ_SUCCESS) {
7536 +                                       mutex_unlock(&state->sync_mutex);
7537 +                                       VCHIQ_SERVICE_STATS_INC(service,
7538 +                                               error_count);
7539 +                                       return VCHIQ_ERROR;
7540 +                               }
7541 +                               if (i == 0) {
7542 +                                       if (vchiq_sync_log_level >=
7543 +                                               VCHIQ_LOG_TRACE)
7544 +                                               vchiq_log_dump_mem("Sent Sync",
7545 +                                                       0, header->data + pos,
7546 +                                                       min(64u,
7547 +                                                       elements[0].size));
7548 +                               }
7549 +                       }
7550 +
7551 +               VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
7552 +               VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
7553 +       } else {
7554 +               vchiq_log_info(vchiq_sync_log_level,
7555 +                       "%d: qms %s@%x,%x (%d->%d)", state->id,
7556 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7557 +                       (unsigned int)header, size,
7558 +                       VCHIQ_MSG_SRCPORT(msgid),
7559 +                       VCHIQ_MSG_DSTPORT(msgid));
7560 +               if (size != 0) {
7561 +                       WARN_ON(!((count == 1) && (size == elements[0].size)));
7562 +                       memcpy(header->data, elements[0].data,
7563 +                               elements[0].size);
7564 +               }
7565 +               VCHIQ_STATS_INC(state, ctrl_tx_count);
7566 +       }
7567 +
7568 +       header->size = size;
7569 +       header->msgid = msgid;
7570 +
7571 +       if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7572 +               int svc_fourcc;
7573 +
7574 +               svc_fourcc = service
7575 +                       ? service->base.fourcc
7576 +                       : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7577 +
7578 +               vchiq_log_trace(vchiq_sync_log_level,
7579 +                       "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
7580 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
7581 +                       VCHIQ_MSG_TYPE(msgid),
7582 +                       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7583 +                       VCHIQ_MSG_SRCPORT(msgid),
7584 +                       VCHIQ_MSG_DSTPORT(msgid),
7585 +                       size);
7586 +       }
7587 +
7588 +       /* Make sure the new header is visible to the peer. */
7589 +       wmb();
7590 +
7591 +       remote_event_signal(&state->remote->sync_trigger);
7592 +
7593 +       if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
7594 +               mutex_unlock(&state->sync_mutex);
7595 +
7596 +       return VCHIQ_SUCCESS;
7597 +}
7598 +
7599 +static inline void
7600 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
7601 +{
7602 +       slot->use_count++;
7603 +}
7604 +
7605 +static void
7606 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
7607 +       VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
7608 +{
7609 +       int release_count;
7610 +
7611 +       mutex_lock(&state->recycle_mutex);
7612 +
7613 +       if (header) {
7614 +               int msgid = header->msgid;
7615 +               if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
7616 +                       (service && service->closing)) {
7617 +                       mutex_unlock(&state->recycle_mutex);
7618 +                       return;
7619 +               }
7620 +
7621 +               /* Rewrite the message header to prevent a double
7622 +               ** release */
7623 +               header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
7624 +       }
7625 +
7626 +       release_count = slot_info->release_count;
7627 +       slot_info->release_count = ++release_count;
7628 +
7629 +       if (release_count == slot_info->use_count) {
7630 +               int slot_queue_recycle;
7631 +               /* Add to the freed queue */
7632 +
7633 +               /* A read barrier is necessary here to prevent speculative
7634 +               ** fetches of remote->slot_queue_recycle from overtaking the
7635 +               ** mutex. */
7636 +               rmb();
7637 +
7638 +               slot_queue_recycle = state->remote->slot_queue_recycle;
7639 +               state->remote->slot_queue[slot_queue_recycle &
7640 +                       VCHIQ_SLOT_QUEUE_MASK] =
7641 +                       SLOT_INDEX_FROM_INFO(state, slot_info);
7642 +               state->remote->slot_queue_recycle = slot_queue_recycle + 1;
7643 +               vchiq_log_info(vchiq_core_log_level,
7644 +                       "%d: release_slot %d - recycle->%x",
7645 +                       state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
7646 +                       state->remote->slot_queue_recycle);
7647 +
7648 +               /* A write barrier is necessary, but remote_event_signal
7649 +               ** contains one. */
7650 +               remote_event_signal(&state->remote->recycle);
7651 +       }
7652 +
7653 +       mutex_unlock(&state->recycle_mutex);
7654 +}
7655 +
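In other words, a slot is returned to the remote free queue only when every claim on it has been released:

/*
 * Example: for a slot claimed three times (use_count == 3), the first
 * two release_slot() calls only advance release_count (to 1, then 2);
 * the third call makes release_count == use_count, so the slot index
 * is appended to remote->slot_queue and the peer's recycle thread is
 * signalled via remote_event_signal(&state->remote->recycle).
 */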
7656 +/* Called by the slot handler - don't hold the bulk mutex */
7657 +static VCHIQ_STATUS_T
7658 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
7659 +       int retry_poll)
7660 +{
7661 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
7662 +
7663 +       vchiq_log_trace(vchiq_core_log_level,
7664 +               "%d: nb:%d %cx - p=%x rn=%x r=%x",
7665 +               service->state->id, service->localport,
7666 +               (queue == &service->bulk_tx) ? 't' : 'r',
7667 +               queue->process, queue->remote_notify, queue->remove);
7668 +
7669 +       if (service->state->is_master) {
7670 +               while (queue->remote_notify != queue->process) {
7671 +                       VCHIQ_BULK_T *bulk =
7672 +                               &queue->bulks[BULK_INDEX(queue->remote_notify)];
7673 +                       int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
7674 +                               VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
7675 +                       int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
7676 +                               service->remoteport);
7677 +                       VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
7678 +                       /* Only reply to non-dummy bulk requests */
7679 +                       if (bulk->remote_data) {
7680 +                               status = queue_message(service->state, NULL,
7681 +                                       msgid, &element, 1, 4, 0);
7682 +                               if (status != VCHIQ_SUCCESS)
7683 +                                       break;
7684 +                       }
7685 +                       queue->remote_notify++;
7686 +               }
7687 +       } else {
7688 +               queue->remote_notify = queue->process;
7689 +       }
7690 +
7691 +       if (status == VCHIQ_SUCCESS) {
7692 +               while (queue->remove != queue->remote_notify) {
7693 +                       VCHIQ_BULK_T *bulk =
7694 +                               &queue->bulks[BULK_INDEX(queue->remove)];
7695 +
7696 +                       /* Only generate callbacks for non-dummy bulk
7697 +                       ** requests, and non-terminated services */
7698 +                       if (bulk->data && service->instance) {
7699 +                               if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
7700 +                                       if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
7701 +                                               VCHIQ_SERVICE_STATS_INC(service,
7702 +                                                       bulk_tx_count);
7703 +                                               VCHIQ_SERVICE_STATS_ADD(service,
7704 +                                                       bulk_tx_bytes,
7705 +                                                       bulk->actual);
7706 +                                       } else {
7707 +                                               VCHIQ_SERVICE_STATS_INC(service,
7708 +                                                       bulk_rx_count);
7709 +                                               VCHIQ_SERVICE_STATS_ADD(service,
7710 +                                                       bulk_rx_bytes,
7711 +                                                       bulk->actual);
7712 +                                       }
7713 +                               } else {
7714 +                                       VCHIQ_SERVICE_STATS_INC(service,
7715 +                                               bulk_aborted_count);
7716 +                               }
7717 +                               if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
7718 +                                       struct bulk_waiter *waiter;
7719 +                                       spin_lock(&bulk_waiter_spinlock);
7720 +                                       waiter = bulk->userdata;
7721 +                                       if (waiter) {
7722 +                                               waiter->actual = bulk->actual;
7723 +                                               up(&waiter->event);
7724 +                                       }
7725 +                                       spin_unlock(&bulk_waiter_spinlock);
7726 +                               } else if (bulk->mode ==
7727 +                                       VCHIQ_BULK_MODE_CALLBACK) {
7728 +                                       VCHIQ_REASON_T reason = (bulk->dir ==
7729 +                                               VCHIQ_BULK_TRANSMIT) ?
7730 +                                               ((bulk->actual ==
7731 +                                               VCHIQ_BULK_ACTUAL_ABORTED) ?
7732 +                                               VCHIQ_BULK_TRANSMIT_ABORTED :
7733 +                                               VCHIQ_BULK_TRANSMIT_DONE) :
7734 +                                               ((bulk->actual ==
7735 +                                               VCHIQ_BULK_ACTUAL_ABORTED) ?
7736 +                                               VCHIQ_BULK_RECEIVE_ABORTED :
7737 +                                               VCHIQ_BULK_RECEIVE_DONE);
7738 +                                       status = make_service_callback(service,
7739 +                                               reason, NULL, bulk->userdata);
7740 +                                       if (status == VCHIQ_RETRY)
7741 +                                               break;
7742 +                               }
7743 +                       }
7744 +
7745 +                       queue->remove++;
7746 +                       up(&service->bulk_remove_event);
7747 +               }
7748 +               if (!retry_poll)
7749 +                       status = VCHIQ_SUCCESS;
7750 +       }
7751 +
7752 +       if (status == VCHIQ_RETRY)
7753 +               request_poll(service->state, service,
7754 +                       (queue == &service->bulk_tx) ?
7755 +                       VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
7756 +
7757 +       return status;
7758 +}
7759 +
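+/* poll_services() drains the per-state poll bitmask that request_poll() is
+** used to populate.  Each 32-bit group is claimed atomically with
+** atomic_xchg(), every set bit is mapped back to a local port, and the
+** matching service's poll_flags are acted on (remove, terminate, or bulk
+** tx/rx notify).  Work that cannot complete yet is re-queued via
+** request_poll() so it is not lost. */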
7760 +/* Called by the slot handler thread */
7761 +static void
7762 +poll_services(VCHIQ_STATE_T *state)
7763 +{
7764 +       int group, i;
7765 +
7766 +       for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
7767 +               uint32_t flags;
7768 +               flags = atomic_xchg(&state->poll_services[group], 0);
7769 +               for (i = 0; flags; i++) {
7770 +                       if (flags & (1 << i)) {
7771 +                               VCHIQ_SERVICE_T *service =
7772 +                                       find_service_by_port(state,
7773 +                                               (group<<5) + i);
7774 +                               uint32_t service_flags;
7775 +                               flags &= ~(1 << i);
7776 +                               if (!service)
7777 +                                       continue;
7778 +                               service_flags =
7779 +                                       atomic_xchg(&service->poll_flags, 0);
7780 +                               if (service_flags &
7781 +                                       (1 << VCHIQ_POLL_REMOVE)) {
7782 +                                       vchiq_log_info(vchiq_core_log_level,
7783 +                                               "%d: ps - remove %d<->%d",
7784 +                                               state->id, service->localport,
7785 +                                               service->remoteport);
7786 +
7787 +                                       /* Make it look like a client, because
7788 +                                          it must be removed and not left in
7789 +                                          the LISTENING state. */
7790 +                                       service->public_fourcc =
7791 +                                               VCHIQ_FOURCC_INVALID;
7792 +
7793 +                                       if (vchiq_close_service_internal(
7794 +                                               service, 0/*!close_recvd*/) !=
7795 +                                               VCHIQ_SUCCESS)
7796 +                                               request_poll(state, service,
7797 +                                                       VCHIQ_POLL_REMOVE);
7798 +                               } else if (service_flags &
7799 +                                       (1 << VCHIQ_POLL_TERMINATE)) {
7800 +                                       vchiq_log_info(vchiq_core_log_level,
7801 +                                               "%d: ps - terminate %d<->%d",
7802 +                                               state->id, service->localport,
7803 +                                               service->remoteport);
7804 +                                       if (vchiq_close_service_internal(
7805 +                                               service, 0/*!close_recvd*/) !=
7806 +                                               VCHIQ_SUCCESS)
7807 +                                               request_poll(state, service,
7808 +                                                       VCHIQ_POLL_TERMINATE);
7809 +                               }
7810 +                               if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
7811 +                                       notify_bulks(service,
7812 +                                               &service->bulk_tx,
7813 +                                               1/*retry_poll*/);
7814 +                               if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
7815 +                                       notify_bulks(service,
7816 +                                               &service->bulk_rx,
7817 +                                               1/*retry_poll*/);
7818 +                               unlock_service(service);
7819 +                       }
7820 +               }
7821 +       }
7822 +}
7823 +
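+/* resolve_bulks() pairs up bulk transfers that have been queued by both
+** ends.  An entry is only processed once both local_insert and
+** remote_insert are ahead of queue->process; it is then handed to
+** vchiq_transfer_bulk() under the global bulk_transfer_mutex and completed.
+** The return value is the number of entries resolved, which callers use to
+** decide whether notify_bulks() needs to run.  BULK_INDEX() wraps the
+** free-running indices onto the fixed-size bulks[] ring (presumably a mask
+** with VCHIQ_NUM_SERVICE_BULKS - 1; it is defined elsewhere in this patch). */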
7824 +/* Called by the slot handler or application threads, holding the bulk mutex. */
7825 +static int
7826 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7827 +{
7828 +       VCHIQ_STATE_T *state = service->state;
7829 +       int resolved = 0;
7830 +       int rc;
7831 +
7832 +       while ((queue->process != queue->local_insert) &&
7833 +               (queue->process != queue->remote_insert)) {
7834 +               VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7835 +
7836 +               vchiq_log_trace(vchiq_core_log_level,
7837 +                       "%d: rb:%d %cx - li=%x ri=%x p=%x",
7838 +                       state->id, service->localport,
7839 +                       (queue == &service->bulk_tx) ? 't' : 'r',
7840 +                       queue->local_insert, queue->remote_insert,
7841 +                       queue->process);
7842 +
7843 +               WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
7844 +               WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
7845 +
7846 +               rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
7847 +               if (rc != 0)
7848 +                       break;
7849 +
7850 +               vchiq_transfer_bulk(bulk);
7851 +               mutex_unlock(&state->bulk_transfer_mutex);
7852 +
7853 +               if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7854 +                       const char *header = (queue == &service->bulk_tx) ?
7855 +                               "Send Bulk to" : "Recv Bulk from";
7856 +                       if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
7857 +                               vchiq_log_info(vchiq_core_msg_log_level,
7858 +                                       "%s %c%c%c%c d:%d len:%d %x<->%x",
7859 +                                       header,
7860 +                                       VCHIQ_FOURCC_AS_4CHARS(
7861 +                                               service->base.fourcc),
7862 +                                       service->remoteport,
7863 +                                       bulk->size,
7864 +                                       (unsigned int)bulk->data,
7865 +                                       (unsigned int)bulk->remote_data);
7866 +                       else
7867 +                               vchiq_log_info(vchiq_core_msg_log_level,
7868 +                                       "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
7869 +                                       " rx len:%d %x<->%x",
7870 +                                       header,
7871 +                                       VCHIQ_FOURCC_AS_4CHARS(
7872 +                                               service->base.fourcc),
7873 +                                       service->remoteport,
7874 +                                       bulk->size,
7875 +                                       bulk->remote_size,
7876 +                                       (unsigned int)bulk->data,
7877 +                                       (unsigned int)bulk->remote_data);
7878 +               }
7879 +
7880 +               vchiq_complete_bulk(bulk);
7881 +               queue->process++;
7882 +               resolved++;
7883 +       }
7884 +       return resolved;
7885 +}
7886 +
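+/* abort_outstanding_bulks() is used when a service closes with bulk
+** transfers still outstanding.  Entries queued locally but never matched by
+** the remote end are given a dummy remote half (NULL data, zero size) and
+** completed; entries announced by the remote end but never queued locally
+** are given a dummy local half marked VCHIQ_BULK_ACTUAL_ABORTED.  Either
+** way the local_insert, remote_insert and process indices are brought back
+** into step. */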
7887 +/* Called with the bulk_mutex held */
7888 +static void
7889 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
7890 +{
7891 +       int is_tx = (queue == &service->bulk_tx);
7892 +       vchiq_log_trace(vchiq_core_log_level,
7893 +               "%d: aob:%d %cx - li=%x ri=%x p=%x",
7894 +               service->state->id, service->localport, is_tx ? 't' : 'r',
7895 +               queue->local_insert, queue->remote_insert, queue->process);
7896 +
7897 +       WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
7898 +       WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
7899 +
7900 +       while ((queue->process != queue->local_insert) ||
7901 +               (queue->process != queue->remote_insert)) {
7902 +               VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
7903 +
7904 +               if (queue->process == queue->remote_insert) {
7905 +                       /* fabricate a matching dummy bulk */
7906 +                       bulk->remote_data = NULL;
7907 +                       bulk->remote_size = 0;
7908 +                       queue->remote_insert++;
7909 +               }
7910 +
7911 +               if (queue->process != queue->local_insert) {
7912 +                       vchiq_complete_bulk(bulk);
7913 +
7914 +                       vchiq_log_info(vchiq_core_msg_log_level,
7915 +                               "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
7916 +                               "rx len:%d",
7917 +                               is_tx ? "Send Bulk to" : "Recv Bulk from",
7918 +                               VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7919 +                               service->remoteport,
7920 +                               bulk->size,
7921 +                               bulk->remote_size);
7922 +               } else {
7923 +                       /* fabricate a matching dummy bulk */
7924 +                       bulk->data = NULL;
7925 +                       bulk->size = 0;
7926 +                       bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
7927 +                       bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
7928 +                               VCHIQ_BULK_RECEIVE;
7929 +                       queue->local_insert++;
7930 +               }
7931 +
7932 +               queue->process++;
7933 +       }
7934 +}
7935 +
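+/* pause_bulks()/resume_bulks() bracket a PAUSE of the connection on the
+** master side.  Pausing simply holds bulk_transfer_mutex so no new bulk
+** transfer can start, with pause_bulks_count guarding against unbalanced
+** calls.  On resume, any bulks deferred while paused (state->deferred_bulks)
+** are picked up by re-running resolve_bulks()/notify_bulks() over every
+** open service rather than by tracking the individual transfers. */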
7936 +/* Called from the slot handler thread */
7937 +static void
7938 +pause_bulks(VCHIQ_STATE_T *state)
7939 +{
7940 +       if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
7941 +               WARN_ON_ONCE(1);
7942 +               atomic_set(&pause_bulks_count, 1);
7943 +               return;
7944 +       }
7945 +
7946 +       /* Block bulk transfers from all services */
7947 +       mutex_lock(&state->bulk_transfer_mutex);
7948 +}
7949 +
7950 +/* Called from the slot handler thread */
7951 +static void
7952 +resume_bulks(VCHIQ_STATE_T *state)
7953 +{
7954 +       int i;
7955 +       if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
7956 +               WARN_ON_ONCE(1);
7957 +               atomic_set(&pause_bulks_count, 0);
7958 +               return;
7959 +       }
7960 +
7961 +       /* Allow bulk transfers from all services */
7962 +       mutex_unlock(&state->bulk_transfer_mutex);
7963 +
7964 +       if (state->deferred_bulks == 0)
7965 +               return;
7966 +
7967 +       /* Deal with any bulks which had to be deferred due to being in
7968 +        * the paused state.  Don't try to match up to the number of
7969 +        * deferred bulks, in case a service has been closed in the
7970 +        * interim - just process all bulk queues for all services */
7971 +       vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
7972 +               __func__, state->deferred_bulks);
7973 +
7974 +       for (i = 0; i < state->unused_service; i++) {
7975 +               VCHIQ_SERVICE_T *service = state->services[i];
7976 +               int resolved_rx = 0;
7977 +               int resolved_tx = 0;
7978 +               if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
7979 +                       continue;
7980 +
7981 +               mutex_lock(&service->bulk_mutex);
7982 +               resolved_rx = resolve_bulks(service, &service->bulk_rx);
7983 +               resolved_tx = resolve_bulks(service, &service->bulk_tx);
7984 +               mutex_unlock(&service->bulk_mutex);
7985 +               if (resolved_rx)
7986 +                       notify_bulks(service, &service->bulk_rx, 1);
7987 +               if (resolved_tx)
7988 +                       notify_bulks(service, &service->bulk_tx, 1);
7989 +       }
7990 +       state->deferred_bulks = 0;
7991 +}
7992 +
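+/* parse_open() handles an incoming OPEN request.  It looks up a listening
+** service for the requested fourcc, checks that the version ranges of the
+** two ends overlap, acknowledges with an OPENACK (on the sync or normal
+** message queue as appropriate) and moves the service to OPEN or OPENSYNC.
+** If there is no matching service, the versions are incompatible, or the
+** request is malformed, a CLOSE is sent back instead.  A return value of 0
+** means the message could not be processed yet and the caller should bail
+** out and retry later. */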
7993 +static int
7994 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
7995 +{
7996 +       VCHIQ_SERVICE_T *service = NULL;
7997 +       int msgid, size;
7998 +       int type;
7999 +       unsigned int localport, remoteport;
8000 +
8001 +       msgid = header->msgid;
8002 +       size = header->size;
8003 +       type = VCHIQ_MSG_TYPE(msgid);
8004 +       localport = VCHIQ_MSG_DSTPORT(msgid);
8005 +       remoteport = VCHIQ_MSG_SRCPORT(msgid);
8006 +       if (size >= sizeof(struct vchiq_open_payload)) {
8007 +               const struct vchiq_open_payload *payload =
8008 +                       (struct vchiq_open_payload *)header->data;
8009 +               unsigned int fourcc;
8010 +
8011 +               fourcc = payload->fourcc;
8012 +               vchiq_log_info(vchiq_core_log_level,
8013 +                       "%d: prs OPEN@%x (%d->'%c%c%c%c')",
8014 +                       state->id, (unsigned int)header,
8015 +                       localport,
8016 +                       VCHIQ_FOURCC_AS_4CHARS(fourcc));
8017 +
8018 +               service = get_listening_service(state, fourcc);
8019 +
8020 +               if (service) {
8021 +                       /* A matching service exists */
8022 +                       short version = payload->version;
8023 +                       short version_min = payload->version_min;
8024 +                       if ((service->version < version_min) ||
8025 +                               (version < service->version_min)) {
8026 +                               /* Version mismatch */
8027 +                               vchiq_loud_error_header();
8028 +                               vchiq_loud_error("%d: service %d (%c%c%c%c) "
8029 +                                       "version mismatch - local (%d, min %d)"
8030 +                                       " vs. remote (%d, min %d)",
8031 +                                       state->id, service->localport,
8032 +                                       VCHIQ_FOURCC_AS_4CHARS(fourcc),
8033 +                                       service->version, service->version_min,
8034 +                                       version, version_min);
8035 +                               vchiq_loud_error_footer();
8036 +                               unlock_service(service);
8037 +                               goto fail_open;
8038 +                       }
8039 +                       service->peer_version = version;
8040 +
8041 +                       if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8042 +                               struct vchiq_openack_payload ack_payload = {
8043 +                                       service->version
8044 +                               };
8045 +                               VCHIQ_ELEMENT_T body = {
8046 +                                       &ack_payload,
8047 +                                       sizeof(ack_payload)
8048 +                               };
8049 +
8050 +                               /* Acknowledge the OPEN */
8051 +                               if (service->sync) {
8052 +                                       if (queue_message_sync(state, NULL,
8053 +                                               VCHIQ_MAKE_MSG(
8054 +                                                       VCHIQ_MSG_OPENACK,
8055 +                                                       service->localport,
8056 +                                                       remoteport),
8057 +                                               &body, 1, sizeof(ack_payload),
8058 +                                               0) == VCHIQ_RETRY)
8059 +                                               goto bail_not_ready;
8060 +                               } else {
8061 +                                       if (queue_message(state, NULL,
8062 +                                               VCHIQ_MAKE_MSG(
8063 +                                                       VCHIQ_MSG_OPENACK,
8064 +                                                       service->localport,
8065 +                                                       remoteport),
8066 +                                               &body, 1, sizeof(ack_payload),
8067 +                                               0) == VCHIQ_RETRY)
8068 +                                               goto bail_not_ready;
8069 +                               }
8070 +
8071 +                               /* The service is now open */
8072 +                               vchiq_set_service_state(service,
8073 +                                       service->sync ? VCHIQ_SRVSTATE_OPENSYNC
8074 +                                       : VCHIQ_SRVSTATE_OPEN);
8075 +                       }
8076 +
8077 +                       service->remoteport = remoteport;
8078 +                       service->client_id = ((int *)header->data)[1];
8079 +                       if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
8080 +                               NULL, NULL) == VCHIQ_RETRY) {
8081 +                               /* Bail out if not ready */
8082 +                               service->remoteport = VCHIQ_PORT_FREE;
8083 +                               goto bail_not_ready;
8084 +                       }
8085 +
8086 +                       /* Success - the message has been dealt with */
8087 +                       unlock_service(service);
8088 +                       return 1;
8089 +               }
8090 +       }
8091 +
8092 +fail_open:
8093 +       /* No available service, or an invalid request - send a CLOSE */
8094 +       if (queue_message(state, NULL,
8095 +               VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
8096 +               NULL, 0, 0, 0) == VCHIQ_RETRY)
8097 +               goto bail_not_ready;
8098 +
8099 +       return 1;
8100 +
8101 +bail_not_ready:
8102 +       unlock_service(service);
8103 +
8104 +       return 0;
8105 +}
8106 +
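+/* parse_rx_slots() is the main receive path.  It walks the remote transmit
+** slots from state->rx_pos up to the peer's tx_pos, holding an extra
+** reference on each slot while it is being parsed, and dispatches each
+** message on its type (OPEN, OPENACK, CLOSE, DATA, the BULK_* variants,
+** CONNECT, PAUSE/RESUME and the REMOTE_* notifications).  A message that
+** cannot be handled yet causes an early exit via bail_not_ready without
+** advancing rx_pos, so parsing resumes at the same message on the next
+** pass. */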
8107 +/* Called by the slot handler thread */
8108 +static void
8109 +parse_rx_slots(VCHIQ_STATE_T *state)
8110 +{
8111 +       VCHIQ_SHARED_STATE_T *remote = state->remote;
8112 +       VCHIQ_SERVICE_T *service = NULL;
8113 +       int tx_pos;
8114 +       DEBUG_INITIALISE(state->local)
8115 +
8116 +       tx_pos = remote->tx_pos;
8117 +
8118 +       while (state->rx_pos != tx_pos) {
8119 +               VCHIQ_HEADER_T *header;
8120 +               int msgid, size;
8121 +               int type;
8122 +               unsigned int localport, remoteport;
8123 +
8124 +               DEBUG_TRACE(PARSE_LINE);
8125 +               if (!state->rx_data) {
8126 +                       int rx_index;
8127 +                       WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
8128 +                       rx_index = remote->slot_queue[
8129 +                               SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
8130 +                               VCHIQ_SLOT_QUEUE_MASK];
8131 +                       state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
8132 +                               rx_index);
8133 +                       state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
8134 +
8135 +                       /* Initialise use_count to one, and increment
8136 +                       ** release_count at the end of the slot to avoid
8137 +                       ** releasing the slot prematurely. */
8138 +                       state->rx_info->use_count = 1;
8139 +                       state->rx_info->release_count = 0;
8140 +               }
8141 +
8142 +               header = (VCHIQ_HEADER_T *)(state->rx_data +
8143 +                       (state->rx_pos & VCHIQ_SLOT_MASK));
8144 +               DEBUG_VALUE(PARSE_HEADER, (int)header);
8145 +               msgid = header->msgid;
8146 +               DEBUG_VALUE(PARSE_MSGID, msgid);
8147 +               size = header->size;
8148 +               type = VCHIQ_MSG_TYPE(msgid);
8149 +               localport = VCHIQ_MSG_DSTPORT(msgid);
8150 +               remoteport = VCHIQ_MSG_SRCPORT(msgid);
8151 +
8152 +               if (type != VCHIQ_MSG_DATA)
8153 +                       VCHIQ_STATS_INC(state, ctrl_rx_count);
8154 +
8155 +               switch (type) {
8156 +               case VCHIQ_MSG_OPENACK:
8157 +               case VCHIQ_MSG_CLOSE:
8158 +               case VCHIQ_MSG_DATA:
8159 +               case VCHIQ_MSG_BULK_RX:
8160 +               case VCHIQ_MSG_BULK_TX:
8161 +               case VCHIQ_MSG_BULK_RX_DONE:
8162 +               case VCHIQ_MSG_BULK_TX_DONE:
8163 +                       service = find_service_by_port(state, localport);
8164 +                       if ((!service || service->remoteport != remoteport) &&
8165 +                               (localport == 0) &&
8166 +                               (type == VCHIQ_MSG_CLOSE)) {
8167 +                               /* This could be a CLOSE from a client which
8168 +                                  hadn't yet received the OPENACK - look for
8169 +                                  the connected service */
8170 +                               if (service)
8171 +                                       unlock_service(service);
8172 +                               service = get_connected_service(state,
8173 +                                       remoteport);
8174 +                               if (service)
8175 +                                       vchiq_log_warning(vchiq_core_log_level,
8176 +                                               "%d: prs %s@%x (%d->%d) - "
8177 +                                               "found connected service %d",
8178 +                                               state->id, msg_type_str(type),
8179 +                                               (unsigned int)header,
8180 +                                               remoteport, localport,
8181 +                                               service->localport);
8182 +                       }
8183 +
8184 +                       if (!service) {
8185 +                               vchiq_log_error(vchiq_core_log_level,
8186 +                                       "%d: prs %s@%x (%d->%d) - "
8187 +                                       "invalid/closed service %d",
8188 +                                       state->id, msg_type_str(type),
8189 +                                       (unsigned int)header,
8190 +                                       remoteport, localport, localport);
8191 +                               goto skip_message;
8192 +                       }
8193 +                       break;
8194 +               default:
8195 +                       break;
8196 +               }
8197 +
8198 +               if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
8199 +                       int svc_fourcc;
8200 +
8201 +                       svc_fourcc = service
8202 +                               ? service->base.fourcc
8203 +                               : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
8204 +                       vchiq_log_info(vchiq_core_msg_log_level,
8205 +                               "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
8206 +                               "len:%d",
8207 +                               msg_type_str(type), type,
8208 +                               VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
8209 +                               remoteport, localport, size);
8210 +                       if (size > 0)
8211 +                               vchiq_log_dump_mem("Rcvd", 0, header->data,
8212 +                                       min(64, size));
8213 +               }
8214 +
8215 +               if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
8216 +                       > VCHIQ_SLOT_SIZE) {
8217 +                       vchiq_log_error(vchiq_core_log_level,
8218 +                               "header %x (msgid %x) - size %x too big for "
8219 +                               "slot",
8220 +                               (unsigned int)header, (unsigned int)msgid,
8221 +                               (unsigned int)size);
8222 +                       WARN(1, "oversized for slot\n");
8223 +               }
8224 +
8225 +               switch (type) {
8226 +               case VCHIQ_MSG_OPEN:
8227 +                       WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
8228 +                       if (!parse_open(state, header))
8229 +                               goto bail_not_ready;
8230 +                       break;
8231 +               case VCHIQ_MSG_OPENACK:
8232 +                       if (size >= sizeof(struct vchiq_openack_payload)) {
8233 +                               const struct vchiq_openack_payload *payload =
8234 +                                       (struct vchiq_openack_payload *)
8235 +                                       header->data;
8236 +                               service->peer_version = payload->version;
8237 +                       }
8238 +                       vchiq_log_info(vchiq_core_log_level,
8239 +                               "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
8240 +                               state->id, (unsigned int)header, size,
8241 +                               remoteport, localport, service->peer_version);
8242 +                       if (service->srvstate ==
8243 +                               VCHIQ_SRVSTATE_OPENING) {
8244 +                               service->remoteport = remoteport;
8245 +                               vchiq_set_service_state(service,
8246 +                                       VCHIQ_SRVSTATE_OPEN);
8247 +                               up(&service->remove_event);
8248 +                       } else
8249 +                               vchiq_log_error(vchiq_core_log_level,
8250 +                                       "OPENACK received in state %s",
8251 +                                       srvstate_names[service->srvstate]);
8252 +                       break;
8253 +               case VCHIQ_MSG_CLOSE:
8254 +                       WARN_ON(size != 0); /* There should be no data */
8255 +
8256 +                       vchiq_log_info(vchiq_core_log_level,
8257 +                               "%d: prs CLOSE@%x (%d->%d)",
8258 +                               state->id, (unsigned int)header,
8259 +                               remoteport, localport);
8260 +
8261 +                       mark_service_closing_internal(service, 1);
8262 +
8263 +                       if (vchiq_close_service_internal(service,
8264 +                               1/*close_recvd*/) == VCHIQ_RETRY)
8265 +                               goto bail_not_ready;
8266 +
8267 +                       vchiq_log_info(vchiq_core_log_level,
8268 +                               "Close Service %c%c%c%c s:%u d:%d",
8269 +                               VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
8270 +                               service->localport,
8271 +                               service->remoteport);
8272 +                       break;
8273 +               case VCHIQ_MSG_DATA:
8274 +                       vchiq_log_trace(vchiq_core_log_level,
8275 +                               "%d: prs DATA@%x,%x (%d->%d)",
8276 +                               state->id, (unsigned int)header, size,
8277 +                               remoteport, localport);
8278 +
8279 +                       if ((service->remoteport == remoteport)
8280 +                               && (service->srvstate ==
8281 +                               VCHIQ_SRVSTATE_OPEN)) {
8282 +                               header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
8283 +                               claim_slot(state->rx_info);
8284 +                               DEBUG_TRACE(PARSE_LINE);
8285 +                               if (make_service_callback(service,
8286 +                                       VCHIQ_MESSAGE_AVAILABLE, header,
8287 +                                       NULL) == VCHIQ_RETRY) {
8288 +                                       DEBUG_TRACE(PARSE_LINE);
8289 +                                       goto bail_not_ready;
8290 +                               }
8291 +                               VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
8292 +                               VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
8293 +                                       size);
8294 +                       } else {
8295 +                               VCHIQ_STATS_INC(state, error_count);
8296 +                       }
8297 +                       break;
8298 +               case VCHIQ_MSG_CONNECT:
8299 +                       vchiq_log_info(vchiq_core_log_level,
8300 +                               "%d: prs CONNECT@%x",
8301 +                               state->id, (unsigned int)header);
8302 +                       up(&state->connect);
8303 +                       break;
8304 +               case VCHIQ_MSG_BULK_RX:
8305 +               case VCHIQ_MSG_BULK_TX: {
8306 +                       VCHIQ_BULK_QUEUE_T *queue;
8307 +                       WARN_ON(!state->is_master);
8308 +                       queue = (type == VCHIQ_MSG_BULK_RX) ?
8309 +                               &service->bulk_tx : &service->bulk_rx;
8310 +                       if ((service->remoteport == remoteport)
8311 +                               && (service->srvstate ==
8312 +                               VCHIQ_SRVSTATE_OPEN)) {
8313 +                               VCHIQ_BULK_T *bulk;
8314 +                               int resolved = 0;
8315 +
8316 +                               DEBUG_TRACE(PARSE_LINE);
8317 +                               if (mutex_lock_interruptible(
8318 +                                       &service->bulk_mutex) != 0) {
8319 +                                       DEBUG_TRACE(PARSE_LINE);
8320 +                                       goto bail_not_ready;
8321 +                               }
8322 +
8323 +                               WARN_ON(!(queue->remote_insert < queue->remove +
8324 +                                       VCHIQ_NUM_SERVICE_BULKS));
8325 +                               bulk = &queue->bulks[
8326 +                                       BULK_INDEX(queue->remote_insert)];
8327 +                               bulk->remote_data =
8328 +                                       (void *)((int *)header->data)[0];
8329 +                               bulk->remote_size = ((int *)header->data)[1];
8330 +                               wmb();
8331 +
8332 +                               vchiq_log_info(vchiq_core_log_level,
8333 +                                       "%d: prs %s@%x (%d->%d) %x@%x",
8334 +                                       state->id, msg_type_str(type),
8335 +                                       (unsigned int)header,
8336 +                                       remoteport, localport,
8337 +                                       bulk->remote_size,
8338 +                                       (unsigned int)bulk->remote_data);
8339 +
8340 +                               queue->remote_insert++;
8341 +
8342 +                               if (atomic_read(&pause_bulks_count)) {
8343 +                                       state->deferred_bulks++;
8344 +                                       vchiq_log_info(vchiq_core_log_level,
8345 +                                               "%s: deferring bulk (%d)",
8346 +                                               __func__,
8347 +                                               state->deferred_bulks);
8348 +                                       if (state->conn_state !=
8349 +                                               VCHIQ_CONNSTATE_PAUSE_SENT)
8350 +                                               vchiq_log_error(
8351 +                                                       vchiq_core_log_level,
8352 +                                                       "%s: bulks paused in "
8353 +                                                       "unexpected state %s",
8354 +                                                       __func__,
8355 +                                                       conn_state_names[
8356 +                                                       state->conn_state]);
8357 +                               } else if (state->conn_state ==
8358 +                                       VCHIQ_CONNSTATE_CONNECTED) {
8359 +                                       DEBUG_TRACE(PARSE_LINE);
8360 +                                       resolved = resolve_bulks(service,
8361 +                                               queue);
8362 +                               }
8363 +
8364 +                               mutex_unlock(&service->bulk_mutex);
8365 +                               if (resolved)
8366 +                                       notify_bulks(service, queue,
8367 +                                               1/*retry_poll*/);
8368 +                       }
8369 +               } break;
8370 +               case VCHIQ_MSG_BULK_RX_DONE:
8371 +               case VCHIQ_MSG_BULK_TX_DONE:
8372 +                       WARN_ON(state->is_master);
8373 +                       if ((service->remoteport == remoteport)
8374 +                               && (service->srvstate !=
8375 +                               VCHIQ_SRVSTATE_FREE)) {
8376 +                               VCHIQ_BULK_QUEUE_T *queue;
8377 +                               VCHIQ_BULK_T *bulk;
8378 +
8379 +                               queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
8380 +                                       &service->bulk_rx : &service->bulk_tx;
8381 +
8382 +                               DEBUG_TRACE(PARSE_LINE);
8383 +                               if (mutex_lock_interruptible(
8384 +                                       &service->bulk_mutex) != 0) {
8385 +                                       DEBUG_TRACE(PARSE_LINE);
8386 +                                       goto bail_not_ready;
8387 +                               }
8388 +                               if ((int)(queue->remote_insert -
8389 +                                       queue->local_insert) >= 0) {
8390 +                                       vchiq_log_error(vchiq_core_log_level,
8391 +                                               "%d: prs %s@%x (%d->%d) "
8392 +                                               "unexpected (ri=%d,li=%d)",
8393 +                                               state->id, msg_type_str(type),
8394 +                                               (unsigned int)header,
8395 +                                               remoteport, localport,
8396 +                                               queue->remote_insert,
8397 +                                               queue->local_insert);
8398 +                                       mutex_unlock(&service->bulk_mutex);
8399 +                                       break;
8400 +                               }
8401 +
8402 +                               BUG_ON(queue->process == queue->local_insert);
8403 +                               BUG_ON(queue->process != queue->remote_insert);
8404 +
8405 +                               bulk = &queue->bulks[
8406 +                                       BULK_INDEX(queue->remote_insert)];
8407 +                               bulk->actual = *(int *)header->data;
8408 +                               queue->remote_insert++;
8409 +
8410 +                               vchiq_log_info(vchiq_core_log_level,
8411 +                                       "%d: prs %s@%x (%d->%d) %x@%x",
8412 +                                       state->id, msg_type_str(type),
8413 +                                       (unsigned int)header,
8414 +                                       remoteport, localport,
8415 +                                       bulk->actual, (unsigned int)bulk->data);
8416 +
8417 +                               vchiq_log_trace(vchiq_core_log_level,
8418 +                                       "%d: prs:%d %cx li=%x ri=%x p=%x",
8419 +                                       state->id, localport,
8420 +                                       (type == VCHIQ_MSG_BULK_RX_DONE) ?
8421 +                                               'r' : 't',
8422 +                                       queue->local_insert,
8423 +                                       queue->remote_insert, queue->process);
8424 +
8425 +                               DEBUG_TRACE(PARSE_LINE);
8426 +                               WARN_ON(queue->process == queue->local_insert);
8427 +                               vchiq_complete_bulk(bulk);
8428 +                               queue->process++;
8429 +                               mutex_unlock(&service->bulk_mutex);
8430 +                               DEBUG_TRACE(PARSE_LINE);
8431 +                               notify_bulks(service, queue, 1/*retry_poll*/);
8432 +                               DEBUG_TRACE(PARSE_LINE);
8433 +                       }
8434 +                       break;
8435 +               case VCHIQ_MSG_PADDING:
8436 +                       vchiq_log_trace(vchiq_core_log_level,
8437 +                               "%d: prs PADDING@%x,%x",
8438 +                               state->id, (unsigned int)header, size);
8439 +                       break;
8440 +               case VCHIQ_MSG_PAUSE:
8441 +                       /* If initiated, signal the application thread */
8442 +                       vchiq_log_trace(vchiq_core_log_level,
8443 +                               "%d: prs PAUSE@%x,%x",
8444 +                               state->id, (unsigned int)header, size);
8445 +                       if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8446 +                               vchiq_log_error(vchiq_core_log_level,
8447 +                                       "%d: PAUSE received in state PAUSED",
8448 +                                       state->id);
8449 +                               break;
8450 +                       }
8451 +                       if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
8452 +                               /* Send a PAUSE in response */
8453 +                               if (queue_message(state, NULL,
8454 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
8455 +                                       NULL, 0, 0, 0) == VCHIQ_RETRY)
8456 +                                       goto bail_not_ready;
8457 +                               if (state->is_master)
8458 +                                       pause_bulks(state);
8459 +                       }
8460 +                       /* At this point slot_mutex is held */
8461 +                       vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
8462 +                       vchiq_platform_paused(state);
8463 +                       break;
8464 +               case VCHIQ_MSG_RESUME:
8465 +                       vchiq_log_trace(vchiq_core_log_level,
8466 +                               "%d: prs RESUME@%x,%x",
8467 +                               state->id, (unsigned int)header, size);
8468 +                       /* Release the slot mutex */
8469 +                       mutex_unlock(&state->slot_mutex);
8470 +                       if (state->is_master)
8471 +                               resume_bulks(state);
8472 +                       vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8473 +                       vchiq_platform_resumed(state);
8474 +                       break;
8475 +
8476 +               case VCHIQ_MSG_REMOTE_USE:
8477 +                       vchiq_on_remote_use(state);
8478 +                       break;
8479 +               case VCHIQ_MSG_REMOTE_RELEASE:
8480 +                       vchiq_on_remote_release(state);
8481 +                       break;
8482 +               case VCHIQ_MSG_REMOTE_USE_ACTIVE:
8483 +                       vchiq_on_remote_use_active(state);
8484 +                       break;
8485 +
8486 +               default:
8487 +                       vchiq_log_error(vchiq_core_log_level,
8488 +                               "%d: prs invalid msgid %x@%x,%x",
8489 +                               state->id, msgid, (unsigned int)header, size);
8490 +                       WARN(1, "invalid message\n");
8491 +                       break;
8492 +               }
8493 +
8494 +skip_message:
8495 +               if (service) {
8496 +                       unlock_service(service);
8497 +                       service = NULL;
8498 +               }
8499 +
8500 +               state->rx_pos += calc_stride(size);
8501 +
8502 +               DEBUG_TRACE(PARSE_LINE);
8503 +               /* Perform some housekeeping when the end of the slot is
8504 +               ** reached. */
8505 +               if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
8506 +                       /* Remove the extra reference count. */
8507 +                       release_slot(state, state->rx_info, NULL, NULL);
8508 +                       state->rx_data = NULL;
8509 +               }
8510 +       }
8511 +
8512 +bail_not_ready:
8513 +       if (service)
8514 +               unlock_service(service);
8515 +}
8516 +
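+/* slot_handler_func() is the body of the slot handler kthread.  It blocks
+** on the local trigger event, then deals with any pending poll work -
+** suspend checks, service polling and the PAUSE/RESUME sequencing driven
+** by conn_state - before parsing newly arrived rx slots. */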
8517 +/* Called by the slot handler thread */
8518 +static int
8519 +slot_handler_func(void *v)
8520 +{
8521 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8522 +       VCHIQ_SHARED_STATE_T *local = state->local;
8523 +       DEBUG_INITIALISE(local)
8524 +
8525 +       while (1) {
8526 +               DEBUG_COUNT(SLOT_HANDLER_COUNT);
8527 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
8528 +               remote_event_wait(&local->trigger);
8529 +
8530 +               rmb();
8531 +
8532 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
8533 +               if (state->poll_needed) {
8534 +                       /* Check if we need to suspend - may change our
8535 +                        * conn_state */
8536 +                       vchiq_platform_check_suspend(state);
8537 +
8538 +                       state->poll_needed = 0;
8539 +
8540 +                       /* Handle service polling and other rare conditions here
8541 +                       ** out of the mainline code */
8542 +                       switch (state->conn_state) {
8543 +                       case VCHIQ_CONNSTATE_CONNECTED:
8544 +                               /* Poll the services as requested */
8545 +                               poll_services(state);
8546 +                               break;
8547 +
8548 +                       case VCHIQ_CONNSTATE_PAUSING:
8549 +                               if (state->is_master)
8550 +                                       pause_bulks(state);
8551 +                               if (queue_message(state, NULL,
8552 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
8553 +                                       NULL, 0, 0, 0) != VCHIQ_RETRY) {
8554 +                                       vchiq_set_conn_state(state,
8555 +                                               VCHIQ_CONNSTATE_PAUSE_SENT);
8556 +                               } else {
8557 +                                       if (state->is_master)
8558 +                                               resume_bulks(state);
8559 +                                       /* Retry later */
8560 +                                       state->poll_needed = 1;
8561 +                               }
8562 +                               break;
8563 +
8564 +                       case VCHIQ_CONNSTATE_PAUSED:
8565 +                               vchiq_platform_resume(state);
8566 +                               break;
8567 +
8568 +                       case VCHIQ_CONNSTATE_RESUMING:
8569 +                               if (queue_message(state, NULL,
8570 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
8571 +                                       NULL, 0, 0, 0) != VCHIQ_RETRY) {
8572 +                                       if (state->is_master)
8573 +                                               resume_bulks(state);
8574 +                                       vchiq_set_conn_state(state,
8575 +                                               VCHIQ_CONNSTATE_CONNECTED);
8576 +                                       vchiq_platform_resumed(state);
8577 +                               } else {
8578 +                                       /* This should really be impossible,
8579 +                                       ** since the PAUSE should have flushed
8580 +                                       ** through outstanding messages. */
8581 +                                       vchiq_log_error(vchiq_core_log_level,
8582 +                                               "Failed to send RESUME "
8583 +                                               "message");
8584 +                                       BUG();
8585 +                               }
8586 +                               break;
8587 +
8588 +                       case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
8589 +                       case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
8590 +                               vchiq_platform_handle_timeout(state);
8591 +                               break;
8592 +                       default:
8593 +                               break;
8594 +                       }
8595 +
8596 +
8597 +               }
8598 +
8599 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
8600 +               parse_rx_slots(state);
8601 +       }
8602 +       return 0;
8603 +}
8604 +
8605 +
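+/* recycle_func() is the body of the recycle kthread: it waits on the local
+** recycle event and then runs process_free_queue(), which (as the name
+** suggests) returns released slots to the free pool. */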
8606 +/* Called by the recycle thread */
8607 +static int
8608 +recycle_func(void *v)
8609 +{
8610 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8611 +       VCHIQ_SHARED_STATE_T *local = state->local;
8612 +
8613 +       while (1) {
8614 +               remote_event_wait(&local->recycle);
8615 +
8616 +               process_free_queue(state);
8617 +       }
8618 +       return 0;
8619 +}
8620 +
8621 +
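+/* sync_func() is the body of the sync kthread, serving the single
+** synchronous slot at state->remote->slot_sync.  Each time the sync trigger
+** fires it reads one message from that slot, delivers OPENACK and DATA to
+** the owning service, and hands the slot back with release_message_sync().
+** Note that in the DATA case the message is not released here. */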
8622 +/* Called by the sync thread */
8623 +static int
8624 +sync_func(void *v)
8625 +{
8626 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
8627 +       VCHIQ_SHARED_STATE_T *local = state->local;
8628 +       VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
8629 +               state->remote->slot_sync);
8630 +
8631 +       while (1) {
8632 +               VCHIQ_SERVICE_T *service;
8633 +               int msgid, size;
8634 +               int type;
8635 +               unsigned int localport, remoteport;
8636 +
8637 +               remote_event_wait(&local->sync_trigger);
8638 +
8639 +               rmb();
8640 +
8641 +               msgid = header->msgid;
8642 +               size = header->size;
8643 +               type = VCHIQ_MSG_TYPE(msgid);
8644 +               localport = VCHIQ_MSG_DSTPORT(msgid);
8645 +               remoteport = VCHIQ_MSG_SRCPORT(msgid);
8646 +
8647 +               service = find_service_by_port(state, localport);
8648 +
8649 +               if (!service) {
8650 +                       vchiq_log_error(vchiq_sync_log_level,
8651 +                               "%d: sf %s@%x (%d->%d) - "
8652 +                               "invalid/closed service %d",
8653 +                               state->id, msg_type_str(type),
8654 +                               (unsigned int)header,
8655 +                               remoteport, localport, localport);
8656 +                       release_message_sync(state, header);
8657 +                       continue;
8658 +               }
8659 +
8660 +               if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
8661 +                       int svc_fourcc;
8662 +
8663 +                       svc_fourcc = service
8664 +                               ? service->base.fourcc
8665 +                               : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
8666 +                       vchiq_log_trace(vchiq_sync_log_level,
8667 +                               "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
8668 +                               msg_type_str(type),
8669 +                               VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
8670 +                               remoteport, localport, size);
8671 +                       if (size > 0)
8672 +                               vchiq_log_dump_mem("Rcvd", 0, header->data,
8673 +                                       min(64, size));
8674 +               }
8675 +
8676 +               switch (type) {
8677 +               case VCHIQ_MSG_OPENACK:
8678 +                       if (size >= sizeof(struct vchiq_openack_payload)) {
8679 +                               const struct vchiq_openack_payload *payload =
8680 +                                       (struct vchiq_openack_payload *)
8681 +                                       header->data;
8682 +                               service->peer_version = payload->version;
8683 +                       }
8684 +                       vchiq_log_info(vchiq_sync_log_level,
8685 +                               "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
8686 +                               state->id, (unsigned int)header, size,
8687 +                               remoteport, localport, service->peer_version);
8688 +                       if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
8689 +                               service->remoteport = remoteport;
8690 +                               vchiq_set_service_state(service,
8691 +                                       VCHIQ_SRVSTATE_OPENSYNC);
8692 +                               up(&service->remove_event);
8693 +                       }
8694 +                       release_message_sync(state, header);
8695 +                       break;
8696 +
8697 +               case VCHIQ_MSG_DATA:
8698 +                       vchiq_log_trace(vchiq_sync_log_level,
8699 +                               "%d: sf DATA@%x,%x (%d->%d)",
8700 +                               state->id, (unsigned int)header, size,
8701 +                               remoteport, localport);
8702 +
8703 +                       if ((service->remoteport == remoteport) &&
8704 +                               (service->srvstate ==
8705 +                               VCHIQ_SRVSTATE_OPENSYNC)) {
8706 +                               if (make_service_callback(service,
8707 +                                       VCHIQ_MESSAGE_AVAILABLE, header,
8708 +                                       NULL) == VCHIQ_RETRY)
8709 +                                       vchiq_log_error(vchiq_sync_log_level,
8710 +                                               "synchronous callback to "
8711 +                                               "service %d returns "
8712 +                                               "VCHIQ_RETRY",
8713 +                                               localport);
8714 +                       }
8715 +                       break;
8716 +
8717 +               default:
8718 +                       vchiq_log_error(vchiq_sync_log_level,
8719 +                               "%d: sf unexpected msgid %x@%x,%x",
8720 +                               state->id, msgid, (unsigned int)header, size);
8721 +                       release_message_sync(state, header);
8722 +                       break;
8723 +               }
8724 +
8725 +               unlock_service(service);
8726 +       }
8727 +
8728 +       return 0;
8729 +}
8730 +
8731 +
8732 +static void
8733 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
8734 +{
8735 +       queue->local_insert = 0;
8736 +       queue->remote_insert = 0;
8737 +       queue->process = 0;
8738 +       queue->remote_notify = 0;
8739 +       queue->remove = 0;
8740 +}
8741 +
8742 +
8743 +inline const char *
8744 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
8745 +{
8746 +       return conn_state_names[conn_state];
8747 +}
8748 +
8749 +
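+/* vchiq_init_slots() lays out the shared memory region: the base is
+** aligned up to a slot boundary, the first VCHIQ_SLOT_ZERO_SLOTS slots hold
+** the VCHIQ_SLOT_ZERO_T control structure, and the remaining data slots are
+** split evenly between master and slave, with the first slot of each half
+** reserved for synchronous messages.  At least four data slots must remain
+** or NULL is returned. */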
8750 +VCHIQ_SLOT_ZERO_T *
8751 +vchiq_init_slots(void *mem_base, int mem_size)
8752 +{
8753 +       int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
8754 +       VCHIQ_SLOT_ZERO_T *slot_zero =
8755 +               (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
8756 +       int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
8757 +       int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
8758 +
8759 +       /* Ensure there is enough memory to run an absolutely minimal system */
8760 +       num_slots -= first_data_slot;
8761 +
8762 +       if (num_slots < 4) {
8763 +               vchiq_log_error(vchiq_core_log_level,
8764 +                       "vchiq_init_slots - insufficient memory %x bytes",
8765 +                       mem_size);
8766 +               return NULL;
8767 +       }
8768 +
8769 +       memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
8770 +
8771 +       slot_zero->magic = VCHIQ_MAGIC;
8772 +       slot_zero->version = VCHIQ_VERSION;
8773 +       slot_zero->version_min = VCHIQ_VERSION_MIN;
8774 +       slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
8775 +       slot_zero->slot_size = VCHIQ_SLOT_SIZE;
8776 +       slot_zero->max_slots = VCHIQ_MAX_SLOTS;
8777 +       slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
8778 +
8779 +       slot_zero->master.slot_sync = first_data_slot;
8780 +       slot_zero->master.slot_first = first_data_slot + 1;
8781 +       slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
8782 +       slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
8783 +       slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
8784 +       slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
8785 +
8786 +       return slot_zero;
8787 +}
8788 +
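+/* vchiq_init_state() validates the slot_zero block shared with the peer
+** (magic value, version range and structure sizes), binds this end to
+** either the master or the slave half of it - refusing to run if that half
+** is already initialised - and then zeroes the local VCHIQ_STATE_T and sets
+** up its events and mutexes from scratch. */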
8789 +VCHIQ_STATUS_T
8790 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
8791 +                int is_master)
8792 +{
8793 +       VCHIQ_SHARED_STATE_T *local;
8794 +       VCHIQ_SHARED_STATE_T *remote;
8795 +       VCHIQ_STATUS_T status;
8796 +       char threadname[10];
8797 +       static int id;
8798 +       int i;
8799 +
8800 +       vchiq_log_warning(vchiq_core_log_level,
8801 +               "%s: slot_zero = 0x%08lx, is_master = %d",
8802 +               __func__, (unsigned long)slot_zero, is_master);
8803 +
8804 +       /* Check the input configuration */
8805 +
8806 +       if (slot_zero->magic != VCHIQ_MAGIC) {
8807 +               vchiq_loud_error_header();
8808 +               vchiq_loud_error("Invalid VCHIQ magic value found.");
8809 +               vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
8810 +                       (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
8811 +               vchiq_loud_error_footer();
8812 +               return VCHIQ_ERROR;
8813 +       }
8814 +
8815 +       if (slot_zero->version < VCHIQ_VERSION_MIN) {
8816 +               vchiq_loud_error_header();
8817 +               vchiq_loud_error("Incompatible VCHIQ versions found.");
8818 +               vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
8819 +                       "(minimum %d)",
8820 +                       (unsigned int)slot_zero, slot_zero->version,
8821 +                       VCHIQ_VERSION_MIN);
8822 +               vchiq_loud_error("Restart with a newer VideoCore image.");
8823 +               vchiq_loud_error_footer();
8824 +               return VCHIQ_ERROR;
8825 +       }
8826 +
8827 +       if (VCHIQ_VERSION < slot_zero->version_min) {
8828 +               vchiq_loud_error_header();
8829 +               vchiq_loud_error("Incompatible VCHIQ versions found.");
8830 +               vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
8831 +                       "minimum %d)",
8832 +                       (unsigned int)slot_zero, VCHIQ_VERSION,
8833 +                       slot_zero->version_min);
8834 +               vchiq_loud_error("Restart with a newer kernel.");
8835 +               vchiq_loud_error_footer();
8836 +               return VCHIQ_ERROR;
8837 +       }
8838 +
8839 +       if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
8840 +                (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
8841 +                (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
8842 +                (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
8843 +               vchiq_loud_error_header();
8844 +               if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
8845 +                       vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
8846 +                               "(expected %x)",
8847 +                               (unsigned int)slot_zero,
8848 +                               slot_zero->slot_zero_size,
8849 +                               sizeof(VCHIQ_SLOT_ZERO_T));
8850 +               if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
8851 +                       vchiq_loud_error("slot_zero=%x: slot_size=%d "
8852 +                               "(expected %d)",
8853 +                               (unsigned int)slot_zero, slot_zero->slot_size,
8854 +                               VCHIQ_SLOT_SIZE);
8855 +               if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
8856 +                       vchiq_loud_error("slot_zero=%x: max_slots=%d "
8857 +                               "(expected %d)",
8858 +                               (unsigned int)slot_zero, slot_zero->max_slots,
8859 +                               VCHIQ_MAX_SLOTS);
8860 +               if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
8861 +                       vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
8862 +                               "(expected %d)",
8863 +                               (unsigned int)slot_zero,
8864 +                               slot_zero->max_slots_per_side,
8865 +                               VCHIQ_MAX_SLOTS_PER_SIDE);
8866 +               vchiq_loud_error_footer();
8867 +               return VCHIQ_ERROR;
8868 +       }
8869 +
8870 +       if (is_master) {
8871 +               local = &slot_zero->master;
8872 +               remote = &slot_zero->slave;
8873 +       } else {
8874 +               local = &slot_zero->slave;
8875 +               remote = &slot_zero->master;
8876 +       }
8877 +
8878 +       if (local->initialised) {
8879 +               vchiq_loud_error_header();
8880 +               if (remote->initialised)
8881 +                       vchiq_loud_error("local state has already been "
8882 +                               "initialised");
8883 +               else
8884 +                       vchiq_loud_error("master/slave mismatch - two %ss",
8885 +                               is_master ? "master" : "slave");
8886 +               vchiq_loud_error_footer();
8887 +               return VCHIQ_ERROR;
8888 +       }
8889 +
8890 +       memset(state, 0, sizeof(VCHIQ_STATE_T));
8891 +
8892 +       state->id = id++;
8893 +       state->is_master = is_master;
8894 +
8895 +       /*
8896 +       ** initialize shared state pointers
8897 +       */
8898 +
8899 +       state->local = local;
8900 +       state->remote = remote;
8901 +       state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
8902 +
8903 +       /*
8904 +       ** initialize events and mutexes
8905 +       */
8906 +
8907 +       sema_init(&state->connect, 0);
8908 +       mutex_init(&state->mutex);
8909 +       sema_init(&state->trigger_event, 0);
8910 +       sema_init(&state->recycle_event, 0);
8911 +       sema_init(&state->sync_trigger_event, 0);
8912 +       sema_init(&state->sync_release_event, 0);
8913 +
8914 +       mutex_init(&state->slot_mutex);
8915 +       mutex_init(&state->recycle_mutex);
8916 +       mutex_init(&state->sync_mutex);
8917 +       mutex_init(&state->bulk_transfer_mutex);
8918 +
8919 +       sema_init(&state->slot_available_event, 0);
8920 +       sema_init(&state->slot_remove_event, 0);
8921 +       sema_init(&state->data_quota_event, 0);
8922 +
8923 +       state->slot_queue_available = 0;
8924 +
8925 +       for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
8926 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
8927 +                       &state->service_quotas[i];
8928 +               sema_init(&service_quota->quota_event, 0);
8929 +       }
8930 +
8931 +       for (i = local->slot_first; i <= local->slot_last; i++) {
8932 +               local->slot_queue[state->slot_queue_available++] = i;
8933 +               up(&state->slot_available_event);
8934 +       }
8935 +
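+       /* By default a service may hold at most half of this side's slots,
+       ** with a message quota of 256 per slot, capped at the maximum value
+       ** of an unsigned short. */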
8936 +       state->default_slot_quota = state->slot_queue_available/2;
8937 +       state->default_message_quota =
8938 +               min((unsigned short)(state->default_slot_quota * 256),
8939 +               (unsigned short)~0);
8940 +
8941 +       state->previous_data_index = -1;
8942 +       state->data_use_count = 0;
8943 +       state->data_quota = state->slot_queue_available - 1;
8944 +
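+       /* Point the shared remote events at this state's local semaphores
+       ** and initialise them, so that signals from the other side wake the
+       ** right waiter. */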
8945 +       local->trigger.event = &state->trigger_event;
8946 +       remote_event_create(&local->trigger);
8947 +       local->tx_pos = 0;
8948 +
8949 +       local->recycle.event = &state->recycle_event;
8950 +       remote_event_create(&local->recycle);
8951 +       local->slot_queue_recycle = state->slot_queue_available;
8952 +
8953 +       local->sync_trigger.event = &state->sync_trigger_event;
8954 +       remote_event_create(&local->sync_trigger);
8955 +
8956 +       local->sync_release.event = &state->sync_release_event;
8957 +       remote_event_create(&local->sync_release);
8958 +
8959 +       /* At start-of-day, the slot is empty and available */
8960 +       ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
8961 +               = VCHIQ_MSGID_PADDING;
8962 +       remote_event_signal_local(&local->sync_release);
8963 +
8964 +       local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
8965 +
8966 +       status = vchiq_platform_init_state(state);
8967 +
8968 +       /*
8969 +       ** bring up slot handler thread
8970 +       */
8971 +       snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
8972 +       state->slot_handler_thread = kthread_create(&slot_handler_func,
8973 +               (void *)state,
8974 +               threadname);
8975 +
8976 +       if (state->slot_handler_thread == NULL) {
8977 +               vchiq_loud_error_header();
8978 +               vchiq_loud_error("couldn't create thread %s", threadname);
8979 +               vchiq_loud_error_footer();
8980 +               return VCHIQ_ERROR;
8981 +       }
8982 +       set_user_nice(state->slot_handler_thread, -19);
8983 +       wake_up_process(state->slot_handler_thread);
8984 +
8985 +       snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
8986 +       state->recycle_thread = kthread_create(&recycle_func,
8987 +               (void *)state,
8988 +               threadname);
8989 +       if (state->recycle_thread == NULL) {
8990 +               vchiq_loud_error_header();
8991 +               vchiq_loud_error("couldn't create thread %s", threadname);
8992 +               vchiq_loud_error_footer();
8993 +               return VCHIQ_ERROR;
8994 +       }
8995 +       set_user_nice(state->recycle_thread, -19);
8996 +       wake_up_process(state->recycle_thread);
8997 +
8998 +       snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
8999 +       state->sync_thread = kthread_create(&sync_func,
9000 +               (void *)state,
9001 +               threadname);
9002 +       if (state->sync_thread == NULL) {
9003 +               vchiq_loud_error_header();
9004 +               vchiq_loud_error("couldn't create thread %s", threadname);
9005 +               vchiq_loud_error_footer();
9006 +               return VCHIQ_ERROR;
9007 +       }
9008 +       set_user_nice(state->sync_thread, -20);
9009 +       wake_up_process(state->sync_thread);
9010 +
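+       /* Record this state in the fixed-size vchiq_states array so that it
+       ** can be looked up by its id. */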
9011 +       BUG_ON(state->id >= VCHIQ_MAX_STATES);
9012 +       vchiq_states[state->id] = state;
9013 +
9014 +       /* Indicate readiness to the other side */
9015 +       local->initialised = 1;
9016 +
9017 +       return status;
9018 +}
9019 +
9020 +/* Called from application thread when a client or server service is created. */
9021 +VCHIQ_SERVICE_T *
9022 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9023 +       const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9024 +       VCHIQ_INSTANCE_T instance)
9025 +{
9026 +       VCHIQ_SERVICE_T *service;
9027 +
9028 +       service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
9029 +       if (service) {
9030 +               service->base.fourcc   = params->fourcc;
9031 +               service->base.callback = params->callback;
9032 +               service->base.userdata = params->userdata;
9033 +               service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
9034 +               service->ref_count     = 1;
9035 +               service->srvstate      = VCHIQ_SRVSTATE_FREE;
9036 +               service->localport     = VCHIQ_PORT_FREE;
9037 +               service->remoteport    = VCHIQ_PORT_FREE;
9038 +
9039 +               service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
9040 +                       VCHIQ_FOURCC_INVALID : params->fourcc;
9041 +               service->client_id     = 0;
9042 +               service->auto_close    = 1;
9043 +               service->sync          = 0;
9044 +               service->closing       = 0;
9045 +               atomic_set(&service->poll_flags, 0);
9046 +               service->version       = params->version;
9047 +               service->version_min   = params->version_min;
9048 +               service->state         = state;
9049 +               service->instance      = instance;
9050 +               service->service_use_count = 0;
9051 +               init_bulk_queue(&service->bulk_tx);
9052 +               init_bulk_queue(&service->bulk_rx);
9053 +               sema_init(&service->remove_event, 0);
9054 +               sema_init(&service->bulk_remove_event, 0);
9055 +               mutex_init(&service->bulk_mutex);
9056 +               memset(&service->stats, 0, sizeof(service->stats));
9057 +       } else {
9058 +               vchiq_log_error(vchiq_core_log_level,
9059 +                       "Out of memory");
9060 +       }
9061 +
9062 +       if (service) {
9063 +               VCHIQ_SERVICE_T **pservice = NULL;
9064 +               int i;
9065 +
9066 +               /* Although it is perfectly possible to use service_spinlock
9067 +               ** to protect the creation of services, it is overkill as it
9068 +               ** disables interrupts while the array is searched.
9069 +               ** The only danger is of another thread trying to create a
9070 +               ** service - service deletion is safe.
9071 +               ** Therefore it is preferable to use state->mutex which,
9072 +               ** although slower to claim, doesn't block interrupts while
9073 +               ** it is held.
9074 +               */
9075 +
9076 +               mutex_lock(&state->mutex);
9077 +
9078 +               /* Prepare to use a previously unused service */
9079 +               if (state->unused_service < VCHIQ_MAX_SERVICES)
9080 +                       pservice = &state->services[state->unused_service];
9081 +
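+               /* A client (OPENING) service can take the first free entry,
+               ** searching upwards; a server scans downwards and must also
+               ** check that no other server is already using this fourcc
+               ** with a different instance or callback. */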
9082 +               if (srvstate == VCHIQ_SRVSTATE_OPENING) {
9083 +                       for (i = 0; i < state->unused_service; i++) {
9084 +                               VCHIQ_SERVICE_T *srv = state->services[i];
9085 +                               if (!srv) {
9086 +                                       pservice = &state->services[i];
9087 +                                       break;
9088 +                               }
9089 +                       }
9090 +               } else {
9091 +                       for (i = (state->unused_service - 1); i >= 0; i--) {
9092 +                               VCHIQ_SERVICE_T *srv = state->services[i];
9093 +                               if (!srv)
9094 +                                       pservice = &state->services[i];
9095 +                               else if ((srv->public_fourcc == params->fourcc)
9096 +                                       && ((srv->instance != instance) ||
9097 +                                       (srv->base.callback !=
9098 +                                       params->callback))) {
9099 +                                       /* There is another server using this
9100 +                                       ** fourcc which doesn't match. */
9101 +                                       pservice = NULL;
9102 +                                       break;
9103 +                               }
9104 +                       }
9105 +               }
9106 +
9107 +               if (pservice) {
9108 +                       service->localport = (pservice - state->services);
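+                       /* Build a non-zero handle from a rolling sequence
+                       ** number, the state id and the local port index. */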
9109 +                       if (!handle_seq)
9110 +                               handle_seq = VCHIQ_MAX_STATES *
9111 +                                        VCHIQ_MAX_SERVICES;
9112 +                       service->handle = handle_seq |
9113 +                               (state->id * VCHIQ_MAX_SERVICES) |
9114 +                               service->localport;
9115 +                       handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
9116 +                       *pservice = service;
9117 +                       if (pservice == &state->services[state->unused_service])
9118 +                               state->unused_service++;
9119 +               }
9120 +
9121 +               mutex_unlock(&state->mutex);
9122 +
9123 +               if (!pservice) {
9124 +                       kfree(service);
9125 +                       service = NULL;
9126 +               }
9127 +       }
9128 +
9129 +       if (service) {
9130 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
9131 +                       &state->service_quotas[service->localport];
9132 +               service_quota->slot_quota = state->default_slot_quota;
9133 +               service_quota->message_quota = state->default_message_quota;
9134 +               if (service_quota->slot_use_count == 0)
9135 +                       service_quota->previous_tx_index =
9136 +                               SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
9137 +                               - 1;
9138 +
9139 +               /* Bring this service online */
9140 +               vchiq_set_service_state(service, srvstate);
9141 +
9142 +               vchiq_log_info(vchiq_core_msg_log_level,
9143 +                       "%s Service %c%c%c%c SrcPort:%d",
9144 +                       (srvstate == VCHIQ_SRVSTATE_OPENING)
9145 +                       ? "Open" : "Add",
9146 +                       VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
9147 +                       service->localport);
9148 +       }
9149 +
9150 +       /* Don't unlock the service - leave it with a ref_count of 1. */
9151 +
9152 +       return service;
9153 +}
9154 +
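+/* Sends an OPEN request carrying the fourcc, client id and supported version
+ * range, then waits on remove_event for the other side to accept or reject
+ * it. A signal while waiting results in VCHIQ_RETRY. */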
9155 +VCHIQ_STATUS_T
9156 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
9157 +{
9158 +       struct vchiq_open_payload payload = {
9159 +               service->base.fourcc,
9160 +               client_id,
9161 +               service->version,
9162 +               service->version_min
9163 +       };
9164 +       VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
9165 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9166 +
9167 +       service->client_id = client_id;
9168 +       vchiq_use_service_internal(service);
9169 +       status = queue_message(service->state, NULL,
9170 +               VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
9171 +               &body, 1, sizeof(payload), 1);
9172 +       if (status == VCHIQ_SUCCESS) {
9173 +               if (down_interruptible(&service->remove_event) != 0) {
9174 +                       status = VCHIQ_RETRY;
9175 +                       vchiq_release_service_internal(service);
9176 +               } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
9177 +                       (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
9178 +                       if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
9179 +                               vchiq_log_error(vchiq_core_log_level,
9180 +                                       "%d: osi - srvstate = %s (ref %d)",
9181 +                                       service->state->id,
9182 +                                       srvstate_names[service->srvstate],
9183 +                                       service->ref_count);
9184 +                       status = VCHIQ_ERROR;
9185 +                       VCHIQ_SERVICE_STATS_INC(service, error_count);
9186 +                       vchiq_release_service_internal(service);
9187 +               }
9188 +       }
9189 +       return status;
9190 +}
9191 +
9192 +static void
9193 +release_service_messages(VCHIQ_SERVICE_T *service)
9194 +{
9195 +       VCHIQ_STATE_T *state = service->state;
9196 +       int slot_last = state->remote->slot_last;
9197 +       int i;
9198 +
9199 +       /* Release any claimed messages */
9200 +       for (i = state->remote->slot_first; i <= slot_last; i++) {
9201 +               VCHIQ_SLOT_INFO_T *slot_info =
9202 +                       SLOT_INFO_FROM_INDEX(state, i);
9203 +               if (slot_info->release_count != slot_info->use_count) {
9204 +                       char *data =
9205 +                               (char *)SLOT_DATA_FROM_INDEX(state, i);
9206 +                       unsigned int pos, end;
9207 +
9208 +                       end = VCHIQ_SLOT_SIZE;
9209 +                       if (data == state->rx_data)
9210 +                               /* This buffer is still being read from - stop
9211 +                               ** at the current read position */
9212 +                               end = state->rx_pos & VCHIQ_SLOT_MASK;
9213 +
9214 +                       pos = 0;
9215 +
9216 +                       while (pos < end) {
9217 +                               VCHIQ_HEADER_T *header =
9218 +                                       (VCHIQ_HEADER_T *)(data + pos);
9219 +                               int msgid = header->msgid;
9220 +                               int port = VCHIQ_MSG_DSTPORT(msgid);
9221 +                               if ((port == service->localport) &&
9222 +                                       (msgid & VCHIQ_MSGID_CLAIMED)) {
9223 +                                       vchiq_log_info(vchiq_core_log_level,
9224 +                                               "  fsi - hdr %x",
9225 +                                               (unsigned int)header);
9226 +                                       release_slot(state, slot_info, header,
9227 +                                               NULL);
9228 +                               }
9229 +                               pos += calc_stride(header->size);
9230 +                               if (pos > VCHIQ_SLOT_SIZE) {
9231 +                                       vchiq_log_error(vchiq_core_log_level,
9232 +                                               "fsi - pos %x: header %x, "
9233 +                                               "msgid %x, header->msgid %x, "
9234 +                                               "header->size %x",
9235 +                                               pos, (unsigned int)header,
9236 +                                               msgid, header->msgid,
9237 +                                               header->size);
9238 +                                       WARN(1, "invalid slot position\n");
9239 +                               }
9240 +                       }
9241 +               }
9242 +       }
9243 +}
9244 +
9245 +static int
9246 +do_abort_bulks(VCHIQ_SERVICE_T *service)
9247 +{
9248 +       VCHIQ_STATUS_T status;
9249 +
9250 +       /* Abort any outstanding bulk transfers */
9251 +       if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
9252 +               return 0;
9253 +       abort_outstanding_bulks(service, &service->bulk_tx);
9254 +       abort_outstanding_bulks(service, &service->bulk_rx);
9255 +       mutex_unlock(&service->bulk_mutex);
9256 +
9257 +       status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
9258 +       if (status == VCHIQ_SUCCESS)
9259 +               status = notify_bulks(service, &service->bulk_rx,
9260 +                       0/*!retry_poll*/);
9261 +       return (status == VCHIQ_SUCCESS);
9262 +}
9263 +
9264 +static VCHIQ_STATUS_T
9265 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
9266 +{
9267 +       VCHIQ_STATUS_T status;
9268 +       int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
9269 +       int newstate;
9270 +
9271 +       switch (service->srvstate) {
9272 +       case VCHIQ_SRVSTATE_OPEN:
9273 +       case VCHIQ_SRVSTATE_CLOSESENT:
9274 +       case VCHIQ_SRVSTATE_CLOSERECVD:
9275 +               if (is_server) {
9276 +                       if (service->auto_close) {
9277 +                               service->client_id = 0;
9278 +                               service->remoteport = VCHIQ_PORT_FREE;
9279 +                               newstate = VCHIQ_SRVSTATE_LISTENING;
9280 +                       } else
9281 +                               newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
9282 +               } else
9283 +                       newstate = VCHIQ_SRVSTATE_CLOSED;
9284 +               vchiq_set_service_state(service, newstate);
9285 +               break;
9286 +       case VCHIQ_SRVSTATE_LISTENING:
9287 +               break;
9288 +       default:
9289 +               vchiq_log_error(vchiq_core_log_level,
9290 +                       "close_service_complete(%x) called in state %s",
9291 +                       service->handle, srvstate_names[service->srvstate]);
9292 +               WARN(1, "close_service_complete in unexpected state\n");
9293 +               return VCHIQ_ERROR;
9294 +       }
9295 +
9296 +       status = make_service_callback(service,
9297 +               VCHIQ_SERVICE_CLOSED, NULL, NULL);
9298 +
9299 +       if (status != VCHIQ_RETRY) {
9300 +               int uc = service->service_use_count;
9301 +               int i;
9302 +               /* Complete the close process */
9303 +               for (i = 0; i < uc; i++)
9304 +                       /* cater for cases where close is forced and the
9305 +                       ** client may not close all its handles */
9306 +                       vchiq_release_service_internal(service);
9307 +
9308 +               service->client_id = 0;
9309 +               service->remoteport = VCHIQ_PORT_FREE;
9310 +
9311 +               if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
9312 +                       vchiq_free_service_internal(service);
9313 +               else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
9314 +                       if (is_server)
9315 +                               service->closing = 0;
9316 +
9317 +                       up(&service->remove_event);
9318 +               }
9319 +       } else
9320 +               vchiq_set_service_state(service, failstate);
9321 +
9322 +       return status;
9323 +}
9324 +
9325 +/* Called by the slot handler */
9326 +VCHIQ_STATUS_T
9327 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
9328 +{
9329 +       VCHIQ_STATE_T *state = service->state;
9330 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9331 +       int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
9332 +
9333 +       vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
9334 +               service->state->id, service->localport, close_recvd,
9335 +               srvstate_names[service->srvstate]);
9336 +
9337 +       switch (service->srvstate) {
9338 +       case VCHIQ_SRVSTATE_CLOSED:
9339 +       case VCHIQ_SRVSTATE_HIDDEN:
9340 +       case VCHIQ_SRVSTATE_LISTENING:
9341 +       case VCHIQ_SRVSTATE_CLOSEWAIT:
9342 +               if (close_recvd)
9343 +                       vchiq_log_error(vchiq_core_log_level,
9344 +                               "vchiq_close_service_internal(1) called "
9345 +                               "in state %s",
9346 +                               srvstate_names[service->srvstate]);
9347 +               else if (is_server) {
9348 +                       if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
9349 +                               status = VCHIQ_ERROR;
9350 +                       } else {
9351 +                               service->client_id = 0;
9352 +                               service->remoteport = VCHIQ_PORT_FREE;
9353 +                               if (service->srvstate ==
9354 +                                       VCHIQ_SRVSTATE_CLOSEWAIT)
9355 +                                       vchiq_set_service_state(service,
9356 +                                               VCHIQ_SRVSTATE_LISTENING);
9357 +                       }
9358 +                       up(&service->remove_event);
9359 +               } else
9360 +                       vchiq_free_service_internal(service);
9361 +               break;
9362 +       case VCHIQ_SRVSTATE_OPENING:
9363 +               if (close_recvd) {
9364 +                       /* The open was rejected - tell the user */
9365 +                       vchiq_set_service_state(service,
9366 +                               VCHIQ_SRVSTATE_CLOSEWAIT);
9367 +                       up(&service->remove_event);
9368 +               } else {
9369 +                       /* Shutdown mid-open - let the other side know */
9370 +                       status = queue_message(state, service,
9371 +                               VCHIQ_MAKE_MSG
9372 +                               (VCHIQ_MSG_CLOSE,
9373 +                               service->localport,
9374 +                               VCHIQ_MSG_DSTPORT(service->remoteport)),
9375 +                               NULL, 0, 0, 0);
9376 +               }
9377 +               break;
9378 +
9379 +       case VCHIQ_SRVSTATE_OPENSYNC:
9380 +               mutex_lock(&state->sync_mutex);
9381 +               /* Drop through */
9382 +
9383 +       case VCHIQ_SRVSTATE_OPEN:
9384 +               if (state->is_master || close_recvd) {
9385 +                       if (!do_abort_bulks(service))
9386 +                               status = VCHIQ_RETRY;
9387 +               }
9388 +
9389 +               release_service_messages(service);
9390 +
9391 +               if (status == VCHIQ_SUCCESS)
9392 +                       status = queue_message(state, service,
9393 +                               VCHIQ_MAKE_MSG
9394 +                               (VCHIQ_MSG_CLOSE,
9395 +                               service->localport,
9396 +                               VCHIQ_MSG_DSTPORT(service->remoteport)),
9397 +                               NULL, 0, 0, 0);
9398 +
9399 +               if (status == VCHIQ_SUCCESS) {
9400 +                       if (!close_recvd)
9401 +                               break;
9402 +               } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
9403 +                       mutex_unlock(&state->sync_mutex);
9404 +                       break;
9405 +               } else
9406 +                       break;
9407 +
9408 +               status = close_service_complete(service,
9409 +                               VCHIQ_SRVSTATE_CLOSERECVD);
9410 +               break;
9411 +
9412 +       case VCHIQ_SRVSTATE_CLOSESENT:
9413 +               if (!close_recvd)
9414 +                       /* This happens when a process is killed mid-close */
9415 +                       break;
9416 +
9417 +               if (!state->is_master) {
9418 +                       if (!do_abort_bulks(service)) {
9419 +                               status = VCHIQ_RETRY;
9420 +                               break;
9421 +                       }
9422 +               }
9423 +
9424 +               if (status == VCHIQ_SUCCESS)
9425 +                       status = close_service_complete(service,
9426 +                               VCHIQ_SRVSTATE_CLOSERECVD);
9427 +               break;
9428 +
9429 +       case VCHIQ_SRVSTATE_CLOSERECVD:
9430 +               if (!close_recvd && is_server)
9431 +                       /* Force into LISTENING mode */
9432 +                       vchiq_set_service_state(service,
9433 +                               VCHIQ_SRVSTATE_LISTENING);
9434 +               status = close_service_complete(service,
9435 +                       VCHIQ_SRVSTATE_CLOSERECVD);
9436 +               break;
9437 +
9438 +       default:
9439 +               vchiq_log_error(vchiq_core_log_level,
9440 +                       "vchiq_close_service_internal(%d) called in state %s",
9441 +                       close_recvd, srvstate_names[service->srvstate]);
9442 +               break;
9443 +       }
9444 +
9445 +       return status;
9446 +}
9447 +
9448 +/* Called from the application process upon process death */
9449 +void
9450 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
9451 +{
9452 +       VCHIQ_STATE_T *state = service->state;
9453 +
9454 +       vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
9455 +               state->id, service->localport, service->remoteport);
9456 +
9457 +       mark_service_closing(service);
9458 +
9459 +       /* Mark the service for removal by the slot handler */
9460 +       request_poll(state, service, VCHIQ_POLL_REMOVE);
9461 +}
9462 +
9463 +/* Called from the slot handler */
9464 +void
9465 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
9466 +{
9467 +       VCHIQ_STATE_T *state = service->state;
9468 +
9469 +       vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
9470 +               state->id, service->localport);
9471 +
9472 +       switch (service->srvstate) {
9473 +       case VCHIQ_SRVSTATE_OPENING:
9474 +       case VCHIQ_SRVSTATE_CLOSED:
9475 +       case VCHIQ_SRVSTATE_HIDDEN:
9476 +       case VCHIQ_SRVSTATE_LISTENING:
9477 +       case VCHIQ_SRVSTATE_CLOSEWAIT:
9478 +               break;
9479 +       default:
9480 +               vchiq_log_error(vchiq_core_log_level,
9481 +                       "%d: fsi - (%d) in state %s",
9482 +                       state->id, service->localport,
9483 +                       srvstate_names[service->srvstate]);
9484 +               return;
9485 +       }
9486 +
9487 +       vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
9488 +
9489 +       up(&service->remove_event);
9490 +
9491 +       /* Release the initial lock */
9492 +       unlock_service(service);
9493 +}
9494 +
9495 +VCHIQ_STATUS_T
9496 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
9497 +{
9498 +       VCHIQ_SERVICE_T *service;
9499 +       int i;
9500 +
9501 +       /* Find all services registered to this client and enable them. */
9502 +       i = 0;
9503 +       while ((service = next_service_by_instance(state, instance,
9504 +               &i)) != NULL) {
9505 +               if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
9506 +                       vchiq_set_service_state(service,
9507 +                               VCHIQ_SRVSTATE_LISTENING);
9508 +               unlock_service(service);
9509 +       }
9510 +
9511 +       if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
9512 +               if (queue_message(state, NULL,
9513 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
9514 +                       0, 1) == VCHIQ_RETRY)
9515 +                       return VCHIQ_RETRY;
9516 +
9517 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
9518 +       }
9519 +
9520 +       if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
9521 +               if (down_interruptible(&state->connect) != 0)
9522 +                       return VCHIQ_RETRY;
9523 +
9524 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
9525 +               up(&state->connect);
9526 +       }
9527 +
9528 +       return VCHIQ_SUCCESS;
9529 +}
9530 +
9531 +VCHIQ_STATUS_T
9532 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
9533 +{
9534 +       VCHIQ_SERVICE_T *service;
9535 +       int i;
9536 +
9537 +       /* Find all services registered to this client and remove them. */
9538 +       i = 0;
9539 +       while ((service = next_service_by_instance(state, instance,
9540 +               &i)) != NULL) {
9541 +               (void)vchiq_remove_service(service->handle);
9542 +               unlock_service(service);
9543 +       }
9544 +
9545 +       return VCHIQ_SUCCESS;
9546 +}
9547 +
9548 +VCHIQ_STATUS_T
9549 +vchiq_pause_internal(VCHIQ_STATE_T *state)
9550 +{
9551 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9552 +
9553 +       switch (state->conn_state) {
9554 +       case VCHIQ_CONNSTATE_CONNECTED:
9555 +               /* Request a pause */
9556 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
9557 +               request_poll(state, NULL, 0);
9558 +               break;
9559 +       default:
9560 +               vchiq_log_error(vchiq_core_log_level,
9561 +                       "vchiq_pause_internal in state %s\n",
9562 +                       conn_state_names[state->conn_state]);
9563 +               status = VCHIQ_ERROR;
9564 +               VCHIQ_STATS_INC(state, error_count);
9565 +               break;
9566 +       }
9567 +
9568 +       return status;
9569 +}
9570 +
9571 +VCHIQ_STATUS_T
9572 +vchiq_resume_internal(VCHIQ_STATE_T *state)
9573 +{
9574 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9575 +
9576 +       if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
9577 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
9578 +               request_poll(state, NULL, 0);
9579 +       } else {
9580 +               status = VCHIQ_ERROR;
9581 +               VCHIQ_STATS_INC(state, error_count);
9582 +       }
9583 +
9584 +       return status;
9585 +}
9586 +
9587 +VCHIQ_STATUS_T
9588 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
9589 +{
9590 +       /* Unregister the service */
9591 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9592 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9593 +
9594 +       if (!service)
9595 +               return VCHIQ_ERROR;
9596 +
9597 +       vchiq_log_info(vchiq_core_log_level,
9598 +               "%d: close_service:%d",
9599 +               service->state->id, service->localport);
9600 +
9601 +       if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9602 +               (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
9603 +               (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
9604 +               unlock_service(service);
9605 +               return VCHIQ_ERROR;
9606 +       }
9607 +
9608 +       mark_service_closing(service);
9609 +
9610 +       if (current == service->state->slot_handler_thread) {
9611 +               status = vchiq_close_service_internal(service,
9612 +                       0/*!close_recvd*/);
9613 +               BUG_ON(status == VCHIQ_RETRY);
9614 +       } else {
9615 +               /* Mark the service for termination by the slot handler */
9616 +               request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
9617 +       }
9618 +
9619 +       while (1) {
9620 +               if (down_interruptible(&service->remove_event) != 0) {
9621 +                       status = VCHIQ_RETRY;
9622 +                       break;
9623 +               }
9624 +
9625 +               if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9626 +                       (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
9627 +                       (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9628 +                       break;
9629 +
9630 +               vchiq_log_warning(vchiq_core_log_level,
9631 +                       "%d: close_service:%d - waiting in state %s",
9632 +                       service->state->id, service->localport,
9633 +                       srvstate_names[service->srvstate]);
9634 +       }
9635 +
9636 +       if ((status == VCHIQ_SUCCESS) &&
9637 +               (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
9638 +               (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
9639 +               status = VCHIQ_ERROR;
9640 +
9641 +       unlock_service(service);
9642 +
9643 +       return status;
9644 +}
9645 +
9646 +VCHIQ_STATUS_T
9647 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
9648 +{
9649 +       /* Unregister the service */
9650 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9651 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
9652 +
9653 +       if (!service)
9654 +               return VCHIQ_ERROR;
9655 +
9656 +       vchiq_log_info(vchiq_core_log_level,
9657 +               "%d: remove_service:%d",
9658 +               service->state->id, service->localport);
9659 +
9660 +       if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
9661 +               unlock_service(service);
9662 +               return VCHIQ_ERROR;
9663 +       }
9664 +
9665 +       mark_service_closing(service);
9666 +
9667 +       if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9668 +               (current == service->state->slot_handler_thread)) {
9669 +               /* Make it look like a client, because it must be removed and
9670 +                  not left in the LISTENING state. */
9671 +               service->public_fourcc = VCHIQ_FOURCC_INVALID;
9672 +
9673 +               status = vchiq_close_service_internal(service,
9674 +                       0/*!close_recvd*/);
9675 +               BUG_ON(status == VCHIQ_RETRY);
9676 +       } else {
9677 +               /* Mark the service for removal by the slot handler */
9678 +               request_poll(service->state, service, VCHIQ_POLL_REMOVE);
9679 +       }
9680 +       while (1) {
9681 +               if (down_interruptible(&service->remove_event) != 0) {
9682 +                       status = VCHIQ_RETRY;
9683 +                       break;
9684 +               }
9685 +
9686 +               if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
9687 +                       (service->srvstate == VCHIQ_SRVSTATE_OPEN))
9688 +                       break;
9689 +
9690 +               vchiq_log_warning(vchiq_core_log_level,
9691 +                       "%d: remove_service:%d - waiting in state %s",
9692 +                       service->state->id, service->localport,
9693 +                       srvstate_names[service->srvstate]);
9694 +       }
9695 +
9696 +       if ((status == VCHIQ_SUCCESS) &&
9697 +               (service->srvstate != VCHIQ_SRVSTATE_FREE))
9698 +               status = VCHIQ_ERROR;
9699 +
9700 +       unlock_service(service);
9701 +
9702 +       return status;
9703 +}
9704 +
9705 +
9706 +/* This function may be called by kernel threads or user threads.
9707 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
9708 + * received and the call should be retried after being returned to user
9709 + * context.
9710 + * When called in blocking mode, the userdata field points to a bulk_waiter
9711 + * structure.
9712 + */
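+/*
+ * Illustrative sketch only (handle, buf and len are hypothetical): a
+ * blocking transmit based on the mode handling below might look like
+ *
+ *     struct bulk_waiter waiter;
+ *     VCHIQ_STATUS_T st = vchiq_bulk_transfer(handle,
+ *             VCHI_MEM_HANDLE_INVALID, buf, len, &waiter,
+ *             VCHIQ_BULK_MODE_BLOCKING, VCHIQ_BULK_TRANSMIT);
+ *
+ * In BLOCKING mode userdata is treated as a bulk_waiter and the call sleeps
+ * on its event until the bulk completes or is aborted; VCHIQ_BULK_MODE_WAITING
+ * resumes the wait on a bulk submitted by an earlier, interrupted call.
+ */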
9713 +VCHIQ_STATUS_T
9714 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9715 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9716 +       VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
9717 +{
9718 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9719 +       VCHIQ_BULK_QUEUE_T *queue;
9720 +       VCHIQ_BULK_T *bulk;
9721 +       VCHIQ_STATE_T *state;
9722 +       struct bulk_waiter *bulk_waiter = NULL;
9723 +       const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
9724 +       const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
9725 +               VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
9726 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
9727 +
9728 +       if (!service ||
9729 +                (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
9730 +                ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
9731 +                (vchiq_check_service(service) != VCHIQ_SUCCESS))
9732 +               goto error_exit;
9733 +
9734 +       switch (mode) {
9735 +       case VCHIQ_BULK_MODE_NOCALLBACK:
9736 +       case VCHIQ_BULK_MODE_CALLBACK:
9737 +               break;
9738 +       case VCHIQ_BULK_MODE_BLOCKING:
9739 +               bulk_waiter = (struct bulk_waiter *)userdata;
9740 +               sema_init(&bulk_waiter->event, 0);
9741 +               bulk_waiter->actual = 0;
9742 +               bulk_waiter->bulk = NULL;
9743 +               break;
9744 +       case VCHIQ_BULK_MODE_WAITING:
9745 +               bulk_waiter = (struct bulk_waiter *)userdata;
9746 +               bulk = bulk_waiter->bulk;
9747 +               goto waiting;
9748 +       default:
9749 +               goto error_exit;
9750 +       }
9751 +
9752 +       state = service->state;
9753 +
9754 +       queue = (dir == VCHIQ_BULK_TRANSMIT) ?
9755 +               &service->bulk_tx : &service->bulk_rx;
9756 +
9757 +       if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
9758 +               status = VCHIQ_RETRY;
9759 +               goto error_exit;
9760 +       }
9761 +
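+       /* If the bulk queue for this direction is full, drop the mutex and
+       ** wait for a bulk to be removed before trying again. */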
9762 +       if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
9763 +               VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
9764 +               do {
9765 +                       mutex_unlock(&service->bulk_mutex);
9766 +                       if (down_interruptible(&service->bulk_remove_event)
9767 +                               != 0) {
9768 +                               status = VCHIQ_RETRY;
9769 +                               goto error_exit;
9770 +                       }
9771 +                       if (mutex_lock_interruptible(&service->bulk_mutex)
9772 +                               != 0) {
9773 +                               status = VCHIQ_RETRY;
9774 +                               goto error_exit;
9775 +                       }
9776 +               } while (queue->local_insert == queue->remove +
9777 +                               VCHIQ_NUM_SERVICE_BULKS);
9778 +       }
9779 +
9780 +       bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
9781 +
9782 +       bulk->mode = mode;
9783 +       bulk->dir = dir;
9784 +       bulk->userdata = userdata;
9785 +       bulk->size = size;
9786 +       bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
9787 +
9788 +       if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
9789 +               VCHIQ_SUCCESS)
9790 +               goto unlock_error_exit;
9791 +
9792 +       wmb();
9793 +
9794 +       vchiq_log_info(vchiq_core_log_level,
9795 +               "%d: bt (%d->%d) %cx %x@%x %x",
9796 +               state->id,
9797 +               service->localport, service->remoteport, dir_char,
9798 +               size, (unsigned int)bulk->data, (unsigned int)userdata);
9799 +
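+       /* On the master side the transfer is resolved locally and the slot
+       ** handler is polled for notification; on the slave side a
+       ** BULK_TX/BULK_RX message carrying the data pointer and size is
+       ** queued for the master instead. */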
9800 +       if (state->is_master) {
9801 +               queue->local_insert++;
9802 +               if (resolve_bulks(service, queue))
9803 +                       request_poll(state, service,
9804 +                               (dir == VCHIQ_BULK_TRANSMIT) ?
9805 +                               VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
9806 +       } else {
9807 +               int payload[2] = { (int)bulk->data, bulk->size };
9808 +               VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
9809 +
9810 +               status = queue_message(state, NULL,
9811 +                       VCHIQ_MAKE_MSG(dir_msgtype,
9812 +                               service->localport, service->remoteport),
9813 +                       &element, 1, sizeof(payload), 1);
9814 +               if (status != VCHIQ_SUCCESS) {
9815 +                       vchiq_complete_bulk(bulk);
9816 +                       goto unlock_error_exit;
9817 +               }
9818 +               queue->local_insert++;
9819 +       }
9820 +
9821 +       mutex_unlock(&service->bulk_mutex);
9822 +
9823 +       vchiq_log_trace(vchiq_core_log_level,
9824 +               "%d: bt:%d %cx li=%x ri=%x p=%x",
9825 +               state->id,
9826 +               service->localport, dir_char,
9827 +               queue->local_insert, queue->remote_insert, queue->process);
9828 +
9829 +waiting:
9830 +       unlock_service(service);
9831 +
9832 +       status = VCHIQ_SUCCESS;
9833 +
9834 +       if (bulk_waiter) {
9835 +               bulk_waiter->bulk = bulk;
9836 +               if (down_interruptible(&bulk_waiter->event) != 0)
9837 +                       status = VCHIQ_RETRY;
9838 +               else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
9839 +                       status = VCHIQ_ERROR;
9840 +       }
9841 +
9842 +       return status;
9843 +
9844 +unlock_error_exit:
9845 +       mutex_unlock(&service->bulk_mutex);
9846 +
9847 +error_exit:
9848 +       if (service)
9849 +               unlock_service(service);
9850 +       return status;
9851 +}
9852 +
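+/* Illustrative note (my_data is hypothetical): a message is supplied as an
+ * array of VCHIQ_ELEMENT_T {data, size} pairs, gathered into one message of
+ * at most VCHIQ_MAX_MSG_SIZE bytes, e.g.
+ *
+ *     VCHIQ_ELEMENT_T element = { &my_data, sizeof(my_data) };
+ *     status = vchiq_queue_message(handle, &element, 1);
+ */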
9853 +VCHIQ_STATUS_T
9854 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
9855 +       const VCHIQ_ELEMENT_T *elements, unsigned int count)
9856 +{
9857 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9858 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
9859 +
9860 +       unsigned int size = 0;
9861 +       unsigned int i;
9862 +
9863 +       if (!service ||
9864 +               (vchiq_check_service(service) != VCHIQ_SUCCESS))
9865 +               goto error_exit;
9866 +
9867 +       for (i = 0; i < (unsigned int)count; i++) {
9868 +               if (elements[i].size) {
9869 +                       if (elements[i].data == NULL) {
9870 +                               VCHIQ_SERVICE_STATS_INC(service, error_count);
9871 +                               goto error_exit;
9872 +                       }
9873 +                       size += elements[i].size;
9874 +               }
9875 +       }
9876 +
9877 +       if (size > VCHIQ_MAX_MSG_SIZE) {
9878 +               VCHIQ_SERVICE_STATS_INC(service, error_count);
9879 +               goto error_exit;
9880 +       }
9881 +
9882 +       switch (service->srvstate) {
9883 +       case VCHIQ_SRVSTATE_OPEN:
9884 +               status = queue_message(service->state, service,
9885 +                               VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9886 +                                       service->localport,
9887 +                                       service->remoteport),
9888 +                               elements, count, size, 1);
9889 +               break;
9890 +       case VCHIQ_SRVSTATE_OPENSYNC:
9891 +               status = queue_message_sync(service->state, service,
9892 +                               VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
9893 +                                       service->localport,
9894 +                                       service->remoteport),
9895 +                               elements, count, size, 1);
9896 +               break;
9897 +       default:
9898 +               status = VCHIQ_ERROR;
9899 +               break;
9900 +       }
9901 +
9902 +error_exit:
9903 +       if (service)
9904 +               unlock_service(service);
9905 +
9906 +       return status;
9907 +}
9908 +
9909 +void
9910 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
9911 +{
9912 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9913 +       VCHIQ_SHARED_STATE_T *remote;
9914 +       VCHIQ_STATE_T *state;
9915 +       int slot_index;
9916 +
9917 +       if (!service)
9918 +               return;
9919 +
9920 +       state = service->state;
9921 +       remote = state->remote;
9922 +
9923 +       slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
9924 +
9925 +       if ((slot_index >= remote->slot_first) &&
9926 +               (slot_index <= remote->slot_last)) {
9927 +               int msgid = header->msgid;
9928 +               if (msgid & VCHIQ_MSGID_CLAIMED) {
9929 +                       VCHIQ_SLOT_INFO_T *slot_info =
9930 +                               SLOT_INFO_FROM_INDEX(state, slot_index);
9931 +
9932 +                       release_slot(state, slot_info, header, service);
9933 +               }
9934 +       } else if (slot_index == remote->slot_sync)
9935 +               release_message_sync(state, header);
9936 +
9937 +       unlock_service(service);
9938 +}
9939 +
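+/* Mark the synchronous slot's message as padding and signal the remote
+ * sync_release event so that the other side can reuse the slot. */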
9940 +static void
9941 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
9942 +{
9943 +       header->msgid = VCHIQ_MSGID_PADDING;
9944 +       wmb();
9945 +       remote_event_signal(&state->remote->sync_release);
9946 +}
9947 +
9948 +VCHIQ_STATUS_T
9949 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
9950 +{
9951 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
9952 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9953 +
9954 +       if (!service ||
9955 +               (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
9956 +               !peer_version)
9957 +               goto exit;
9958 +       *peer_version = service->peer_version;
9959 +       status = VCHIQ_SUCCESS;
9960 +
9961 +exit:
9962 +       if (service)
9963 +               unlock_service(service);
9964 +       return status;
9965 +}
9966 +
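+/* Copies up to config_size bytes of the compile-time configuration into
+ * pconfig; a config_size larger than VCHIQ_CONFIG_T is rejected. */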
9967 +VCHIQ_STATUS_T
9968 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
9969 +       int config_size, VCHIQ_CONFIG_T *pconfig)
9970 +{
9971 +       VCHIQ_CONFIG_T config;
9972 +
9973 +       (void)instance;
9974 +
9975 +       config.max_msg_size           = VCHIQ_MAX_MSG_SIZE;
9976 +       config.bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
9977 +       config.max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
9978 +       config.max_services           = VCHIQ_MAX_SERVICES;
9979 +       config.version                = VCHIQ_VERSION;
9980 +       config.version_min            = VCHIQ_VERSION_MIN;
9981 +
9982 +       if (config_size > sizeof(VCHIQ_CONFIG_T))
9983 +               return VCHIQ_ERROR;
9984 +
9985 +       memcpy(pconfig, &config,
9986 +               min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
9987 +
9988 +       return VCHIQ_SUCCESS;
9989 +}
9990 +
9991 +VCHIQ_STATUS_T
9992 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
9993 +       VCHIQ_SERVICE_OPTION_T option, int value)
9994 +{
9995 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
9996 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
9997 +
9998 +       if (service) {
9999 +               switch (option) {
10000 +               case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
10001 +                       service->auto_close = value;
10002 +                       status = VCHIQ_SUCCESS;
10003 +                       break;
10004 +
10005 +               case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
10006 +                       VCHIQ_SERVICE_QUOTA_T *service_quota =
10007 +                               &service->state->service_quotas[
10008 +                                       service->localport];
10009 +                       if (value == 0)
10010 +                               value = service->state->default_slot_quota;
10011 +                       if ((value >= service_quota->slot_use_count) &&
10012 +                                (value < (unsigned short)~0)) {
10013 +                               service_quota->slot_quota = value;
10014 +                               if ((value >= service_quota->slot_use_count) &&
10015 +                                       (service_quota->message_quota >=
10016 +                                        service_quota->message_use_count)) {
10017 +                                       /* Signal the service that it may have
10018 +                                       ** dropped below its quota */
10019 +                                       up(&service_quota->quota_event);
10020 +                               }
10021 +                               status = VCHIQ_SUCCESS;
10022 +                       }
10023 +               } break;
10024 +
10025 +               case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
10026 +                       VCHIQ_SERVICE_QUOTA_T *service_quota =
10027 +                               &service->state->service_quotas[
10028 +                                       service->localport];
10029 +                       if (value == 0)
10030 +                               value = service->state->default_message_quota;
10031 +                       if ((value >= service_quota->message_use_count) &&
10032 +                                (value < (unsigned short)~0)) {
10033 +                               service_quota->message_quota = value;
10034 +                               if ((value >=
10035 +                                       service_quota->message_use_count) &&
10036 +                                       (service_quota->slot_quota >=
10037 +                                       service_quota->slot_use_count))
10038 +                                       /* Signal the service that it may have
10039 +                                       ** dropped below its quota */
10040 +                                       up(&service_quota->quota_event);
10041 +                               status = VCHIQ_SUCCESS;
10042 +                       }
10043 +               } break;
10044 +
10045 +               case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
10046 +                       if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
10047 +                               (service->srvstate ==
10048 +                               VCHIQ_SRVSTATE_LISTENING)) {
10049 +                               service->sync = value;
10050 +                               status = VCHIQ_SUCCESS;
10051 +                       }
10052 +                       break;
10053 +
10054 +               default:
10055 +                       break;
10056 +               }
10057 +               unlock_service(service);
10058 +       }
10059 +
10060 +       return status;
10061 +}
10062 +
10063 +void
10064 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
10065 +       VCHIQ_SHARED_STATE_T *shared, const char *label)
10066 +{
10067 +       static const char *const debug_names[] = {
10068 +               "<entries>",
10069 +               "SLOT_HANDLER_COUNT",
10070 +               "SLOT_HANDLER_LINE",
10071 +               "PARSE_LINE",
10072 +               "PARSE_HEADER",
10073 +               "PARSE_MSGID",
10074 +               "AWAIT_COMPLETION_LINE",
10075 +               "DEQUEUE_MESSAGE_LINE",
10076 +               "SERVICE_CALLBACK_LINE",
10077 +               "MSG_QUEUE_FULL_COUNT",
10078 +               "COMPLETION_QUEUE_FULL_COUNT"
10079 +       };
10080 +       int i;
10081 +
10082 +       char buf[80];
10083 +       int len;
10084 +       len = snprintf(buf, sizeof(buf),
10085 +               "  %s: slots %d-%d tx_pos=%x recycle=%x",
10086 +               label, shared->slot_first, shared->slot_last,
10087 +               shared->tx_pos, shared->slot_queue_recycle);
10088 +       vchiq_dump(dump_context, buf, len + 1);
10089 +
10090 +       len = snprintf(buf, sizeof(buf),
10091 +               "    Slots claimed:");
10092 +       vchiq_dump(dump_context, buf, len + 1);
10093 +
10094 +       for (i = shared->slot_first; i <= shared->slot_last; i++) {
10095 +               VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
10096 +               if (slot_info.use_count != slot_info.release_count) {
10097 +                       len = snprintf(buf, sizeof(buf),
10098 +                               "      %d: %d/%d", i, slot_info.use_count,
10099 +                               slot_info.release_count);
10100 +                       vchiq_dump(dump_context, buf, len + 1);
10101 +               }
10102 +       }
10103 +
10104 +       for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
10105 +               len = snprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
10106 +                       debug_names[i], shared->debug[i], shared->debug[i]);
10107 +               vchiq_dump(dump_context, buf, len + 1);
10108 +       }
10109 +}
10110 +
10111 +void
10112 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
10113 +{
10114 +       char buf[80];
10115 +       int len;
10116 +       int i;
10117 +
10118 +       len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
10119 +               conn_state_names[state->conn_state]);
10120 +       vchiq_dump(dump_context, buf, len + 1);
10121 +
10122 +       len = snprintf(buf, sizeof(buf),
10123 +               "  tx_pos=%x(@%x), rx_pos=%x(@%x)",
10124 +               state->local->tx_pos,
10125 +               (uint32_t)state->tx_data +
10126 +                       (state->local_tx_pos & VCHIQ_SLOT_MASK),
10127 +               state->rx_pos,
10128 +               (uint32_t)state->rx_data +
10129 +                       (state->rx_pos & VCHIQ_SLOT_MASK));
10130 +       vchiq_dump(dump_context, buf, len + 1);
10131 +
10132 +       len = snprintf(buf, sizeof(buf),
10133 +               "  Version: %d (min %d)",
10134 +               VCHIQ_VERSION, VCHIQ_VERSION_MIN);
10135 +       vchiq_dump(dump_context, buf, len + 1);
10136 +
10137 +       if (VCHIQ_ENABLE_STATS) {
10138 +               len = snprintf(buf, sizeof(buf),
10139 +                       "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
10140 +                       "error_count=%d",
10141 +                       state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
10142 +                       state->stats.error_count);
10143 +               vchiq_dump(dump_context, buf, len + 1);
10144 +       }
10145 +
10146 +       len = snprintf(buf, sizeof(buf),
10147 +               "  Slots: %d available (%d data), %d recyclable, %d stalls "
10148 +               "(%d data)",
10149 +               ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
10150 +                       state->local_tx_pos) / VCHIQ_SLOT_SIZE,
10151 +               state->data_quota - state->data_use_count,
10152 +               state->local->slot_queue_recycle - state->slot_queue_available,
10153 +               state->stats.slot_stalls, state->stats.data_stalls);
10154 +       vchiq_dump(dump_context, buf, len + 1);
10155 +
10156 +       vchiq_dump_platform_state(dump_context);
10157 +
10158 +       vchiq_dump_shared_state(dump_context, state, state->local, "Local");
10159 +       vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
10160 +
10161 +       vchiq_dump_platform_instances(dump_context);
10162 +
10163 +       for (i = 0; i < state->unused_service; i++) {
10164 +               VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
10165 +
10166 +               if (service) {
10167 +                       vchiq_dump_service_state(dump_context, service);
10168 +                       unlock_service(service);
10169 +               }
10170 +       }
10171 +}
10172 +
10173 +void
10174 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
10175 +{
10176 +       char buf[80];
10177 +       int len;
10178 +
10179 +       len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
10180 +               service->localport, srvstate_names[service->srvstate],
10181 +               service->ref_count - 1); /* Don't include the lock just taken */
10182 +
10183 +       if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
10184 +               char remoteport[30];
10185 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
10186 +                       &service->state->service_quotas[service->localport];
10187 +               int fourcc = service->base.fourcc;
10188 +               int tx_pending, rx_pending;
10189 +               if (service->remoteport != VCHIQ_PORT_FREE) {
10190 +                       int len2 = snprintf(remoteport, sizeof(remoteport),
10191 +                               "%d", service->remoteport);
10192 +                       if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
10193 +                               snprintf(remoteport + len2,
10194 +                                       sizeof(remoteport) - len2,
10195 +                                       " (client %x)", service->client_id);
10196 +               } else
10197 +                       strcpy(remoteport, "n/a");
10198 +
10199 +               len += snprintf(buf + len, sizeof(buf) - len,
10200 +                       " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
10201 +                       VCHIQ_FOURCC_AS_4CHARS(fourcc),
10202 +                       remoteport,
10203 +                       service_quota->message_use_count,
10204 +                       service_quota->message_quota,
10205 +                       service_quota->slot_use_count,
10206 +                       service_quota->slot_quota);
10207 +
10208 +               vchiq_dump(dump_context, buf, len + 1);
10209 +
10210 +               tx_pending = service->bulk_tx.local_insert -
10211 +                       service->bulk_tx.remote_insert;
10212 +
10213 +               rx_pending = service->bulk_rx.local_insert -
10214 +                       service->bulk_rx.remote_insert;
10215 +
10216 +               len = snprintf(buf, sizeof(buf),
10217 +                       "  Bulk: tx_pending=%d (size %d),"
10218 +                       " rx_pending=%d (size %d)",
10219 +                       tx_pending,
10220 +                       tx_pending ? service->bulk_tx.bulks[
10221 +                       BULK_INDEX(service->bulk_tx.remove)].size : 0,
10222 +                       rx_pending,
10223 +                       rx_pending ? service->bulk_rx.bulks[
10224 +                       BULK_INDEX(service->bulk_rx.remove)].size : 0);
10225 +
10226 +               if (VCHIQ_ENABLE_STATS) {
10227 +                       vchiq_dump(dump_context, buf, len + 1);
10228 +
10229 +                       len = snprintf(buf, sizeof(buf),
10230 +                               "  Ctrl: tx_count=%d, tx_bytes=%llu, "
10231 +                               "rx_count=%d, rx_bytes=%llu",
10232 +                               service->stats.ctrl_tx_count,
10233 +                               service->stats.ctrl_tx_bytes,
10234 +                               service->stats.ctrl_rx_count,
10235 +                               service->stats.ctrl_rx_bytes);
10236 +                       vchiq_dump(dump_context, buf, len + 1);
10237 +
10238 +                       len = snprintf(buf, sizeof(buf),
10239 +                               "  Bulk: tx_count=%d, tx_bytes=%llu, "
10240 +                               "rx_count=%d, rx_bytes=%llu",
10241 +                               service->stats.bulk_tx_count,
10242 +                               service->stats.bulk_tx_bytes,
10243 +                               service->stats.bulk_rx_count,
10244 +                               service->stats.bulk_rx_bytes);
10245 +                       vchiq_dump(dump_context, buf, len + 1);
10246 +
10247 +                       len = snprintf(buf, sizeof(buf),
10248 +                               "  %d quota stalls, %d slot stalls, "
10249 +                               "%d bulk stalls, %d aborted, %d errors",
10250 +                               service->stats.quota_stalls,
10251 +                               service->stats.slot_stalls,
10252 +                               service->stats.bulk_stalls,
10253 +                               service->stats.bulk_aborted_count,
10254 +                               service->stats.error_count);
10255 +                }
10256 +       }
10257 +
10258 +       vchiq_dump(dump_context, buf, len + 1);
10259 +
10260 +       if (service->srvstate != VCHIQ_SRVSTATE_FREE)
10261 +               vchiq_dump_platform_service_state(dump_context, service);
10262 +}
10263 +
10264 +
10265 +void
10266 +vchiq_loud_error_header(void)
10267 +{
10268 +       vchiq_log_error(vchiq_core_log_level,
10269 +               "============================================================"
10270 +               "================");
10271 +       vchiq_log_error(vchiq_core_log_level,
10272 +               "============================================================"
10273 +               "================");
10274 +       vchiq_log_error(vchiq_core_log_level, "=====");
10275 +}
10276 +
10277 +void
10278 +vchiq_loud_error_footer(void)
10279 +{
10280 +       vchiq_log_error(vchiq_core_log_level, "=====");
10281 +       vchiq_log_error(vchiq_core_log_level,
10282 +               "============================================================"
10283 +               "================");
10284 +       vchiq_log_error(vchiq_core_log_level,
10285 +               "============================================================"
10286 +               "================");
10287 +}
10288 +
10289 +
10290 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
10291 +{
10292 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
10293 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10294 +               status = queue_message(state, NULL,
10295 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
10296 +                       NULL, 0, 0, 0);
10297 +       return status;
10298 +}
10299 +
10300 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
10301 +{
10302 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
10303 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10304 +               status = queue_message(state, NULL,
10305 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
10306 +                       NULL, 0, 0, 0);
10307 +       return status;
10308 +}
10309 +
10310 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
10311 +{
10312 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
10313 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
10314 +               status = queue_message(state, NULL,
10315 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
10316 +                       NULL, 0, 0, 0);
10317 +       return status;
10318 +}
10319 +
10320 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10321 +       size_t numBytes)
10322 +{
10323 +       const uint8_t  *mem = (const uint8_t *)voidMem;
10324 +       size_t          offset;
10325 +       char            lineBuf[100];
10326 +       char           *s;
10327 +
10328 +       while (numBytes > 0) {
10329 +               s = lineBuf;
10330 +
10331 +               for (offset = 0; offset < 16; offset++) {
10332 +                       if (offset < numBytes)
10333 +                               s += snprintf(s, 4, "%02x ", mem[offset]);
10334 +                       else
10335 +                               s += snprintf(s, 4, "   ");
10336 +               }
10337 +
10338 +               for (offset = 0; offset < 16; offset++) {
10339 +                       if (offset < numBytes) {
10340 +                               uint8_t ch = mem[offset];
10341 +
10342 +                               if ((ch < ' ') || (ch > '~'))
10343 +                                       ch = '.';
10344 +                               *s++ = (char)ch;
10345 +                       }
10346 +               }
10347 +               *s++ = '\0';
10348 +
10349 +               if ((label != NULL) && (*label != '\0'))
10350 +                       vchiq_log_trace(VCHIQ_LOG_TRACE,
10351 +                               "%s: %08x: %s", label, addr, lineBuf);
10352 +               else
10353 +                       vchiq_log_trace(VCHIQ_LOG_TRACE,
10354 +                               "%08x: %s", addr, lineBuf);
10355 +
10356 +               addr += 16;
10357 +               mem += 16;
10358 +               if (numBytes > 16)
10359 +                       numBytes -= 16;
10360 +               else
10361 +                       numBytes = 0;
10362 +       }
10363 +}
10364 --- /dev/null
10365 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
10366 @@ -0,0 +1,703 @@
10367 +/**
10368 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10369 + *
10370 + * Redistribution and use in source and binary forms, with or without
10371 + * modification, are permitted provided that the following conditions
10372 + * are met:
10373 + * 1. Redistributions of source code must retain the above copyright
10374 + *    notice, this list of conditions, and the following disclaimer,
10375 + *    without modification.
10376 + * 2. Redistributions in binary form must reproduce the above copyright
10377 + *    notice, this list of conditions and the following disclaimer in the
10378 + *    documentation and/or other materials provided with the distribution.
10379 + * 3. The names of the above-listed copyright holders may not be used
10380 + *    to endorse or promote products derived from this software without
10381 + *    specific prior written permission.
10382 + *
10383 + * ALTERNATIVELY, this software may be distributed under the terms of the
10384 + * GNU General Public License ("GPL") version 2, as published by the Free
10385 + * Software Foundation.
10386 + *
10387 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10388 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10389 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10390 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10391 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10392 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10393 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10394 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10395 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10396 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10397 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10398 + */
10399 +
10400 +#ifndef VCHIQ_CORE_H
10401 +#define VCHIQ_CORE_H
10402 +
10403 +#include <linux/mutex.h>
10404 +#include <linux/semaphore.h>
10405 +#include <linux/kthread.h>
10406 +
10407 +#include "vchiq_cfg.h"
10408 +
10409 +#include "vchiq.h"
10410 +
10411 +/* Run time control of log level, based on KERN_XXX level. */
10412 +#define VCHIQ_LOG_DEFAULT  4
10413 +#define VCHIQ_LOG_ERROR    3
10414 +#define VCHIQ_LOG_WARNING  4
10415 +#define VCHIQ_LOG_INFO     6
10416 +#define VCHIQ_LOG_TRACE    7
10417 +
10418 +#define VCHIQ_LOG_PREFIX   KERN_INFO "vchiq: "
10419 +
10420 +#ifndef vchiq_log_error
10421 +#define vchiq_log_error(cat, fmt, ...) \
10422 +       do { if (cat >= VCHIQ_LOG_ERROR) \
10423 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10424 +#endif
10425 +#ifndef vchiq_log_warning
10426 +#define vchiq_log_warning(cat, fmt, ...) \
10427 +       do { if (cat >= VCHIQ_LOG_WARNING) \
10428 +                printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10429 +#endif
10430 +#ifndef vchiq_log_info
10431 +#define vchiq_log_info(cat, fmt, ...) \
10432 +       do { if (cat >= VCHIQ_LOG_INFO) \
10433 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10434 +#endif
10435 +#ifndef vchiq_log_trace
10436 +#define vchiq_log_trace(cat, fmt, ...) \
10437 +       do { if (cat >= VCHIQ_LOG_TRACE) \
10438 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
10439 +#endif
10440 +
10441 +#define vchiq_loud_error(...) \
10442 +       vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
10443 +
10444 +#ifndef vchiq_static_assert
10445 +#define vchiq_static_assert(cond) __attribute__((unused)) \
10446 +       extern int vchiq_static_assert[(cond) ? 1 : -1]
10447 +#endif
10448 +
10449 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
10450 +
10451 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
10452 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
10453 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
10454 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
10455 +
10456 +#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
10457 +#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
10458 +#define VCHIQ_SLOT_ZERO_SLOTS  ((sizeof(VCHIQ_SLOT_ZERO_T) + \
10459 +       VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
10460 +
10461 +#define VCHIQ_MSG_PADDING            0  /* -                                 */
10462 +#define VCHIQ_MSG_CONNECT            1  /* -                                 */
10463 +#define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
10464 +#define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
10465 +#define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
10466 +#define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
10467 +#define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
10468 +#define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
10469 +#define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
10470 +#define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
10471 +#define VCHIQ_MSG_PAUSE             10  /* -                                 */
10472 +#define VCHIQ_MSG_RESUME            11  /* -                                 */
10473 +#define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
10474 +#define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
10475 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
10476 +
10477 +#define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
10478 +#define VCHIQ_PORT_FREE                0x1000
10479 +#define VCHIQ_PORT_IS_VALID(port)      (port < VCHIQ_PORT_FREE)
10480 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
10481 +       ((type<<24) | (srcport<<12) | (dstport<<0))
10482 +#define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)msgid >> 24)
10483 +#define VCHIQ_MSG_SRCPORT(msgid) \
10484 +       (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
10485 +#define VCHIQ_MSG_DSTPORT(msgid) \
10486 +       ((unsigned short)msgid & 0xfff)
10487 +
10488 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
10489 +       ((fourcc) >> 24) & 0xff, \
10490 +       ((fourcc) >> 16) & 0xff, \
10491 +       ((fourcc) >>  8) & 0xff, \
10492 +       (fourcc) & 0xff
10493 +
10494 +/* Ensure the fields are wide enough */
10495 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
10496 +       == 0);
10497 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
10498 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
10499 +       (unsigned int)VCHIQ_PORT_FREE);
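/*
 * Illustrative sketch, not part of the original header: a concrete decode of
 * the bitfield layout above (type in bits 31..24, source port in bits 23..12,
 * destination port in bits 11..0). The port numbers are arbitrary examples.
 */
vchiq_static_assert(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0) == 0x02003000);
vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0)) ==
	VCHIQ_MSG_OPEN);
vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0)) == 3);
vchiq_static_assert(VCHIQ_MSG_DSTPORT(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0)) == 0);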
10500 +
10501 +#define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
10502 +#define VCHIQ_MSGID_CLAIMED            0x40000000
10503 +
10504 +#define VCHIQ_FOURCC_INVALID           0x00000000
10505 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc)  (fourcc != VCHIQ_FOURCC_INVALID)
10506 +
10507 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
10508 +
10509 +typedef uint32_t BITSET_T;
10510 +
10511 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
10512 +
10513 +#define BITSET_SIZE(b)        ((b + 31) >> 5)
10514 +#define BITSET_WORD(b)        (b >> 5)
10515 +#define BITSET_BIT(b)         (1 << (b & 31))
10516 +#define BITSET_ZERO(bs)       memset(bs, 0, sizeof(bs))
10517 +#define BITSET_IS_SET(bs, b)  (bs[BITSET_WORD(b)] & BITSET_BIT(b))
10518 +#define BITSET_SET(bs, b)     (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
10519 +#define BITSET_CLR(bs, b)     (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
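/*
 * Illustrative sketch, not part of the original header: how the bitset
 * helpers above combine. A BITSET_T array sized with BITSET_SIZE holds one
 * flag per service; the index 5 is an arbitrary example, and memset is
 * assumed to be visible (e.g. via linux/string.h). The poll_services array
 * declared later in this file uses the same BITSET_SIZE indexing.
 */
static inline void bitset_usage_sketch(void)
{
	BITSET_T poll_flags[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

	BITSET_ZERO(poll_flags);            /* clear every word            */
	BITSET_SET(poll_flags, 5);          /* request a poll of service 5 */
	if (BITSET_IS_SET(poll_flags, 5))
		BITSET_CLR(poll_flags, 5);  /* consume the request         */
}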
10520 +
10521 +#if VCHIQ_ENABLE_STATS
10522 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
10523 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
10524 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
10525 +       (service->stats. stat += addend)
10526 +#else
10527 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
10528 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
10529 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
10530 +#endif
10531 +
10532 +enum {
10533 +       DEBUG_ENTRIES,
10534 +#if VCHIQ_ENABLE_DEBUG
10535 +       DEBUG_SLOT_HANDLER_COUNT,
10536 +       DEBUG_SLOT_HANDLER_LINE,
10537 +       DEBUG_PARSE_LINE,
10538 +       DEBUG_PARSE_HEADER,
10539 +       DEBUG_PARSE_MSGID,
10540 +       DEBUG_AWAIT_COMPLETION_LINE,
10541 +       DEBUG_DEQUEUE_MESSAGE_LINE,
10542 +       DEBUG_SERVICE_CALLBACK_LINE,
10543 +       DEBUG_MSG_QUEUE_FULL_COUNT,
10544 +       DEBUG_COMPLETION_QUEUE_FULL_COUNT,
10545 +#endif
10546 +       DEBUG_MAX
10547 +};
10548 +
10549 +#if VCHIQ_ENABLE_DEBUG
10550 +
10551 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
10552 +#define DEBUG_TRACE(d) \
10553 +       do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
10554 +#define DEBUG_VALUE(d, v) \
10555 +       do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
10556 +#define DEBUG_COUNT(d) \
10557 +       do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
10558 +
10559 +#else /* VCHIQ_ENABLE_DEBUG */
10560 +
10561 +#define DEBUG_INITIALISE(local)
10562 +#define DEBUG_TRACE(d)
10563 +#define DEBUG_VALUE(d, v)
10564 +#define DEBUG_COUNT(d)
10565 +
10566 +#endif /* VCHIQ_ENABLE_DEBUG */
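/*
 * Illustrative sketch, not part of the original header: the usage pattern
 * the macros above are written for. "local" stands for the local
 * VCHIQ_SHARED_STATE_T (defined further down) and the loop is only an
 * outline:
 *
 *	DEBUG_INITIALISE(local)
 *	while (more messages to parse) {
 *		DEBUG_TRACE(PARSE_LINE);            record __LINE__
 *		DEBUG_VALUE(PARSE_MSGID, msgid);    record the message id
 *		DEBUG_COUNT(SLOT_HANDLER_COUNT);    bump the iteration count
 *	}
 *
 * Each write lands in the shared debug[] array and is followed by a dsb()
 * barrier (see the macro definitions above).
 */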
10567 +
10568 +typedef enum {
10569 +       VCHIQ_CONNSTATE_DISCONNECTED,
10570 +       VCHIQ_CONNSTATE_CONNECTING,
10571 +       VCHIQ_CONNSTATE_CONNECTED,
10572 +       VCHIQ_CONNSTATE_PAUSING,
10573 +       VCHIQ_CONNSTATE_PAUSE_SENT,
10574 +       VCHIQ_CONNSTATE_PAUSED,
10575 +       VCHIQ_CONNSTATE_RESUMING,
10576 +       VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
10577 +       VCHIQ_CONNSTATE_RESUME_TIMEOUT
10578 +} VCHIQ_CONNSTATE_T;
10579 +
10580 +enum {
10581 +       VCHIQ_SRVSTATE_FREE,
10582 +       VCHIQ_SRVSTATE_HIDDEN,
10583 +       VCHIQ_SRVSTATE_LISTENING,
10584 +       VCHIQ_SRVSTATE_OPENING,
10585 +       VCHIQ_SRVSTATE_OPEN,
10586 +       VCHIQ_SRVSTATE_OPENSYNC,
10587 +       VCHIQ_SRVSTATE_CLOSESENT,
10588 +       VCHIQ_SRVSTATE_CLOSERECVD,
10589 +       VCHIQ_SRVSTATE_CLOSEWAIT,
10590 +       VCHIQ_SRVSTATE_CLOSED
10591 +};
10592 +
10593 +enum {
10594 +       VCHIQ_POLL_TERMINATE,
10595 +       VCHIQ_POLL_REMOVE,
10596 +       VCHIQ_POLL_TXNOTIFY,
10597 +       VCHIQ_POLL_RXNOTIFY,
10598 +       VCHIQ_POLL_COUNT
10599 +};
10600 +
10601 +typedef enum {
10602 +       VCHIQ_BULK_TRANSMIT,
10603 +       VCHIQ_BULK_RECEIVE
10604 +} VCHIQ_BULK_DIR_T;
10605 +
10606 +typedef struct vchiq_bulk_struct {
10607 +       short mode;
10608 +       short dir;
10609 +       void *userdata;
10610 +       VCHI_MEM_HANDLE_T handle;
10611 +       void *data;
10612 +       int size;
10613 +       void *remote_data;
10614 +       int remote_size;
10615 +       int actual;
10616 +} VCHIQ_BULK_T;
10617 +
10618 +typedef struct vchiq_bulk_queue_struct {
10619 +       int local_insert;  /* Where to insert the next local bulk */
10620 +       int remote_insert; /* Where to insert the next remote bulk (master) */
10621 +       int process;       /* Bulk to transfer next */
10622 +       int remote_notify; /* Bulk to notify the remote client of next (mstr) */
10623 +       int remove;        /* Bulk to notify the local client of, and remove,
10624 +                          ** next */
10625 +       VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
10626 +} VCHIQ_BULK_QUEUE_T;
10627 +
10628 +typedef struct remote_event_struct {
10629 +       int armed;
10630 +       int fired;
10631 +       struct semaphore *event;
10632 +} REMOTE_EVENT_T;
10633 +
10634 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
10635 +
10636 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
10637 +
10638 +typedef struct vchiq_slot_struct {
10639 +       char data[VCHIQ_SLOT_SIZE];
10640 +} VCHIQ_SLOT_T;
10641 +
10642 +typedef struct vchiq_slot_info_struct {
10643 +       /* Use two counters rather than one to avoid the need for a mutex. */
10644 +       short use_count;
10645 +       short release_count;
10646 +} VCHIQ_SLOT_INFO_T;
10647 +
10648 +typedef struct vchiq_service_struct {
10649 +       VCHIQ_SERVICE_BASE_T base;
10650 +       VCHIQ_SERVICE_HANDLE_T handle;
10651 +       unsigned int ref_count;
10652 +       int srvstate;
10653 +       unsigned int localport;
10654 +       unsigned int remoteport;
10655 +       int public_fourcc;
10656 +       int client_id;
10657 +       char auto_close;
10658 +       char sync;
10659 +       char closing;
10660 +       atomic_t poll_flags;
10661 +       short version;
10662 +       short version_min;
10663 +       short peer_version;
10664 +
10665 +       VCHIQ_STATE_T *state;
10666 +       VCHIQ_INSTANCE_T instance;
10667 +
10668 +       int service_use_count;
10669 +
10670 +       VCHIQ_BULK_QUEUE_T bulk_tx;
10671 +       VCHIQ_BULK_QUEUE_T bulk_rx;
10672 +
10673 +       struct semaphore remove_event;
10674 +       struct semaphore bulk_remove_event;
10675 +       struct mutex bulk_mutex;
10676 +
10677 +       struct service_stats_struct {
10678 +               int quota_stalls;
10679 +               int slot_stalls;
10680 +               int bulk_stalls;
10681 +               int error_count;
10682 +               int ctrl_tx_count;
10683 +               int ctrl_rx_count;
10684 +               int bulk_tx_count;
10685 +               int bulk_rx_count;
10686 +               int bulk_aborted_count;
10687 +               uint64_t ctrl_tx_bytes;
10688 +               uint64_t ctrl_rx_bytes;
10689 +               uint64_t bulk_tx_bytes;
10690 +               uint64_t bulk_rx_bytes;
10691 +       } stats;
10692 +} VCHIQ_SERVICE_T;
10693 +
10694 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
10695 +       statically allocated, since for accounting reasons a service's slot
10696 +       usage is carried over between users of the same port number.
10697 + */
10698 +typedef struct vchiq_service_quota_struct {
10699 +       unsigned short slot_quota;
10700 +       unsigned short slot_use_count;
10701 +       unsigned short message_quota;
10702 +       unsigned short message_use_count;
10703 +       struct semaphore quota_event;
10704 +       int previous_tx_index;
10705 +} VCHIQ_SERVICE_QUOTA_T;
10706 +
10707 +typedef struct vchiq_shared_state_struct {
10708 +
10709 +       /* A non-zero value here indicates that the content is valid. */
10710 +       int initialised;
10711 +
10712 +       /* The first and last (inclusive) slots allocated to the owner. */
10713 +       int slot_first;
10714 +       int slot_last;
10715 +
10716 +       /* The slot allocated to synchronous messages from the owner. */
10717 +       int slot_sync;
10718 +
10719 +       /* Signalling this event indicates that the owner's slot handler thread
10720 +       ** should run. */
10721 +       REMOTE_EVENT_T trigger;
10722 +
10723 +       /* Indicates the byte position within the stream where the next message
10724 +       ** will be written. The least significant bits are an index into the
10725 +       ** slot. The next bits are the index of the slot in slot_queue. */
10726 +       int tx_pos;
10727 +
10728 +       /* This event should be signalled when a slot is recycled. */
10729 +       REMOTE_EVENT_T recycle;
10730 +
10731 +       /* The slot_queue index where the next recycled slot will be written. */
10732 +       int slot_queue_recycle;
10733 +
10734 +       /* This event should be signalled when a synchronous message is sent. */
10735 +       REMOTE_EVENT_T sync_trigger;
10736 +
10737 +       /* This event should be signalled when a synchronous message has been
10738 +       ** released. */
10739 +       REMOTE_EVENT_T sync_release;
10740 +
10741 +       /* A circular buffer of slot indexes. */
10742 +       int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
10743 +
10744 +       /* Debugging state */
10745 +       int debug[DEBUG_MAX];
10746 +} VCHIQ_SHARED_STATE_T;
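/*
 * Illustrative sketch, not part of the original header: splitting a tx_pos
 * (or rx_pos) stream position into the byte offset within the current slot
 * and the slot_queue index, using the masks defined earlier in this file.
 * The helper name is an example only.
 */
static inline void stream_pos_sketch(VCHIQ_SHARED_STATE_T *shared, int pos)
{
	int offset_in_slot = pos & VCHIQ_SLOT_MASK;              /* low bits  */
	int queue_index = (pos / VCHIQ_SLOT_SIZE) &
		VCHIQ_SLOT_QUEUE_MASK;                           /* next bits */
	int slot_index = shared->slot_queue[queue_index];        /* real slot */

	(void)offset_in_slot;
	(void)slot_index;
}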
10747 +
10748 +typedef struct vchiq_slot_zero_struct {
10749 +       int magic;
10750 +       short version;
10751 +       short version_min;
10752 +       int slot_zero_size;
10753 +       int slot_size;
10754 +       int max_slots;
10755 +       int max_slots_per_side;
10756 +       int platform_data[2];
10757 +       VCHIQ_SHARED_STATE_T master;
10758 +       VCHIQ_SHARED_STATE_T slave;
10759 +       VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
10760 +} VCHIQ_SLOT_ZERO_T;
10761 +
10762 +struct vchiq_state_struct {
10763 +       int id;
10764 +       int initialised;
10765 +       VCHIQ_CONNSTATE_T conn_state;
10766 +       int is_master;
10767 +
10768 +       VCHIQ_SHARED_STATE_T *local;
10769 +       VCHIQ_SHARED_STATE_T *remote;
10770 +       VCHIQ_SLOT_T *slot_data;
10771 +
10772 +       unsigned short default_slot_quota;
10773 +       unsigned short default_message_quota;
10774 +
10775 +       /* Event indicating connect message received */
10776 +       struct semaphore connect;
10777 +
10778 +       /* Mutex protecting services */
10779 +       struct mutex mutex;
10780 +       VCHIQ_INSTANCE_T *instance;
10781 +
10782 +       /* Processes incoming messages */
10783 +       struct task_struct *slot_handler_thread;
10784 +
10785 +       /* Processes recycled slots */
10786 +       struct task_struct *recycle_thread;
10787 +
10788 +       /* Processes synchronous messages */
10789 +       struct task_struct *sync_thread;
10790 +
10791 +       /* Local implementation of the trigger remote event */
10792 +       struct semaphore trigger_event;
10793 +
10794 +       /* Local implementation of the recycle remote event */
10795 +       struct semaphore recycle_event;
10796 +
10797 +       /* Local implementation of the sync trigger remote event */
10798 +       struct semaphore sync_trigger_event;
10799 +
10800 +       /* Local implementation of the sync release remote event */
10801 +       struct semaphore sync_release_event;
10802 +
10803 +       char *tx_data;
10804 +       char *rx_data;
10805 +       VCHIQ_SLOT_INFO_T *rx_info;
10806 +
10807 +       struct mutex slot_mutex;
10808 +
10809 +       struct mutex recycle_mutex;
10810 +
10811 +       struct mutex sync_mutex;
10812 +
10813 +       struct mutex bulk_transfer_mutex;
10814 +
10815 +       /* Indicates the byte position within the stream from where the next
10816 +       ** message will be read. The least significant bits are an index into
10817 +       ** the slot. The next bits are the index of the slot in
10818 +       ** remote->slot_queue. */
10819 +       int rx_pos;
10820 +
10821 +       /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
10822 +               from remote->tx_pos. */
10823 +       int local_tx_pos;
10824 +
10825 +       /* The slot_queue index of the slot to become available next. */
10826 +       int slot_queue_available;
10827 +
10828 +       /* A flag to indicate if any poll has been requested */
10829 +       int poll_needed;
10830 +
10831 +       /* The index of the previous slot used for data messages. */
10832 +       int previous_data_index;
10833 +
10834 +       /* The number of slots occupied by data messages. */
10835 +       unsigned short data_use_count;
10836 +
10837 +       /* The maximum number of slots to be occupied by data messages. */
10838 +       unsigned short data_quota;
10839 +
10840 +       /* An array of bit sets indicating which services must be polled. */
10841 +       atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
10842 +
10843 +       /* The number of the first unused service */
10844 +       int unused_service;
10845 +
10846 +       /* Signalled when a free slot becomes available. */
10847 +       struct semaphore slot_available_event;
10848 +
10849 +       struct semaphore slot_remove_event;
10850 +
10851 +       /* Signalled when a free data slot becomes available. */
10852 +       struct semaphore data_quota_event;
10853 +
10854 +       /* Incremented when there are bulk transfers which cannot be processed
10855 +        * whilst paused and must be processed on resume */
10856 +       int deferred_bulks;
10857 +
10858 +       struct state_stats_struct {
10859 +               int slot_stalls;
10860 +               int data_stalls;
10861 +               int ctrl_tx_count;
10862 +               int ctrl_rx_count;
10863 +               int error_count;
10864 +       } stats;
10865 +
10866 +       VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
10867 +       VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
10868 +       VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
10869 +
10870 +       VCHIQ_PLATFORM_STATE_T platform_state;
10871 +};
10872 +
10873 +struct bulk_waiter {
10874 +       VCHIQ_BULK_T *bulk;
10875 +       struct semaphore event;
10876 +       int actual;
10877 +};
10878 +
10879 +extern spinlock_t bulk_waiter_spinlock;
10880 +
10881 +extern int vchiq_core_log_level;
10882 +extern int vchiq_core_msg_log_level;
10883 +extern int vchiq_sync_log_level;
10884 +
10885 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
10886 +
10887 +extern const char *
10888 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
10889 +
10890 +extern VCHIQ_SLOT_ZERO_T *
10891 +vchiq_init_slots(void *mem_base, int mem_size);
10892 +
10893 +extern VCHIQ_STATUS_T
10894 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
10895 +       int is_master);
10896 +
10897 +extern VCHIQ_STATUS_T
10898 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10899 +
10900 +extern VCHIQ_SERVICE_T *
10901 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
10902 +       const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
10903 +       VCHIQ_INSTANCE_T instance);
10904 +
10905 +extern VCHIQ_STATUS_T
10906 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
10907 +
10908 +extern VCHIQ_STATUS_T
10909 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
10910 +
10911 +extern void
10912 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
10913 +
10914 +extern void
10915 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
10916 +
10917 +extern VCHIQ_STATUS_T
10918 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
10919 +
10920 +extern VCHIQ_STATUS_T
10921 +vchiq_pause_internal(VCHIQ_STATE_T *state);
10922 +
10923 +extern VCHIQ_STATUS_T
10924 +vchiq_resume_internal(VCHIQ_STATE_T *state);
10925 +
10926 +extern void
10927 +remote_event_pollall(VCHIQ_STATE_T *state);
10928 +
10929 +extern VCHIQ_STATUS_T
10930 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
10931 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
10932 +       VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
10933 +
10934 +extern void
10935 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
10936 +
10937 +extern void
10938 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
10939 +
10940 +extern void
10941 +vchiq_loud_error_header(void);
10942 +
10943 +extern void
10944 +vchiq_loud_error_footer(void);
10945 +
10946 +extern void
10947 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
10948 +
10949 +static inline VCHIQ_SERVICE_T *
10950 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
10951 +{
10952 +       VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
10953 +               (VCHIQ_MAX_STATES - 1)];
10954 +       if (!state)
10955 +               return NULL;
10956 +
10957 +       return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
10958 +}
10959 +
10960 +extern VCHIQ_SERVICE_T *
10961 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
10962 +
10963 +extern VCHIQ_SERVICE_T *
10964 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
10965 +
10966 +extern VCHIQ_SERVICE_T *
10967 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
10968 +       VCHIQ_SERVICE_HANDLE_T handle);
10969 +
10970 +extern VCHIQ_SERVICE_T *
10971 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
10972 +       int *pidx);
10973 +
10974 +extern void
10975 +lock_service(VCHIQ_SERVICE_T *service);
10976 +
10977 +extern void
10978 +unlock_service(VCHIQ_SERVICE_T *service);
10979 +
10980 +/* The following functions are called from vchiq_core, and external
10981 +** implementations must be provided. */
10982 +
10983 +extern VCHIQ_STATUS_T
10984 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
10985 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
10986 +
10987 +extern void
10988 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
10989 +
10990 +extern void
10991 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
10992 +
10993 +extern VCHIQ_STATUS_T
10994 +vchiq_copy_from_user(void *dst, const void *src, int size);
10995 +
10996 +extern void
10997 +remote_event_signal(REMOTE_EVENT_T *event);
10998 +
10999 +void
11000 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
11001 +
11002 +extern void
11003 +vchiq_platform_paused(VCHIQ_STATE_T *state);
11004 +
11005 +extern VCHIQ_STATUS_T
11006 +vchiq_platform_resume(VCHIQ_STATE_T *state);
11007 +
11008 +extern void
11009 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
11010 +
11011 +extern void
11012 +vchiq_dump(void *dump_context, const char *str, int len);
11013 +
11014 +extern void
11015 +vchiq_dump_platform_state(void *dump_context);
11016 +
11017 +extern void
11018 +vchiq_dump_platform_instances(void *dump_context);
11019 +
11020 +extern void
11021 +vchiq_dump_platform_service_state(void *dump_context,
11022 +       VCHIQ_SERVICE_T *service);
11023 +
11024 +extern VCHIQ_STATUS_T
11025 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
11026 +
11027 +extern VCHIQ_STATUS_T
11028 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
11029 +
11030 +extern void
11031 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
11032 +
11033 +extern void
11034 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
11035 +
11036 +extern VCHIQ_STATUS_T
11037 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
11038 +
11039 +extern VCHIQ_STATUS_T
11040 +vchiq_check_service(VCHIQ_SERVICE_T *service);
11041 +
11042 +extern void
11043 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
11044 +
11045 +extern VCHIQ_STATUS_T
11046 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
11047 +
11048 +extern VCHIQ_STATUS_T
11049 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
11050 +
11051 +extern VCHIQ_STATUS_T
11052 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
11053 +
11054 +extern void
11055 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
11056 +       VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
11057 +
11058 +extern void
11059 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
11060 +
11061 +extern void
11062 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
11063 +
11064 +
11065 +extern void
11066 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
11067 +       size_t numBytes);
11068 +
11069 +#endif
11070 --- /dev/null
11071 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
11072 @@ -0,0 +1,89 @@
11073 +#!/usr/bin/perl -w
11074 +
11075 +use strict;
11076 +
11077 +#
11078 +# Generate a version from available information
11079 +#
11080 +
11081 +my $prefix = shift @ARGV;
11082 +my $root = shift @ARGV;
11083 +
11084 +
11085 +if ( not defined $root ) {
11086 +       die "usage: $0 prefix root-dir\n";
11087 +}
11088 +
11089 +if ( ! -d $root ) {
11090 +       die "root directory $root not found\n";
11091 +}
11092 +
11093 +my $version = "unknown";
11094 +my $tainted = "";
11095 +
11096 +if ( -d "$root/.git" ) {
11097 +       # attempt to work out git version. only do so
11098 +       # on a linux build host, as cygwin builds are
11099 +       # already slow enough
11100 +
11101 +       if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
11102 +               if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
11103 +                       $version = "no git version";
11104 +               }
11105 +               else {
11106 +                       $version = <F>;
11107 +                       $version =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
11108 +                       $version =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
11109 +               }
11110 +
11111 +               if (open(G, "git --git-dir $root/.git status --porcelain|")) {
11112 +                       $tainted = <G>;
11113 +                       $tainted =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
11114 +                       $tainted =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
11115 +                       if (length $tainted) {
11116 +                               $version = join ' ', $version, "(tainted)";
11117 +                       }
11118 +                       else {
11119 +                               $version = join ' ', $version, "(clean)";
11120 +                       }
11121 +               }
11122 +       }
11123 +}
11124 +
11125 +my $hostname = `hostname`;
11126 +$hostname =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
11127 +$hostname =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
11128 +
11129 +
11130 +print STDERR "Version $version\n";
11131 +print <<EOF;
11132 +#include "${prefix}_build_info.h"
11133 +#include <linux/broadcom/vc_debug_sym.h>
11134 +
11135 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
11136 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
11137 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time,    __TIME__ );
11138 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date,    __DATE__ );
11139 +
11140 +const char *vchiq_get_build_hostname( void )
11141 +{
11142 +   return vchiq_build_hostname;
11143 +}
11144 +
11145 +const char *vchiq_get_build_version( void )
11146 +{
11147 +   return vchiq_build_version;
11148 +}
11149 +
11150 +const char *vchiq_get_build_date( void )
11151 +{
11152 +   return vchiq_build_date;
11153 +}
11154 +
11155 +const char *vchiq_get_build_time( void )
11156 +{
11157 +   return vchiq_build_time;
11158 +}
11159 +EOF
11160 +
11161 +
11162 --- /dev/null
11163 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
11164 @@ -0,0 +1,41 @@
11165 +/**
11166 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11167 + *
11168 + * Redistribution and use in source and binary forms, with or without
11169 + * modification, are permitted provided that the following conditions
11170 + * are met:
11171 + * 1. Redistributions of source code must retain the above copyright
11172 + *    notice, this list of conditions, and the following disclaimer,
11173 + *    without modification.
11174 + * 2. Redistributions in binary form must reproduce the above copyright
11175 + *    notice, this list of conditions and the following disclaimer in the
11176 + *    documentation and/or other materials provided with the distribution.
11177 + * 3. The names of the above-listed copyright holders may not be used
11178 + *    to endorse or promote products derived from this software without
11179 + *    specific prior written permission.
11180 + *
11181 + * ALTERNATIVELY, this software may be distributed under the terms of the
11182 + * GNU General Public License ("GPL") version 2, as published by the Free
11183 + * Software Foundation.
11184 + *
11185 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11186 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11187 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11188 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11189 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11190 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11191 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11192 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11193 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11194 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11195 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11196 + */
11197 +
11198 +#ifndef VCHIQ_VCHIQ_H
11199 +#define VCHIQ_VCHIQ_H
11200 +
11201 +#include "vchiq_if.h"
11202 +#include "vchiq_util.h"
11203 +
11204 +#endif
11205 +
11206 --- /dev/null
11207 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
11208 @@ -0,0 +1,188 @@
11209 +/**
11210 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11211 + *
11212 + * Redistribution and use in source and binary forms, with or without
11213 + * modification, are permitted provided that the following conditions
11214 + * are met:
11215 + * 1. Redistributions of source code must retain the above copyright
11216 + *    notice, this list of conditions, and the following disclaimer,
11217 + *    without modification.
11218 + * 2. Redistributions in binary form must reproduce the above copyright
11219 + *    notice, this list of conditions and the following disclaimer in the
11220 + *    documentation and/or other materials provided with the distribution.
11221 + * 3. The names of the above-listed copyright holders may not be used
11222 + *    to endorse or promote products derived from this software without
11223 + *    specific prior written permission.
11224 + *
11225 + * ALTERNATIVELY, this software may be distributed under the terms of the
11226 + * GNU General Public License ("GPL") version 2, as published by the Free
11227 + * Software Foundation.
11228 + *
11229 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11230 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11231 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11232 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11233 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11234 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11235 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11236 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11237 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11238 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11239 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11240 + */
11241 +
11242 +#ifndef VCHIQ_IF_H
11243 +#define VCHIQ_IF_H
11244 +
11245 +#include "interface/vchi/vchi_mh.h"
11246 +
11247 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
11248 +
11249 +#define VCHIQ_SLOT_SIZE     4096
11250 +#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
11251 +#define VCHIQ_CHANNEL_SIZE  VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
11252 +
11253 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
11254 +                       (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
11255 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
11256 +#define VCHIQ_GET_SERVICE_FOURCC(service)   vchiq_get_service_fourcc(service)
11257 +
11258 +typedef enum {
11259 +       VCHIQ_SERVICE_OPENED,         /* service, -, -             */
11260 +       VCHIQ_SERVICE_CLOSED,         /* service, -, -             */
11261 +       VCHIQ_MESSAGE_AVAILABLE,      /* service, header, -        */
11262 +       VCHIQ_BULK_TRANSMIT_DONE,     /* service, -, bulk_userdata */
11263 +       VCHIQ_BULK_RECEIVE_DONE,      /* service, -, bulk_userdata */
11264 +       VCHIQ_BULK_TRANSMIT_ABORTED,  /* service, -, bulk_userdata */
11265 +       VCHIQ_BULK_RECEIVE_ABORTED    /* service, -, bulk_userdata */
11266 +} VCHIQ_REASON_T;
11267 +
11268 +typedef enum {
11269 +       VCHIQ_ERROR   = -1,
11270 +       VCHIQ_SUCCESS = 0,
11271 +       VCHIQ_RETRY   = 1
11272 +} VCHIQ_STATUS_T;
11273 +
11274 +typedef enum {
11275 +       VCHIQ_BULK_MODE_CALLBACK,
11276 +       VCHIQ_BULK_MODE_BLOCKING,
11277 +       VCHIQ_BULK_MODE_NOCALLBACK,
11278 +       VCHIQ_BULK_MODE_WAITING         /* Reserved for internal use */
11279 +} VCHIQ_BULK_MODE_T;
11280 +
11281 +typedef enum {
11282 +       VCHIQ_SERVICE_OPTION_AUTOCLOSE,
11283 +       VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
11284 +       VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
11285 +       VCHIQ_SERVICE_OPTION_SYNCHRONOUS
11286 +} VCHIQ_SERVICE_OPTION_T;
11287 +
11288 +typedef struct vchiq_header_struct {
11289 +       /* The message identifier - opaque to applications. */
11290 +       int msgid;
11291 +
11292 +       /* Size of message data. */
11293 +       unsigned int size;
11294 +
11295 +       char data[0];           /* message */
11296 +} VCHIQ_HEADER_T;
11297 +
11298 +typedef struct {
11299 +       const void *data;
11300 +       unsigned int size;
11301 +} VCHIQ_ELEMENT_T;
11302 +
11303 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
11304 +
11305 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
11306 +       VCHIQ_SERVICE_HANDLE_T, void *);
11307 +
11308 +typedef struct vchiq_service_base_struct {
11309 +       int fourcc;
11310 +       VCHIQ_CALLBACK_T callback;
11311 +       void *userdata;
11312 +} VCHIQ_SERVICE_BASE_T;
11313 +
11314 +typedef struct vchiq_service_params_struct {
11315 +       int fourcc;
11316 +       VCHIQ_CALLBACK_T callback;
11317 +       void *userdata;
11318 +       short version;       /* Increment for non-trivial changes */
11319 +       short version_min;   /* Update for incompatible changes */
11320 +} VCHIQ_SERVICE_PARAMS_T;
11321 +
11322 +typedef struct vchiq_config_struct {
11323 +       unsigned int max_msg_size;
11324 +       unsigned int bulk_threshold; /* The message size above which it
11325 +                                       is better to use a bulk transfer
11326 +                                       (<= max_msg_size) */
11327 +       unsigned int max_outstanding_bulks;
11328 +       unsigned int max_services;
11329 +       short version;      /* The version of VCHIQ */
11330 +       short version_min;  /* The minimum compatible version of VCHIQ */
11331 +} VCHIQ_CONFIG_T;
11332 +
11333 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
11334 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
11335 +
11336 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
11337 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
11338 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
11339 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
11340 +       const VCHIQ_SERVICE_PARAMS_T *params,
11341 +       VCHIQ_SERVICE_HANDLE_T *pservice);
11342 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
11343 +       const VCHIQ_SERVICE_PARAMS_T *params,
11344 +       VCHIQ_SERVICE_HANDLE_T *pservice);
11345 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
11346 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
11347 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
11348 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
11349 +       VCHIQ_SERVICE_HANDLE_T service);
11350 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
11351 +
11352 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
11353 +       const VCHIQ_ELEMENT_T *elements, unsigned int count);
11354 +extern void           vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
11355 +       VCHIQ_HEADER_T *header);
11356 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11357 +       const void *data, unsigned int size, void *userdata);
11358 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11359 +       void *data, unsigned int size, void *userdata);
11360 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
11361 +       VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11362 +       const void *offset, unsigned int size, void *userdata);
11363 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
11364 +       VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
11365 +       void *offset, unsigned int size, void *userdata);
11366 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
11367 +       const void *data, unsigned int size, void *userdata,
11368 +       VCHIQ_BULK_MODE_T mode);
11369 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
11370 +       void *data, unsigned int size, void *userdata,
11371 +       VCHIQ_BULK_MODE_T mode);
11372 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
11373 +       VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
11374 +       void *userdata, VCHIQ_BULK_MODE_T mode);
11375 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
11376 +       VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
11377 +       void *userdata, VCHIQ_BULK_MODE_T mode);
11378 +extern int   vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
11379 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
11380 +extern int   vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
11381 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
11382 +       int config_size, VCHIQ_CONFIG_T *pconfig);
11383 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
11384 +       VCHIQ_SERVICE_OPTION_T option, int value);
11385 +
11386 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
11387 +       VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
11388 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
11389 +
11390 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
11391 +       void *ptr, size_t num_bytes);
11392 +
11393 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
11394 +      short *peer_version);
11395 +
11396 +#endif /* VCHIQ_IF_H */
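/*
 * Illustrative sketch, not part of the original header: a minimal kernel-side
 * client of the API declared above. The fourcc, callback behaviour and
 * payload are arbitrary examples, a matching service is assumed to exist on
 * the VideoCore side, and error handling is reduced to early returns.
 */
static VCHIQ_STATUS_T example_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header); /* consume and recycle */
	return VCHIQ_SUCCESS;
}

static VCHIQ_STATUS_T example_send(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T handle;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O'),
		.callback    = example_callback,
		.userdata    = NULL,
		.version     = 1,
		.version_min = 1,
	};
	const char msg[] = "hello";
	VCHIQ_ELEMENT_T element = { msg, sizeof(msg) };
	VCHIQ_STATUS_T status;

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS)
		return status;
	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS)
		return status;
	status = vchiq_open_service(instance, &params, &handle);
	if (status != VCHIQ_SUCCESS)
		return status;

	status = vchiq_queue_message(handle, &element, 1);

	vchiq_close_service(handle);
	vchiq_shutdown(instance);
	return status;
}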
11397 --- /dev/null
11398 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
11399 @@ -0,0 +1,129 @@
11400 +/**
11401 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11402 + *
11403 + * Redistribution and use in source and binary forms, with or without
11404 + * modification, are permitted provided that the following conditions
11405 + * are met:
11406 + * 1. Redistributions of source code must retain the above copyright
11407 + *    notice, this list of conditions, and the following disclaimer,
11408 + *    without modification.
11409 + * 2. Redistributions in binary form must reproduce the above copyright
11410 + *    notice, this list of conditions and the following disclaimer in the
11411 + *    documentation and/or other materials provided with the distribution.
11412 + * 3. The names of the above-listed copyright holders may not be used
11413 + *    to endorse or promote products derived from this software without
11414 + *    specific prior written permission.
11415 + *
11416 + * ALTERNATIVELY, this software may be distributed under the terms of the
11417 + * GNU General Public License ("GPL") version 2, as published by the Free
11418 + * Software Foundation.
11419 + *
11420 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11421 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11422 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11423 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11424 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11425 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11426 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11427 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11428 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11429 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11430 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11431 + */
11432 +
11433 +#ifndef VCHIQ_IOCTLS_H
11434 +#define VCHIQ_IOCTLS_H
11435 +
11436 +#include <linux/ioctl.h>
11437 +#include "vchiq_if.h"
11438 +
11439 +#define VCHIQ_IOC_MAGIC 0xc4
11440 +#define VCHIQ_INVALID_HANDLE (~0)
11441 +
11442 +typedef struct {
11443 +       VCHIQ_SERVICE_PARAMS_T params;
11444 +       int is_open;
11445 +       int is_vchi;
11446 +       unsigned int handle;       /* OUT */
11447 +} VCHIQ_CREATE_SERVICE_T;
11448 +
11449 +typedef struct {
11450 +       unsigned int handle;
11451 +       unsigned int count;
11452 +       const VCHIQ_ELEMENT_T *elements;
11453 +} VCHIQ_QUEUE_MESSAGE_T;
11454 +
11455 +typedef struct {
11456 +       unsigned int handle;
11457 +       void *data;
11458 +       unsigned int size;
11459 +       void *userdata;
11460 +       VCHIQ_BULK_MODE_T mode;
11461 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
11462 +
11463 +typedef struct {
11464 +       VCHIQ_REASON_T reason;
11465 +       VCHIQ_HEADER_T *header;
11466 +       void *service_userdata;
11467 +       void *bulk_userdata;
11468 +} VCHIQ_COMPLETION_DATA_T;
11469 +
11470 +typedef struct {
11471 +       unsigned int count;
11472 +       VCHIQ_COMPLETION_DATA_T *buf;
11473 +       unsigned int msgbufsize;
11474 +       unsigned int msgbufcount; /* IN/OUT */
11475 +       void **msgbufs;
11476 +} VCHIQ_AWAIT_COMPLETION_T;
11477 +
11478 +typedef struct {
11479 +       unsigned int handle;
11480 +       int blocking;
11481 +       unsigned int bufsize;
11482 +       void *buf;
11483 +} VCHIQ_DEQUEUE_MESSAGE_T;
11484 +
11485 +typedef struct {
11486 +       unsigned int config_size;
11487 +       VCHIQ_CONFIG_T *pconfig;
11488 +} VCHIQ_GET_CONFIG_T;
11489 +
11490 +typedef struct {
11491 +       unsigned int handle;
11492 +       VCHIQ_SERVICE_OPTION_T option;
11493 +       int value;
11494 +} VCHIQ_SET_SERVICE_OPTION_T;
11495 +
11496 +typedef struct {
11497 +       void     *virt_addr;
11498 +       size_t    num_bytes;
11499 +} VCHIQ_DUMP_MEM_T;
11500 +
11501 +#define VCHIQ_IOC_CONNECT              _IO(VCHIQ_IOC_MAGIC,   0)
11502 +#define VCHIQ_IOC_SHUTDOWN             _IO(VCHIQ_IOC_MAGIC,   1)
11503 +#define VCHIQ_IOC_CREATE_SERVICE \
11504 +       _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
11505 +#define VCHIQ_IOC_REMOVE_SERVICE       _IO(VCHIQ_IOC_MAGIC,   3)
11506 +#define VCHIQ_IOC_QUEUE_MESSAGE \
11507 +       _IOW(VCHIQ_IOC_MAGIC,  4, VCHIQ_QUEUE_MESSAGE_T)
11508 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
11509 +       _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
11510 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
11511 +       _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
11512 +#define VCHIQ_IOC_AWAIT_COMPLETION \
11513 +       _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
11514 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
11515 +       _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
11516 +#define VCHIQ_IOC_GET_CLIENT_ID        _IO(VCHIQ_IOC_MAGIC,   9)
11517 +#define VCHIQ_IOC_GET_CONFIG \
11518 +       _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
11519 +#define VCHIQ_IOC_CLOSE_SERVICE        _IO(VCHIQ_IOC_MAGIC,   11)
11520 +#define VCHIQ_IOC_USE_SERVICE          _IO(VCHIQ_IOC_MAGIC,   12)
11521 +#define VCHIQ_IOC_RELEASE_SERVICE      _IO(VCHIQ_IOC_MAGIC,   13)
11522 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
11523 +       _IOW(VCHIQ_IOC_MAGIC,  14, VCHIQ_SET_SERVICE_OPTION_T)
11524 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
11525 +       _IOW(VCHIQ_IOC_MAGIC,  15, VCHIQ_DUMP_MEM_T)
11526 +#define VCHIQ_IOC_MAX                  15
11527 +
11528 +#endif /* VCHIQ_IOCTLS_H */
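
The ioctls above are the user-space entry points to the driver. As a rough illustration of how they are meant to be driven, the hypothetical sketch below queries the peer configuration with VCHIQ_IOC_GET_CONFIG; the /dev/vchiq node name and the VCHIQ_CONFIG_T field names (version, version_min, from vchiq_if.h) are assumptions here, since this header does not define them.

/* Hypothetical user-space sketch, not part of the patch: query the VCHIQ
 * configuration through the ioctl interface defined above.  Assumes a
 * /dev/vchiq character device and that VCHIQ_CONFIG_T provides
 * version/version_min fields. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "vchiq_ioctls.h"

int main(void)
{
	VCHIQ_CONFIG_T config;
	VCHIQ_GET_CONFIG_T args = {
		.config_size = sizeof(config),
		.pconfig     = &config,
	};
	int fd = open("/dev/vchiq", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VCHIQ_IOC_GET_CONFIG, &args) == 0)
		printf("vchiq version %d (min %d)\n",
			config.version, config.version_min);
	close(fd);
	return 0;
}
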
11529 --- /dev/null
11530 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
11531 @@ -0,0 +1,454 @@
11532 +/**
11533 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11534 + *
11535 + * Redistribution and use in source and binary forms, with or without
11536 + * modification, are permitted provided that the following conditions
11537 + * are met:
11538 + * 1. Redistributions of source code must retain the above copyright
11539 + *    notice, this list of conditions, and the following disclaimer,
11540 + *    without modification.
11541 + * 2. Redistributions in binary form must reproduce the above copyright
11542 + *    notice, this list of conditions and the following disclaimer in the
11543 + *    documentation and/or other materials provided with the distribution.
11544 + * 3. The names of the above-listed copyright holders may not be used
11545 + *    to endorse or promote products derived from this software without
11546 + *    specific prior written permission.
11547 + *
11548 + * ALTERNATIVELY, this software may be distributed under the terms of the
11549 + * GNU General Public License ("GPL") version 2, as published by the Free
11550 + * Software Foundation.
11551 + *
11552 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11553 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11554 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11555 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11556 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11557 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11558 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11559 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11560 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11561 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11562 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11563 + */
11564 +
11565 +/* ---- Include Files ---------------------------------------------------- */
11566 +
11567 +#include <linux/kernel.h>
11568 +#include <linux/module.h>
11569 +#include <linux/mutex.h>
11570 +
11571 +#include "vchiq_core.h"
11572 +#include "vchiq_arm.h"
11573 +
11574 +/* ---- Public Variables ------------------------------------------------- */
11575 +
11576 +/* ---- Private Constants and Types -------------------------------------- */
11577 +
11578 +struct bulk_waiter_node {
11579 +       struct bulk_waiter bulk_waiter;
11580 +       int pid;
11581 +       struct list_head list;
11582 +};
11583 +
11584 +struct vchiq_instance_struct {
11585 +       VCHIQ_STATE_T *state;
11586 +
11587 +       int connected;
11588 +
11589 +       struct list_head bulk_waiter_list;
11590 +       struct mutex bulk_waiter_list_mutex;
11591 +};
11592 +
11593 +static VCHIQ_STATUS_T
11594 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11595 +       unsigned int size, VCHIQ_BULK_DIR_T dir);
11596 +
11597 +/****************************************************************************
11598 +*
11599 +*   vchiq_initialise
11600 +*
11601 +***************************************************************************/
11602 +#define VCHIQ_INIT_RETRIES 10
11603 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
11604 +{
11605 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
11606 +       VCHIQ_STATE_T *state;
11607 +       VCHIQ_INSTANCE_T instance = NULL;
11608 +	int i;
11609 +
11610 +       vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
11611 +
11612 +	/* VideoCore may not be ready due to boot-up timing.  It may never be
11613 +	   ready if the kernel and firmware are mismatched, so don't block forever. */
11614 +	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
11615 +               state = vchiq_get_state();
11616 +               if (state)
11617 +                       break;
11618 +               udelay(500);
11619 +       }
11620 +	if (i == VCHIQ_INIT_RETRIES) {
11621 +               vchiq_log_error(vchiq_core_log_level,
11622 +                       "%s: videocore not initialized\n", __func__);
11623 +               goto failed;
11624 +	} else if (i > 0) {
11625 +               vchiq_log_warning(vchiq_core_log_level,
11626 +                       "%s: videocore initialized after %d retries\n", __func__, i);
11627 +       }
11628 +
11629 +       instance = kzalloc(sizeof(*instance), GFP_KERNEL);
11630 +       if (!instance) {
11631 +               vchiq_log_error(vchiq_core_log_level,
11632 +                       "%s: error allocating vchiq instance\n", __func__);
11633 +               goto failed;
11634 +       }
11635 +
11636 +       instance->connected = 0;
11637 +       instance->state = state;
11638 +       mutex_init(&instance->bulk_waiter_list_mutex);
11639 +       INIT_LIST_HEAD(&instance->bulk_waiter_list);
11640 +
11641 +       *instanceOut = instance;
11642 +
11643 +       status = VCHIQ_SUCCESS;
11644 +
11645 +failed:
11646 +       vchiq_log_trace(vchiq_core_log_level,
11647 +               "%s(%p): returning %d", __func__, instance, status);
11648 +
11649 +       return status;
11650 +}
11651 +EXPORT_SYMBOL(vchiq_initialise);
11652 +
11653 +/****************************************************************************
11654 +*
11655 +*   vchiq_shutdown
11656 +*
11657 +***************************************************************************/
11658 +
11659 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
11660 +{
11661 +       VCHIQ_STATUS_T status;
11662 +       VCHIQ_STATE_T *state = instance->state;
11663 +
11664 +       vchiq_log_trace(vchiq_core_log_level,
11665 +               "%s(%p) called", __func__, instance);
11666 +
11667 +       if (mutex_lock_interruptible(&state->mutex) != 0)
11668 +               return VCHIQ_RETRY;
11669 +
11670 +       /* Remove all services */
11671 +       status = vchiq_shutdown_internal(state, instance);
11672 +
11673 +       mutex_unlock(&state->mutex);
11674 +
11675 +       vchiq_log_trace(vchiq_core_log_level,
11676 +               "%s(%p): returning %d", __func__, instance, status);
11677 +
11678 +       if (status == VCHIQ_SUCCESS) {
11679 +               struct list_head *pos, *next;
11680 +               list_for_each_safe(pos, next,
11681 +                               &instance->bulk_waiter_list) {
11682 +                       struct bulk_waiter_node *waiter;
11683 +                       waiter = list_entry(pos,
11684 +                                       struct bulk_waiter_node,
11685 +                                       list);
11686 +                       list_del(pos);
11687 +                       vchiq_log_info(vchiq_arm_log_level,
11688 +                                       "bulk_waiter - cleaned up %x "
11689 +                                       "for pid %d",
11690 +                                       (unsigned int)waiter, waiter->pid);
11691 +                       kfree(waiter);
11692 +               }
11693 +               kfree(instance);
11694 +       }
11695 +
11696 +       return status;
11697 +}
11698 +EXPORT_SYMBOL(vchiq_shutdown);
11699 +
11700 +/****************************************************************************
11701 +*
11702 +*   vchiq_is_connected
11703 +*
11704 +***************************************************************************/
11705 +
11706 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
11707 +{
11708 +       return instance->connected;
11709 +}
11710 +
11711 +/****************************************************************************
11712 +*
11713 +*   vchiq_connect
11714 +*
11715 +***************************************************************************/
11716 +
11717 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
11718 +{
11719 +       VCHIQ_STATUS_T status;
11720 +       VCHIQ_STATE_T *state = instance->state;
11721 +
11722 +       vchiq_log_trace(vchiq_core_log_level,
11723 +               "%s(%p) called", __func__, instance);
11724 +
11725 +       if (mutex_lock_interruptible(&state->mutex) != 0) {
11726 +               vchiq_log_trace(vchiq_core_log_level,
11727 +                       "%s: call to mutex_lock failed", __func__);
11728 +               status = VCHIQ_RETRY;
11729 +               goto failed;
11730 +       }
11731 +       status = vchiq_connect_internal(state, instance);
11732 +
11733 +       if (status == VCHIQ_SUCCESS)
11734 +               instance->connected = 1;
11735 +
11736 +       mutex_unlock(&state->mutex);
11737 +
11738 +failed:
11739 +       vchiq_log_trace(vchiq_core_log_level,
11740 +               "%s(%p): returning %d", __func__, instance, status);
11741 +
11742 +       return status;
11743 +}
11744 +EXPORT_SYMBOL(vchiq_connect);
11745 +
11746 +/****************************************************************************
11747 +*
11748 +*   vchiq_add_service
11749 +*
11750 +***************************************************************************/
11751 +
11752 +VCHIQ_STATUS_T vchiq_add_service(
11753 +       VCHIQ_INSTANCE_T              instance,
11754 +       const VCHIQ_SERVICE_PARAMS_T *params,
11755 +       VCHIQ_SERVICE_HANDLE_T       *phandle)
11756 +{
11757 +       VCHIQ_STATUS_T status;
11758 +       VCHIQ_STATE_T *state = instance->state;
11759 +       VCHIQ_SERVICE_T *service = NULL;
11760 +       int srvstate;
11761 +
11762 +       vchiq_log_trace(vchiq_core_log_level,
11763 +               "%s(%p) called", __func__, instance);
11764 +
11765 +       *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11766 +
11767 +       srvstate = vchiq_is_connected(instance)
11768 +               ? VCHIQ_SRVSTATE_LISTENING
11769 +               : VCHIQ_SRVSTATE_HIDDEN;
11770 +
11771 +       service = vchiq_add_service_internal(
11772 +               state,
11773 +               params,
11774 +               srvstate,
11775 +               instance);
11776 +
11777 +       if (service) {
11778 +               *phandle = service->handle;
11779 +               status = VCHIQ_SUCCESS;
11780 +       } else
11781 +               status = VCHIQ_ERROR;
11782 +
11783 +       vchiq_log_trace(vchiq_core_log_level,
11784 +               "%s(%p): returning %d", __func__, instance, status);
11785 +
11786 +       return status;
11787 +}
11788 +EXPORT_SYMBOL(vchiq_add_service);
11789 +
11790 +/****************************************************************************
11791 +*
11792 +*   vchiq_open_service
11793 +*
11794 +***************************************************************************/
11795 +
11796 +VCHIQ_STATUS_T vchiq_open_service(
11797 +       VCHIQ_INSTANCE_T              instance,
11798 +       const VCHIQ_SERVICE_PARAMS_T *params,
11799 +       VCHIQ_SERVICE_HANDLE_T       *phandle)
11800 +{
11801 +       VCHIQ_STATUS_T   status = VCHIQ_ERROR;
11802 +       VCHIQ_STATE_T   *state = instance->state;
11803 +       VCHIQ_SERVICE_T *service = NULL;
11804 +
11805 +       vchiq_log_trace(vchiq_core_log_level,
11806 +               "%s(%p) called", __func__, instance);
11807 +
11808 +       *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
11809 +
11810 +       if (!vchiq_is_connected(instance))
11811 +               goto failed;
11812 +
11813 +       service = vchiq_add_service_internal(state,
11814 +               params,
11815 +               VCHIQ_SRVSTATE_OPENING,
11816 +               instance);
11817 +
11818 +       if (service) {
11819 +               status = vchiq_open_service_internal(service, current->pid);
11820 +               if (status == VCHIQ_SUCCESS)
11821 +                       *phandle = service->handle;
11822 +               else
11823 +                       vchiq_remove_service(service->handle);
11824 +       }
11825 +
11826 +failed:
11827 +       vchiq_log_trace(vchiq_core_log_level,
11828 +               "%s(%p): returning %d", __func__, instance, status);
11829 +
11830 +       return status;
11831 +}
11832 +EXPORT_SYMBOL(vchiq_open_service);
11833 +
11834 +VCHIQ_STATUS_T
11835 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
11836 +       const void *data, unsigned int size, void *userdata)
11837 +{
11838 +       return vchiq_bulk_transfer(handle,
11839 +               VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11840 +               VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
11841 +}
11842 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
11843 +
11844 +VCHIQ_STATUS_T
11845 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11846 +       unsigned int size, void *userdata)
11847 +{
11848 +       return vchiq_bulk_transfer(handle,
11849 +               VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11850 +               VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
11851 +}
11852 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
11853 +
11854 +VCHIQ_STATUS_T
11855 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
11856 +       unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11857 +{
11858 +       VCHIQ_STATUS_T status;
11859 +
11860 +       switch (mode) {
11861 +       case VCHIQ_BULK_MODE_NOCALLBACK:
11862 +       case VCHIQ_BULK_MODE_CALLBACK:
11863 +               status = vchiq_bulk_transfer(handle,
11864 +                       VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
11865 +                       mode, VCHIQ_BULK_TRANSMIT);
11866 +               break;
11867 +       case VCHIQ_BULK_MODE_BLOCKING:
11868 +               status = vchiq_blocking_bulk_transfer(handle,
11869 +                       (void *)data, size, VCHIQ_BULK_TRANSMIT);
11870 +               break;
11871 +       default:
11872 +               return VCHIQ_ERROR;
11873 +       }
11874 +
11875 +       return status;
11876 +}
11877 +EXPORT_SYMBOL(vchiq_bulk_transmit);
11878 +
11879 +VCHIQ_STATUS_T
11880 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11881 +       unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
11882 +{
11883 +       VCHIQ_STATUS_T status;
11884 +
11885 +       switch (mode) {
11886 +       case VCHIQ_BULK_MODE_NOCALLBACK:
11887 +       case VCHIQ_BULK_MODE_CALLBACK:
11888 +               status = vchiq_bulk_transfer(handle,
11889 +                       VCHI_MEM_HANDLE_INVALID, data, size, userdata,
11890 +                       mode, VCHIQ_BULK_RECEIVE);
11891 +               break;
11892 +       case VCHIQ_BULK_MODE_BLOCKING:
11893 +               status = vchiq_blocking_bulk_transfer(handle,
11894 +                       (void *)data, size, VCHIQ_BULK_RECEIVE);
11895 +               break;
11896 +       default:
11897 +               return VCHIQ_ERROR;
11898 +       }
11899 +
11900 +       return status;
11901 +}
11902 +EXPORT_SYMBOL(vchiq_bulk_receive);
11903 +
11904 +static VCHIQ_STATUS_T
11905 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
11906 +       unsigned int size, VCHIQ_BULK_DIR_T dir)
11907 +{
11908 +       VCHIQ_INSTANCE_T instance;
11909 +       VCHIQ_SERVICE_T *service;
11910 +       VCHIQ_STATUS_T status;
11911 +       struct bulk_waiter_node *waiter = NULL;
11912 +       struct list_head *pos;
11913 +
11914 +       service = find_service_by_handle(handle);
11915 +       if (!service)
11916 +               return VCHIQ_ERROR;
11917 +
11918 +       instance = service->instance;
11919 +
11920 +       unlock_service(service);
11921 +
11922 +       mutex_lock(&instance->bulk_waiter_list_mutex);
11923 +       list_for_each(pos, &instance->bulk_waiter_list) {
11924 +               if (list_entry(pos, struct bulk_waiter_node,
11925 +                               list)->pid == current->pid) {
11926 +                       waiter = list_entry(pos,
11927 +                               struct bulk_waiter_node,
11928 +                               list);
11929 +                       list_del(pos);
11930 +                       break;
11931 +               }
11932 +       }
11933 +       mutex_unlock(&instance->bulk_waiter_list_mutex);
11934 +
11935 +       if (waiter) {
11936 +               VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11937 +               if (bulk) {
11938 +                       /* This thread has an outstanding bulk transfer. */
11939 +                       if ((bulk->data != data) ||
11940 +                               (bulk->size != size)) {
11941 +                               /* This is not a retry of the previous one.
11942 +                               ** Cancel the signal when the transfer
11943 +                               ** completes. */
11944 +                               spin_lock(&bulk_waiter_spinlock);
11945 +                               bulk->userdata = NULL;
11946 +                               spin_unlock(&bulk_waiter_spinlock);
11947 +                       }
11948 +               }
11949 +       }
11950 +
11951 +       if (!waiter) {
11952 +               waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
11953 +               if (!waiter) {
11954 +                       vchiq_log_error(vchiq_core_log_level,
11955 +                               "%s - out of memory", __func__);
11956 +                       return VCHIQ_ERROR;
11957 +               }
11958 +       }
11959 +
11960 +       status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
11961 +               data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
11962 +               dir);
11963 +       if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
11964 +               !waiter->bulk_waiter.bulk) {
11965 +               VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
11966 +               if (bulk) {
11967 +                       /* Cancel the signal when the transfer
11968 +                        ** completes. */
11969 +                       spin_lock(&bulk_waiter_spinlock);
11970 +                       bulk->userdata = NULL;
11971 +                       spin_unlock(&bulk_waiter_spinlock);
11972 +               }
11973 +               kfree(waiter);
11974 +       } else {
11975 +               waiter->pid = current->pid;
11976 +               mutex_lock(&instance->bulk_waiter_list_mutex);
11977 +               list_add(&waiter->list, &instance->bulk_waiter_list);
11978 +               mutex_unlock(&instance->bulk_waiter_list_mutex);
11979 +               vchiq_log_info(vchiq_arm_log_level,
11980 +                               "saved bulk_waiter %x for pid %d",
11981 +                               (unsigned int)waiter, current->pid);
11982 +       }
11983 +
11984 +       return status;
11985 +}
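
The functions above form the in-kernel client API exported by this file. A minimal sketch of how a kernel-side client might use it follows; the "demo" fourcc, the callback body and the error codes are illustrative assumptions, and VCHIQ_SERVICE_PARAMS_T, VCHIQ_MAKE_FOURCC() and vchiq_close_service() are taken from vchiq_if.h as assumed here.

/* Illustrative kernel-side client of the API above; names marked 'demo'
 * are assumptions for the sketch, not part of the driver. */
#include <linux/errno.h>
#include "vchiq_if.h"

static VCHIQ_STATUS_T demo_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle,
	void *bulk_userdata)
{
	/* Release each message once it has been consumed. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header);
	return VCHIQ_SUCCESS;
}

static int demo_start(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T handle;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
		.callback    = demo_callback,
		.userdata    = NULL,
		.version     = 1,
		.version_min = 1,
	};

	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
		return -ENXIO;
	if (vchiq_connect(instance) != VCHIQ_SUCCESS ||
	    vchiq_open_service(instance, &params, &handle) != VCHIQ_SUCCESS) {
		vchiq_shutdown(instance);
		return -ENXIO;
	}
	/* ... queue messages or bulk transfers on 'handle' here ... */
	vchiq_close_service(handle);
	vchiq_shutdown(instance);
	return 0;
}
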
11986 --- /dev/null
11987 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
11988 @@ -0,0 +1,71 @@
11989 +/**
11990 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11991 + *
11992 + * Redistribution and use in source and binary forms, with or without
11993 + * modification, are permitted provided that the following conditions
11994 + * are met:
11995 + * 1. Redistributions of source code must retain the above copyright
11996 + *    notice, this list of conditions, and the following disclaimer,
11997 + *    without modification.
11998 + * 2. Redistributions in binary form must reproduce the above copyright
11999 + *    notice, this list of conditions and the following disclaimer in the
12000 + *    documentation and/or other materials provided with the distribution.
12001 + * 3. The names of the above-listed copyright holders may not be used
12002 + *    to endorse or promote products derived from this software without
12003 + *    specific prior written permission.
12004 + *
12005 + * ALTERNATIVELY, this software may be distributed under the terms of the
12006 + * GNU General Public License ("GPL") version 2, as published by the Free
12007 + * Software Foundation.
12008 + *
12009 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12010 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12011 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12012 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12013 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12014 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12015 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12016 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12017 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12018 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12019 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12020 + */
12021 +
12022 +#ifndef VCHIQ_MEMDRV_H
12023 +#define VCHIQ_MEMDRV_H
12024 +
12025 +/* ---- Include Files ----------------------------------------------------- */
12026 +
12027 +#include <linux/kernel.h>
12028 +#include "vchiq_if.h"
12029 +
12030 +/* ---- Constants and Types ---------------------------------------------- */
12031 +
12032 +typedef struct {
12033 +        void                   *armSharedMemVirt;
12034 +        dma_addr_t              armSharedMemPhys;
12035 +        size_t                  armSharedMemSize;
12036 +
12037 +        void                   *vcSharedMemVirt;
12038 +        dma_addr_t              vcSharedMemPhys;
12039 +        size_t                  vcSharedMemSize;
12040 +} VCHIQ_SHARED_MEM_INFO_T;
12041 +
12042 +/* ---- Variable Externs ------------------------------------------------- */
12043 +
12044 +/* ---- Function Prototypes ---------------------------------------------- */
12045 +
12046 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
12047 +
12048 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
12049 +
12050 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
12051 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
12052 +
12053 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
12054 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
12055 +
12056 +VCHIQ_STATUS_T vchiq_userdrv_resume(
12057 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
12058 +
12059 +#endif /* VCHIQ_MEMDRV_H */
12060 --- /dev/null
12061 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
12062 @@ -0,0 +1,58 @@
12063 +/**
12064 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12065 + *
12066 + * Redistribution and use in source and binary forms, with or without
12067 + * modification, are permitted provided that the following conditions
12068 + * are met:
12069 + * 1. Redistributions of source code must retain the above copyright
12070 + *    notice, this list of conditions, and the following disclaimer,
12071 + *    without modification.
12072 + * 2. Redistributions in binary form must reproduce the above copyright
12073 + *    notice, this list of conditions and the following disclaimer in the
12074 + *    documentation and/or other materials provided with the distribution.
12075 + * 3. The names of the above-listed copyright holders may not be used
12076 + *    to endorse or promote products derived from this software without
12077 + *    specific prior written permission.
12078 + *
12079 + * ALTERNATIVELY, this software may be distributed under the terms of the
12080 + * GNU General Public License ("GPL") version 2, as published by the Free
12081 + * Software Foundation.
12082 + *
12083 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12084 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12085 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12086 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12087 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12088 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12089 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12090 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12091 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12092 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12093 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12094 + */
12095 +
12096 +#ifndef VCHIQ_PAGELIST_H
12097 +#define VCHIQ_PAGELIST_H
12098 +
12099 +#ifndef PAGE_SIZE
12100 +#define PAGE_SIZE 4096
12101 +#endif
12102 +#define CACHE_LINE_SIZE 32
12103 +#define PAGELIST_WRITE 0
12104 +#define PAGELIST_READ 1
12105 +#define PAGELIST_READ_WITH_FRAGMENTS 2
12106 +
12107 +typedef struct pagelist_struct {
12108 +       unsigned long length;
12109 +       unsigned short type;
12110 +       unsigned short offset;
12111 +       unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
12112 +                                  pages at consecutive addresses. */
12113 +} PAGELIST_T;
12114 +
12115 +typedef struct fragments_struct {
12116 +       char headbuf[CACHE_LINE_SIZE];
12117 +       char tailbuf[CACHE_LINE_SIZE];
12118 +} FRAGMENTS_T;
12119 +
12120 +#endif /* VCHIQ_PAGELIST_H */
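
Since the addrs[] comment above is terse, here is a small sketch of how a consumer could interpret the run-length encoding: each entry carries a page-aligned address in its upper bits and, in its 12 least-significant bits, the number of additional pages that follow at consecutive addresses. The helper name and the num_entries parameter are illustrative assumptions; the entry count is not stored in PAGELIST_T itself.

/* Illustrative decoder for the addrs[] encoding described above. */
static unsigned long pagelist_count_pages(const PAGELIST_T *pagelist,
	unsigned int num_entries)
{
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		unsigned long entry = pagelist->addrs[i];
		/* base address of the run (upper bits)... */
		unsigned long base = entry & ~0xfffUL;
		/* ...plus the first page and its consecutive followers */
		unsigned long run = (entry & 0xfff) + 1;

		(void)base;
		total += run;
	}
	return total;
}
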
12121 --- /dev/null
12122 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
12123 @@ -0,0 +1,243 @@
12124 +/**
12125 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12126 + *
12127 + * Redistribution and use in source and binary forms, with or without
12128 + * modification, are permitted provided that the following conditions
12129 + * are met:
12130 + * 1. Redistributions of source code must retain the above copyright
12131 + *    notice, this list of conditions, and the following disclaimer,
12132 + *    without modification.
12133 + * 2. Redistributions in binary form must reproduce the above copyright
12134 + *    notice, this list of conditions and the following disclaimer in the
12135 + *    documentation and/or other materials provided with the distribution.
12136 + * 3. The names of the above-listed copyright holders may not be used
12137 + *    to endorse or promote products derived from this software without
12138 + *    specific prior written permission.
12139 + *
12140 + * ALTERNATIVELY, this software may be distributed under the terms of the
12141 + * GNU General Public License ("GPL") version 2, as published by the Free
12142 + * Software Foundation.
12143 + *
12144 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12145 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12146 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12147 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12148 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12149 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12150 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12151 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12152 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12153 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12154 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12155 + */
12156 +
12157 +
12158 +#include <linux/proc_fs.h>
12159 +#include "vchiq_core.h"
12160 +#include "vchiq_arm.h"
12161 +
12162 +struct vchiq_proc_info {
12163 +       /* Global 'vc' proc entry used by all instances */
12164 +       struct proc_dir_entry *vc_cfg_dir;
12165 +
12166 +       /* one entry per client process */
12167 +       struct proc_dir_entry *clients;
12168 +
12169 +       /* log categories */
12170 +       struct proc_dir_entry *log_categories;
12171 +};
12172 +
12173 +static struct vchiq_proc_info proc_info;
12174 +
12175 +struct proc_dir_entry *vchiq_proc_top(void)
12176 +{
12177 +       BUG_ON(proc_info.vc_cfg_dir == NULL);
12178 +       return proc_info.vc_cfg_dir;
12179 +}
12180 +
12181 +/****************************************************************************
12182 +*
12183 +*   log category entries
12184 +*
12185 +***************************************************************************/
12186 +#define PROC_WRITE_BUF_SIZE 256
12187 +
12188 +#define VCHIQ_LOG_ERROR_STR   "error"
12189 +#define VCHIQ_LOG_WARNING_STR "warning"
12190 +#define VCHIQ_LOG_INFO_STR    "info"
12191 +#define VCHIQ_LOG_TRACE_STR   "trace"
12192 +
12193 +static int log_cfg_read(char *buffer,
12194 +       char **start,
12195 +       off_t off,
12196 +       int count,
12197 +       int *eof,
12198 +       void *data)
12199 +{
12200 +       int len = 0;
12201 +       char *log_value = NULL;
12202 +
12203 +       switch (*((int *)data)) {
12204 +       case VCHIQ_LOG_ERROR:
12205 +               log_value = VCHIQ_LOG_ERROR_STR;
12206 +               break;
12207 +       case VCHIQ_LOG_WARNING:
12208 +               log_value = VCHIQ_LOG_WARNING_STR;
12209 +               break;
12210 +       case VCHIQ_LOG_INFO:
12211 +               log_value = VCHIQ_LOG_INFO_STR;
12212 +               break;
12213 +       case VCHIQ_LOG_TRACE:
12214 +               log_value = VCHIQ_LOG_TRACE_STR;
12215 +               break;
12216 +       default:
12217 +               break;
12218 +       }
12219 +
12220 +       len += sprintf(buffer + len,
12221 +               "%s\n",
12222 +               log_value ? log_value : "(null)");
12223 +
12224 +       return len;
12225 +}
12226 +
12227 +
12228 +static int log_cfg_write(struct file *file,
12229 +       const char __user *buffer,
12230 +       unsigned long count,
12231 +       void *data)
12232 +{
12233 +       int *log_module = data;
12234 +       char kbuf[PROC_WRITE_BUF_SIZE + 1];
12235 +
12236 +       (void)file;
12237 +
12238 +       memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
12239 +       if (count >= PROC_WRITE_BUF_SIZE)
12240 +               count = PROC_WRITE_BUF_SIZE;
12241 +
12242 +       if (copy_from_user(kbuf,
12243 +               buffer,
12244 +               count) != 0)
12245 +               return -EFAULT;
12246 +       kbuf[count - 1] = 0;
12247 +
12248 +       if (strncmp("error", kbuf, strlen("error")) == 0)
12249 +               *log_module = VCHIQ_LOG_ERROR;
12250 +       else if (strncmp("warning", kbuf, strlen("warning")) == 0)
12251 +               *log_module = VCHIQ_LOG_WARNING;
12252 +       else if (strncmp("info", kbuf, strlen("info")) == 0)
12253 +               *log_module = VCHIQ_LOG_INFO;
12254 +       else if (strncmp("trace", kbuf, strlen("trace")) == 0)
12255 +               *log_module = VCHIQ_LOG_TRACE;
12256 +       else
12257 +               *log_module = VCHIQ_LOG_DEFAULT;
12258 +
12259 +       return count;
12260 +}
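
log_cfg_read() and log_cfg_write() give each log category a tiny text protocol: reading yields one of "error", "warning", "info" or "trace", and writing one of those strings selects the level (anything else falls back to VCHIQ_LOG_DEFAULT). The hypothetical user-space sketch below assumes a /proc/vc/log/core entry; note that the registration further down is currently compiled out with #if 0, so the path is an assumption on this kernel.

/* Hypothetical user-space sketch of the text protocol handled above. */
#include <stdio.h>

static int set_core_log_level(const char *level)
{
	FILE *f = fopen("/proc/vc/log/core", "w");
	char buf[16] = "";

	if (!f)
		return -1;
	fputs(level, f);	/* "error", "warning", "info" or "trace" */
	fclose(f);

	f = fopen("/proc/vc/log/core", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("core log level is now %s", buf);
	if (f)
		fclose(f);
	return 0;
}
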
12261 +
12262 +/* Log category proc entries */
12263 +struct vchiq_proc_log_entry {
12264 +       const char *name;
12265 +       int *plevel;
12266 +       struct proc_dir_entry *dir;
12267 +};
12268 +
12269 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
12270 +       { "core", &vchiq_core_log_level },
12271 +       { "msg",  &vchiq_core_msg_log_level },
12272 +       { "sync", &vchiq_sync_log_level },
12273 +       { "susp", &vchiq_susp_log_level },
12274 +       { "arm",  &vchiq_arm_log_level },
12275 +};
12276 +static int n_log_entries =
12277 +       sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
12278 +
12279 +/* create an entry under /proc/vc/log for each log category */
12280 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
12281 +{
12282 +       struct proc_dir_entry *dir;
12283 +       size_t i;
12284 +       int ret = 0;
12285 +#if 0
12286 +       dir = proc_mkdir("log", proc_info.vc_cfg_dir);
12287 +       if (!dir)
12288 +               return -ENOMEM;
12289 +       proc_info.log_categories = dir;
12290 +
12291 +       for (i = 0; i < n_log_entries; i++) {
12292 +               dir = create_proc_entry(vchiq_proc_log_entries[i].name,
12293 +                                       0644,
12294 +                                       proc_info.log_categories);
12295 +               if (!dir) {
12296 +                       ret = -ENOMEM;
12297 +                       break;
12298 +               }
12299 +
12300 +               dir->read_proc = &log_cfg_read;
12301 +               dir->write_proc = &log_cfg_write;
12302 +               dir->data = (void *)vchiq_proc_log_entries[i].plevel;
12303 +
12304 +               vchiq_proc_log_entries[i].dir = dir;
12305 +       }
12306 +#endif
12307 +       return ret;
12308 +}
12309 +
12310 +
12311 +int vchiq_proc_init(void)
12312 +{
12313 +       BUG_ON(proc_info.vc_cfg_dir != NULL);
12314 +
12315 +       proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
12316 +       if (proc_info.vc_cfg_dir == NULL)
12317 +               goto fail;
12318 +
12319 +       proc_info.clients = proc_mkdir("clients",
12320 +                               proc_info.vc_cfg_dir);
12321 +       if (!proc_info.clients)
12322 +               goto fail;
12323 +
12324 +       if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
12325 +               goto fail;
12326 +
12327 +       return 0;
12328 +
12329 +fail:
12330 +       vchiq_proc_deinit();
12331 +       vchiq_log_error(vchiq_arm_log_level,
12332 +               "%s: failed to create proc directory",
12333 +               __func__);
12334 +
12335 +       return -ENOMEM;
12336 +}
12337 +
12338 +/* remove all the proc entries */
12339 +void vchiq_proc_deinit(void)
12340 +{
12341 +       /* log category entries */
12342 +#if 0
12343 +       if (proc_info.log_categories) {
12344 +               size_t i;
12345 +               for (i = 0; i < n_log_entries; i++)
12346 +                       if (vchiq_proc_log_entries[i].dir)
12347 +                               remove_proc_entry(
12348 +                                       vchiq_proc_log_entries[i].name,
12349 +                                       proc_info.log_categories);
12350 +
12351 +               remove_proc_entry(proc_info.log_categories->name,
12352 +                                 proc_info.vc_cfg_dir);
12353 +       }
12354 +       if (proc_info.clients)
12355 +               remove_proc_entry(proc_info.clients->name,
12356 +                                 proc_info.vc_cfg_dir);
12357 +       if (proc_info.vc_cfg_dir)
12358 +               remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
12359 +#endif
12360 +}
12361 +
12362 +struct proc_dir_entry *vchiq_clients_top(void)
12363 +{
12364 +       return proc_info.clients;
12365 +}
12366 +
12367 --- /dev/null
12368 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
12369 @@ -0,0 +1,815 @@
12370 +/**
12371 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12372 + *
12373 + * Redistribution and use in source and binary forms, with or without
12374 + * modification, are permitted provided that the following conditions
12375 + * are met:
12376 + * 1. Redistributions of source code must retain the above copyright
12377 + *    notice, this list of conditions, and the following disclaimer,
12378 + *    without modification.
12379 + * 2. Redistributions in binary form must reproduce the above copyright
12380 + *    notice, this list of conditions and the following disclaimer in the
12381 + *    documentation and/or other materials provided with the distribution.
12382 + * 3. The names of the above-listed copyright holders may not be used
12383 + *    to endorse or promote products derived from this software without
12384 + *    specific prior written permission.
12385 + *
12386 + * ALTERNATIVELY, this software may be distributed under the terms of the
12387 + * GNU General Public License ("GPL") version 2, as published by the Free
12388 + * Software Foundation.
12389 + *
12390 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12391 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12392 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12393 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12394 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12395 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12396 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12397 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12398 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12399 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12400 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12401 + */
12402 +#include <linux/module.h>
12403 +#include <linux/types.h>
12404 +
12405 +#include "interface/vchi/vchi.h"
12406 +#include "vchiq.h"
12407 +#include "vchiq_core.h"
12408 +
12409 +#include "vchiq_util.h"
12410 +
12411 +#include <stddef.h>
12412 +
12413 +#define vchiq_status_to_vchi(status) ((int32_t)status)
12414 +
12415 +typedef struct {
12416 +       VCHIQ_SERVICE_HANDLE_T handle;
12417 +
12418 +       VCHIU_QUEUE_T queue;
12419 +
12420 +       VCHI_CALLBACK_T callback;
12421 +       void *callback_param;
12422 +} SHIM_SERVICE_T;
12423 +
12424 +/* ----------------------------------------------------------------------
12425 + * return pointer to the mphi message driver function table
12426 + * -------------------------------------------------------------------- */
12427 +const VCHI_MESSAGE_DRIVER_T *
12428 +vchi_mphi_message_driver_func_table(void)
12429 +{
12430 +       return NULL;
12431 +}
12432 +
12433 +/* ----------------------------------------------------------------------
12434 + * return a pointer to the 'single' connection driver fops
12435 + * -------------------------------------------------------------------- */
12436 +const VCHI_CONNECTION_API_T *
12437 +single_get_func_table(void)
12438 +{
12439 +       return NULL;
12440 +}
12441 +
12442 +VCHI_CONNECTION_T *vchi_create_connection(
12443 +       const VCHI_CONNECTION_API_T *function_table,
12444 +       const VCHI_MESSAGE_DRIVER_T *low_level)
12445 +{
12446 +       (void)function_table;
12447 +       (void)low_level;
12448 +       return NULL;
12449 +}
12450 +
12451 +/***********************************************************
12452 + * Name: vchi_msg_peek
12453 + *
12454 + * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
12455 + *             void **data,
12456 + *             uint32_t *msg_size,
12457 + *
12458 + *
12459 + *             VCHI_FLAGS_T flags
12460 + *
12461 + * Description: Routine to return a pointer to the current message (to allow in
12462 + *              place processing). The message can be removed using
12463 + *              vchi_msg_remove when you're finished
12464 + *
12465 + * Returns: int32_t - success == 0
12466 + *
12467 + ***********************************************************/
12468 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
12469 +       void **data,
12470 +       uint32_t *msg_size,
12471 +       VCHI_FLAGS_T flags)
12472 +{
12473 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12474 +       VCHIQ_HEADER_T *header;
12475 +
12476 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
12477 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12478 +
12479 +       if (flags == VCHI_FLAGS_NONE)
12480 +               if (vchiu_queue_is_empty(&service->queue))
12481 +                       return -1;
12482 +
12483 +       header = vchiu_queue_peek(&service->queue);
12484 +
12485 +       *data = header->data;
12486 +       *msg_size = header->size;
12487 +
12488 +       return 0;
12489 +}
12490 +EXPORT_SYMBOL(vchi_msg_peek);
12491 +
12492 +/***********************************************************
12493 + * Name: vchi_msg_remove
12494 + *
12495 + * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
12496 + *
12497 + * Description: Routine to remove a message (after it has been read with
12498 + *              vchi_msg_peek)
12499 + *
12500 + * Returns: int32_t - success == 0
12501 + *
12502 + ***********************************************************/
12503 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
12504 +{
12505 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12506 +       VCHIQ_HEADER_T *header;
12507 +
12508 +       header = vchiu_queue_pop(&service->queue);
12509 +
12510 +       vchiq_release_message(service->handle, header);
12511 +
12512 +       return 0;
12513 +}
12514 +EXPORT_SYMBOL(vchi_msg_remove);
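
vchi_msg_peek() and vchi_msg_remove() are intended to be used as a pair for in-place processing, roughly as in the sketch below; the helper name is illustrative and the service handle is assumed to come from a successful vchi_service_open().

/* Illustrative peek/process/remove pattern for the two calls above. */
static void demo_drain_one(VCHI_SERVICE_HANDLE_T handle)
{
	void *data;
	uint32_t size;

	if (vchi_msg_peek(handle, &data, &size, VCHI_FLAGS_NONE) == 0) {
		/* ... process 'size' bytes at 'data' in place ... */
		vchi_msg_remove(handle);	/* releases the header */
	}
}
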
12515 +
12516 +/***********************************************************
12517 + * Name: vchi_msg_queue
12518 + *
12519 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12520 + *             const void *data,
12521 + *             uint32_t data_size,
12522 + *             VCHI_FLAGS_T flags,
12523 + *             void *msg_handle,
12524 + *
12525 + * Description: Thin wrapper to queue a message onto a connection
12526 + *
12527 + * Returns: int32_t - success == 0
12528 + *
12529 + ***********************************************************/
12530 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
12531 +       const void *data,
12532 +       uint32_t data_size,
12533 +       VCHI_FLAGS_T flags,
12534 +       void *msg_handle)
12535 +{
12536 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12537 +       VCHIQ_ELEMENT_T element = {data, data_size};
12538 +       VCHIQ_STATUS_T status;
12539 +
12540 +       (void)msg_handle;
12541 +
12542 +       WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12543 +
12544 +       status = vchiq_queue_message(service->handle, &element, 1);
12545 +
12546 +       /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
12547 +       ** implement a retry mechanism since this function is supposed
12548 +       ** to block until queued
12549 +       */
12550 +       while (status == VCHIQ_RETRY) {
12551 +               msleep(1);
12552 +               status = vchiq_queue_message(service->handle, &element, 1);
12553 +       }
12554 +
12555 +       return vchiq_status_to_vchi(status);
12556 +}
12557 +EXPORT_SYMBOL(vchi_msg_queue);
12558 +
12559 +/***********************************************************
12560 + * Name: vchi_bulk_queue_receive
12561 + *
12562 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12563 + *             void *data_dst,
12564 + *             const uint32_t data_size,
12565 + *             VCHI_FLAGS_T flags
12566 + *             void *bulk_handle
12567 + *
12568 + * Description: Routine to set up a receive buffer
12569 + *
12570 + * Returns: int32_t - success == 0
12571 + *
12572 + ***********************************************************/
12573 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
12574 +       void *data_dst,
12575 +       uint32_t data_size,
12576 +       VCHI_FLAGS_T flags,
12577 +       void *bulk_handle)
12578 +{
12579 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12580 +       VCHIQ_BULK_MODE_T mode;
12581 +       VCHIQ_STATUS_T status;
12582 +
12583 +       switch ((int)flags) {
12584 +       case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12585 +               | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12586 +               WARN_ON(!service->callback);
12587 +               mode = VCHIQ_BULK_MODE_CALLBACK;
12588 +               break;
12589 +       case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12590 +               mode = VCHIQ_BULK_MODE_BLOCKING;
12591 +               break;
12592 +       case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12593 +       case VCHI_FLAGS_NONE:
12594 +               mode = VCHIQ_BULK_MODE_NOCALLBACK;
12595 +               break;
12596 +       default:
12597 +		WARN(1, "unsupported flags\n");
12598 +               return vchiq_status_to_vchi(VCHIQ_ERROR);
12599 +       }
12600 +
12601 +       status = vchiq_bulk_receive(service->handle, data_dst, data_size,
12602 +               bulk_handle, mode);
12603 +
12604 +       /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
12605 +       ** implement a retry mechanism since this function is supposed
12606 +       ** to block until queued
12607 +       */
12608 +       while (status == VCHIQ_RETRY) {
12609 +               msleep(1);
12610 +               status = vchiq_bulk_receive(service->handle, data_dst,
12611 +                       data_size, bulk_handle, mode);
12612 +       }
12613 +
12614 +       return vchiq_status_to_vchi(status);
12615 +}
12616 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
12617 +
12618 +/***********************************************************
12619 + * Name: vchi_bulk_queue_transmit
12620 + *
12621 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12622 + *             const void *data_src,
12623 + *             uint32_t data_size,
12624 + *             VCHI_FLAGS_T flags,
12625 + *             void *bulk_handle
12626 + *
12627 + * Description: Routine to transmit some data
12628 + *
12629 + * Returns: int32_t - success == 0
12630 + *
12631 + ***********************************************************/
12632 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
12633 +       const void *data_src,
12634 +       uint32_t data_size,
12635 +       VCHI_FLAGS_T flags,
12636 +       void *bulk_handle)
12637 +{
12638 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12639 +       VCHIQ_BULK_MODE_T mode;
12640 +       VCHIQ_STATUS_T status;
12641 +
12642 +       switch ((int)flags) {
12643 +       case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
12644 +               | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12645 +               WARN_ON(!service->callback);
12646 +               mode = VCHIQ_BULK_MODE_CALLBACK;
12647 +               break;
12648 +       case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
12649 +       case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
12650 +               mode = VCHIQ_BULK_MODE_BLOCKING;
12651 +               break;
12652 +       case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
12653 +       case VCHI_FLAGS_NONE:
12654 +               mode = VCHIQ_BULK_MODE_NOCALLBACK;
12655 +               break;
12656 +       default:
12657 +		WARN(1, "unsupported flags\n");
12658 +               return vchiq_status_to_vchi(VCHIQ_ERROR);
12659 +       }
12660 +
12661 +       status = vchiq_bulk_transmit(service->handle, data_src, data_size,
12662 +               bulk_handle, mode);
12663 +
12664 +       /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
12665 +       ** implement a retry mechanism since this function is supposed
12666 +       ** to block until queued
12667 +       */
12668 +       while (status == VCHIQ_RETRY) {
12669 +               msleep(1);
12670 +               status = vchiq_bulk_transmit(service->handle, data_src,
12671 +                       data_size, bulk_handle, mode);
12672 +       }
12673 +
12674 +       return vchiq_status_to_vchi(status);
12675 +}
12676 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
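
The switch statements above map VCHI flag combinations onto VCHIQ bulk modes: CALLBACK_WHEN_OP_COMPLETE | BLOCK_UNTIL_QUEUED requires a service callback, BLOCK_UNTIL_OP_COMPLETE (or BLOCK_UNTIL_DATA_READ on transmit) gives blocking semantics, and NONE / BLOCK_UNTIL_QUEUED queue without a callback. A minimal blocking transmit might look like the sketch below; the helper name and buffer are assumptions.

/* Illustrative blocking transmit built on the flag mapping above. */
static int32_t demo_send(VCHI_SERVICE_HANDLE_T handle,
	const void *buf, uint32_t len)
{
	/* Returns once the peer has consumed the buffer (BLOCKING mode). */
	return vchi_bulk_queue_transmit(handle, buf, len,
		VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE, NULL);
}
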
12677 +
12678 +/***********************************************************
12679 + * Name: vchi_msg_dequeue
12680 + *
12681 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12682 + *             void *data,
12683 + *             uint32_t max_data_size_to_read,
12684 + *             uint32_t *actual_msg_size
12685 + *             VCHI_FLAGS_T flags
12686 + *
12687 + * Description: Routine to dequeue a message into the supplied buffer
12688 + *
12689 + * Returns: int32_t - success == 0
12690 + *
12691 + ***********************************************************/
12692 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
12693 +       void *data,
12694 +       uint32_t max_data_size_to_read,
12695 +       uint32_t *actual_msg_size,
12696 +       VCHI_FLAGS_T flags)
12697 +{
12698 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12699 +       VCHIQ_HEADER_T *header;
12700 +
12701 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
12702 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12703 +
12704 +       if (flags == VCHI_FLAGS_NONE)
12705 +               if (vchiu_queue_is_empty(&service->queue))
12706 +                       return -1;
12707 +
12708 +       header = vchiu_queue_pop(&service->queue);
12709 +
12710 +       memcpy(data, header->data, header->size < max_data_size_to_read ?
12711 +               header->size : max_data_size_to_read);
12712 +
12713 +       *actual_msg_size = header->size;
12714 +
12715 +       vchiq_release_message(service->handle, header);
12716 +
12717 +       return 0;
12718 +}
12719 +EXPORT_SYMBOL(vchi_msg_dequeue);
12720 +
12721 +/***********************************************************
12722 + * Name: vchi_msg_queuev
12723 + *
12724 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12725 + *             VCHI_MSG_VECTOR_T *vector,
12726 + *             uint32_t count,
12727 + *             VCHI_FLAGS_T flags,
12728 + *             void *msg_handle
12729 + *
12730 + * Description: Thin wrapper to queue a message onto a connection
12731 + *
12732 + * Returns: int32_t - success == 0
12733 + *
12734 + ***********************************************************/
12735 +
12736 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
12737 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
12738 +       offsetof(VCHIQ_ELEMENT_T, data));
12739 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
12740 +       offsetof(VCHIQ_ELEMENT_T, size));
12741 +
12742 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
12743 +       VCHI_MSG_VECTOR_T *vector,
12744 +       uint32_t count,
12745 +       VCHI_FLAGS_T flags,
12746 +       void *msg_handle)
12747 +{
12748 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12749 +
12750 +       (void)msg_handle;
12751 +
12752 +       WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
12753 +
12754 +       return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
12755 +               (const VCHIQ_ELEMENT_T *)vector, count));
12756 +}
12757 +EXPORT_SYMBOL(vchi_msg_queuev);
12758 +
12759 +/***********************************************************
12760 + * Name: vchi_held_msg_release
12761 + *
12762 + * Arguments:  VCHI_HELD_MSG_T *message
12763 + *
12764 + * Description: Routine to release a held message (after it has been read with
12765 + *              vchi_msg_hold)
12766 + *
12767 + * Returns: int32_t - success == 0
12768 + *
12769 + ***********************************************************/
12770 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
12771 +{
12772 +       vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
12773 +               (VCHIQ_HEADER_T *)message->message);
12774 +
12775 +       return 0;
12776 +}
12777 +
12778 +/***********************************************************
12779 + * Name: vchi_msg_hold
12780 + *
12781 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
12782 + *             void **data,
12783 + *             uint32_t *msg_size,
12784 + *             VCHI_FLAGS_T flags,
12785 + *             VCHI_HELD_MSG_T *message_handle
12786 + *
12787 + * Description: Routine to return a pointer to the current message (to allow
12788 + *              in place processing). The message is dequeued - don't forget
12789 + *              to release the message using vchi_held_msg_release when you're
12790 + *              finished.
12791 + *
12792 + * Returns: int32_t - success == 0
12793 + *
12794 + ***********************************************************/
12795 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
12796 +       void **data,
12797 +       uint32_t *msg_size,
12798 +       VCHI_FLAGS_T flags,
12799 +       VCHI_HELD_MSG_T *message_handle)
12800 +{
12801 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12802 +       VCHIQ_HEADER_T *header;
12803 +
12804 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
12805 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
12806 +
12807 +       if (flags == VCHI_FLAGS_NONE)
12808 +               if (vchiu_queue_is_empty(&service->queue))
12809 +                       return -1;
12810 +
12811 +       header = vchiu_queue_pop(&service->queue);
12812 +
12813 +       *data = header->data;
12814 +       *msg_size = header->size;
12815 +
12816 +       message_handle->service =
12817 +               (struct opaque_vchi_service_t *)service->handle;
12818 +       message_handle->message = header;
12819 +
12820 +       return 0;
12821 +}
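
Unlike peek/remove, vchi_msg_hold() dequeues the message but keeps its payload valid until vchi_held_msg_release() is called on the held-message handle, roughly as sketched below (the helper name is illustrative).

/* Illustrative hold/release pattern for the two calls above. */
static void demo_hold_one(VCHI_SERVICE_HANDLE_T handle)
{
	VCHI_HELD_MSG_T held;
	void *data;
	uint32_t size;

	if (vchi_msg_hold(handle, &data, &size, VCHI_FLAGS_NONE, &held) == 0) {
		/* 'data' remains valid here, even though the message has
		 * already been dequeued ... */
		vchi_held_msg_release(&held);
	}
}
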
12822 +
12823 +/***********************************************************
12824 + * Name: vchi_initialise
12825 + *
12826 + * Arguments: VCHI_INSTANCE_T *instance_handle
12827 + *            (connections/num_connections are passed to vchi_connect
12828 + *             instead, and are unused by this shim)
12829 + *
12830 + * Description: Initialises the hardware but does not transmit anything.
12831 + *              When run as a Host App this will be called twice, hence the
12832 + *              need to malloc the state information.
12833 + *
12834 + * Returns: 0 if successful, failure otherwise
12835 + *
12836 + ***********************************************************/
12837 +
12838 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
12839 +{
12840 +       VCHIQ_INSTANCE_T instance;
12841 +       VCHIQ_STATUS_T status;
12842 +
12843 +       status = vchiq_initialise(&instance);
12844 +
12845 +       *instance_handle = (VCHI_INSTANCE_T)instance;
12846 +
12847 +       return vchiq_status_to_vchi(status);
12848 +}
12849 +EXPORT_SYMBOL(vchi_initialise);
12850 +
12851 +/***********************************************************
12852 + * Name: vchi_connect
12853 + *
12854 + * Arguments: VCHI_CONNECTION_T **connections
12855 + *            const uint32_t num_connections
12856 + *            VCHI_INSTANCE_T instance_handle)
12857 + *
12858 + * Description: Starts the command service on each connection,
12859 + *              causing INIT messages to be pinged back and forth
12860 + *
12861 + * Returns: 0 if successful, failure otherwise
12862 + *
12863 + ***********************************************************/
12864 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
12865 +       const uint32_t num_connections,
12866 +       VCHI_INSTANCE_T instance_handle)
12867 +{
12868 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12869 +
12870 +       (void)connections;
12871 +       (void)num_connections;
12872 +
12873 +       return vchiq_connect(instance);
12874 +}
12875 +EXPORT_SYMBOL(vchi_connect);
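
vchi_initialise() followed by vchi_connect() is the start-up sequence a VCHI client goes through before opening services; since this shim ignores the connection arguments, passing NULL and 0 is sufficient, as in the sketch below (the helper name is illustrative).

/* Illustrative start-up sequence built on the two calls above. */
static int32_t demo_vchi_startup(VCHI_INSTANCE_T *instance_out)
{
	int32_t ret = vchi_initialise(instance_out);

	if (ret != 0)
		return ret;
	/* connections/num_connections are unused by this shim */
	return vchi_connect(NULL, 0, *instance_out);
}
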
12876 +
12877 +
12878 +/***********************************************************
12879 + * Name: vchi_disconnect
12880 + *
12881 + * Arguments: VCHI_INSTANCE_T instance_handle
12882 + *
12883 + * Description: Stops the command service on each connection,
12884 + *              causing DE-INIT messages to be pinged back and forth
12885 + *
12886 + * Returns: 0 if successful, failure otherwise
12887 + *
12888 + ***********************************************************/
12889 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
12890 +{
12891 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12892 +       return vchiq_status_to_vchi(vchiq_shutdown(instance));
12893 +}
12894 +EXPORT_SYMBOL(vchi_disconnect);
12895 +
12896 +
12897 +/***********************************************************
12898 + * Name: vchi_service_open
12899 + * Name: vchi_service_create
12900 + *
12901 + * Arguments: VCHI_INSTANCE_T *instance_handle
12902 + *            SERVICE_CREATION_T *setup,
12903 + *            VCHI_SERVICE_HANDLE_T *handle
12904 + *
12905 + * Description: Routine to open a service
12906 + *
12907 + * Returns: int32_t - success == 0
12908 + *
12909 + ***********************************************************/
12910 +
12911 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
12912 +       VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
12913 +{
12914 +       SHIM_SERVICE_T *service =
12915 +               (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
12916 +
12917 +       switch (reason) {
12918 +       case VCHIQ_MESSAGE_AVAILABLE:
12919 +               vchiu_queue_push(&service->queue, header);
12920 +
12921 +               if (service->callback)
12922 +                       service->callback(service->callback_param,
12923 +                               VCHI_CALLBACK_MSG_AVAILABLE, NULL);
12924 +               break;
12925 +       case VCHIQ_BULK_TRANSMIT_DONE:
12926 +               if (service->callback)
12927 +                       service->callback(service->callback_param,
12928 +                               VCHI_CALLBACK_BULK_SENT, bulk_user);
12929 +               break;
12930 +       case VCHIQ_BULK_RECEIVE_DONE:
12931 +               if (service->callback)
12932 +                       service->callback(service->callback_param,
12933 +                               VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
12934 +               break;
12935 +       case VCHIQ_SERVICE_CLOSED:
12936 +               if (service->callback)
12937 +                       service->callback(service->callback_param,
12938 +                               VCHI_CALLBACK_SERVICE_CLOSED, NULL);
12939 +               break;
12940 +       case VCHIQ_SERVICE_OPENED:
12941 +               /* No equivalent VCHI reason */
12942 +               break;
12943 +       case VCHIQ_BULK_TRANSMIT_ABORTED:
12944 +               if (service->callback)
12945 +                       service->callback(service->callback_param,
12946 +                               VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
12947 +               break;
12948 +       case VCHIQ_BULK_RECEIVE_ABORTED:
12949 +               if (service->callback)
12950 +                       service->callback(service->callback_param,
12951 +                               VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
12952 +               break;
12953 +       default:
12954 +               WARN(1, "not supported\n");
12955 +               break;
12956 +       }
12957 +
12958 +       return VCHIQ_SUCCESS;
12959 +}
12960 +
12961 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
12962 +       SERVICE_CREATION_T *setup)
12963 +{
12964 +       SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
12965 +
12966 +       (void)instance;
12967 +
12968 +       if (service) {
12969 +               if (vchiu_queue_init(&service->queue, 64)) {
12970 +                       service->callback = setup->callback;
12971 +                       service->callback_param = setup->callback_param;
12972 +               } else {
12973 +                       kfree(service);
12974 +                       service = NULL;
12975 +               }
12976 +       }
12977 +
12978 +       return service;
12979 +}
12980 +
12981 +static void service_free(SHIM_SERVICE_T *service)
12982 +{
12983 +       if (service) {
12984 +               vchiu_queue_delete(&service->queue);
12985 +               kfree(service);
12986 +       }
12987 +}
12988 +
12989 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
12990 +       SERVICE_CREATION_T *setup,
12991 +       VCHI_SERVICE_HANDLE_T *handle)
12992 +{
12993 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
12994 +       SHIM_SERVICE_T *service = service_alloc(instance, setup);
12995 +       if (service) {
12996 +               VCHIQ_SERVICE_PARAMS_T params;
12997 +               VCHIQ_STATUS_T status;
12998 +
12999 +               memset(&params, 0, sizeof(params));
13000 +               params.fourcc = setup->service_id;
13001 +               params.callback = shim_callback;
13002 +               params.userdata = service;
13003 +               params.version = setup->version.version;
13004 +               params.version_min = setup->version.version_min;
13005 +
13006 +               status = vchiq_open_service(instance, &params,
13007 +                       &service->handle);
13008 +               if (status != VCHIQ_SUCCESS) {
13009 +                       service_free(service);
13010 +                       service = NULL;
13011 +               }
13012 +       }
13013 +
13014 +       *handle = (VCHI_SERVICE_HANDLE_T)service;
13015 +
13016 +       return (service != NULL) ? 0 : -1;
13017 +}
13018 +EXPORT_SYMBOL(vchi_service_open);
13019 +
13020 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
13021 +       SERVICE_CREATION_T *setup,
13022 +       VCHI_SERVICE_HANDLE_T *handle)
13023 +{
13024 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
13025 +       SHIM_SERVICE_T *service = service_alloc(instance, setup);
13026 +       if (service) {
13027 +               VCHIQ_SERVICE_PARAMS_T params;
13028 +               VCHIQ_STATUS_T status;
13029 +
13030 +               memset(&params, 0, sizeof(params));
13031 +               params.fourcc = setup->service_id;
13032 +               params.callback = shim_callback;
13033 +               params.userdata = service;
13034 +               params.version = setup->version.version;
13035 +               params.version_min = setup->version.version_min;
13036 +               status = vchiq_add_service(instance, &params, &service->handle);
13037 +
13038 +               if (status != VCHIQ_SUCCESS) {
13039 +                       service_free(service);
13040 +                       service = NULL;
13041 +               }
13042 +       }
13043 +
13044 +       *handle = (VCHI_SERVICE_HANDLE_T)service;
13045 +
13046 +       return (service != NULL) ? 0 : -1;
13047 +}
13048 +EXPORT_SYMBOL(vchi_service_create);
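
To make the setup structure concrete, here is a hedged sketch of a client opening a service through this shim. Only the SERVICE_CREATION_T fields actually read above (service_id, callback, callback_param and the version pair) are filled in; the callback prototype, the completion-based signalling and the 'DEMO' fourcc are illustrative assumptions.

    #include <linux/completion.h>
    #include <linux/string.h>

    /* Assumed VCHI callback shape: (callback_param, reason, msg_handle). */
    static void example_service_callback(void *param,
            const VCHI_CALLBACK_REASON_T reason, void *msg_handle)
    {
            if (reason == VCHI_CALLBACK_MSG_AVAILABLE)
                    complete((struct completion *)param);
    }

    static int32_t example_service_open(VCHI_INSTANCE_T instance,
            struct completion *msg_ready, VCHI_SERVICE_HANDLE_T *handle)
    {
            SERVICE_CREATION_T setup;

            memset(&setup, 0, sizeof(setup));
            setup.service_id = ('D' << 24) | ('E' << 16) | ('M' << 8) | 'O';
            setup.callback = example_service_callback;
            setup.callback_param = msg_ready;
            setup.version.version = 1;      /* versions negotiated against the peer */
            setup.version.version_min = 1;

            return vchi_service_open(instance, &setup, handle);
    }
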
13049 +
13050 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
13051 +{
13052 +       int32_t ret = -1;
13053 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13054 +       if (service) {
13055 +               VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
13056 +               if (status == VCHIQ_SUCCESS) {
13057 +                       service_free(service);
13058 +                       service = NULL;
13059 +               }
13060 +
13061 +               ret = vchiq_status_to_vchi(status);
13062 +       }
13063 +       return ret;
13064 +}
13065 +EXPORT_SYMBOL(vchi_service_close);
13066 +
13067 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
13068 +{
13069 +       int32_t ret = -1;
13070 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13071 +       if (service) {
13072 +               VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
13073 +               if (status == VCHIQ_SUCCESS) {
13074 +                       service_free(service);
13075 +                       service = NULL;
13076 +               }
13077 +
13078 +               ret = vchiq_status_to_vchi(status);
13079 +       }
13080 +       return ret;
13081 +}
13082 +EXPORT_SYMBOL(vchi_service_destroy);
13083 +
13084 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
13085 +{
13086 +       int32_t ret = -1;
13087 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13088 +       if (service) {
13089 +               VCHIQ_STATUS_T status =
13090 +                       vchiq_get_peer_version(service->handle, peer_version);
13091 +               ret = vchiq_status_to_vchi(status);
13092 +       }
13093 +       return ret;
13094 +}
13095 +EXPORT_SYMBOL(vchi_get_peer_version);
13096 +
13097 +/* ----------------------------------------------------------------------
13098 + * read a uint32_t from buffer.
13099 + * network format is defined to be little endian
13100 + * -------------------------------------------------------------------- */
13101 +uint32_t
13102 +vchi_readbuf_uint32(const void *_ptr)
13103 +{
13104 +       const unsigned char *ptr = _ptr;
13105 +       return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
13106 +}
13107 +
13108 +/* ----------------------------------------------------------------------
13109 + * write a uint32_t to buffer.
13110 + * network format is defined to be little endian
13111 + * -------------------------------------------------------------------- */
13112 +void
13113 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
13114 +{
13115 +       unsigned char *ptr = _ptr;
13116 +       ptr[0] = (unsigned char)((value >> 0)  & 0xFF);
13117 +       ptr[1] = (unsigned char)((value >> 8)  & 0xFF);
13118 +       ptr[2] = (unsigned char)((value >> 16) & 0xFF);
13119 +       ptr[3] = (unsigned char)((value >> 24) & 0xFF);
13120 +}
13121 +
13122 +/* ----------------------------------------------------------------------
13123 + * read a uint16_t from buffer.
13124 + * network format is defined to be little endian
13125 + * -------------------------------------------------------------------- */
13126 +uint16_t
13127 +vchi_readbuf_uint16(const void *_ptr)
13128 +{
13129 +       const unsigned char *ptr = _ptr;
13130 +       return ptr[0] | (ptr[1] << 8);
13131 +}
13132 +
13133 +/* ----------------------------------------------------------------------
13134 + * write a uint16_t into the buffer.
13135 + * network format is defined to be little endian
13136 + * -------------------------------------------------------------------- */
13137 +void
13138 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
13139 +{
13140 +       unsigned char *ptr = _ptr;
13141 +       ptr[0] = (value >> 0)  & 0xFF;
13142 +       ptr[1] = (value >> 8)  & 0xFF;
13143 +}
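
These byte-wise helpers make the marshalling endian-independent: the same bytes come back regardless of host byte order, because each byte is read and written individually. A small self-contained check (the function name is illustrative):

    #include <linux/bug.h>

    static void example_endian_check(void)
    {
            unsigned char buf[6];

            vchi_writebuf_uint32(buf, 0x12345678);   /* stores 78 56 34 12 */
            vchi_writebuf_uint16(buf + 4, 0xABCD);   /* stores CD AB */

            WARN_ON(vchi_readbuf_uint32(buf) != 0x12345678);
            WARN_ON(vchi_readbuf_uint16(buf + 4) != 0xABCD);
    }
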
13144 +
13145 +/***********************************************************
13146 + * Name: vchi_service_use
13147 + *
13148 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
13149 + *
13150 + * Description: Routine to increment refcount on a service
13151 + *
13152 + * Returns: int32_t - success == 0
13153 + *
13154 + ***********************************************************/
13155 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
13156 +{
13157 +       int32_t ret = -1;
13158 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13159 +       if (service)
13160 +               ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
13161 +       return ret;
13162 +}
13163 +EXPORT_SYMBOL(vchi_service_use);
13164 +
13165 +/***********************************************************
13166 + * Name: vchi_service_release
13167 + *
13168 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
13169 + *
13170 + * Description: Routine to decrement refcount on a service
13171 + *
13172 + * Returns: int32_t - success == 0
13173 + *
13174 + ***********************************************************/
13175 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
13176 +{
13177 +       int32_t ret = -1;
13178 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
13179 +       if (service)
13180 +               ret = vchiq_status_to_vchi(
13181 +                       vchiq_release_service(service->handle));
13182 +       return ret;
13183 +}
13184 +EXPORT_SYMBOL(vchi_service_release);
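
The use/release pair brackets activity on a service so the underlying vchiq service keeps a non-zero use count while a client is mid-exchange. A minimal sketch, assuming `handle` was obtained from vchi_service_open() above; the surrounding function is illustrative:

    static int32_t example_use_service(VCHI_SERVICE_HANDLE_T handle)
    {
            int32_t ret = vchi_service_use(handle);

            if (ret != 0)
                    return ret;

            /* ... queue a request and wait for the reply here ... */

            vchi_service_release(handle);
            return 0;
    }
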
13185 --- /dev/null
13186 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
13187 @@ -0,0 +1,120 @@
13188 +/**
13189 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13190 + *
13191 + * Redistribution and use in source and binary forms, with or without
13192 + * modification, are permitted provided that the following conditions
13193 + * are met:
13194 + * 1. Redistributions of source code must retain the above copyright
13195 + *    notice, this list of conditions, and the following disclaimer,
13196 + *    without modification.
13197 + * 2. Redistributions in binary form must reproduce the above copyright
13198 + *    notice, this list of conditions and the following disclaimer in the
13199 + *    documentation and/or other materials provided with the distribution.
13200 + * 3. The names of the above-listed copyright holders may not be used
13201 + *    to endorse or promote products derived from this software without
13202 + *    specific prior written permission.
13203 + *
13204 + * ALTERNATIVELY, this software may be distributed under the terms of the
13205 + * GNU General Public License ("GPL") version 2, as published by the Free
13206 + * Software Foundation.
13207 + *
13208 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13209 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13210 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13211 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13212 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13213 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13214 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13215 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13216 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13217 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13218 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13219 + */
13220 +
13221 +#include "vchiq_util.h"
13222 +
13223 +static inline int is_pow2(int i)
13224 +{
13225 +       return i && !(i & (i - 1));
13226 +}
13227 +
13228 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
13229 +{
13230 +       WARN_ON(!is_pow2(size));
13231 +
13232 +       queue->size = size;
13233 +       queue->read = 0;
13234 +       queue->write = 0;
13235 +
13236 +       sema_init(&queue->pop, 0);
13237 +       sema_init(&queue->push, 0);
13238 +
13239 +       queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
13240 +       if (queue->storage == NULL) {
13241 +               vchiu_queue_delete(queue);
13242 +               return 0;
13243 +       }
13244 +       return 1;
13245 +}
13246 +
13247 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
13248 +{
13249 +       if (queue->storage != NULL)
13250 +               kfree(queue->storage);
13251 +}
13252 +
13253 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
13254 +{
13255 +       return queue->read == queue->write;
13256 +}
13257 +
13258 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
13259 +{
13260 +       return queue->write == queue->read + queue->size;
13261 +}
13262 +
13263 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
13264 +{
13265 +       while (queue->write == queue->read + queue->size) {
13266 +               if (down_interruptible(&queue->pop) != 0) {
13267 +                       flush_signals(current);
13268 +               }
13269 +       }
13270 +
13271 +       queue->storage[queue->write & (queue->size - 1)] = header;
13272 +
13273 +       queue->write++;
13274 +
13275 +       up(&queue->push);
13276 +}
13277 +
13278 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
13279 +{
13280 +       while (queue->write == queue->read) {
13281 +               if (down_interruptible(&queue->push) != 0) {
13282 +                       flush_signals(current);
13283 +               }
13284 +       }
13285 +
13286 +       up(&queue->push); /* We haven't removed anything from the queue. */
13287 +       return queue->storage[queue->read & (queue->size - 1)];
13288 +}
13289 +
13290 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
13291 +{
13292 +       VCHIQ_HEADER_T *header;
13293 +
13294 +       while (queue->write == queue->read) {
13295 +               if (down_interruptible(&queue->push) != 0) {
13296 +                       flush_signals(current);
13297 +               }
13298 +       }
13299 +
13300 +       header = queue->storage[queue->read & (queue->size - 1)];
13301 +
13302 +       queue->read++;
13303 +
13304 +       up(&queue->pop);
13305 +
13306 +       return header;
13307 +}
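
The queue above is a fixed-size, power-of-two ring with blocking push/pop, which is how the shim uses it: the vchiq callback (running in thread context) pushes message headers and the message-dequeue path pops them. A reduced sketch of that pairing; the demo_* names and the -ENOMEM mapping are illustrative assumptions:

    #include <linux/errno.h>
    #include "vchiq_util.h"

    static VCHIU_QUEUE_T demo_queue;

    static int demo_queue_setup(void)
    {
            /* size must be a power of two; indices wrap with "& (size - 1)" */
            return vchiu_queue_init(&demo_queue, 64) ? 0 : -ENOMEM;
    }

    /* producer side, e.g. called from a service callback in thread context */
    static void demo_on_message(VCHIQ_HEADER_T *header)
    {
            vchiu_queue_push(&demo_queue, header);   /* blocks if the ring is full */
    }

    /* consumer side: blocks until a header has been pushed */
    static VCHIQ_HEADER_T *demo_wait_for_message(void)
    {
            return vchiu_queue_pop(&demo_queue);
    }
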
13308 --- /dev/null
13309 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
13310 @@ -0,0 +1,82 @@
13311 +/**
13312 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13313 + *
13314 + * Redistribution and use in source and binary forms, with or without
13315 + * modification, are permitted provided that the following conditions
13316 + * are met:
13317 + * 1. Redistributions of source code must retain the above copyright
13318 + *    notice, this list of conditions, and the following disclaimer,
13319 + *    without modification.
13320 + * 2. Redistributions in binary form must reproduce the above copyright
13321 + *    notice, this list of conditions and the following disclaimer in the
13322 + *    documentation and/or other materials provided with the distribution.
13323 + * 3. The names of the above-listed copyright holders may not be used
13324 + *    to endorse or promote products derived from this software without
13325 + *    specific prior written permission.
13326 + *
13327 + * ALTERNATIVELY, this software may be distributed under the terms of the
13328 + * GNU General Public License ("GPL") version 2, as published by the Free
13329 + * Software Foundation.
13330 + *
13331 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13332 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13333 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13334 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13335 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13336 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13337 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13338 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13339 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13340 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13341 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13342 + */
13343 +
13344 +#ifndef VCHIQ_UTIL_H
13345 +#define VCHIQ_UTIL_H
13346 +
13347 +#include <linux/types.h>
13348 +#include <linux/semaphore.h>
13349 +#include <linux/mutex.h>
13350 +#include <linux/bitops.h>
13351 +#include <linux/kthread.h>
13352 +#include <linux/wait.h>
13353 +#include <linux/vmalloc.h>
13354 +#include <linux/jiffies.h>
13355 +#include <linux/delay.h>
13356 +#include <linux/string.h>
13358 +#include <linux/interrupt.h>
13359 +#include <linux/random.h>
13360 +#include <linux/sched.h>
13361 +#include <linux/ctype.h>
13362 +#include <linux/uaccess.h>
13363 +#include <linux/time.h>  /* for time_t */
13364 +#include <linux/slab.h>
13366 +
13367 +#include "vchiq_if.h"
13368 +
13369 +typedef struct {
13370 +       int size;
13371 +       int read;
13372 +       int write;
13373 +
13374 +       struct semaphore pop;
13375 +       struct semaphore push;
13376 +
13377 +       VCHIQ_HEADER_T **storage;
13378 +} VCHIU_QUEUE_T;
13379 +
13380 +extern int  vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
13381 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
13382 +
13383 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
13384 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
13385 +
13386 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
13387 +
13388 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
13389 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
13390 +
13391 +#endif
13392 +
13393 --- /dev/null
13394 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
13395 @@ -0,0 +1,59 @@
13396 +/**
13397 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
13398 + *
13399 + * Redistribution and use in source and binary forms, with or without
13400 + * modification, are permitted provided that the following conditions
13401 + * are met:
13402 + * 1. Redistributions of source code must retain the above copyright
13403 + *    notice, this list of conditions, and the following disclaimer,
13404 + *    without modification.
13405 + * 2. Redistributions in binary form must reproduce the above copyright
13406 + *    notice, this list of conditions and the following disclaimer in the
13407 + *    documentation and/or other materials provided with the distribution.
13408 + * 3. The names of the above-listed copyright holders may not be used
13409 + *    to endorse or promote products derived from this software without
13410 + *    specific prior written permission.
13411 + *
13412 + * ALTERNATIVELY, this software may be distributed under the terms of the
13413 + * GNU General Public License ("GPL") version 2, as published by the Free
13414 + * Software Foundation.
13415 + *
13416 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
13417 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
13418 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
13419 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13420 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13421 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
13422 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
13423 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13424 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13425 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13426 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13427 + */
13428 +#include "vchiq_build_info.h"
13429 +#include <linux/broadcom/vc_debug_sym.h>
13430 +
13431 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_hostname, "dc4-arm-01");
13432 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)");
13433 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_time,    __TIME__);
13434 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_date,    __DATE__);
13435 +
13436 +const char *vchiq_get_build_hostname(void)
13437 +{
13438 +       return vchiq_build_hostname;
13439 +}
13440 +
13441 +const char *vchiq_get_build_version(void)
13442 +{
13443 +       return vchiq_build_version;
13444 +}
13445 +
13446 +const char *vchiq_get_build_date(void)
13447 +{
13448 +       return vchiq_build_date;
13449 +}
13450 +
13451 +const char *vchiq_get_build_time(void)
13452 +{
13453 +       return vchiq_build_time;
13454 +}
13455 --- /dev/null
13456 +++ b/drivers/misc/vc04_services/Kconfig
13457 @@ -0,0 +1,10 @@
13458 +config BCM2708_VCHIQ
13459 +       tristate "Videocore VCHIQ"
13460 +       depends on MACH_BCM2708
13461 +       default y
13462 +       help
13463 +               Kernel to VideoCore communication interface for the
13464 +               BCM2708 family of products.
13465 +               Defaults to Y when building for the BCM2708
13466 +               (MACH_BCM2708) family, N otherwise.
13467 +
13468 --- /dev/null
13469 +++ b/drivers/misc/vc04_services/Makefile
13470 @@ -0,0 +1,18 @@
13471 +ifeq ($(CONFIG_MACH_BCM2708),y)
13472 +
13473 +obj-$(CONFIG_BCM2708_VCHIQ)    += vchiq.o
13474 +
13475 +vchiq-objs := \
13476 +   interface/vchiq_arm/vchiq_core.o  \
13477 +   interface/vchiq_arm/vchiq_arm.o \
13478 +   interface/vchiq_arm/vchiq_kern_lib.o \
13479 +   interface/vchiq_arm/vchiq_2835_arm.o \
13480 +   interface/vchiq_arm/vchiq_proc.o \
13481 +   interface/vchiq_arm/vchiq_shim.o \
13482 +   interface/vchiq_arm/vchiq_util.o \
13483 +   interface/vchiq_arm/vchiq_connected.o
13484 +
13485 +EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
13486 +
13487 +endif
13488 +
13489 --- /dev/null
13490 +++ b/include/linux/broadcom/vc_cma.h
13491 @@ -0,0 +1,30 @@
13492 +/*****************************************************************************
13493 +* Copyright 2012 Broadcom Corporation.  All rights reserved.
13494 +*
13495 +* Unless you and Broadcom execute a separate written software license
13496 +* agreement governing use of this software, this software is licensed to you
13497 +* under the terms of the GNU General Public License version 2, available at
13498 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
13499 +*
13500 +* Notwithstanding the above, under no circumstances may you combine this
13501 +* software in any way with any other Broadcom software provided under a
13502 +* license other than the GPL, without Broadcom's express prior written
13503 +* consent.
13504 +*****************************************************************************/
13505 +
13506 +#if !defined(VC_CMA_H)
13507 +#define VC_CMA_H
13508 +
13509 +#include <linux/ioctl.h>
13510 +
13511 +#define VC_CMA_IOC_MAGIC 0xc5
13512 +
13513 +#define VC_CMA_IOC_RESERVE _IO(VC_CMA_IOC_MAGIC, 0)
13514 +
13515 +#ifdef __KERNEL__
13516 +extern void __init vc_cma_early_init(void);
13517 +extern void __init vc_cma_reserve(void);
13518 +#endif
13519 +
13520 +#endif /* VC_CMA_H */
13521 +
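
The two __init hooks declared above are meant to be wired into the platform's boot path. A hedged sketch of how a machine file might call them; the function names and the exact hook placement are assumptions, not defined by this header:

    #include <linux/init.h>
    #include <linux/broadcom/vc_cma.h>

    static void __init example_machine_init_early(void)
    {
            /* one-time early setup for the vc_cma helper */
            vc_cma_early_init();
    }

    static void __init example_machine_reserve(void)
    {
            /* set aside the contiguous region the VideoCore will use */
            vc_cma_reserve();
    }
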