kernel: refresh patches
[15.05/openwrt.git] target/linux/brcm2708/patches-3.14/0006-bcm2708-vchiq-driver.patch
1 From c9e2d1daa32fd2267d3a61ae3afc2f429746a01f Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 06/54] bcm2708 vchiq driver
5
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7
8 vchiq: create_pagelist copes with vmalloc memory
9
10 Signed-off-by: Daniel Stone <daniels@collabora.com>
11
12 vchiq: fix the shim message release
13
14 Signed-off-by: Daniel Stone <daniels@collabora.com>
15
16 vchiq: export additional symbols
17
18 Signed-off-by: Daniel Stone <daniels@collabora.com>
19 ---
20  drivers/misc/Kconfig                               |    1 +
21  drivers/misc/Makefile                              |    1 +
22  drivers/misc/vc04_services/Kconfig                 |    9 +
23  drivers/misc/vc04_services/Makefile                |   17 +
24  .../interface/vchi/connections/connection.h        |  328 ++
25  .../interface/vchi/message_drivers/message.h       |  204 ++
26  drivers/misc/vc04_services/interface/vchi/vchi.h   |  373 ++
27  .../misc/vc04_services/interface/vchi/vchi_cfg.h   |  224 ++
28  .../interface/vchi/vchi_cfg_internal.h             |   71 +
29  .../vc04_services/interface/vchi/vchi_common.h     |  163 +
30  .../misc/vc04_services/interface/vchi/vchi_mh.h    |   42 +
31  .../misc/vc04_services/interface/vchiq_arm/vchiq.h |   40 +
32  .../vc04_services/interface/vchiq_arm/vchiq_2835.h |   42 +
33  .../interface/vchiq_arm/vchiq_2835_arm.c           |  561 +++
34  .../vc04_services/interface/vchiq_arm/vchiq_arm.c  | 2813 ++++++++++++++
35  .../vc04_services/interface/vchiq_arm/vchiq_arm.h  |  212 ++
36  .../interface/vchiq_arm/vchiq_build_info.h         |   37 +
37  .../vc04_services/interface/vchiq_arm/vchiq_cfg.h  |   60 +
38  .../interface/vchiq_arm/vchiq_connected.c          |  119 +
39  .../interface/vchiq_arm/vchiq_connected.h          |   50 +
40  .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3824 ++++++++++++++++++++
41  .../vc04_services/interface/vchiq_arm/vchiq_core.h |  706 ++++
42  .../interface/vchiq_arm/vchiq_genversion           |   87 +
43  .../vc04_services/interface/vchiq_arm/vchiq_if.h   |  188 +
44  .../interface/vchiq_arm/vchiq_ioctl.h              |  129 +
45  .../interface/vchiq_arm/vchiq_kern_lib.c           |  456 +++
46  .../interface/vchiq_arm/vchiq_memdrv.h             |   71 +
47  .../interface/vchiq_arm/vchiq_pagelist.h           |   58 +
48  .../vc04_services/interface/vchiq_arm/vchiq_proc.c |  253 ++
49  .../vc04_services/interface/vchiq_arm/vchiq_shim.c |  828 +++++
50  .../vc04_services/interface/vchiq_arm/vchiq_util.c |  151 +
51  .../vc04_services/interface/vchiq_arm/vchiq_util.h |   81 +
52  .../interface/vchiq_arm/vchiq_version.c            |   59 +
53  33 files changed, 12258 insertions(+)
54  create mode 100644 drivers/misc/vc04_services/Kconfig
55  create mode 100644 drivers/misc/vc04_services/Makefile
56  create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
57  create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
58  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
59  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
60  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
61  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
62  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
63  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
64  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
65  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
66  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
67  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
68  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
69  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
70  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
71  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
72  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
73  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
74  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
75  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
76  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
77  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
78  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
79  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
80  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
81  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
82  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
83  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
84  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
85
86 --- a/drivers/misc/Kconfig
87 +++ b/drivers/misc/Kconfig
88 @@ -524,6 +524,7 @@ source "drivers/misc/carma/Kconfig"
89  source "drivers/misc/altera-stapl/Kconfig"
90  source "drivers/misc/mei/Kconfig"
91  source "drivers/misc/vmw_vmci/Kconfig"
92 +source "drivers/misc/vc04_services/Kconfig"
93  source "drivers/misc/mic/Kconfig"
94  source "drivers/misc/genwqe/Kconfig"
95  endmenu
96 --- a/drivers/misc/Makefile
97 +++ b/drivers/misc/Makefile
98 @@ -52,5 +52,6 @@ obj-$(CONFIG_INTEL_MEI)               += mei/
99  obj-$(CONFIG_VMWARE_VMCI)      += vmw_vmci/
100  obj-$(CONFIG_LATTICE_ECP3_CONFIG)      += lattice-ecp3-config.o
101  obj-$(CONFIG_SRAM)             += sram.o
102 +obj-y                          += vc04_services/
103  obj-y                          += mic/
104  obj-$(CONFIG_GENWQE)           += genwqe/
105 --- /dev/null
106 +++ b/drivers/misc/vc04_services/Kconfig
107 @@ -0,0 +1,9 @@
108 +config BCM2708_VCHIQ
109 +       tristate "Videocore VCHIQ"
110 +       depends on MACH_BCM2708
111 +       default y
112 +       help
113 +               Kernel to VideoCore communication interface for the
114 +               BCM2708 family of products.
115 +               Defaults to Y when the Broadcom Videocore services
116 +               are included in the build, N otherwise.
117 --- /dev/null
118 +++ b/drivers/misc/vc04_services/Makefile
119 @@ -0,0 +1,17 @@
120 +ifeq ($(CONFIG_MACH_BCM2708),y)
121 +
122 +obj-$(CONFIG_BCM2708_VCHIQ)    += vchiq.o
123 +
124 +vchiq-objs := \
125 +   interface/vchiq_arm/vchiq_core.o  \
126 +   interface/vchiq_arm/vchiq_arm.o \
127 +   interface/vchiq_arm/vchiq_kern_lib.o \
128 +   interface/vchiq_arm/vchiq_2835_arm.o \
129 +   interface/vchiq_arm/vchiq_proc.o \
130 +   interface/vchiq_arm/vchiq_shim.o \
131 +   interface/vchiq_arm/vchiq_util.o \
132 +   interface/vchiq_arm/vchiq_connected.o \
133 +
134 +ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
135 +
136 +endif
137 --- /dev/null
138 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
139 @@ -0,0 +1,328 @@
140 +/**
141 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
142 + *
143 + * Redistribution and use in source and binary forms, with or without
144 + * modification, are permitted provided that the following conditions
145 + * are met:
146 + * 1. Redistributions of source code must retain the above copyright
147 + *    notice, this list of conditions, and the following disclaimer,
148 + *    without modification.
149 + * 2. Redistributions in binary form must reproduce the above copyright
150 + *    notice, this list of conditions and the following disclaimer in the
151 + *    documentation and/or other materials provided with the distribution.
152 + * 3. The names of the above-listed copyright holders may not be used
153 + *    to endorse or promote products derived from this software without
154 + *    specific prior written permission.
155 + *
156 + * ALTERNATIVELY, this software may be distributed under the terms of the
157 + * GNU General Public License ("GPL") version 2, as published by the Free
158 + * Software Foundation.
159 + *
160 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
161 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
162 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
163 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
164 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
165 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
166 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
167 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
168 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
169 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
170 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
171 + */
172 +
173 +#ifndef CONNECTION_H_
174 +#define CONNECTION_H_
175 +
176 +#include <linux/kernel.h>
177 +#include <linux/types.h>
178 +#include <linux/semaphore.h>
179 +
180 +#include "interface/vchi/vchi_cfg_internal.h"
181 +#include "interface/vchi/vchi_common.h"
182 +#include "interface/vchi/message_drivers/message.h"
183 +
184 +/******************************************************************************
185 + Global defs
186 + *****************************************************************************/
187 +
188 +// Opaque handle for a connection / service pair
189 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
190 +
191 +// opaque handle to the connection state information
192 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
193 +
194 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
195 +
196 +
197 +/******************************************************************************
198 + API
199 + *****************************************************************************/
200 +
201 +// Routine to init a connection with a particular low level driver
202 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
203 +                                                             const VCHI_MESSAGE_DRIVER_T * driver );
204 +
205 +// Routine to control CRC enabling at a connection level
206 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
207 +                                                  VCHI_CRC_CONTROL_T control );
208 +
209 +// Routine to create a service
210 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
211 +                                                      int32_t service_id,
212 +                                                      uint32_t rx_fifo_size,
213 +                                                      uint32_t tx_fifo_size,
214 +                                                      int server,
215 +                                                      VCHI_CALLBACK_T callback,
216 +                                                      void *callback_param,
217 +                                                      int32_t want_crc,
218 +                                                      int32_t want_unaligned_bulk_rx,
219 +                                                      int32_t want_unaligned_bulk_tx,
220 +                                                      VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
221 +
222 +// Routine to close a service
223 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
224 +
225 +// Routine to queue a message
226 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
227 +                                                            const void *data,
228 +                                                            uint32_t data_size,
229 +                                                            VCHI_FLAGS_T flags,
230 +                                                            void *msg_handle );
231 +
232 +// scatter-gather (vector) message queueing
233 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
234 +                                                             VCHI_MSG_VECTOR_T *vector,
235 +                                                             uint32_t count,
236 +                                                             VCHI_FLAGS_T flags,
237 +                                                             void *msg_handle );
238 +
239 +// Routine to dequeue a message
240 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
241 +                                                              void *data,
242 +                                                              uint32_t max_data_size_to_read,
243 +                                                              uint32_t *actual_msg_size,
244 +                                                              VCHI_FLAGS_T flags );
245 +
246 +// Routine to peek at a message
247 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
248 +                                                           void **data,
249 +                                                           uint32_t *msg_size,
250 +                                                           VCHI_FLAGS_T flags );
251 +
252 +// Routine to hold a message
253 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
254 +                                                           void **data,
255 +                                                           uint32_t *msg_size,
256 +                                                           VCHI_FLAGS_T flags,
257 +                                                           void **message_handle );
258 +
259 +// Routine to initialise a received message iterator
260 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
261 +                                                                VCHI_MSG_ITER_T *iter,
262 +                                                                VCHI_FLAGS_T flags );
263 +
264 +// Routine to release a held message
265 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
266 +                                                       void *message_handle );
267 +
268 +// Routine to get info on a held message
269 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
270 +                                                    void *message_handle,
271 +                                                    void **data,
272 +                                                    int32_t *msg_size,
273 +                                                    uint32_t *tx_timestamp,
274 +                                                    uint32_t *rx_timestamp );
275 +
276 +// Routine to check whether the iterator has a next message
277 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
278 +                                                       const VCHI_MSG_ITER_T *iter );
279 +
280 +// Routine to advance the iterator
281 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
282 +                                                    VCHI_MSG_ITER_T *iter,
283 +                                                    void **data,
284 +                                                    uint32_t *msg_size );
285 +
286 +// Routine to remove the last message returned by the iterator
287 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
288 +                                                      VCHI_MSG_ITER_T *iter );
289 +
290 +// Routine to hold the last message returned by the iterator
291 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
292 +                                                    VCHI_MSG_ITER_T *iter,
293 +                                                    void **msg_handle );
294 +
295 +// Routine to transmit bulk data
296 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
297 +                                                          const void *data_src,
298 +                                                          uint32_t data_size,
299 +                                                          VCHI_FLAGS_T flags,
300 +                                                          void *bulk_handle );
301 +
302 +// Routine to receive data
303 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
304 +                                                         void *data_dst,
305 +                                                         uint32_t data_size,
306 +                                                         VCHI_FLAGS_T flags,
307 +                                                         void *bulk_handle );
308 +
309 +// Routine to report if a server is available
310 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
311 +
312 +// Routine to report the number of RX slots available
313 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
314 +
315 +// Routine to report the RX slot size
316 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
317 +
318 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
319 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
320 +                                                     int32_t service,
321 +                                                     uint32_t length,
322 +                                                     MESSAGE_TX_CHANNEL_T channel,
323 +                                                     uint32_t channel_params,
324 +                                                     uint32_t data_length,
325 +                                                     uint32_t data_offset);
326 +
327 +// Callback to inform a service that a Xon or Xoff message has been received
328 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
329 +
330 +// Callback to inform a service that a server available reply message has been received
331 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
332 +
333 +// Callback to indicate that bulk auxiliary messages have arrived
334 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
335 +
336 +// Callback to indicate that bulk auxiliary messages have arrived
337 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
338 +
339 +// Callback with all the connection info you require
340 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
341 +
342 +// Callback to inform of a disconnect
343 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
344 +
345 +// Callback to inform of a power control request
346 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
347 +
348 +// allocate memory suitably aligned for this connection
349 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
350 +
351 +// free memory allocated by buffer_allocate
352 +typedef void   (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
353 +
354 +
355 +/******************************************************************************
356 + System driver struct
357 + *****************************************************************************/
358 +
359 +struct opaque_vchi_connection_api_t
360 +{
361 +   // Routine to init the connection
362 +   VCHI_CONNECTION_INIT_T                      init;
363 +
364 +   // Connection-level CRC control
365 +   VCHI_CONNECTION_CRC_CONTROL_T               crc_control;
366 +
367 +   // Routine to connect to or create service
368 +   VCHI_CONNECTION_SERVICE_CONNECT_T           service_connect;
369 +
370 +   // Routine to disconnect from a service
371 +   VCHI_CONNECTION_SERVICE_DISCONNECT_T        service_disconnect;
372 +
373 +   // Routine to queue a message
374 +   VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T     service_queue_msg;
375 +
376 +   // scatter-gather (vector) message queue
377 +   VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T    service_queue_msgv;
378 +
379 +   // Routine to dequeue a message
380 +   VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T   service_dequeue_msg;
381 +
382 +   // Routine to peek at a message
383 +   VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T      service_peek_msg;
384 +
385 +   // Routine to hold a message
386 +   VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T      service_hold_msg;
387 +
388 +   // Routine to initialise a received message iterator
389 +   VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
390 +
391 +   // Routine to release a message
392 +   VCHI_CONNECTION_HELD_MSG_RELEASE_T          held_msg_release;
393 +
394 +   // Routine to get information on a held message
395 +   VCHI_CONNECTION_HELD_MSG_INFO_T             held_msg_info;
396 +
397 +   // Routine to check for next message on iterator
398 +   VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T         msg_iter_has_next;
399 +
400 +   // Routine to get next message on iterator
401 +   VCHI_CONNECTION_MSG_ITER_NEXT_T             msg_iter_next;
402 +
403 +   // Routine to remove the last message returned by iterator
404 +   VCHI_CONNECTION_MSG_ITER_REMOVE_T           msg_iter_remove;
405 +
406 +   // Routine to hold the last message returned by iterator
407 +   VCHI_CONNECTION_MSG_ITER_HOLD_T             msg_iter_hold;
408 +
409 +   // Routine to transmit bulk data
410 +   VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T       bulk_queue_transmit;
411 +
412 +   // Routine to receive data
413 +   VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T        bulk_queue_receive;
414 +
415 +   // Routine to report the available servers
416 +   VCHI_CONNECTION_SERVER_PRESENT              server_present;
417 +
418 +   // Routine to report the number of RX slots available
419 +   VCHI_CONNECTION_RX_SLOTS_AVAILABLE          connection_rx_slots_available;
420 +
421 +   // Routine to report the RX slot size
422 +   VCHI_CONNECTION_RX_SLOT_SIZE                connection_rx_slot_size;
423 +
424 +   // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
425 +   VCHI_CONNECTION_RX_BULK_BUFFER_ADDED        rx_bulk_buffer_added;
426 +
427 +   // Callback to inform a service that a Xon or Xoff message has been received
428 +   VCHI_CONNECTION_FLOW_CONTROL                flow_control;
429 +
430 +   // Callback to inform a service that a server available reply message has been received
431 +   VCHI_CONNECTION_SERVER_AVAILABLE_REPLY      server_available_reply;
432 +
433 +   // Callback to indicate that bulk auxiliary messages have arrived
434 +   VCHI_CONNECTION_BULK_AUX_RECEIVED           bulk_aux_received;
435 +
436 +   // Callback to indicate that a bulk auxiliary message has been transmitted
437 +   VCHI_CONNECTION_BULK_AUX_TRANSMITTED        bulk_aux_transmitted;
438 +
439 +   // Callback to provide information about the connection
440 +   VCHI_CONNECTION_INFO                        connection_info;
441 +
442 +   // Callback to notify that peer has requested disconnect
443 +   VCHI_CONNECTION_DISCONNECT                  disconnect;
444 +
445 +   // Callback to notify that peer has requested power change
446 +   VCHI_CONNECTION_POWER_CONTROL               power_control;
447 +
448 +   // allocate memory suitably aligned for this connection
449 +   VCHI_BUFFER_ALLOCATE                        buffer_allocate;
450 +
451 +   // free memory allocated by buffer_allocate
452 +   VCHI_BUFFER_FREE                            buffer_free;
453 +
454 +};
455 +
456 +struct vchi_connection_t {
457 +   const VCHI_CONNECTION_API_T *api;
458 +   VCHI_CONNECTION_STATE_T     *state;
459 +#ifdef VCHI_COARSE_LOCKING
460 +   struct semaphore             sem;
461 +#endif
462 +};
463 +
464 +
465 +#endif /* CONNECTION_H_ */
466 +
467 +/****************************** End of file **********************************/
468 --- /dev/null
469 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
470 @@ -0,0 +1,204 @@
471 +/**
472 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
473 + *
474 + * Redistribution and use in source and binary forms, with or without
475 + * modification, are permitted provided that the following conditions
476 + * are met:
477 + * 1. Redistributions of source code must retain the above copyright
478 + *    notice, this list of conditions, and the following disclaimer,
479 + *    without modification.
480 + * 2. Redistributions in binary form must reproduce the above copyright
481 + *    notice, this list of conditions and the following disclaimer in the
482 + *    documentation and/or other materials provided with the distribution.
483 + * 3. The names of the above-listed copyright holders may not be used
484 + *    to endorse or promote products derived from this software without
485 + *    specific prior written permission.
486 + *
487 + * ALTERNATIVELY, this software may be distributed under the terms of the
488 + * GNU General Public License ("GPL") version 2, as published by the Free
489 + * Software Foundation.
490 + *
491 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
492 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
493 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
494 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
495 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
496 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
497 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
498 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
499 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
500 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
501 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
502 + */
503 +
504 +#ifndef _VCHI_MESSAGE_H_
505 +#define _VCHI_MESSAGE_H_
506 +
507 +#include <linux/kernel.h>
508 +#include <linux/types.h>
509 +#include <linux/semaphore.h>
510 +
511 +#include "interface/vchi/vchi_cfg_internal.h"
512 +#include "interface/vchi/vchi_common.h"
513 +
514 +
515 +typedef enum message_event_type {
516 +   MESSAGE_EVENT_NONE,
517 +   MESSAGE_EVENT_NOP,
518 +   MESSAGE_EVENT_MESSAGE,
519 +   MESSAGE_EVENT_SLOT_COMPLETE,
520 +   MESSAGE_EVENT_RX_BULK_PAUSED,
521 +   MESSAGE_EVENT_RX_BULK_COMPLETE,
522 +   MESSAGE_EVENT_TX_COMPLETE,
523 +   MESSAGE_EVENT_MSG_DISCARDED
524 +} MESSAGE_EVENT_TYPE_T;
525 +
526 +typedef enum vchi_msg_flags
527 +{
528 +   VCHI_MSG_FLAGS_NONE                  = 0x0,
529 +   VCHI_MSG_FLAGS_TERMINATE_DMA         = 0x1
530 +} VCHI_MSG_FLAGS_T;
531 +
532 +typedef enum message_tx_channel
533 +{
534 +   MESSAGE_TX_CHANNEL_MESSAGE           = 0,
535 +   MESSAGE_TX_CHANNEL_BULK              = 1 // drivers may provide multiple bulk channels, from 1 upwards
536 +} MESSAGE_TX_CHANNEL_T;
537 +
538 +// Macros used for cycling through bulk channels
539 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
540 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
541 +
542 +typedef enum message_rx_channel
543 +{
544 +   MESSAGE_RX_CHANNEL_MESSAGE           = 0,
545 +   MESSAGE_RX_CHANNEL_BULK              = 1 // drivers may provide multiple bulk channels, from 1 upwards
546 +} MESSAGE_RX_CHANNEL_T;
547 +
548 +// Message receive slot information
549 +typedef struct rx_msg_slot_info {
550 +
551 +   struct rx_msg_slot_info *next;
552 +   //struct slot_info *prev;
553 +#if !defined VCHI_COARSE_LOCKING
554 +   struct semaphore   sem;
555 +#endif
556 +
557 +   uint8_t           *addr;               // base address of slot
558 +   uint32_t           len;                // length of slot in bytes
559 +
560 +   uint32_t           write_ptr;          // hardware causes this to advance
561 +   uint32_t           read_ptr;           // this module does the reading
562 +   int                active;             // is this slot in the hardware dma fifo?
563 +   uint32_t           msgs_parsed;        // count how many messages are in this slot
564 +   uint32_t           msgs_released;      // how many messages have been released
565 +   void              *state;              // connection state information
566 +   uint8_t            ref_count[VCHI_MAX_SERVICES_PER_CONNECTION];          // reference count for slots held by services
567 +} RX_MSG_SLOTINFO_T;
568 +
569 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
570 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
571 +// driver will be tasked with sending the aligned core section.
572 +typedef struct rx_bulk_slotinfo_t {
573 +   struct rx_bulk_slotinfo_t *next;
574 +
575 +   struct semaphore *blocking;
576 +
577 +   // needed by DMA
578 +   void        *addr;
579 +   uint32_t     len;
580 +
581 +   // needed for the callback
582 +   void        *service;
583 +   void        *handle;
584 +   VCHI_FLAGS_T flags;
585 +} RX_BULK_SLOTINFO_T;
586 +
587 +
588 +/* ----------------------------------------------------------------------
589 + * each connection driver will have a pool of the following struct.
590 + *
591 + * the pool will be managed by vchi_qman_*
592 + * this means there will be multiple queues (single linked lists)
593 + * a given struct message_info will be on exactly one of these queues
594 + * at any one time
595 + * -------------------------------------------------------------------- */
596 +typedef struct rx_message_info {
597 +
598 +   struct message_info *next;
599 +   //struct message_info *prev;
600 +
601 +   uint8_t    *addr;
602 +   uint32_t   len;
603 +   RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
604 +   uint32_t   tx_timestamp;
605 +   uint32_t   rx_timestamp;
606 +
607 +} RX_MESSAGE_INFO_T;
608 +
609 +typedef struct {
610 +   MESSAGE_EVENT_TYPE_T type;
611 +
612 +   struct {
613 +      // for messages
614 +      void    *addr;           // address of message
615 +      uint16_t slot_delta;     // whether this message indicated slot delta
616 +      uint32_t len;            // length of message
617 +      RX_MSG_SLOTINFO_T *slot; // slot this message is in
618 +      int32_t  service;   // service id this message is destined for
619 +      uint32_t tx_timestamp;   // timestamp from the header
620 +      uint32_t rx_timestamp;   // timestamp when we parsed it
621 +   } message;
622 +
623 +   // FIXME: cleanup slot reporting...
624 +   RX_MSG_SLOTINFO_T *rx_msg;
625 +   RX_BULK_SLOTINFO_T *rx_bulk;
626 +   void *tx_handle;
627 +   MESSAGE_TX_CHANNEL_T tx_channel;
628 +
629 +} MESSAGE_EVENT_T;
630 +
631 +
632 +// callbacks
633 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
634 +
635 +typedef struct {
636 +   VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
637 +} VCHI_MESSAGE_DRIVER_OPEN_T;
638 +
639 +
640 +// handle to this instance of message driver (as returned by ->open)
641 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
642 +
643 +struct opaque_vchi_message_driver_t {
644 +   VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
645 +   int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
646 +   int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
647 +   int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
648 +   int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot );      // rx message
649 +   int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot );  // rx data (bulk)
650 +   int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle );      // tx (message & bulk)
651 +   void    (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event );     // get the next event from message_driver
652 +   int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
653 +   int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
654 +                            *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
655 +
656 +   int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
657 +   int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
658 +   void *  (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
659 +   void    (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
660 +   int     (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
661 +   int     (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
662 +
663 +   int32_t  (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
664 +   uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
665 +   int     (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
666 +   int     (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
667 +   void    (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
668 +   void    (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
669 +};
670 +
671 +
672 +#endif // _VCHI_MESSAGE_H_
673 +
674 +/****************************** End of file ***********************************/
675 --- /dev/null
676 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
677 @@ -0,0 +1,373 @@
678 +/**
679 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
680 + *
681 + * Redistribution and use in source and binary forms, with or without
682 + * modification, are permitted provided that the following conditions
683 + * are met:
684 + * 1. Redistributions of source code must retain the above copyright
685 + *    notice, this list of conditions, and the following disclaimer,
686 + *    without modification.
687 + * 2. Redistributions in binary form must reproduce the above copyright
688 + *    notice, this list of conditions and the following disclaimer in the
689 + *    documentation and/or other materials provided with the distribution.
690 + * 3. The names of the above-listed copyright holders may not be used
691 + *    to endorse or promote products derived from this software without
692 + *    specific prior written permission.
693 + *
694 + * ALTERNATIVELY, this software may be distributed under the terms of the
695 + * GNU General Public License ("GPL") version 2, as published by the Free
696 + * Software Foundation.
697 + *
698 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
699 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
700 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
701 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
702 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
703 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
704 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
705 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
706 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
707 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
708 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
709 + */
710 +
711 +#ifndef VCHI_H_
712 +#define VCHI_H_
713 +
714 +#include "interface/vchi/vchi_cfg.h"
715 +#include "interface/vchi/vchi_common.h"
716 +#include "interface/vchi/connections/connection.h"
717 +#include "vchi_mh.h"
718 +
719 +
720 +/******************************************************************************
721 + Global defs
722 + *****************************************************************************/
723 +
724 +#define VCHI_BULK_ROUND_UP(x)     ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
725 +#define VCHI_BULK_ROUND_DOWN(x)   (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
726 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
727 +
728 +#ifdef USE_VCHIQ_ARM
729 +#define VCHI_BULK_ALIGNED(x)      1
730 +#else
731 +#define VCHI_BULK_ALIGNED(x)      (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
732 +#endif
733 +
734 +struct vchi_version {
735 +       uint32_t version;
736 +       uint32_t version_min;
737 +};
738 +#define VCHI_VERSION(v_) { v_, v_ }
739 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
740 +
741 +typedef enum
742 +{
743 +   VCHI_VEC_POINTER,
744 +   VCHI_VEC_HANDLE,
745 +   VCHI_VEC_LIST
746 +} VCHI_MSG_VECTOR_TYPE_T;
747 +
748 +typedef struct vchi_msg_vector_ex {
749 +
750 +   VCHI_MSG_VECTOR_TYPE_T type;
751 +   union
752 +   {
753 +      // a memory handle
754 +      struct
755 +      {
756 +         VCHI_MEM_HANDLE_T handle;
757 +         uint32_t offset;
758 +         int32_t vec_len;
759 +      } handle;
760 +
761 +      // an ordinary data pointer
762 +      struct
763 +      {
764 +         const void *vec_base;
765 +         int32_t vec_len;
766 +      } ptr;
767 +
768 +      // a nested vector list
769 +      struct
770 +      {
771 +         struct vchi_msg_vector_ex *vec;
772 +         uint32_t vec_len;
773 +      } list;
774 +   } u;
775 +} VCHI_MSG_VECTOR_EX_T;
776 +
777 +
778 +// Construct an entry in a msg vector for a pointer (p) of length (l)
779 +#define VCHI_VEC_POINTER(p,l)  VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
780 +
781 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
782 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE,  { { (h), (o), (l) } }
783 +
784 +// Macros to manipulate 'FOURCC' values
785 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
786 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
787 +
788 +
789 +// Opaque service information
790 +struct opaque_vchi_service_t;
791 +
792 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
793 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
794 +typedef struct
795 +{
796 +   struct opaque_vchi_service_t *service;
797 +   void *message;
798 +} VCHI_HELD_MSG_T;
799 +
800 +
801 +
802 +// structure used to provide the information needed to open a server or a client
803 +typedef struct {
804 +       struct vchi_version version;
805 +       int32_t service_id;
806 +       VCHI_CONNECTION_T *connection;
807 +       uint32_t rx_fifo_size;
808 +       uint32_t tx_fifo_size;
809 +       VCHI_CALLBACK_T callback;
810 +       void *callback_param;
811 +       /* client intends to receive bulk transfers of
812 +               odd lengths or into unaligned buffers */
813 +       int32_t want_unaligned_bulk_rx;
814 +       /* client intends to transmit bulk transfers of
815 +               odd lengths or out of unaligned buffers */
816 +       int32_t want_unaligned_bulk_tx;
817 +       /* client wants to check CRCs on (bulk) xfers.
818 +               Only needs to be set at 1 end - will do both directions. */
819 +       int32_t want_crc;
820 +} SERVICE_CREATION_T;
821 +
822 +// Opaque handle for a VCHI instance
823 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
824 +
825 +// Opaque handle for a server or client
826 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
827 +
828 +// Service registration & startup
829 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
830 +
831 +typedef struct service_info_tag {
832 +   const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
833 +   VCHI_SERVICE_INIT init;          /* Service initialisation function */
834 +   void *vll_handle;                /* VLL handle; NULL when unloaded or a "static VLL" in build */
835 +} SERVICE_INFO_T;
836 +
837 +/******************************************************************************
838 + Global funcs - implementation is specific to which side you are on (local / remote)
839 + *****************************************************************************/
840 +
841 +#ifdef __cplusplus
842 +extern "C" {
843 +#endif
844 +
845 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
846 +                                                   const VCHI_MESSAGE_DRIVER_T * low_level);
847 +
848 +
849 +// Routine used to initialise the vchi on both local + remote connections
850 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
851 +
852 +extern int32_t vchi_exit( void );
853 +
854 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
855 +                             const uint32_t num_connections,
856 +                             VCHI_INSTANCE_T instance_handle );
857 +
858 +//When this is called, ensure that all services have no data pending.
859 +//Bulk transfers can remain 'queued'
860 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
861 +
862 +// Global control over bulk CRC checking
863 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
864 +                                 VCHI_CRC_CONTROL_T control );
865 +
866 +// helper functions
867 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
868 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
869 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
870 +
871 +
872 +/******************************************************************************
873 + Global service API
874 + *****************************************************************************/
875 +// Routine to create a named service
876 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
877 +                                    SERVICE_CREATION_T *setup,
878 +                                    VCHI_SERVICE_HANDLE_T *handle );
879 +
880 +// Routine to destroy a service
881 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
882 +
883 +// Routine to open a named service
884 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
885 +                                  SERVICE_CREATION_T *setup,
886 +                                  VCHI_SERVICE_HANDLE_T *handle);
887 +
888 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
889 +                                      short *peer_version );
890 +
891 +// Routine to close a named service
892 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
893 +
894 +// Routine to increment ref count on a named service
895 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
896 +
897 +// Routine to decrement ref count on a named service
898 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
899 +
900 +// Routine to send a message across a service
901 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
902 +                               const void *data,
903 +                               uint32_t data_size,
904 +                               VCHI_FLAGS_T flags,
905 +                               void *msg_handle );
906 +
907 +// scatter-gather (vector) and send message
908 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
909 +                            VCHI_MSG_VECTOR_EX_T *vector,
910 +                            uint32_t count,
911 +                            VCHI_FLAGS_T flags,
912 +                            void *msg_handle );
913 +
914 +// legacy scatter-gather (vector) and send message, only handles pointers
915 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
916 +                         VCHI_MSG_VECTOR_T *vector,
917 +                         uint32_t count,
918 +                         VCHI_FLAGS_T flags,
919 +                         void *msg_handle );
920 +
921 +// Routine to receive a msg from a service
922 +// Dequeue is equivalent to hold, copy into client buffer, release
923 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
924 +                                 void *data,
925 +                                 uint32_t max_data_size_to_read,
926 +                                 uint32_t *actual_msg_size,
927 +                                 VCHI_FLAGS_T flags );
928 +
929 +// Routine to look at a message in place.
930 +// The message is not dequeued, so a subsequent call to peek or dequeue
931 +// will return the same message.
932 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
933 +                              void **data,
934 +                              uint32_t *msg_size,
935 +                              VCHI_FLAGS_T flags );
936 +
937 +// Routine to remove a message after it has been read in place with peek
938 +// The first message on the queue is dequeued.
939 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
940 +
941 +// Routine to look at a message in place.
942 +// The message is dequeued, so the caller is left holding it; the descriptor is
943 +// filled in and must be released when the user has finished with the message.
944 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
945 +                              void **data,        // } may be NULL, as info can be
946 +                              uint32_t *msg_size, // } obtained from HELD_MSG_T
947 +                              VCHI_FLAGS_T flags,
948 +                              VCHI_HELD_MSG_T *message_descriptor );
949 +
950 +// Initialise an iterator to look through messages in place
951 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
952 +                                    VCHI_MSG_ITER_T *iter,
953 +                                    VCHI_FLAGS_T flags );
954 +
955 +/******************************************************************************
956 + Global service support API - operations on held messages and message iterators
957 + *****************************************************************************/
958 +
959 +// Routine to get the address of a held message
960 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
961 +
962 +// Routine to get the size of a held message
963 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
964 +
965 +// Routine to get the transmit timestamp as written into the header by the peer
966 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
967 +
968 +// Routine to get the reception timestamp, written as we parsed the header
969 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
970 +
971 +// Routine to release a held message after it has been processed
972 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
973 +
974 +// Indicates whether the iterator has a next message.
975 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
976 +
977 +// Return the pointer and length for the next message and advance the iterator.
978 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
979 +                                   void **data,
980 +                                   uint32_t *msg_size );
981 +
982 +// Remove the last message returned by vchi_msg_iter_next.
983 +// Can only be called once after each call to vchi_msg_iter_next.
984 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
985 +
986 +// Hold the last message returned by vchi_msg_iter_next.
987 +// Can only be called once after each call to vchi_msg_iter_next.
988 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
989 +                                   VCHI_HELD_MSG_T *message );
990 +
991 +// Return information for the next message, and hold it, advancing the iterator.
992 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
993 +                                        void **data,        // } may be NULL
994 +                                        uint32_t *msg_size, // }
995 +                                        VCHI_HELD_MSG_T *message );
996 +
997 +
998 +/******************************************************************************
999 + Global bulk API
1000 + *****************************************************************************/
1001 +
1002 +// Routine to prepare interface for a transfer from the other side
1003 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
1004 +                                        void *data_dst,
1005 +                                        uint32_t data_size,
1006 +                                        VCHI_FLAGS_T flags,
1007 +                                        void *transfer_handle );
1008 +
1009 +
1010 +// Prepare interface for a transfer from the other side into relocatable memory.
1011 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
1012 +                                       VCHI_MEM_HANDLE_T h_dst,
1013 +                                       uint32_t offset,
1014 +                                       uint32_t data_size,
1015 +                                       const VCHI_FLAGS_T flags,
1016 +                                       void * const bulk_handle );
1017 +
1018 +// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
1019 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1020 +                                         const void *data_src,
1021 +                                         uint32_t data_size,
1022 +                                         VCHI_FLAGS_T flags,
1023 +                                         void *transfer_handle );
1024 +
1025 +
1026 +/******************************************************************************
1027 + Configuration plumbing
1028 + *****************************************************************************/
1029 +
1030 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1031 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1032 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1033 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1034 +
1035 +// declare all message drivers here
1036 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1037 +
1038 +#ifdef __cplusplus
1039 +}
1040 +#endif
1041 +
1042 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1043 +                                               VCHI_MEM_HANDLE_T h_src,
1044 +                                               uint32_t offset,
1045 +                                               uint32_t data_size,
1046 +                                               VCHI_FLAGS_T flags,
1047 +                                               void *transfer_handle );
1048 +#endif /* VCHI_H_ */
1049 +
1050 +/****************************** End of file **********************************/
1051 --- /dev/null
1052 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1053 @@ -0,0 +1,224 @@
1054 +/**
1055 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1056 + *
1057 + * Redistribution and use in source and binary forms, with or without
1058 + * modification, are permitted provided that the following conditions
1059 + * are met:
1060 + * 1. Redistributions of source code must retain the above copyright
1061 + *    notice, this list of conditions, and the following disclaimer,
1062 + *    without modification.
1063 + * 2. Redistributions in binary form must reproduce the above copyright
1064 + *    notice, this list of conditions and the following disclaimer in the
1065 + *    documentation and/or other materials provided with the distribution.
1066 + * 3. The names of the above-listed copyright holders may not be used
1067 + *    to endorse or promote products derived from this software without
1068 + *    specific prior written permission.
1069 + *
1070 + * ALTERNATIVELY, this software may be distributed under the terms of the
1071 + * GNU General Public License ("GPL") version 2, as published by the Free
1072 + * Software Foundation.
1073 + *
1074 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1075 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1076 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1077 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1078 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1079 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1080 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1081 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1082 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1083 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1084 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1085 + */
1086 +
1087 +#ifndef VCHI_CFG_H_
1088 +#define VCHI_CFG_H_
1089 +
1090 +/****************************************************************************************
1091 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1092 + * services.
1093 + ***************************************************************************************/
1094 +
1095 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1096 +/* Really determined by the message driver, and should be available from a run-time call. */
1097 +#ifndef VCHI_BULK_ALIGN
1098 +#   if __VCCOREVER__ >= 0x04000000
1099 +#       define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1100 +#   else
1101 +#       define VCHI_BULK_ALIGN 16
1102 +#   endif
1103 +#endif
1104 +
1105 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1106 +/* May be less than or greater than VCHI_BULK_ALIGN */
1107 +/* Really determined by the message driver, and should be available from a run-time call. */
1108 +#ifndef VCHI_BULK_GRANULARITY
1109 +#   if __VCCOREVER__ >= 0x04000000
1110 +#       define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1111 +#   else
1112 +#       define VCHI_BULK_GRANULARITY 16
1113 +#   endif
1114 +#endif
1115 +
1116 +/* The largest possible message to be queued with vchi_msg_queue. */
1117 +#ifndef VCHI_MAX_MSG_SIZE
1118 +#   if defined VCHI_LOCAL_HOST_PORT
1119 +#       define VCHI_MAX_MSG_SIZE     16384         // makes file transfers fast, but should they be using bulk?
1120 +#   else
1121 +#       define VCHI_MAX_MSG_SIZE      4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1122 +#   endif
1123 +#endif
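/* Illustrative sketch only - a hypothetical helper (not part of the VCHI
 * API) showing how a client might check a bulk buffer against the
 * alignment and granularity limits above before queuing it without
 * unaligned-transfer support.  It assumes nothing beyond the macros
 * defined in this header; VCHI_BULK_ALIGN is a power of two, so a mask
 * test is valid.
 */
static inline int vchi_bulk_params_ok(const void *addr, uint32_t len)
{
   if (((unsigned long)addr & (VCHI_BULK_ALIGN - 1)) != 0)
      return 0;                    /* base address not sufficiently aligned */
   if ((len % VCHI_BULK_GRANULARITY) != 0)
      return 0;                    /* length not a granularity multiple */
   return 1;
}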
1124 +
1125 +/******************************************************************************************
1126 + * Defines below are system configuration options, and should not be used by VCHI services.
1127 + *****************************************************************************************/
1128 +
1129 +/* How many connections can we support? A localhost implementation uses 2 connections,
1130 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1131 + * driver. */
1132 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1133 +#   define VCHI_MAX_NUM_CONNECTIONS 3
1134 +#endif
1135 +
1136 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1137 + * amount of static memory. */
1138 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1139 +#  define VCHI_MAX_SERVICES_PER_CONNECTION 36
1140 +#endif
1141 +
1142 +/* Adjust if using a message driver that supports more logical TX channels */
1143 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1144 +#   define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1145 +#endif
1146 +
1147 +/* Adjust if using a message driver that supports more logical RX channels */
1148 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1149 +#   define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1150 +#endif
1151 +
1152 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1153 + * receive queue space, less message headers. */
1154 +#ifndef VCHI_NUM_READ_SLOTS
1155 +#  if defined(VCHI_LOCAL_HOST_PORT)
1156 +#     define VCHI_NUM_READ_SLOTS 4
1157 +#  else
1158 +#     define VCHI_NUM_READ_SLOTS 48
1159 +#  endif
1160 +#endif
1161 +
1162 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1163 + * performance. Only define on VideoCore end, talking to host.
1164 + */
1165 +//#define VCHI_MSG_RX_OVERRUN
1166 +
1167 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1168 + * underneath VCHI will usually have its own buffering. */
1169 +#ifndef VCHI_NUM_WRITE_SLOTS
1170 +#  define VCHI_NUM_WRITE_SLOTS 4
1171 +#endif
1172 +
1173 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1174 + * then it's taking up too much buffer space, and the peer service will be told to stop
1175 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1176 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1177 + * is too high. */
1178 +#ifndef VCHI_XOFF_THRESHOLD
1179 +#  define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1180 +#endif
1181 +
1182 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1183 + * service has dequeued/released enough messages that it's now occupying
1184 + * VCHI_XON_THRESHOLD slots or fewer. */
1185 +#ifndef VCHI_XON_THRESHOLD
1186 +#  define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1187 +#endif
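/* Worked example (illustrative): with the default non-localhost values,
 * VCHI_NUM_READ_SLOTS = 48, so XOFF is sent once a service is holding
 * messages in 48/2 = 24 or more slots, and XON once it has drained back
 * to 48/4 = 12 or fewer.  The gap between the two thresholds is what
 * absorbs messages already in flight when the XOFF is sent.
 */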
1188 +
1189 +/* A size below which a bulk transfer omits the handshake completely and always goes
1190 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1191 + * can guarantee this by enabling unaligned transmits).
1192 + * Not API. */
1193 +#ifndef VCHI_MIN_BULK_SIZE
1194 +#  define VCHI_MIN_BULK_SIZE    ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1195 +#endif
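/* Worked example (illustrative): with the default VCHI_MAX_MSG_SIZE of
 * 4096, VCHI_MAX_MSG_SIZE/2 = 2048 < 4096, so VCHI_MIN_BULK_SIZE = 2048
 * and bulk transfers smaller than 2KB travel as ordinary messages with
 * no handshake.
 */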
1196 +
1197 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1198 + * speed and latency; the smaller the chunk size the better chance of messages and other
1199 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1200 + * break transmissions into chunks.
1201 + */
1202 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1203 +#  define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1204 +#endif
1205 +
1206 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1207 + * with multiple-line frames. Only use if the receiver can cope. */
1208 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1209 +#  define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1210 +#endif
1211 +
1212 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1213 + * vchi_msg_queue will be blocked. */
1214 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1215 +#  define VCHI_TX_MSG_QUEUE_SIZE           256
1216 +#endif
1217 +
1218 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1219 + * will be suspended until older messages are dequeued/released. */
1220 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1221 +#  define VCHI_RX_MSG_QUEUE_SIZE           256
1222 +#endif
1223 +
1224 +/* Really should be able to cope if we run out of received message descriptors, by
1225 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1226 + * under the carpet. */
1227 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1228 +#  undef VCHI_RX_MSG_QUEUE_SIZE
1229 +#  define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1230 +#endif
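/* Worked example (illustrative): with the default non-localhost values,
 * (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
 *   = (4096/16 + 1) * 48
 *   = 257 * 48
 *   = 12336, which exceeds the default of 256, so VCHI_RX_MSG_QUEUE_SIZE
 * is raised to 12336 by the #if above.
 */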
1231 +
1232 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1233 + * will be blocked. */
1234 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1235 +#  define VCHI_TX_BULK_QUEUE_SIZE           64
1236 +#endif
1237 +
1238 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1239 + * will be blocked. */
1240 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1241 +#  define VCHI_RX_BULK_QUEUE_SIZE           64
1242 +#endif
1243 +
1244 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1245 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1246 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1247 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1248 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1249 +#  define VCHI_MAX_PEER_BULK_REQUESTS       32
1250 +#endif
1251 +
1252 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1253 + * transmitter on and off.
1254 + */
1255 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1256 +
1257 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1258 +
1259 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1260 + * negative for no IDLE.
1261 + */
1262 +#  ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1263 +#    define VCHI_CCP2TX_IDLE_TIMEOUT        5
1264 +#  endif
1265 +
1266 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1267 + * negative for no OFF.
1268 + */
1269 +#  ifndef VCHI_CCP2TX_OFF_TIMEOUT
1270 +#    define VCHI_CCP2TX_OFF_TIMEOUT         1000
1271 +#  endif
1272 +
1273 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1274 +
1275 +#endif /* VCHI_CFG_H_ */
1276 +
1277 +/****************************** End of file **********************************/
1278 --- /dev/null
1279 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1280 @@ -0,0 +1,71 @@
1281 +/**
1282 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1283 + *
1284 + * Redistribution and use in source and binary forms, with or without
1285 + * modification, are permitted provided that the following conditions
1286 + * are met:
1287 + * 1. Redistributions of source code must retain the above copyright
1288 + *    notice, this list of conditions, and the following disclaimer,
1289 + *    without modification.
1290 + * 2. Redistributions in binary form must reproduce the above copyright
1291 + *    notice, this list of conditions and the following disclaimer in the
1292 + *    documentation and/or other materials provided with the distribution.
1293 + * 3. The names of the above-listed copyright holders may not be used
1294 + *    to endorse or promote products derived from this software without
1295 + *    specific prior written permission.
1296 + *
1297 + * ALTERNATIVELY, this software may be distributed under the terms of the
1298 + * GNU General Public License ("GPL") version 2, as published by the Free
1299 + * Software Foundation.
1300 + *
1301 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1302 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1303 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1304 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1305 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1306 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1307 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1308 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1309 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1310 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1311 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1312 + */
1313 +
1314 +#ifndef VCHI_CFG_INTERNAL_H_
1315 +#define VCHI_CFG_INTERNAL_H_
1316 +
1317 +/****************************************************************************************
1318 + * Control optimisation attempts.
1319 + ***************************************************************************************/
1320 +
1321 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1322 +#define VCHI_COARSE_LOCKING
1323 +
1324 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1325 +// (only relevant if VCHI_COARSE_LOCKING)
1326 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1327 +
1328 +// Avoid lock on non-blocking peek
1329 +// (only relevant if VCHI_COARSE_LOCKING)
1330 +#define VCHI_AVOID_PEEK_LOCK
1331 +
1332 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1333 +#define VCHI_MULTIPLE_HANDLER_THREADS
1334 +
1335 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1336 +// our way through the pool of descriptors.
1337 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1338 +
1339 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1340 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1341 +
1342 +// Don't use message descriptors for TX messages that don't need them
1343 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1344 +
1345 +// Nano-locks for multiqueue
1346 +//#define VCHI_MQUEUE_NANOLOCKS
1347 +
1348 +// Lock-free(er) dequeuing
1349 +//#define VCHI_RX_NANOLOCKS
1350 +
1351 +#endif /*VCHI_CFG_INTERNAL_H_*/
1352 --- /dev/null
1353 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1354 @@ -0,0 +1,163 @@
1355 +/**
1356 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1357 + *
1358 + * Redistribution and use in source and binary forms, with or without
1359 + * modification, are permitted provided that the following conditions
1360 + * are met:
1361 + * 1. Redistributions of source code must retain the above copyright
1362 + *    notice, this list of conditions, and the following disclaimer,
1363 + *    without modification.
1364 + * 2. Redistributions in binary form must reproduce the above copyright
1365 + *    notice, this list of conditions and the following disclaimer in the
1366 + *    documentation and/or other materials provided with the distribution.
1367 + * 3. The names of the above-listed copyright holders may not be used
1368 + *    to endorse or promote products derived from this software without
1369 + *    specific prior written permission.
1370 + *
1371 + * ALTERNATIVELY, this software may be distributed under the terms of the
1372 + * GNU General Public License ("GPL") version 2, as published by the Free
1373 + * Software Foundation.
1374 + *
1375 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1376 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1377 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1378 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1379 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1380 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1381 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1382 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1383 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1384 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1385 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1386 + */
1387 +
1388 +#ifndef VCHI_COMMON_H_
1389 +#define VCHI_COMMON_H_
1390 +
1391 +
1392 +//flags used when sending messages (must be bitmapped)
1393 +typedef enum
1394 +{
1395 +   VCHI_FLAGS_NONE                      = 0x0,
1396 +   VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE   = 0x1,   // waits for message to be received, or sent (NB. not the same as being seen on other side)
1397 +   VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2,   // run a callback when message sent
1398 +   VCHI_FLAGS_BLOCK_UNTIL_QUEUED        = 0x4,   // return once the transfer is in a queue ready to go
1399 +   VCHI_FLAGS_ALLOW_PARTIAL             = 0x8,
1400 +   VCHI_FLAGS_BLOCK_UNTIL_DATA_READ     = 0x10,
1401 +   VCHI_FLAGS_CALLBACK_WHEN_DATA_READ   = 0x20,
1402 +
1403 +   VCHI_FLAGS_ALIGN_SLOT            = 0x000080,  // internal use only
1404 +   VCHI_FLAGS_BULK_AUX_QUEUED       = 0x010000,  // internal use only
1405 +   VCHI_FLAGS_BULK_AUX_COMPLETE     = 0x020000,  // internal use only
1406 +   VCHI_FLAGS_BULK_DATA_QUEUED      = 0x040000,  // internal use only
1407 +   VCHI_FLAGS_BULK_DATA_COMPLETE    = 0x080000,  // internal use only
1408 +   VCHI_FLAGS_INTERNAL              = 0xFF0000
1409 +} VCHI_FLAGS_T;
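/* Illustrative only: the flags are bit values and may be OR'd together,
 * e.g. to block until a transfer is queued and also receive a callback
 * when the operation completes:
 *
 *    VCHI_FLAGS_T flags = (VCHI_FLAGS_T)
 *       (VCHI_FLAGS_BLOCK_UNTIL_QUEUED | VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE);
 */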
1410 +
1411 +// constants for vchi_crc_control()
1412 +typedef enum {
1413 +   VCHI_CRC_NOTHING = -1,
1414 +   VCHI_CRC_PER_SERVICE = 0,
1415 +   VCHI_CRC_EVERYTHING = 1,
1416 +} VCHI_CRC_CONTROL_T;
1417 +
1418 +//callback reasons when an event occurs on a service
1419 +typedef enum
1420 +{
1421 +   VCHI_CALLBACK_REASON_MIN,
1422 +
1423 +   //This indicates that there is data available
1424 +   //handle is the msg id that was transmitted with the data
1425 +   //    When a message is received and there was no FULL message available previously, send callback
1426 +   //    Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
1427 +   VCHI_CALLBACK_MSG_AVAILABLE,
1428 +   VCHI_CALLBACK_MSG_SENT,
1429 +   VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1430 +
1431 +   // This indicates that a transfer from the other side has completed
1432 +   VCHI_CALLBACK_BULK_RECEIVED,
1433 +   //This indicates that data queued up to be sent has now gone
1434 +   //handle is the msg id that was used when sending the data
1435 +   VCHI_CALLBACK_BULK_SENT,
1436 +   VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1437 +   VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1438 +
1439 +   VCHI_CALLBACK_SERVICE_CLOSED,
1440 +
1441 +   // this side has sent XOFF to peer due to lack of data consumption by service
1442 +   // (suggests the service may need to take some recovery action if it has
1443 +   // been deliberately holding off consuming data)
1444 +   VCHI_CALLBACK_SENT_XOFF,
1445 +   VCHI_CALLBACK_SENT_XON,
1446 +
1447 +   // indicates that a bulk transfer has finished reading the source buffer
1448 +   VCHI_CALLBACK_BULK_DATA_READ,
1449 +
1450 +   // power notification events (currently host side only)
1451 +   VCHI_CALLBACK_PEER_OFF,
1452 +   VCHI_CALLBACK_PEER_SUSPENDED,
1453 +   VCHI_CALLBACK_PEER_ON,
1454 +   VCHI_CALLBACK_PEER_RESUMED,
1455 +   VCHI_CALLBACK_FORCED_POWER_OFF,
1456 +
1457 +#ifdef USE_VCHIQ_ARM
1458 +   // some extra notifications provided by vchiq_arm
1459 +   VCHI_CALLBACK_SERVICE_OPENED,
1460 +   VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1461 +   VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1462 +#endif
1463 +
1464 +   VCHI_CALLBACK_REASON_MAX
1465 +} VCHI_CALLBACK_REASON_T;
1466 +
1467 +//Callback used by all services / bulk transfers
1468 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1469 +                                 VCHI_CALLBACK_REASON_T reason,
1470 +                                 void *handle ); //for transmitting msg's only
1471 +
1472 +
1473 +
1474 +/*
1475 + * Define vector struct for scatter-gather (vector) operations
1476 + * Vectors can be nested - if a vector element has negative length, then
1477 + * the data pointer is treated as pointing to another vector array, with
1478 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1479 + * you can do this:
1480 + *
1481 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1482 + * {
1483 + *    VCHI_MSG_VECTOR_T nv[2];
1484 + *    nv[0].vec_base = my_header;
1485 + *    nv[0].vec_len = sizeof my_header;
1486 + *    nv[1].vec_base = v;
1487 + *    nv[1].vec_len = -n;
1488 + *    ...
1489 + *
1490 + */
1491 +typedef struct vchi_msg_vector {
1492 +   const void *vec_base;
1493 +   int32_t vec_len;
1494 +} VCHI_MSG_VECTOR_T;
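/* Illustrative sketch only - a hypothetical helper (not part of the VCHI
 * API) that totals the payload described by a possibly nested vector
 * array, following the negative-length convention documented above.
 */
static inline int32_t vchi_msg_vector_size(const VCHI_MSG_VECTOR_T *vec, int count)
{
   int32_t total = 0;
   int i;
   for (i = 0; i < count; i++) {
      if (vec[i].vec_len < 0)
         /* Negative length: vec_base points at another array of
            -vec_len nested vector elements - recurse into it. */
         total += vchi_msg_vector_size(
            (const VCHI_MSG_VECTOR_T *)vec[i].vec_base, -vec[i].vec_len);
      else
         total += vec[i].vec_len;
   }
   return total;
}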
1495 +
1496 +// Opaque type for a connection API
1497 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1498 +
1499 +// Opaque type for a message driver
1500 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1501 +
1502 +
1503 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1504 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1505 +// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
1506 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1507 +// is used again after messages for that service are removed/dequeued by any
1508 +// means other than vchi_msg_iter_... calls on the iterator itself.
1509 +typedef struct {
1510 +   struct opaque_vchi_service_t *service;
1511 +   void *last;
1512 +   void *next;
1513 +   void *remove;
1514 +} VCHI_MSG_ITER_T;
1515 +
1516 +
1517 +#endif // VCHI_COMMON_H_
1518 --- /dev/null
1519 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1520 @@ -0,0 +1,42 @@
1521 +/**
1522 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1523 + *
1524 + * Redistribution and use in source and binary forms, with or without
1525 + * modification, are permitted provided that the following conditions
1526 + * are met:
1527 + * 1. Redistributions of source code must retain the above copyright
1528 + *    notice, this list of conditions, and the following disclaimer,
1529 + *    without modification.
1530 + * 2. Redistributions in binary form must reproduce the above copyright
1531 + *    notice, this list of conditions and the following disclaimer in the
1532 + *    documentation and/or other materials provided with the distribution.
1533 + * 3. The names of the above-listed copyright holders may not be used
1534 + *    to endorse or promote products derived from this software without
1535 + *    specific prior written permission.
1536 + *
1537 + * ALTERNATIVELY, this software may be distributed under the terms of the
1538 + * GNU General Public License ("GPL") version 2, as published by the Free
1539 + * Software Foundation.
1540 + *
1541 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1542 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1543 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1544 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1545 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1546 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1547 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1548 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1549 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1550 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1551 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1552 + */
1553 +
1554 +#ifndef VCHI_MH_H_
1555 +#define VCHI_MH_H_
1556 +
1557 +#include <linux/types.h>
1558 +
1559 +typedef int32_t VCHI_MEM_HANDLE_T;
1560 +#define VCHI_MEM_HANDLE_INVALID 0
1561 +
1562 +#endif
1563 --- /dev/null
1564 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1565 @@ -0,0 +1,40 @@
1566 +/**
1567 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1568 + *
1569 + * Redistribution and use in source and binary forms, with or without
1570 + * modification, are permitted provided that the following conditions
1571 + * are met:
1572 + * 1. Redistributions of source code must retain the above copyright
1573 + *    notice, this list of conditions, and the following disclaimer,
1574 + *    without modification.
1575 + * 2. Redistributions in binary form must reproduce the above copyright
1576 + *    notice, this list of conditions and the following disclaimer in the
1577 + *    documentation and/or other materials provided with the distribution.
1578 + * 3. The names of the above-listed copyright holders may not be used
1579 + *    to endorse or promote products derived from this software without
1580 + *    specific prior written permission.
1581 + *
1582 + * ALTERNATIVELY, this software may be distributed under the terms of the
1583 + * GNU General Public License ("GPL") version 2, as published by the Free
1584 + * Software Foundation.
1585 + *
1586 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1587 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1588 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1589 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1590 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1591 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1592 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1593 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1594 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1595 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1596 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1597 + */
1598 +
1599 +#ifndef VCHIQ_VCHIQ_H
1600 +#define VCHIQ_VCHIQ_H
1601 +
1602 +#include "vchiq_if.h"
1603 +#include "vchiq_util.h"
1604 +
1605 +#endif
1606 --- /dev/null
1607 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1608 @@ -0,0 +1,42 @@
1609 +/**
1610 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1611 + *
1612 + * Redistribution and use in source and binary forms, with or without
1613 + * modification, are permitted provided that the following conditions
1614 + * are met:
1615 + * 1. Redistributions of source code must retain the above copyright
1616 + *    notice, this list of conditions, and the following disclaimer,
1617 + *    without modification.
1618 + * 2. Redistributions in binary form must reproduce the above copyright
1619 + *    notice, this list of conditions and the following disclaimer in the
1620 + *    documentation and/or other materials provided with the distribution.
1621 + * 3. The names of the above-listed copyright holders may not be used
1622 + *    to endorse or promote products derived from this software without
1623 + *    specific prior written permission.
1624 + *
1625 + * ALTERNATIVELY, this software may be distributed under the terms of the
1626 + * GNU General Public License ("GPL") version 2, as published by the Free
1627 + * Software Foundation.
1628 + *
1629 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1630 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1631 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1632 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1633 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1634 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1635 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1636 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1637 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1638 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1639 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1640 + */
1641 +
1642 +#ifndef VCHIQ_2835_H
1643 +#define VCHIQ_2835_H
1644 +
1645 +#include "vchiq_pagelist.h"
1646 +
1647 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1648 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
1649 +
1650 +#endif /* VCHIQ_2835_H */
1651 --- /dev/null
1652 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1653 @@ -0,0 +1,561 @@
1654 +/**
1655 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1656 + *
1657 + * Redistribution and use in source and binary forms, with or without
1658 + * modification, are permitted provided that the following conditions
1659 + * are met:
1660 + * 1. Redistributions of source code must retain the above copyright
1661 + *    notice, this list of conditions, and the following disclaimer,
1662 + *    without modification.
1663 + * 2. Redistributions in binary form must reproduce the above copyright
1664 + *    notice, this list of conditions and the following disclaimer in the
1665 + *    documentation and/or other materials provided with the distribution.
1666 + * 3. The names of the above-listed copyright holders may not be used
1667 + *    to endorse or promote products derived from this software without
1668 + *    specific prior written permission.
1669 + *
1670 + * ALTERNATIVELY, this software may be distributed under the terms of the
1671 + * GNU General Public License ("GPL") version 2, as published by the Free
1672 + * Software Foundation.
1673 + *
1674 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1675 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1676 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1677 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1678 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1679 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1680 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1681 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1682 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1683 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1684 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1685 + */
1686 +
1687 +#include <linux/kernel.h>
1688 +#include <linux/types.h>
1689 +#include <linux/errno.h>
1690 +#include <linux/interrupt.h>
1691 +#include <linux/irq.h>
1692 +#include <linux/pagemap.h>
1693 +#include <linux/dma-mapping.h>
1694 +#include <linux/version.h>
1695 +#include <linux/io.h>
1696 +#include <linux/uaccess.h>
1697 +#include <asm/pgtable.h>
1698 +
1699 +#include <mach/irqs.h>
1700 +
1701 +#include <mach/platform.h>
1702 +#include <mach/vcio.h>
1703 +
1704 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1705 +
1706 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1707 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1708 +
1709 +#include "vchiq_arm.h"
1710 +#include "vchiq_2835.h"
1711 +#include "vchiq_connected.h"
1712 +
1713 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1714 +
1715 +typedef struct vchiq_2835_state_struct {
1716 +   int inited;
1717 +   VCHIQ_ARM_STATE_T arm_state;
1718 +} VCHIQ_2835_ARM_STATE_T;
1719 +
1720 +static char *g_slot_mem;
1721 +static int g_slot_mem_size;
1722 +dma_addr_t g_slot_phys;
1723 +static FRAGMENTS_T *g_fragments_base;
1724 +static FRAGMENTS_T *g_free_fragments;
1725 +struct semaphore g_free_fragments_sema;
1726 +
1727 +extern int vchiq_arm_log_level;
1728 +
1729 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1730 +
1731 +static irqreturn_t
1732 +vchiq_doorbell_irq(int irq, void *dev_id);
1733 +
1734 +static int
1735 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1736 +                struct task_struct *task, PAGELIST_T ** ppagelist);
1737 +
1738 +static void
1739 +free_pagelist(PAGELIST_T *pagelist, int actual);
1740 +
1741 +int __init
1742 +vchiq_platform_init(VCHIQ_STATE_T *state)
1743 +{
1744 +       VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1745 +       int frag_mem_size;
1746 +       int err;
1747 +       int i;
1748 +
1749 +       /* Allocate space for the channels in coherent memory */
1750 +       g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1751 +       frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1752 +
1753 +       g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1754 +               &g_slot_phys, GFP_ATOMIC);
1755 +
1756 +       if (!g_slot_mem) {
1757 +               vchiq_log_error(vchiq_arm_log_level,
1758 +                       "Unable to allocate channel memory");
1759 +               err = -ENOMEM;
1760 +               goto failed_alloc;
1761 +       }
1762 +
1763 +       WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1764 +
1765 +       vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1766 +       if (!vchiq_slot_zero) {
1767 +               err = -EINVAL;
1768 +               goto failed_init_slots;
1769 +       }
1770 +
1771 +       vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1772 +               (int)g_slot_phys + g_slot_mem_size;
1773 +       vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1774 +               MAX_FRAGMENTS;
1775 +
1776 +       g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1777 +       g_slot_mem_size += frag_mem_size;
1778 +
1779 +       g_free_fragments = g_fragments_base;
1780 +       for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1781 +               *(FRAGMENTS_T **)&g_fragments_base[i] =
1782 +                       &g_fragments_base[i + 1];
1783 +       }
1784 +       *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1785 +       sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1786 +
1787 +       if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1788 +               VCHIQ_SUCCESS) {
1789 +               err = -EINVAL;
1790 +               goto failed_vchiq_init;
1791 +       }
1792 +
1793 +       err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1794 +               IRQF_IRQPOLL, "VCHIQ doorbell",
1795 +               state);
1796 +       if (err < 0) {
1797 +               vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1798 +                       "irq=%d err=%d", __func__,
1799 +                       VCHIQ_DOORBELL_IRQ, err);
1800 +               goto failed_request_irq;
1801 +       }
1802 +
1803 +       /* Send the base address of the slots to VideoCore */
1804 +
1805 +       dsb(); /* Ensure all writes have completed */
1806 +
1807 +       bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1808 +
1809 +       vchiq_log_info(vchiq_arm_log_level,
1810 +               "vchiq_init - done (slots %x, phys %x)",
1811 +               (unsigned int)vchiq_slot_zero, g_slot_phys);
1812 +
1813 +   vchiq_call_connected_callbacks();
1814 +
1815 +   return 0;
1816 +
1817 +failed_request_irq:
1818 +failed_vchiq_init:
1819 +failed_init_slots:
1820 +   dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
1821 +
1822 +failed_alloc:
1823 +   return err;
1824 +}
1825 +
1826 +void __exit
1827 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1828 +{
1829 +   free_irq(VCHIQ_DOORBELL_IRQ, state);
1830 +   dma_free_coherent(NULL, g_slot_mem_size,
1831 +                     g_slot_mem, g_slot_phys);
1832 +}
1833 +
1834 +
1835 +VCHIQ_STATUS_T
1836 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1837 +{
1838 +   VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1839 +   state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
1840 +   ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1841 +   status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1842 +   if(status != VCHIQ_SUCCESS)
1843 +   {
1844 +      ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1845 +   }
1846 +   return status;
1847 +}
1848 +
1849 +VCHIQ_ARM_STATE_T*
1850 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1851 +{
1852 +   if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1853 +   {
1854 +      BUG();
1855 +   }
1856 +   return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1857 +}
1858 +
1859 +void
1860 +remote_event_signal(REMOTE_EVENT_T *event)
1861 +{
1862 +       wmb();
1863 +
1864 +       event->fired = 1;
1865 +
1866 +       dsb();         /* data barrier operation */
1867 +
1868 +       if (event->armed) {
1869 +               /* trigger vc interrupt */
1870 +
1871 +               writel(0, __io_address(ARM_0_BELL2));
1872 +       }
1873 +}
1874 +
1875 +int
1876 +vchiq_copy_from_user(void *dst, const void *src, int size)
1877 +{
1878 +       if ((uint32_t)src < TASK_SIZE) {
1879 +               return copy_from_user(dst, src, size);
1880 +       } else {
1881 +               memcpy(dst, src, size);
1882 +               return 0;
1883 +       }
1884 +}
1885 +
1886 +VCHIQ_STATUS_T
1887 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1888 +       void *offset, int size, int dir)
1889 +{
1890 +       PAGELIST_T *pagelist;
1891 +       int ret;
1892 +
1893 +       WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1894 +
1895 +       ret = create_pagelist((char __user *)offset, size,
1896 +                       (dir == VCHIQ_BULK_RECEIVE)
1897 +                       ? PAGELIST_READ
1898 +                       : PAGELIST_WRITE,
1899 +                       current,
1900 +                       &pagelist);
1901 +       if (ret != 0)
1902 +               return VCHIQ_ERROR;
1903 +
1904 +       bulk->handle = memhandle;
1905 +       bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
1906 +
1907 +       /* Store the pagelist address in remote_data, which isn't used by the
1908 +          slave. */
1909 +       bulk->remote_data = pagelist;
1910 +
1911 +       return VCHIQ_SUCCESS;
1912 +}
1913 +
1914 +void
1915 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
1916 +{
1917 +       if (bulk && bulk->remote_data && bulk->actual)
1918 +               free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
1919 +}
1920 +
1921 +void
1922 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
1923 +{
1924 +       /*
1925 +        * This should only be called on the master (VideoCore) side, but
1926 +        * provide an implementation to avoid the need for ifdefery.
1927 +        */
1928 +       BUG();
1929 +}
1930 +
1931 +void
1932 +vchiq_dump_platform_state(void *dump_context)
1933 +{
1934 +       char buf[80];
1935 +       int len;
1936 +       len = snprintf(buf, sizeof(buf),
1937 +               "  Platform: 2835 (VC master)");
1938 +       vchiq_dump(dump_context, buf, len + 1);
1939 +}
1940 +
1941 +VCHIQ_STATUS_T
1942 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
1943 +{
1944 +   return VCHIQ_ERROR;
1945 +}
1946 +
1947 +VCHIQ_STATUS_T
1948 +vchiq_platform_resume(VCHIQ_STATE_T *state)
1949 +{
1950 +   return VCHIQ_SUCCESS;
1951 +}
1952 +
1953 +void
1954 +vchiq_platform_paused(VCHIQ_STATE_T *state)
1955 +{
1956 +}
1957 +
1958 +void
1959 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
1960 +{
1961 +}
1962 +
1963 +int
1964 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
1965 +{
1966 +   return 1; // autosuspend not supported - videocore always wanted
1967 +}
1968 +
1969 +int
1970 +vchiq_platform_use_suspend_timer(void)
1971 +{
1972 +   return 0;
1973 +}
1974 +void
1975 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
1976 +{
1977 +       vchiq_log_info((vchiq_arm_log_level>=VCHIQ_LOG_INFO),"Suspend timer not in use");
1978 +}
1979 +void
1980 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
1981 +{
1982 +       (void)state;
1983 +}
1984 +/*
1985 + * Local functions
1986 + */
1987 +
1988 +static irqreturn_t
1989 +vchiq_doorbell_irq(int irq, void *dev_id)
1990 +{
1991 +       VCHIQ_STATE_T *state = dev_id;
1992 +       irqreturn_t ret = IRQ_NONE;
1993 +       unsigned int status;
1994 +
1995 +       /* Read (and clear) the doorbell */
1996 +       status = readl(__io_address(ARM_0_BELL0));
1997 +
1998 +       if (status & 0x4) {  /* Was the doorbell rung? */
1999 +               remote_event_pollall(state);
2000 +               ret = IRQ_HANDLED;
2001 +       }
2002 +
2003 +       return ret;
2004 +}
2005 +
2006 +/* There is a potential problem with partial cache lines (pages?)
2007 +** at the ends of the block when reading. If the CPU accessed anything in
2008 +** the same line (page?) then it may have pulled old data into the cache,
2009 +** obscuring the new data underneath. We can solve this by transferring the
2010 +** partial cache lines separately, and allowing the ARM to copy into the
2011 +** cached area.
2012 +
2013 +** N.B. This implementation plays slightly fast and loose with the Linux
2014 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2015 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2016 +** from increased speed as a result.
2017 +*/
2018 +
2019 +static int
2020 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2021 +       struct task_struct *task, PAGELIST_T ** ppagelist)
2022 +{
2023 +       PAGELIST_T *pagelist;
2024 +       struct page **pages;
2025 +       struct page *page;
2026 +       unsigned long *addrs;
2027 +       unsigned int num_pages, offset, i;
2028 +       char *addr, *base_addr, *next_addr;
2029 +       int run, addridx, actual_pages;
2030 +        unsigned long *need_release;
2031 +
2032 +       offset = (unsigned int)buf & (PAGE_SIZE - 1);
2033 +       num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2034 +
2035 +       *ppagelist = NULL;
2036 +
2037 +       /* Allocate enough storage to hold the page pointers and the page
2038 +       ** list
2039 +       */
2040 +       pagelist = kmalloc(sizeof(PAGELIST_T) +
2041 +                           (num_pages * sizeof(unsigned long)) +
2042 +                           sizeof(unsigned long) +
2043 +                           (num_pages * sizeof(pages[0])),
2044 +                           GFP_KERNEL);
2045 +
2046 +       vchiq_log_trace(vchiq_arm_log_level,
2047 +               "create_pagelist - %x", (unsigned int)pagelist);
2048 +       if (!pagelist)
2049 +               return -ENOMEM;
2050 +
2051 +       addrs = pagelist->addrs;
2052 +        need_release = (unsigned long *)(addrs + num_pages);
2053 +       pages = (struct page **)(addrs + num_pages + 1);
2054 +
2055 +       if (is_vmalloc_addr(buf)) {
2056 +               for (actual_pages = 0; actual_pages < num_pages; actual_pages++) {
2057 +                       pages[actual_pages] = vmalloc_to_page(buf + (actual_pages * PAGE_SIZE));
2058 +               }
2059 +                *need_release = 0; /* do not try and release vmalloc pages */
2060 +       } else {
2061 +               down_read(&task->mm->mmap_sem);
2062 +               actual_pages = get_user_pages(task, task->mm,
2063 +                                         (unsigned long)buf & ~(PAGE_SIZE - 1),
2064 +                                         num_pages,
2065 +                                         (type == PAGELIST_READ) /*Write */ ,
2066 +                                         0 /*Force */ ,
2067 +                                         pages,
2068 +                                         NULL /*vmas */);
2069 +               up_read(&task->mm->mmap_sem);
2070 +
2071 +               if (actual_pages != num_pages) {
2072 +                       vchiq_log_info(vchiq_arm_log_level,
2073 +                                      "create_pagelist - only %d/%d pages locked",
2074 +                                      actual_pages,
2075 +                                      num_pages);
2076 +
2077 +                       /* This is probably due to the process being killed */
2078 +                       while (actual_pages > 0)
2079 +                       {
2080 +                               actual_pages--;
2081 +                               page_cache_release(pages[actual_pages]);
2082 +                       }
2083 +                       kfree(pagelist);
2084 +                       if (actual_pages == 0)
2085 +                               actual_pages = -ENOMEM;
2086 +                       return actual_pages;
2087 +               }
2088 +                *need_release = 1; /* release user pages */
2089 +       }
2090 +
2091 +       pagelist->length = count;
2092 +       pagelist->type = type;
2093 +       pagelist->offset = offset;
2094 +
2095 +       /* Group the pages into runs of contiguous pages */
2096 +
2097 +       base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2098 +       next_addr = base_addr + PAGE_SIZE;
2099 +       addridx = 0;
2100 +       run = 0;
2101 +
2102 +       for (i = 1; i < num_pages; i++) {
2103 +               addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2104 +               if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2105 +                       next_addr += PAGE_SIZE;
2106 +                       run++;
2107 +               } else {
2108 +                       addrs[addridx] = (unsigned long)base_addr + run;
2109 +                       addridx++;
2110 +                       base_addr = addr;
2111 +                       next_addr = addr + PAGE_SIZE;
2112 +                       run = 0;
2113 +               }
2114 +       }
2115 +
2116 +       addrs[addridx] = (unsigned long)base_addr + run;
2117 +       addridx++;
2118 +
2119 +       /* Partial cache lines (fragments) require special measures */
2120 +       if ((type == PAGELIST_READ) &&
2121 +               ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2122 +               ((pagelist->offset + pagelist->length) &
2123 +               (CACHE_LINE_SIZE - 1)))) {
2124 +               FRAGMENTS_T *fragments;
2125 +
2126 +               if (down_interruptible(&g_free_fragments_sema) != 0) {
2127 +                       kfree(pagelist);
2128 +                       return -EINTR;
2129 +               }
2130 +
2131 +               WARN_ON(g_free_fragments == NULL);
2132 +
2133 +               down(&g_free_fragments_mutex);
2134 +               fragments = (FRAGMENTS_T *) g_free_fragments;
2135 +               WARN_ON(fragments == NULL);
2136 +               g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2137 +               up(&g_free_fragments_mutex);
2138 +               pagelist->type =
2139 +                        PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2140 +                                                        g_fragments_base);
2141 +       }
2142 +
2143 +       for (page = virt_to_page(pagelist);
2144 +               page <= virt_to_page(addrs + num_pages - 1); page++) {
2145 +               flush_dcache_page(page);
2146 +       }
2147 +
2148 +       *ppagelist = pagelist;
2149 +
2150 +       return 0;
2151 +}
2152 +
2153 +static void
2154 +free_pagelist(PAGELIST_T *pagelist, int actual)
2155 +{
2156 +        unsigned long *need_release;
2157 +       struct page **pages;
2158 +       unsigned int num_pages, i;
2159 +
2160 +       vchiq_log_trace(vchiq_arm_log_level,
2161 +               "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2162 +
2163 +       num_pages =
2164 +               (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2165 +               PAGE_SIZE;
2166 +
2167 +        need_release = (unsigned long *)(pagelist->addrs + num_pages);
2168 +       pages = (struct page **)(pagelist->addrs + num_pages + 1);
2169 +
2170 +       /* Deal with any partial cache lines (fragments) */
2171 +       if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2172 +               FRAGMENTS_T *fragments = g_fragments_base +
2173 +                       (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2174 +               int head_bytes, tail_bytes;
2175 +               head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2176 +                       (CACHE_LINE_SIZE - 1);
2177 +               tail_bytes = (pagelist->offset + actual) &
2178 +                       (CACHE_LINE_SIZE - 1);
2179 +
2180 +               if ((actual >= 0) && (head_bytes != 0)) {
2181 +                       if (head_bytes > actual)
2182 +                               head_bytes = actual;
2183 +
2184 +                       memcpy((char *)page_address(pages[0]) +
2185 +                               pagelist->offset,
2186 +                               fragments->headbuf,
2187 +                               head_bytes);
2188 +               }
2189 +               if ((actual >= 0) && (head_bytes < actual) &&
2190 +                       (tail_bytes != 0)) {
2191 +                       memcpy((char *)page_address(pages[num_pages - 1]) +
2192 +                               ((pagelist->offset + actual) &
2193 +                               (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2194 +                               fragments->tailbuf, tail_bytes);
2195 +               }
2196 +
2197 +               down(&g_free_fragments_mutex);
2198 +               *(FRAGMENTS_T **) fragments = g_free_fragments;
2199 +               g_free_fragments = fragments;
2200 +               up(&g_free_fragments_mutex);
2201 +               up(&g_free_fragments_sema);
2202 +       }
2203 +
2204 +        if (*need_release) {
2205 +               for (i = 0; i < num_pages; i++) {
2206 +                       if (pagelist->type != PAGELIST_WRITE)
2207 +                               set_page_dirty(pages[i]);
2208 +
2209 +                       page_cache_release(pages[i]);
2210 +               }
2211 +        }
2212 +
2213 +       kfree(pagelist);
2214 +}
2215 --- /dev/null
2216 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2217 @@ -0,0 +1,2813 @@
2218 +/**
2219 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2220 + *
2221 + * Redistribution and use in source and binary forms, with or without
2222 + * modification, are permitted provided that the following conditions
2223 + * are met:
2224 + * 1. Redistributions of source code must retain the above copyright
2225 + *    notice, this list of conditions, and the following disclaimer,
2226 + *    without modification.
2227 + * 2. Redistributions in binary form must reproduce the above copyright
2228 + *    notice, this list of conditions and the following disclaimer in the
2229 + *    documentation and/or other materials provided with the distribution.
2230 + * 3. The names of the above-listed copyright holders may not be used
2231 + *    to endorse or promote products derived from this software without
2232 + *    specific prior written permission.
2233 + *
2234 + * ALTERNATIVELY, this software may be distributed under the terms of the
2235 + * GNU General Public License ("GPL") version 2, as published by the Free
2236 + * Software Foundation.
2237 + *
2238 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2239 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2240 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2241 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2242 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2243 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2244 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2245 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2246 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2247 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2248 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2249 + */
2250 +
2251 +#include <linux/kernel.h>
2252 +#include <linux/module.h>
2253 +#include <linux/types.h>
2254 +#include <linux/errno.h>
2255 +#include <linux/cdev.h>
2256 +#include <linux/fs.h>
2257 +#include <linux/device.h>
2258 +#include <linux/mm.h>
2259 +#include <linux/highmem.h>
2260 +#include <linux/pagemap.h>
2261 +#include <linux/bug.h>
2262 +#include <linux/semaphore.h>
2263 +#include <linux/list.h>
2264 +#include <linux/proc_fs.h>
2265 +
2266 +#include "vchiq_core.h"
2267 +#include "vchiq_ioctl.h"
2268 +#include "vchiq_arm.h"
2269 +
2270 +#define DEVICE_NAME "vchiq"
2271 +
2272 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2273 +#undef MODULE_PARAM_PREFIX
2274 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2275 +
2276 +#define VCHIQ_MINOR 0
2277 +
2278 +/* Some per-instance constants */
2279 +#define MAX_COMPLETIONS 16
2280 +#define MAX_SERVICES 64
2281 +#define MAX_ELEMENTS 8
2282 +#define MSG_QUEUE_SIZE 64
2283 +
2284 +#define KEEPALIVE_VER 1
2285 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2286 +
2287 +/* Run time control of log level, based on KERN_XXX level. */
2288 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2289 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2290 +
2291 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2292 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2293 +
2294 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2295 +static const char *const suspend_state_names[] = {
2296 +       "VC_SUSPEND_FORCE_CANCELED",
2297 +       "VC_SUSPEND_REJECTED",
2298 +       "VC_SUSPEND_FAILED",
2299 +       "VC_SUSPEND_IDLE",
2300 +       "VC_SUSPEND_REQUESTED",
2301 +       "VC_SUSPEND_IN_PROGRESS",
2302 +       "VC_SUSPEND_SUSPENDED"
2303 +};
2304 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2305 +static const char *const resume_state_names[] = {
2306 +       "VC_RESUME_FAILED",
2307 +       "VC_RESUME_IDLE",
2308 +       "VC_RESUME_REQUESTED",
2309 +       "VC_RESUME_IN_PROGRESS",
2310 +       "VC_RESUME_RESUMED"
2311 +};
2312 +/* The number of times we allow force suspend to timeout before actually
2313 +** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
2314 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2315 +*/
2316 +#define FORCE_SUSPEND_FAIL_MAX 8
2317 +
2318 +/* The time in ms allowed for videocore to go idle when force suspend has been
2319 + * requested */
2320 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2321 +
2322 +
2323 +static void suspend_timer_callback(unsigned long context);
2324 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
2325 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
2326 +
2327 +
2328 +typedef struct user_service_struct {
2329 +       VCHIQ_SERVICE_T *service;
2330 +       void *userdata;
2331 +       VCHIQ_INSTANCE_T instance;
2332 +       int is_vchi;
2333 +       int dequeue_pending;
2334 +       int message_available_pos;
2335 +       int msg_insert;
2336 +       int msg_remove;
2337 +       struct semaphore insert_event;
2338 +       struct semaphore remove_event;
2339 +       VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2340 +} USER_SERVICE_T;
2341 +
2342 +struct bulk_waiter_node {
2343 +       struct bulk_waiter bulk_waiter;
2344 +       int pid;
2345 +       struct list_head list;
2346 +};
2347 +
2348 +struct vchiq_instance_struct {
2349 +       VCHIQ_STATE_T *state;
2350 +       VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2351 +       int completion_insert;
2352 +       int completion_remove;
2353 +       struct semaphore insert_event;
2354 +       struct semaphore remove_event;
2355 +       struct mutex completion_mutex;
2356 +
2357 +       int connected;
2358 +       int closing;
2359 +       int pid;
2360 +       int mark;
2361 +
2362 +       struct list_head bulk_waiter_list;
2363 +       struct mutex bulk_waiter_list_mutex;
2364 +
2365 +       struct proc_dir_entry *proc_entry;
2366 +};
2367 +
2368 +typedef struct dump_context_struct {
2369 +       char __user *buf;
2370 +       size_t actual;
2371 +       size_t space;
2372 +       loff_t offset;
2373 +} DUMP_CONTEXT_T;
2374 +
2375 +static struct cdev    vchiq_cdev;
2376 +static dev_t          vchiq_devid;
2377 +static VCHIQ_STATE_T g_state;
2378 +static struct class  *vchiq_class;
2379 +static struct device *vchiq_dev;
2380 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2381 +
2382 +static const char *const ioctl_names[] = {
2383 +       "CONNECT",
2384 +       "SHUTDOWN",
2385 +       "CREATE_SERVICE",
2386 +       "REMOVE_SERVICE",
2387 +       "QUEUE_MESSAGE",
2388 +       "QUEUE_BULK_TRANSMIT",
2389 +       "QUEUE_BULK_RECEIVE",
2390 +       "AWAIT_COMPLETION",
2391 +       "DEQUEUE_MESSAGE",
2392 +       "GET_CLIENT_ID",
2393 +       "GET_CONFIG",
2394 +       "CLOSE_SERVICE",
2395 +       "USE_SERVICE",
2396 +       "RELEASE_SERVICE",
2397 +       "SET_SERVICE_OPTION",
2398 +       "DUMP_PHYS_MEM"
2399 +};
2400 +
2401 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2402 +       (VCHIQ_IOC_MAX + 1));
2403 +
2404 +static void
2405 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2406 +
2407 +/****************************************************************************
2408 +*
2409 +*   add_completion
2410 +*
2411 +***************************************************************************/
2412 +
2413 +static VCHIQ_STATUS_T
2414 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2415 +       VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2416 +       void *bulk_userdata)
2417 +{
2418 +       VCHIQ_COMPLETION_DATA_T *completion;
2419 +       DEBUG_INITIALISE(g_state.local)
2420 +
2421 +       while (instance->completion_insert ==
2422 +               (instance->completion_remove + MAX_COMPLETIONS)) {
2423 +               /* Out of space - wait for the client */
2424 +               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2425 +               vchiq_log_trace(vchiq_arm_log_level,
2426 +                       "add_completion - completion queue full");
2427 +               DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2428 +               if (down_interruptible(&instance->remove_event) != 0) {
2429 +                       vchiq_log_info(vchiq_arm_log_level,
2430 +                               "service_callback interrupted");
2431 +                       return VCHIQ_RETRY;
2432 +               } else if (instance->closing) {
2433 +                       vchiq_log_info(vchiq_arm_log_level,
2434 +                               "service_callback closing");
2435 +                       return VCHIQ_ERROR;
2436 +               }
2437 +               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2438 +       }
2439 +
2440 +       completion =
2441 +                &instance->completions[instance->completion_insert &
2442 +                (MAX_COMPLETIONS - 1)];
2443 +
2444 +       completion->header = header;
2445 +       completion->reason = reason;
2446 +       /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2447 +       completion->service_userdata = user_service->service;
2448 +       completion->bulk_userdata = bulk_userdata;
2449 +
2450 +       if (reason == VCHIQ_SERVICE_CLOSED)
2451 +               /* Take an extra reference, to be held until
2452 +                  this CLOSED notification is delivered. */
2453 +               lock_service(user_service->service);
2454 +
2455 +       /* A write barrier is needed here to ensure that the entire completion
2456 +               record is written out before the insert point. */
2457 +       wmb();
2458 +
2459 +       if (reason == VCHIQ_MESSAGE_AVAILABLE)
2460 +               user_service->message_available_pos =
2461 +                       instance->completion_insert;
2462 +       instance->completion_insert++;
2463 +
2464 +       up(&instance->insert_event);
2465 +
2466 +       return VCHIQ_SUCCESS;
2467 +}
2468 +
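add_completion leans on MAX_COMPLETIONS being a power of two: completion_insert and completion_remove are free-running counters, the queue is full when they sit exactly MAX_COMPLETIONS apart, and the slot index is obtained by masking rather than by a modulo. The per-service msg_queue later in the file uses the same trick with MSG_QUEUE_SIZE. A minimal standalone sketch of the idiom, with illustrative names and types rather than the driver's:

    #define RING_SIZE 16                    /* must be a power of two */

    struct ring {
            unsigned int insert;            /* free-running write counter */
            unsigned int remove;            /* free-running read counter  */
            void *slot[RING_SIZE];
    };

    static int ring_full(const struct ring *r)
    {
            return r->insert == r->remove + RING_SIZE;
    }

    static void ring_put(struct ring *r, void *item)
    {
            r->slot[r->insert & (RING_SIZE - 1)] = item;
            r->insert++;            /* unsigned wrap-around is well defined */
    }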
2469 +/****************************************************************************
2470 +*
2471 +*   service_callback
2472 +*
2473 +***************************************************************************/
2474 +
2475 +static VCHIQ_STATUS_T
2476 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2477 +       VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2478 +{
2479 +       /* How do we ensure the callback goes to the right client?
2480 +       ** The service_user data points to a USER_SERVICE_T record containing
2481 +       ** the original callback and the user state structure, which contains a
2482 +       ** circular buffer for completion records.
2483 +       */
2484 +       USER_SERVICE_T *user_service;
2485 +       VCHIQ_SERVICE_T *service;
2486 +       VCHIQ_INSTANCE_T instance;
2487 +       DEBUG_INITIALISE(g_state.local)
2488 +
2489 +       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2490 +
2491 +       service = handle_to_service(handle);
2492 +       BUG_ON(!service);
2493 +       user_service = (USER_SERVICE_T *)service->base.userdata;
2494 +       instance = user_service->instance;
2495 +
2496 +       if (!instance || instance->closing)
2497 +               return VCHIQ_SUCCESS;
2498 +
2499 +       vchiq_log_trace(vchiq_arm_log_level,
2500 +               "service_callback - service %lx(%d), reason %d, header %lx, "
2501 +               "instance %lx, bulk_userdata %lx",
2502 +               (unsigned long)user_service,
2503 +               service->localport,
2504 +               reason, (unsigned long)header,
2505 +               (unsigned long)instance, (unsigned long)bulk_userdata);
2506 +
2507 +       if (header && user_service->is_vchi) {
2508 +               spin_lock(&msg_queue_spinlock);
2509 +               while (user_service->msg_insert ==
2510 +                       (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2511 +                       spin_unlock(&msg_queue_spinlock);
2512 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2513 +                       DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2514 +                       vchiq_log_trace(vchiq_arm_log_level,
2515 +                               "service_callback - msg queue full");
2516 +                       /* If there is no MESSAGE_AVAILABLE in the completion
2517 +                       ** queue, add one
2518 +                       */
2519 +                       if ((user_service->message_available_pos -
2520 +                               instance->completion_remove) < 0) {
2521 +                               VCHIQ_STATUS_T status;
2522 +                               vchiq_log_info(vchiq_arm_log_level,
2523 +                                       "Inserting extra MESSAGE_AVAILABLE");
2524 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2525 +                               status = add_completion(instance, reason,
2526 +                                       NULL, user_service, bulk_userdata);
2527 +                               if (status != VCHIQ_SUCCESS) {
2528 +                                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2529 +                                       return status;
2530 +                               }
2531 +                       }
2532 +
2533 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2534 +                       if (down_interruptible(&user_service->remove_event)
2535 +                               != 0) {
2536 +                               vchiq_log_info(vchiq_arm_log_level,
2537 +                                       "service_callback interrupted");
2538 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2539 +                               return VCHIQ_RETRY;
2540 +                       } else if (instance->closing) {
2541 +                               vchiq_log_info(vchiq_arm_log_level,
2542 +                                       "service_callback closing");
2543 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2544 +                               return VCHIQ_ERROR;
2545 +                       }
2546 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2547 +                       spin_lock(&msg_queue_spinlock);
2548 +               }
2549 +
2550 +               user_service->msg_queue[user_service->msg_insert &
2551 +                       (MSG_QUEUE_SIZE - 1)] = header;
2552 +               user_service->msg_insert++;
2553 +               spin_unlock(&msg_queue_spinlock);
2554 +
2555 +               up(&user_service->insert_event);
2556 +
2557 +               /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2558 +               ** there is a MESSAGE_AVAILABLE in the completion queue then
2559 +               ** bypass the completion queue.
2560 +               */
2561 +               if (((user_service->message_available_pos -
2562 +                       instance->completion_remove) >= 0) ||
2563 +                       user_service->dequeue_pending) {
2564 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2565 +                       user_service->dequeue_pending = 0;
2566 +                       return VCHIQ_SUCCESS;
2567 +               }
2568 +
2569 +               header = NULL;
2570 +       }
2571 +       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2572 +
2573 +       return add_completion(instance, reason, header, user_service,
2574 +               bulk_userdata);
2575 +}
2576 +
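A detail worth calling out in service_callback: whether a MESSAGE_AVAILABLE completion is still outstanding is decided by looking at the sign of the difference (message_available_pos - completion_remove), which keeps working when the free-running counters wrap. A compact sketch of that comparison, with illustrative names and types:

    /* True if position 'pos' has not yet been consumed by the reader. */
    static int pos_still_pending(unsigned int pos, unsigned int remove)
    {
            return (int)(pos - remove) >= 0;
    }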
2577 +/****************************************************************************
2578 +*
2579 +*   user_service_free
2580 +*
2581 +***************************************************************************/
2582 +static void
2583 +user_service_free(void *userdata)
2584 +{
2585 +       kfree(userdata);
2586 +}
2587 +
2588 +/****************************************************************************
2589 +*
2590 +*   vchiq_ioctl
2591 +*
2592 +***************************************************************************/
2593 +
2594 +static long
2595 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2596 +{
2597 +       VCHIQ_INSTANCE_T instance = file->private_data;
2598 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2599 +       VCHIQ_SERVICE_T *service = NULL;
2600 +       long ret = 0;
2601 +       int i, rc;
2602 +       DEBUG_INITIALISE(g_state.local)
2603 +
2604 +       vchiq_log_trace(vchiq_arm_log_level,
2605 +                "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2606 +               (unsigned int)instance,
2607 +               ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2608 +               (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2609 +               ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2610 +
2611 +       switch (cmd) {
2612 +       case VCHIQ_IOC_SHUTDOWN:
2613 +               if (!instance->connected)
2614 +                       break;
2615 +
2616 +               /* Remove all services */
2617 +               i = 0;
2618 +               while ((service = next_service_by_instance(instance->state,
2619 +                       instance, &i)) != NULL) {
2620 +                       status = vchiq_remove_service(service->handle);
2621 +                       unlock_service(service);
2622 +                       if (status != VCHIQ_SUCCESS)
2623 +                               break;
2624 +               }
2625 +               service = NULL;
2626 +
2627 +               if (status == VCHIQ_SUCCESS) {
2628 +                       /* Wake the completion thread and ask it to exit */
2629 +                       instance->closing = 1;
2630 +                       up(&instance->insert_event);
2631 +               }
2632 +
2633 +               break;
2634 +
2635 +       case VCHIQ_IOC_CONNECT:
2636 +               if (instance->connected) {
2637 +                       ret = -EINVAL;
2638 +                       break;
2639 +               }
2640 +               rc = mutex_lock_interruptible(&instance->state->mutex);
2641 +               if (rc != 0) {
2642 +                       vchiq_log_error(vchiq_arm_log_level,
2643 +                               "vchiq: connect: could not lock mutex for "
2644 +                               "state %d: %d",
2645 +                               instance->state->id, rc);
2646 +                       ret = -EINTR;
2647 +                       break;
2648 +               }
2649 +               status = vchiq_connect_internal(instance->state, instance);
2650 +               mutex_unlock(&instance->state->mutex);
2651 +
2652 +               if (status == VCHIQ_SUCCESS)
2653 +                       instance->connected = 1;
2654 +               else
2655 +                       vchiq_log_error(vchiq_arm_log_level,
2656 +                               "vchiq: could not connect: %d", status);
2657 +               break;
2658 +
2659 +       case VCHIQ_IOC_CREATE_SERVICE: {
2660 +               VCHIQ_CREATE_SERVICE_T args;
2661 +               USER_SERVICE_T *user_service = NULL;
2662 +               void *userdata;
2663 +               int srvstate;
2664 +
2665 +               if (copy_from_user
2666 +                        (&args, (const void __user *)arg,
2667 +                         sizeof(args)) != 0) {
2668 +                       ret = -EFAULT;
2669 +                       break;
2670 +               }
2671 +
2672 +               user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2673 +               if (!user_service) {
2674 +                       ret = -ENOMEM;
2675 +                       break;
2676 +               }
2677 +
2678 +               if (args.is_open) {
2679 +                       if (!instance->connected) {
2680 +                               ret = -ENOTCONN;
2681 +                               kfree(user_service);
2682 +                               break;
2683 +                       }
2684 +                       srvstate = VCHIQ_SRVSTATE_OPENING;
2685 +               } else {
2686 +                       srvstate =
2687 +                                instance->connected ?
2688 +                                VCHIQ_SRVSTATE_LISTENING :
2689 +                                VCHIQ_SRVSTATE_HIDDEN;
2690 +               }
2691 +
2692 +               userdata = args.params.userdata;
2693 +               args.params.callback = service_callback;
2694 +               args.params.userdata = user_service;
2695 +               service = vchiq_add_service_internal(
2696 +                               instance->state,
2697 +                               &args.params, srvstate,
2698 +                               instance, user_service_free);
2699 +
2700 +               if (service != NULL) {
2701 +                       user_service->service = service;
2702 +                       user_service->userdata = userdata;
2703 +                       user_service->instance = instance;
2704 +                       user_service->is_vchi = args.is_vchi;
2705 +                       user_service->dequeue_pending = 0;
2706 +                       user_service->message_available_pos =
2707 +                               instance->completion_remove - 1;
2708 +                       user_service->msg_insert = 0;
2709 +                       user_service->msg_remove = 0;
2710 +                       sema_init(&user_service->insert_event, 0);
2711 +                       sema_init(&user_service->remove_event, 0);
2712 +
2713 +                       if (args.is_open) {
2714 +                               status = vchiq_open_service_internal
2715 +                                       (service, instance->pid);
2716 +                               if (status != VCHIQ_SUCCESS) {
2717 +                                       vchiq_remove_service(service->handle);
2718 +                                       service = NULL;
2719 +                                       ret = (status == VCHIQ_RETRY) ?
2720 +                                               -EINTR : -EIO;
2721 +                                       break;
2722 +                               }
2723 +                       }
2724 +
2725 +                       if (copy_to_user((void __user *)
2726 +                               &(((VCHIQ_CREATE_SERVICE_T __user *)
2727 +                                       arg)->handle),
2728 +                               (const void *)&service->handle,
2729 +                               sizeof(service->handle)) != 0) {
2730 +                               ret = -EFAULT;
2731 +                               vchiq_remove_service(service->handle);
2732 +                       }
2733 +
2734 +                       service = NULL;
2735 +               } else {
2736 +                       ret = -EEXIST;
2737 +                       kfree(user_service);
2738 +               }
2739 +       } break;
2740 +
2741 +       case VCHIQ_IOC_CLOSE_SERVICE: {
2742 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2743 +
2744 +               service = find_service_for_instance(instance, handle);
2745 +               if (service != NULL)
2746 +                       status = vchiq_close_service(service->handle);
2747 +               else
2748 +                       ret = -EINVAL;
2749 +       } break;
2750 +
2751 +       case VCHIQ_IOC_REMOVE_SERVICE: {
2752 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2753 +
2754 +               service = find_service_for_instance(instance, handle);
2755 +               if (service != NULL)
2756 +                       status = vchiq_remove_service(service->handle);
2757 +               else
2758 +                       ret = -EINVAL;
2759 +       } break;
2760 +
2761 +       case VCHIQ_IOC_USE_SERVICE:
2762 +       case VCHIQ_IOC_RELEASE_SERVICE: {
2763 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2764 +
2765 +               service = find_service_for_instance(instance, handle);
2766 +               if (service != NULL) {
2767 +                       status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2768 +                               vchiq_use_service_internal(service) :
2769 +                               vchiq_release_service_internal(service);
2770 +                       if (status != VCHIQ_SUCCESS) {
2771 +                               vchiq_log_error(vchiq_susp_log_level,
2772 +                                       "%s: cmd %s returned error %d for "
2773 +                                       "service %c%c%c%c:%03d",
2774 +                                       __func__,
2775 +                                       (cmd == VCHIQ_IOC_USE_SERVICE) ?
2776 +                                               "VCHIQ_IOC_USE_SERVICE" :
2777 +                                               "VCHIQ_IOC_RELEASE_SERVICE",
2778 +                                       status,
2779 +                                       VCHIQ_FOURCC_AS_4CHARS(
2780 +                                               service->base.fourcc),
2781 +                                       service->client_id);
2782 +                               ret = -EINVAL;
2783 +                       }
2784 +               } else
2785 +                       ret = -EINVAL;
2786 +       } break;
2787 +
2788 +       case VCHIQ_IOC_QUEUE_MESSAGE: {
2789 +               VCHIQ_QUEUE_MESSAGE_T args;
2790 +               if (copy_from_user
2791 +                        (&args, (const void __user *)arg,
2792 +                         sizeof(args)) != 0) {
2793 +                       ret = -EFAULT;
2794 +                       break;
2795 +               }
2796 +
2797 +               service = find_service_for_instance(instance, args.handle);
2798 +
2799 +               if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2800 +                       /* Copy elements into kernel space */
2801 +                       VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2802 +                       if (copy_from_user(elements, args.elements,
2803 +                               args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2804 +                               status = vchiq_queue_message
2805 +                                       (args.handle,
2806 +                                       elements, args.count);
2807 +                       else
2808 +                               ret = -EFAULT;
2809 +               } else {
2810 +                       ret = -EINVAL;
2811 +               }
2812 +       } break;
2813 +
2814 +       case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2815 +       case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2816 +               VCHIQ_QUEUE_BULK_TRANSFER_T args;
2817 +               struct bulk_waiter_node *waiter = NULL;
2818 +               VCHIQ_BULK_DIR_T dir =
2819 +                       (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2820 +                       VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2821 +
2822 +               if (copy_from_user
2823 +                       (&args, (const void __user *)arg,
2824 +                       sizeof(args)) != 0) {
2825 +                       ret = -EFAULT;
2826 +                       break;
2827 +               }
2828 +
2829 +               service = find_service_for_instance(instance, args.handle);
2830 +               if (!service) {
2831 +                       ret = -EINVAL;
2832 +                       break;
2833 +               }
2834 +
2835 +               if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2836 +                       waiter = kzalloc(sizeof(struct bulk_waiter_node),
2837 +                               GFP_KERNEL);
2838 +                       if (!waiter) {
2839 +                               ret = -ENOMEM;
2840 +                               break;
2841 +                       }
2842 +                       args.userdata = &waiter->bulk_waiter;
2843 +               } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
2844 +                       struct list_head *pos;
2845 +                       mutex_lock(&instance->bulk_waiter_list_mutex);
2846 +                       list_for_each(pos, &instance->bulk_waiter_list) {
2847 +                               if (list_entry(pos, struct bulk_waiter_node,
2848 +                                       list)->pid == current->pid) {
2849 +                                       waiter = list_entry(pos,
2850 +                                               struct bulk_waiter_node,
2851 +                                               list);
2852 +                                       list_del(pos);
2853 +                                       break;
2854 +                               }
2855 +
2856 +                       }
2857 +                       mutex_unlock(&instance->bulk_waiter_list_mutex);
2858 +                       if (!waiter) {
2859 +                               vchiq_log_error(vchiq_arm_log_level,
2860 +                                       "no bulk_waiter found for pid %d",
2861 +                                       current->pid);
2862 +                               ret = -ESRCH;
2863 +                               break;
2864 +                       }
2865 +                       vchiq_log_info(vchiq_arm_log_level,
2866 +                               "found bulk_waiter %x for pid %d",
2867 +                               (unsigned int)waiter, current->pid);
2868 +                       args.userdata = &waiter->bulk_waiter;
2869 +               }
2870 +               status = vchiq_bulk_transfer
2871 +                       (args.handle,
2872 +                        VCHI_MEM_HANDLE_INVALID,
2873 +                        args.data, args.size,
2874 +                        args.userdata, args.mode,
2875 +                        dir);
2876 +               if (!waiter)
2877 +                       break;
2878 +               if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
2879 +                       !waiter->bulk_waiter.bulk) {
2880 +                       if (waiter->bulk_waiter.bulk) {
2881 +                               /* Cancel the signal when the transfer
2882 +                               ** completes. */
2883 +                               spin_lock(&bulk_waiter_spinlock);
2884 +                               waiter->bulk_waiter.bulk->userdata = NULL;
2885 +                               spin_unlock(&bulk_waiter_spinlock);
2886 +                       }
2887 +                       kfree(waiter);
2888 +               } else {
2889 +                       const VCHIQ_BULK_MODE_T mode_waiting =
2890 +                               VCHIQ_BULK_MODE_WAITING;
2891 +                       waiter->pid = current->pid;
2892 +                       mutex_lock(&instance->bulk_waiter_list_mutex);
2893 +                       list_add(&waiter->list, &instance->bulk_waiter_list);
2894 +                       mutex_unlock(&instance->bulk_waiter_list_mutex);
2895 +                       vchiq_log_info(vchiq_arm_log_level,
2896 +                               "saved bulk_waiter %x for pid %d",
2897 +                               (unsigned int)waiter, current->pid);
2898 +
2899 +                       if (copy_to_user((void __user *)
2900 +                               &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
2901 +                                       arg)->mode),
2902 +                               (const void *)&mode_waiting,
2903 +                               sizeof(mode_waiting)) != 0)
2904 +                               ret = -EFAULT;
2905 +               }
2906 +       } break;
2907 +
2908 +       case VCHIQ_IOC_AWAIT_COMPLETION: {
2909 +               VCHIQ_AWAIT_COMPLETION_T args;
2910 +
2911 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2912 +               if (!instance->connected) {
2913 +                       ret = -ENOTCONN;
2914 +                       break;
2915 +               }
2916 +
2917 +               if (copy_from_user(&args, (const void __user *)arg,
2918 +                       sizeof(args)) != 0) {
2919 +                       ret = -EFAULT;
2920 +                       break;
2921 +               }
2922 +
2923 +               mutex_lock(&instance->completion_mutex);
2924 +
2925 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2926 +               while ((instance->completion_remove ==
2927 +                       instance->completion_insert)
2928 +                       && !instance->closing) {
2929 +                       int rc;
2930 +                       DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2931 +                       mutex_unlock(&instance->completion_mutex);
2932 +                       rc = down_interruptible(&instance->insert_event);
2933 +                       mutex_lock(&instance->completion_mutex);
2934 +                       if (rc != 0) {
2935 +                               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2936 +                               vchiq_log_info(vchiq_arm_log_level,
2937 +                                       "AWAIT_COMPLETION interrupted");
2938 +                               ret = -EINTR;
2939 +                               break;
2940 +                       }
2941 +               }
2942 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2943 +
2944 +               /* A read memory barrier is needed to stop prefetch of a stale
2945 +               ** completion record
2946 +               */
2947 +               rmb();
2948 +
2949 +               if (ret == 0) {
2950 +                       int msgbufcount = args.msgbufcount;
2951 +                       for (ret = 0; ret < args.count; ret++) {
2952 +                               VCHIQ_COMPLETION_DATA_T *completion;
2953 +                               VCHIQ_SERVICE_T *service;
2954 +                               USER_SERVICE_T *user_service;
2955 +                               VCHIQ_HEADER_T *header;
2956 +                               if (instance->completion_remove ==
2957 +                                       instance->completion_insert)
2958 +                                       break;
2959 +                               completion = &instance->completions[
2960 +                                       instance->completion_remove &
2961 +                                       (MAX_COMPLETIONS - 1)];
2962 +
2963 +                               service = completion->service_userdata;
2964 +                               user_service = service->base.userdata;
2965 +                               completion->service_userdata =
2966 +                                       user_service->userdata;
2967 +
2968 +                               header = completion->header;
2969 +                               if (header) {
2970 +                                       void __user *msgbuf;
2971 +                                       int msglen;
2972 +
2973 +                                       msglen = header->size +
2974 +                                               sizeof(VCHIQ_HEADER_T);
2975 +                                       /* This must be a VCHIQ-style service */
2976 +                                       if (args.msgbufsize < msglen) {
2977 +                                               vchiq_log_error(
2978 +                                                       vchiq_arm_log_level,
2979 +                                                       "header %x: msgbufsize"
2980 +                                                       " %x < msglen %x",
2981 +                                                       (unsigned int)header,
2982 +                                                       args.msgbufsize,
2983 +                                                       msglen);
2984 +                                               WARN(1, "invalid message "
2985 +                                                       "size\n");
2986 +                                               if (ret == 0)
2987 +                                                       ret = -EMSGSIZE;
2988 +                                               break;
2989 +                                       }
2990 +                                       if (msgbufcount <= 0)
2991 +                                               /* Stall here for lack of a
2992 +                                               ** buffer for the message. */
2993 +                                               break;
2994 +                                       /* Get the pointer from user space */
2995 +                                       msgbufcount--;
2996 +                                       if (copy_from_user(&msgbuf,
2997 +                                               (const void __user *)
2998 +                                               &args.msgbufs[msgbufcount],
2999 +                                               sizeof(msgbuf)) != 0) {
3000 +                                               if (ret == 0)
3001 +                                                       ret = -EFAULT;
3002 +                                               break;
3003 +                                       }
3004 +
3005 +                                       /* Copy the message to user space */
3006 +                                       if (copy_to_user(msgbuf, header,
3007 +                                               msglen) != 0) {
3008 +                                               if (ret == 0)
3009 +                                                       ret = -EFAULT;
3010 +                                               break;
3011 +                                       }
3012 +
3013 +                                       /* Now it has been copied, the message
3014 +                                       ** can be released. */
3015 +                                       vchiq_release_message(service->handle,
3016 +                                               header);
3017 +
3018 +                                       /* The completion must point to the
3019 +                                       ** msgbuf. */
3020 +                                       completion->header = msgbuf;
3021 +                               }
3022 +
3023 +                               if (completion->reason ==
3024 +                                       VCHIQ_SERVICE_CLOSED)
3025 +                                       unlock_service(service);
3026 +
3027 +                               if (copy_to_user((void __user *)(
3028 +                                       (size_t)args.buf +
3029 +                                       ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
3030 +                                       completion,
3031 +                                       sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3032 +                                               if (ret == 0)
3033 +                                                       ret = -EFAULT;
3034 +                                       break;
3035 +                               }
3036 +
3037 +                               instance->completion_remove++;
3038 +                       }
3039 +
3040 +                       if (msgbufcount != args.msgbufcount) {
3041 +                               if (copy_to_user((void __user *)
3042 +                                       &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3043 +                                               msgbufcount,
3044 +                                       &msgbufcount,
3045 +                                       sizeof(msgbufcount)) != 0) {
3046 +                                       ret = -EFAULT;
3047 +                               }
3048 +                       }
3049 +               }
3050 +
3051 +               if (ret != 0)
3052 +                       up(&instance->remove_event);
3053 +               mutex_unlock(&instance->completion_mutex);
3054 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3055 +       } break;
3056 +
3057 +       case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3058 +               VCHIQ_DEQUEUE_MESSAGE_T args;
3059 +               USER_SERVICE_T *user_service;
3060 +               VCHIQ_HEADER_T *header;
3061 +
3062 +               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3063 +               if (copy_from_user
3064 +                        (&args, (const void __user *)arg,
3065 +                         sizeof(args)) != 0) {
3066 +                       ret = -EFAULT;
3067 +                       break;
3068 +               }
3069 +               service = find_service_for_instance(instance, args.handle);
3070 +               if (!service) {
3071 +                       ret = -EINVAL;
3072 +                       break;
3073 +               }
3074 +               user_service = (USER_SERVICE_T *)service->base.userdata;
3075 +               if (user_service->is_vchi == 0) {
3076 +                       ret = -EINVAL;
3077 +                       break;
3078 +               }
3079 +
3080 +               spin_lock(&msg_queue_spinlock);
3081 +               if (user_service->msg_remove == user_service->msg_insert) {
3082 +                       if (!args.blocking) {
3083 +                               spin_unlock(&msg_queue_spinlock);
3084 +                               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3085 +                               ret = -EWOULDBLOCK;
3086 +                               break;
3087 +                       }
3088 +                       user_service->dequeue_pending = 1;
3089 +                       do {
3090 +                               spin_unlock(&msg_queue_spinlock);
3091 +                               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3092 +                               if (down_interruptible(
3093 +                                       &user_service->insert_event) != 0) {
3094 +                                       vchiq_log_info(vchiq_arm_log_level,
3095 +                                               "DEQUEUE_MESSAGE interrupted");
3096 +                                       ret = -EINTR;
3097 +                                       break;
3098 +                               }
3099 +                               spin_lock(&msg_queue_spinlock);
3100 +                       } while (user_service->msg_remove ==
3101 +                               user_service->msg_insert);
3102 +
3103 +                       if (ret)
3104 +                               break;
3105 +               }
3106 +
3107 +               BUG_ON((int)(user_service->msg_insert -
3108 +                       user_service->msg_remove) < 0);
3109 +
3110 +               header = user_service->msg_queue[user_service->msg_remove &
3111 +                       (MSG_QUEUE_SIZE - 1)];
3112 +               user_service->msg_remove++;
3113 +               spin_unlock(&msg_queue_spinlock);
3114 +
3115 +               up(&user_service->remove_event);
3116 +               if (header == NULL)
3117 +                       ret = -ENOTCONN;
3118 +               else if (header->size <= args.bufsize) {
3119 +                       /* Copy to user space if msgbuf is not NULL */
3120 +                       if ((args.buf == NULL) ||
3121 +                               (copy_to_user((void __user *)args.buf,
3122 +                               header->data,
3123 +                               header->size) == 0)) {
3124 +                               ret = header->size;
3125 +                               vchiq_release_message(
3126 +                                       service->handle,
3127 +                                       header);
3128 +                       } else
3129 +                               ret = -EFAULT;
3130 +               } else {
3131 +                       vchiq_log_error(vchiq_arm_log_level,
3132 +                               "header %x: bufsize %x < size %x",
3133 +                               (unsigned int)header, args.bufsize,
3134 +                               header->size);
3135 +                       WARN(1, "invalid size\n");
3136 +                       ret = -EMSGSIZE;
3137 +               }
3138 +               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3139 +       } break;
3140 +
3141 +       case VCHIQ_IOC_GET_CLIENT_ID: {
3142 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3143 +
3144 +               ret = vchiq_get_client_id(handle);
3145 +       } break;
3146 +
3147 +       case VCHIQ_IOC_GET_CONFIG: {
3148 +               VCHIQ_GET_CONFIG_T args;
3149 +               VCHIQ_CONFIG_T config;
3150 +
3151 +               if (copy_from_user(&args, (const void __user *)arg,
3152 +                       sizeof(args)) != 0) {
3153 +                       ret = -EFAULT;
3154 +                       break;
3155 +               }
3156 +               if (args.config_size > sizeof(config)) {
3157 +                       ret = -EINVAL;
3158 +                       break;
3159 +               }
3160 +               status = vchiq_get_config(instance, args.config_size, &config);
3161 +               if (status == VCHIQ_SUCCESS) {
3162 +                       if (copy_to_user((void __user *)args.pconfig,
3163 +                                   &config, args.config_size) != 0) {
3164 +                               ret = -EFAULT;
3165 +                               break;
3166 +                       }
3167 +               }
3168 +       } break;
3169 +
3170 +       case VCHIQ_IOC_SET_SERVICE_OPTION: {
3171 +               VCHIQ_SET_SERVICE_OPTION_T args;
3172 +
3173 +               if (copy_from_user(
3174 +                       &args, (const void __user *)arg,
3175 +                       sizeof(args)) != 0) {
3176 +                       ret = -EFAULT;
3177 +                       break;
3178 +               }
3179 +
3180 +               service = find_service_for_instance(instance, args.handle);
3181 +               if (!service) {
3182 +                       ret = -EINVAL;
3183 +                       break;
3184 +               }
3185 +
3186 +               status = vchiq_set_service_option(
3187 +                               args.handle, args.option, args.value);
3188 +       } break;
3189 +
3190 +       case VCHIQ_IOC_DUMP_PHYS_MEM: {
3191 +               VCHIQ_DUMP_MEM_T  args;
3192 +
3193 +               if (copy_from_user
3194 +                        (&args, (const void __user *)arg,
3195 +                         sizeof(args)) != 0) {
3196 +                       ret = -EFAULT;
3197 +                       break;
3198 +               }
3199 +               dump_phys_mem(args.virt_addr, args.num_bytes);
3200 +       } break;
3201 +
3202 +       default:
3203 +               ret = -ENOTTY;
3204 +               break;
3205 +       }
3206 +
3207 +       if (service)
3208 +               unlock_service(service);
3209 +
3210 +       if (ret == 0) {
3211 +               if (status == VCHIQ_ERROR)
3212 +                       ret = -EIO;
3213 +               else if (status == VCHIQ_RETRY)
3214 +                       ret = -EINTR;
3215 +       }
3216 +
3217 +       if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3218 +               (ret != -EWOULDBLOCK))
3219 +               vchiq_log_info(vchiq_arm_log_level,
3220 +                       "  ioctl instance %lx, cmd %s -> status %d, %ld",
3221 +                       (unsigned long)instance,
3222 +                       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3223 +                               ioctl_names[_IOC_NR(cmd)] :
3224 +                               "<invalid>",
3225 +                       status, ret);
3226 +       else
3227 +               vchiq_log_trace(vchiq_arm_log_level,
3228 +                       "  ioctl instance %lx, cmd %s -> status %d, %ld",
3229 +                       (unsigned long)instance,
3230 +                       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3231 +                               ioctl_names[_IOC_NR(cmd)] :
3232 +                               "<invalid>",
3233 +                       status, ret);
3234 +
3235 +       return ret;
3236 +}
3237 +
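For orientation, here is a rough user-space sketch of driving this ioctl interface: open the device node, issue VCHIQ_IOC_CONNECT once, then make further requests such as VCHIQ_IOC_GET_CONFIG. The device node name, the header names and the config field printed are assumptions for illustration; only the ioctl command names and the args fields appear in this patch.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "vchiq_ioctl.h"        /* assumed to provide the VCHIQ_IOC_* macros */
    #include "vchiq_if.h"           /* assumed to provide VCHIQ_CONFIG_T */

    int main(void)
    {
            VCHIQ_GET_CONFIG_T args;
            VCHIQ_CONFIG_T config;
            int fd = open("/dev/vchiq", O_RDWR);    /* node name assumed */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, VCHIQ_IOC_CONNECT) != 0) {   /* connect before other calls */
                    close(fd);
                    return 1;
            }
            args.config_size = sizeof(config);
            args.pconfig = &config;
            if (ioctl(fd, VCHIQ_IOC_GET_CONFIG, &args) == 0)
                    printf("max_msg_size: %d\n", config.max_msg_size); /* field name assumed */
            close(fd);
            return 0;
    }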
3238 +/****************************************************************************
3239 +*
3240 +*   vchiq_open
3241 +*
3242 +***************************************************************************/
3243 +
3244 +static int
3245 +vchiq_open(struct inode *inode, struct file *file)
3246 +{
3247 +       int dev = iminor(inode) & 0x0f;
3248 +       vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3249 +       switch (dev) {
3250 +       case VCHIQ_MINOR: {
3251 +               int ret;
3252 +               VCHIQ_STATE_T *state = vchiq_get_state();
3253 +               VCHIQ_INSTANCE_T instance;
3254 +
3255 +               if (!state) {
3256 +                       vchiq_log_error(vchiq_arm_log_level,
3257 +                               "vchiq has no connection to VideoCore");
3258 +                       return -ENOTCONN;
3259 +               }
3260 +
3261 +               instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3262 +               if (!instance)
3263 +                       return -ENOMEM;
3264 +
3265 +               instance->state = state;
3266 +               instance->pid = current->tgid;
3267 +
3268 +               ret = vchiq_proc_add_instance(instance);
3269 +               if (ret != 0) {
3270 +                       kfree(instance);
3271 +                       return ret;
3272 +               }
3273 +
3274 +               sema_init(&instance->insert_event, 0);
3275 +               sema_init(&instance->remove_event, 0);
3276 +               mutex_init(&instance->completion_mutex);
3277 +               mutex_init(&instance->bulk_waiter_list_mutex);
3278 +               INIT_LIST_HEAD(&instance->bulk_waiter_list);
3279 +
3280 +               file->private_data = instance;
3281 +       } break;
3282 +
3283 +       default:
3284 +               vchiq_log_error(vchiq_arm_log_level,
3285 +                       "Unknown minor device: %d", dev);
3286 +               return -ENXIO;
3287 +       }
3288 +
3289 +       return 0;
3290 +}
3291 +
3292 +/****************************************************************************
3293 +*
3294 +*   vchiq_release
3295 +*
3296 +***************************************************************************/
3297 +
3298 +static int
3299 +vchiq_release(struct inode *inode, struct file *file)
3300 +{
3301 +       int dev = iminor(inode) & 0x0f;
3302 +       int ret = 0;
3303 +       switch (dev) {
3304 +       case VCHIQ_MINOR: {
3305 +               VCHIQ_INSTANCE_T instance = file->private_data;
3306 +               VCHIQ_STATE_T *state = vchiq_get_state();
3307 +               VCHIQ_SERVICE_T *service;
3308 +               int i;
3309 +
3310 +               vchiq_log_info(vchiq_arm_log_level,
3311 +                       "vchiq_release: instance=%lx",
3312 +                       (unsigned long)instance);
3313 +
3314 +               if (!state) {
3315 +                       ret = -EPERM;
3316 +                       goto out;
3317 +               }
3318 +
3319 +               /* Ensure videocore is awake to allow termination. */
3320 +               vchiq_use_internal(instance->state, NULL,
3321 +                               USE_TYPE_VCHIQ);
3322 +
3323 +               mutex_lock(&instance->completion_mutex);
3324 +
3325 +               /* Wake the completion thread and ask it to exit */
3326 +               instance->closing = 1;
3327 +               up(&instance->insert_event);
3328 +
3329 +               mutex_unlock(&instance->completion_mutex);
3330 +
3331 +               /* Wake the slot handler if the completion queue is full. */
3332 +               up(&instance->remove_event);
3333 +
3334 +               /* Mark all services for termination... */
3335 +               i = 0;
3336 +               while ((service = next_service_by_instance(state, instance,
3337 +                       &i)) != NULL) {
3338 +                       USER_SERVICE_T *user_service = service->base.userdata;
3339 +
3340 +                       /* Wake the slot handler if the msg queue is full. */
3341 +                       up(&user_service->remove_event);
3342 +
3343 +                       vchiq_terminate_service_internal(service);
3344 +                       unlock_service(service);
3345 +               }
3346 +
3347 +               /* ...and wait for them to die */
3348 +               i = 0;
3349 +               while ((service = next_service_by_instance(state, instance, &i))
3350 +                       != NULL) {
3351 +                       USER_SERVICE_T *user_service = service->base.userdata;
3352 +
3353 +                       down(&service->remove_event);
3354 +
3355 +                       BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3356 +
3357 +                       spin_lock(&msg_queue_spinlock);
3358 +
3359 +                       while (user_service->msg_remove !=
3360 +                               user_service->msg_insert) {
3361 +                               VCHIQ_HEADER_T *header = user_service->
3362 +                                       msg_queue[user_service->msg_remove &
3363 +                                               (MSG_QUEUE_SIZE - 1)];
3364 +                               user_service->msg_remove++;
3365 +                               spin_unlock(&msg_queue_spinlock);
3366 +
3367 +                               if (header)
3368 +                                       vchiq_release_message(
3369 +                                               service->handle,
3370 +                                               header);
3371 +                               spin_lock(&msg_queue_spinlock);
3372 +                       }
3373 +
3374 +                       spin_unlock(&msg_queue_spinlock);
3375 +
3376 +                       unlock_service(service);
3377 +               }
3378 +
3379 +               /* Release any closed services */
3380 +               while (instance->completion_remove !=
3381 +                       instance->completion_insert) {
3382 +                       VCHIQ_COMPLETION_DATA_T *completion;
3383 +                       VCHIQ_SERVICE_T *service;
3384 +                       completion = &instance->completions[
3385 +                               instance->completion_remove &
3386 +                               (MAX_COMPLETIONS - 1)];
3387 +                       service = completion->service_userdata;
3388 +                       if (completion->reason == VCHIQ_SERVICE_CLOSED)
3389 +                               unlock_service(service);
3390 +                       instance->completion_remove++;
3391 +               }
3392 +
3393 +               /* Release the PEER service count. */
3394 +               vchiq_release_internal(instance->state, NULL);
3395 +
3396 +               {
3397 +                       struct list_head *pos, *next;
3398 +                       list_for_each_safe(pos, next,
3399 +                               &instance->bulk_waiter_list) {
3400 +                               struct bulk_waiter_node *waiter;
3401 +                               waiter = list_entry(pos,
3402 +                                       struct bulk_waiter_node,
3403 +                                       list);
3404 +                               list_del(pos);
3405 +                               vchiq_log_info(vchiq_arm_log_level,
3406 +                                       "bulk_waiter - cleaned up %x "
3407 +                                       "for pid %d",
3408 +                                       (unsigned int)waiter, waiter->pid);
3409 +                               kfree(waiter);
3410 +                       }
3411 +               }
3412 +
3413 +               vchiq_proc_remove_instance(instance);
3414 +
3415 +               kfree(instance);
3416 +               file->private_data = NULL;
3417 +       } break;
3418 +
3419 +       default:
3420 +               vchiq_log_error(vchiq_arm_log_level,
3421 +                       "Unknown minor device: %d", dev);
3422 +               ret = -ENXIO;
3423 +       }
3424 +
3425 +out:
3426 +       return ret;
3427 +}
3428 +
3429 +/****************************************************************************
3430 +*
3431 +*   vchiq_dump
3432 +*
3433 +***************************************************************************/
3434 +
3435 +void
3436 +vchiq_dump(void *dump_context, const char *str, int len)
3437 +{
3438 +       DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3439 +
3440 +       if (context->actual < context->space) {
3441 +               int copy_bytes;
3442 +               if (context->offset > 0) {
3443 +                       int skip_bytes = min(len, (int)context->offset);
3444 +                       str += skip_bytes;
3445 +                       len -= skip_bytes;
3446 +                       context->offset -= skip_bytes;
3447 +                       if (context->offset > 0)
3448 +                               return;
3449 +               }
3450 +               copy_bytes = min(len, (int)(context->space - context->actual));
3451 +               if (copy_bytes == 0)
3452 +                       return;
3453 +               if (copy_to_user(context->buf + context->actual, str,
3454 +                       copy_bytes))
3455 +                       context->actual = -EFAULT;
3456 +               context->actual += copy_bytes;
3457 +               len -= copy_bytes;
3458 +
3459 +               /* If the terminating NUL is included in the length, then it
3460 +               ** marks the end of a line and should be replaced with a
3461 +               ** carriage return. */
3462 +               if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3463 +                       char cr = '\n';
3464 +                       if (copy_to_user(context->buf + context->actual - 1,
3465 +                               &cr, 1))
3466 +                               context->actual = -EFAULT;
3467 +               }
3468 +       }
3469 +}
3470 +
3471 +/****************************************************************************
3472 +*
3473 +*   vchiq_dump_platform_instances
3474 +*
3475 +***************************************************************************/
3476 +
3477 +void
3478 +vchiq_dump_platform_instances(void *dump_context)
3479 +{
3480 +       VCHIQ_STATE_T *state = vchiq_get_state();
3481 +       char buf[80];
3482 +       int len;
3483 +       int i;
3484 +
3485 +       /* There is no list of instances, so instead scan all services,
3486 +               marking those that have been dumped. */
3487 +
3488 +       for (i = 0; i < state->unused_service; i++) {
3489 +               VCHIQ_SERVICE_T *service = state->services[i];
3490 +               VCHIQ_INSTANCE_T instance;
3491 +
3492 +               if (service && (service->base.callback == service_callback)) {
3493 +                       instance = service->instance;
3494 +                       if (instance)
3495 +                               instance->mark = 0;
3496 +               }
3497 +       }
3498 +
3499 +       for (i = 0; i < state->unused_service; i++) {
3500 +               VCHIQ_SERVICE_T *service = state->services[i];
3501 +               VCHIQ_INSTANCE_T instance;
3502 +
3503 +               if (service && (service->base.callback == service_callback)) {
3504 +                       instance = service->instance;
3505 +                       if (instance && !instance->mark) {
3506 +                               len = snprintf(buf, sizeof(buf),
3507 +                                       "Instance %x: pid %d,%s completions "
3508 +                                               "%d/%d",
3509 +                                       (unsigned int)instance, instance->pid,
3510 +                                       instance->connected ? " connected, " :
3511 +                                               "",
3512 +                                       instance->completion_insert -
3513 +                                               instance->completion_remove,
3514 +                                       MAX_COMPLETIONS);
3515 +
3516 +                               vchiq_dump(dump_context, buf, len + 1);
3517 +
3518 +                               instance->mark = 1;
3519 +                       }
3520 +               }
3521 +       }
3522 +}
3523 +
3524 +/****************************************************************************
3525 +*
3526 +*   vchiq_dump_platform_service_state
3527 +*
3528 +***************************************************************************/
3529 +
3530 +void
3531 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3532 +{
3533 +       USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3534 +       char buf[80];
3535 +       int len;
3536 +
3537 +       len = snprintf(buf, sizeof(buf), "  instance %x",
3538 +               (unsigned int)service->instance);
3539 +
3540 +       if ((service->base.callback == service_callback) &&
3541 +               user_service->is_vchi) {
3542 +               len += snprintf(buf + len, sizeof(buf) - len,
3543 +                       ", %d/%d messages",
3544 +                       user_service->msg_insert - user_service->msg_remove,
3545 +                       MSG_QUEUE_SIZE);
3546 +
3547 +               if (user_service->dequeue_pending)
3548 +                       len += snprintf(buf + len, sizeof(buf) - len,
3549 +                               " (dequeue pending)");
3550 +       }
3551 +
3552 +       vchiq_dump(dump_context, buf, len + 1);
3553 +}
3554 +
3555 +/****************************************************************************
3556 +*
3557 +*   dump_phys_mem
3558 +*
3559 +***************************************************************************/
3560 +
3561 +static void
3562 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3563 +{
3564 +       int            rc;
3565 +       uint8_t       *end_virt_addr = virt_addr + num_bytes;
3566 +       int            num_pages;
3567 +       int            offset;
3568 +       int            end_offset;
3569 +       int            page_idx;
3570 +       int            prev_idx;
3571 +       struct page   *page;
3572 +       struct page  **pages;
3573 +       uint8_t       *kmapped_virt_ptr;
3574 +
3575 +       /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
3576 +
3577 +       virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3578 +       end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3579 +               ~0x0fuL);
3580 +
3581 +       offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3582 +       end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3583 +
3584 +       num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3585 +
3586 +       pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3587 +       if (pages == NULL) {
3588 +               vchiq_log_error(vchiq_arm_log_level,
3589 +                       "Unable to allocate memory for %d pages\n",
3590 +                       num_pages);
3591 +               return;
3592 +       }
3593 +
3594 +       down_read(&current->mm->mmap_sem);
3595 +       rc = get_user_pages(current,      /* task */
3596 +               current->mm,              /* mm */
3597 +               (unsigned long)virt_addr, /* start */
3598 +               num_pages,                /* len */
3599 +               0,                        /* write */
3600 +               0,                        /* force */
3601 +               pages,                    /* pages (array of page pointers) */
3602 +               NULL);                    /* vmas */
3603 +       up_read(&current->mm->mmap_sem);
3604 +
3605 +       prev_idx = -1;
3606 +       page = NULL;
3607 +
3608 +       while (offset < end_offset) {
3609 +
3610 +               int page_offset = offset % PAGE_SIZE;
3611 +               page_idx = offset / PAGE_SIZE;
3612 +
3613 +               if (page_idx != prev_idx) {
3614 +
3615 +                       if (page != NULL)
3616 +                               kunmap(page);
3617 +                       page = pages[page_idx];
3618 +                       kmapped_virt_ptr = kmap(page);
3619 +
3620 +                       prev_idx = page_idx;
3621 +               }
3622 +
3623 +               if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3624 +                       vchiq_log_dump_mem("ph",
3625 +                               (uint32_t)(unsigned long)&kmapped_virt_ptr[
3626 +                                       page_offset],
3627 +                               &kmapped_virt_ptr[page_offset], 16);
3628 +
3629 +               offset += 16;
3630 +       }
3631 +       if (page != NULL)
3632 +               kunmap(page);
3633 +
3634 +       for (page_idx = 0; page_idx < num_pages; page_idx++)
3635 +               page_cache_release(pages[page_idx]);
3636 +
3637 +       kfree(pages);
3638 +}
3639 +
3640 +/****************************************************************************
3641 +*
3642 +*   vchiq_read
3643 +*
3644 +***************************************************************************/
3645 +
3646 +static ssize_t
3647 +vchiq_read(struct file *file, char __user *buf,
3648 +       size_t count, loff_t *ppos)
3649 +{
3650 +       DUMP_CONTEXT_T context;
3651 +       context.buf = buf;
3652 +       context.actual = 0;
3653 +       context.space = count;
3654 +       context.offset = *ppos;
3655 +
3656 +       vchiq_dump_state(&context, &g_state);
3657 +
3658 +       *ppos += context.actual;
3659 +
3660 +       return context.actual;
3661 +}
3662 +
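Because vchiq_read fills the user buffer from vchiq_dump_state and advances *ppos by the number of bytes produced, repeated reads simply page through a human-readable state dump until it is exhausted. A hedged user-space sketch (the /dev/vchiq node name is an assumption here):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/dev/vchiq", O_RDONLY);

            if (fd < 0)
                    return 1;
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);      /* emit the state dump */
            close(fd);
            return 0;
    }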
3663 +VCHIQ_STATE_T *
3664 +vchiq_get_state(void)
3665 +{
3666 +
3667 +       if (g_state.remote == NULL)
3668 +               printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3669 +       else if (g_state.remote->initialised != 1)
3670 +               printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3671 +                       __func__, g_state.remote->initialised);
3672 +
3673 +       return ((g_state.remote != NULL) &&
3674 +               (g_state.remote->initialised == 1)) ? &g_state : NULL;
3675 +}
3676 +
3677 +static const struct file_operations
3678 +vchiq_fops = {
3679 +       .owner = THIS_MODULE,
3680 +       .unlocked_ioctl = vchiq_ioctl,
3681 +       .open = vchiq_open,
3682 +       .release = vchiq_release,
3683 +       .read = vchiq_read
3684 +};
3685 +
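vchiq_fops, together with the vchiq_class and vchiq_dev statics declared at the top of this hunk, follows the usual character-device pattern; the actual registration code sits later in the file, outside this excerpt. As a generic, hedged sketch only (the call sequence below is an assumption and error handling is trimmed), wiring such a file_operations table to a /dev node typically looks like:

    #include <linux/device.h>
    #include <linux/fs.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
            int major = register_chrdev(0, "vchiq", &vchiq_fops);

            if (major < 0)
                    return major;
            vchiq_class = class_create(THIS_MODULE, "vchiq");
            vchiq_dev = device_create(vchiq_class, NULL, MKDEV(major, 0),
                                      NULL, "vchiq");
            return 0;
    }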
3686 +/*
3687 + * Autosuspend related functionality
3688 + */
3689 +
3690 +int
3691 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3692 +{
3693 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3694 +       if (!arm_state)
3695 +               /* autosuspend not supported - always return wanted */
3696 +               return 1;
3697 +       else if (arm_state->blocked_count)
3698 +               return 1;
3699 +       else if (!arm_state->videocore_use_count)
3700 +               /* usage count zero - check for override unless we're forcing */
3701 +               if (arm_state->resume_blocked)
3702 +                       return 0;
3703 +               else
3704 +                       return vchiq_platform_videocore_wanted(state);
3705 +       else
3706 +               /* non-zero usage count - videocore still required */
3707 +               return 1;
3708 +}
3709 +
3710 +static VCHIQ_STATUS_T
3711 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3712 +       VCHIQ_HEADER_T *header,
3713 +       VCHIQ_SERVICE_HANDLE_T service_user,
3714 +       void *bulk_user)
3715 +{
3716 +       vchiq_log_error(vchiq_susp_log_level,
3717 +               "%s callback reason %d", __func__, reason);
3718 +       return 0;
3719 +}
3720 +
3721 +static int
3722 +vchiq_keepalive_thread_func(void *v)
3723 +{
3724 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3725 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3726 +
3727 +       VCHIQ_STATUS_T status;
3728 +       VCHIQ_INSTANCE_T instance;
3729 +       VCHIQ_SERVICE_HANDLE_T ka_handle;
3730 +
3731 +       VCHIQ_SERVICE_PARAMS_T params = {
3732 +               .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3733 +               .callback    = vchiq_keepalive_vchiq_callback,
3734 +               .version     = KEEPALIVE_VER,
3735 +               .version_min = KEEPALIVE_VER_MIN
3736 +       };
3737 +
3738 +       status = vchiq_initialise(&instance);
3739 +       if (status != VCHIQ_SUCCESS) {
3740 +               vchiq_log_error(vchiq_susp_log_level,
3741 +                       "%s vchiq_initialise failed %d", __func__, status);
3742 +               goto exit;
3743 +       }
3744 +
3745 +       status = vchiq_connect(instance);
3746 +       if (status != VCHIQ_SUCCESS) {
3747 +               vchiq_log_error(vchiq_susp_log_level,
3748 +                       "%s vchiq_connect failed %d", __func__, status);
3749 +               goto shutdown;
3750 +       }
3751 +
3752 +       status = vchiq_add_service(instance, &params, &ka_handle);
3753 +       if (status != VCHIQ_SUCCESS) {
3754 +               vchiq_log_error(vchiq_susp_log_level,
3755 +                       "%s vchiq_open_service failed %d", __func__, status);
3756 +               goto shutdown;
3757 +       }
3758 +
3759 +       while (1) {
3760 +               long rc = 0, uc = 0;
3761 +               if (wait_for_completion_interruptible(&arm_state->ka_evt)
3762 +                               != 0) {
3763 +                       vchiq_log_error(vchiq_susp_log_level,
3764 +                               "%s interrupted", __func__);
3765 +                       flush_signals(current);
3766 +                       continue;
3767 +               }
3768 +
3769 +               /* read and clear counters.  Do release_count then use_count to
3770 +                * prevent getting more releases than uses */
3771 +               rc = atomic_xchg(&arm_state->ka_release_count, 0);
3772 +               uc = atomic_xchg(&arm_state->ka_use_count, 0);
3773 +
3774 +               /* Call use/release service the requisite number of times.
3775 +                * Process use before release so use counts don't go negative */
3776 +               while (uc--) {
3777 +                       atomic_inc(&arm_state->ka_use_ack_count);
3778 +                       status = vchiq_use_service(ka_handle);
3779 +                       if (status != VCHIQ_SUCCESS) {
3780 +                               vchiq_log_error(vchiq_susp_log_level,
3781 +                                       "%s vchiq_use_service error %d",
3782 +                                       __func__, status);
3783 +                       }
3784 +               }
3785 +               while (rc--) {
3786 +                       status = vchiq_release_service(ka_handle);
3787 +                       if (status != VCHIQ_SUCCESS) {
3788 +                               vchiq_log_error(vchiq_susp_log_level,
3789 +                                       "%s vchiq_release_service error %d",
3790 +                                       __func__, status);
3791 +                       }
3792 +               }
3793 +       }
3794 +
3795 +shutdown:
3796 +       vchiq_shutdown(instance);
3797 +exit:
3798 +       return 0;
3799 +}
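+/*
+ * Illustrative note, not part of the original patch: the keep-alive thread
+ * above is normally fed by the remote use/release notifications defined
+ * later in this file, roughly:
+ *
+ *     atomic_inc(&arm_state->ka_use_count);   // vchiq_on_remote_use()
+ *     complete(&arm_state->ka_evt);           // wake the keep-alive thread
+ *
+ * The thread then drains both counters with atomic_xchg() and calls
+ * vchiq_use_service()/vchiq_release_service() on the "KEEP" service the
+ * corresponding number of times, processing uses before releases.
+ */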
3800 +
3801 +
3802 +
3803 +VCHIQ_STATUS_T
3804 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3805 +{
3806 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3807 +
3808 +       if (arm_state) {
3809 +               rwlock_init(&arm_state->susp_res_lock);
3810 +
3811 +               init_completion(&arm_state->ka_evt);
3812 +               atomic_set(&arm_state->ka_use_count, 0);
3813 +               atomic_set(&arm_state->ka_use_ack_count, 0);
3814 +               atomic_set(&arm_state->ka_release_count, 0);
3815 +
3816 +               init_completion(&arm_state->vc_suspend_complete);
3817 +
3818 +               init_completion(&arm_state->vc_resume_complete);
3819 +               /* Initialise to 'done' state.  We only want to block on resume
3820 +                * completion while videocore is suspended. */
3821 +               set_resume_state(arm_state, VC_RESUME_RESUMED);
3822 +
3823 +               init_completion(&arm_state->resume_blocker);
3824 +               /* Initialise to 'done' state.  We only want to block on this
3825 +                * completion while resume is blocked */
3826 +               complete_all(&arm_state->resume_blocker);
3827 +
3828 +               init_completion(&arm_state->blocked_blocker);
3829 +               /* Initialise to 'done' state.  We only want to block on this
3830 +                * completion while things are waiting on the resume blocker */
3831 +               complete_all(&arm_state->blocked_blocker);
3832 +
3833 +               arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
3834 +               arm_state->suspend_timer_running = 0;
3835 +               init_timer(&arm_state->suspend_timer);
3836 +               arm_state->suspend_timer.data = (unsigned long)(state);
3837 +               arm_state->suspend_timer.function = suspend_timer_callback;
3838 +
3839 +               arm_state->first_connect = 0;
3840 +
3841 +       }
3842 +       return status;
3843 +}
3844 +
3845 +/*
3846 +** Functions to modify the state variables;
3847 +**     set_suspend_state
3848 +**     set_resume_state
3849 +**
3850 +** There are more state variables than we might like, so ensure they remain in
3851 +** step.  Suspend and resume state are maintained separately, since most of
3852 +** these state machines can operate independently.  However, there are a few
3853 +** states where state transitions in one state machine cause a reset to the
3854 +** other state machine.  In addition, there are some completion events which
3855 +** need to occur on state machine reset and end-state(s), so these are also
3856 +** dealt with in these functions.
3857 +**
3858 +** In all states we set the state variable according to the input, but in some
3859 +** cases we perform additional steps outlined below;
3860 +**
3861 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
3862 +**                     The suspend completion is completed after any suspend
3863 +**                     attempt.  When we reset the state machine we also reset
3864 +**                     the completion.  This reset occurs when videocore is
3865 +**                     resumed, and also if we initiate suspend after a suspend
3866 +**                     failure.
3867 +**
3868 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
3869 +**                     suspend - ie from this point on we must try to suspend
3870 +**                     before resuming can occur.  We therefore also reset the
3871 +**                     resume state machine to VC_RESUME_IDLE in this state.
3872 +**
3873 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
3874 +**                     complete_all on the suspend completion to notify
3875 +**                     anything waiting for suspend to happen.
3876 +**
3877 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
3878 +**                     initiate resume, so no need to alter resume state.
3879 +**                     We call complete_all on the suspend completion to notify
3880 +**                     of suspend rejection.
3881 +**
3882 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
3883 +**                     suspend completion and reset the resume state machine.
3884 +**
3885 +** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
3886 +**                     resume completion is in its 'done' state whenever
3887 +**                     videocore is running.  Therefore, the VC_RESUME_IDLE state
3888 +**                     implies that videocore is suspended.
3889 +**                     Hence, any thread which needs to wait until videocore is
3890 +**                     running can wait on this completion - it will only block
3891 +**                     if videocore is suspended.
3892 +**
3893 +** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
3894 +**                     Call complete_all on the resume completion to unblock
3895 +**                     any threads waiting for resume.  Also reset the suspend
3896 +**                     state machine to its idle state.
3897 +**
3898 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
3899 +*/
3900 +
3901 +inline void
3902 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
3903 +       enum vc_suspend_status new_state)
3904 +{
3905 +       /* set the state in all cases */
3906 +       arm_state->vc_suspend_state = new_state;
3907 +
3908 +       /* state specific additional actions */
3909 +       switch (new_state) {
3910 +       case VC_SUSPEND_FORCE_CANCELED:
3911 +               complete_all(&arm_state->vc_suspend_complete);
3912 +               break;
3913 +       case VC_SUSPEND_REJECTED:
3914 +               complete_all(&arm_state->vc_suspend_complete);
3915 +               break;
3916 +       case VC_SUSPEND_FAILED:
3917 +               complete_all(&arm_state->vc_suspend_complete);
3918 +               arm_state->vc_resume_state = VC_RESUME_RESUMED;
3919 +               complete_all(&arm_state->vc_resume_complete);
3920 +               break;
3921 +       case VC_SUSPEND_IDLE:
3922 +               reinit_completion(&arm_state->vc_suspend_complete);
3923 +               break;
3924 +       case VC_SUSPEND_REQUESTED:
3925 +               break;
3926 +       case VC_SUSPEND_IN_PROGRESS:
3927 +               set_resume_state(arm_state, VC_RESUME_IDLE);
3928 +               break;
3929 +       case VC_SUSPEND_SUSPENDED:
3930 +               complete_all(&arm_state->vc_suspend_complete);
3931 +               break;
3932 +       default:
3933 +               BUG();
3934 +               break;
3935 +       }
3936 +}
3937 +
3938 +inline void
3939 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
3940 +       enum vc_resume_status new_state)
3941 +{
3942 +       /* set the state in all cases */
3943 +       arm_state->vc_resume_state = new_state;
3944 +
3945 +       /* state specific additional actions */
3946 +       switch (new_state) {
3947 +       case VC_RESUME_FAILED:
3948 +               break;
3949 +       case VC_RESUME_IDLE:
3950 +               reinit_completion(&arm_state->vc_resume_complete);
3951 +               break;
3952 +       case VC_RESUME_REQUESTED:
3953 +               break;
3954 +       case VC_RESUME_IN_PROGRESS:
3955 +               break;
3956 +       case VC_RESUME_RESUMED:
3957 +               complete_all(&arm_state->vc_resume_complete);
3958 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
3959 +               break;
3960 +       default:
3961 +               BUG();
3962 +               break;
3963 +       }
3964 +}
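+/*
+ * Illustrative sketch, not part of the original patch: assuming the slot
+ * handler services each request_poll(), a complete suspend/resume cycle
+ * drives the two setters above roughly in this order:
+ *
+ *     set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);   // vchiq_arm_vcsuspend()
+ *     set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS); // also resets resume to IDLE
+ *     set_suspend_state(arm_state, VC_SUSPEND_SUSPENDED);   // wakes suspend waiters
+ *     ...
+ *     set_resume_state(arm_state, VC_RESUME_REQUESTED);     // vchiq_check_resume()
+ *     set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);   // vchiq_platform_check_resume()
+ *     set_resume_state(arm_state, VC_RESUME_RESUMED);       // wakes resume waiters,
+ *                                                           // resets suspend to IDLE
+ */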
3965 +
3966 +
3967 +/* should be called with the write lock held */
3968 +inline void
3969 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3970 +{
3971 +       del_timer(&arm_state->suspend_timer);
3972 +       arm_state->suspend_timer.expires = jiffies +
3973 +               msecs_to_jiffies(arm_state->
3974 +                       suspend_timer_timeout);
3975 +       add_timer(&arm_state->suspend_timer);
3976 +       arm_state->suspend_timer_running = 1;
3977 +}
3978 +
3979 +/* should be called with the write lock held */
3980 +static inline void
3981 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3982 +{
3983 +       if (arm_state->suspend_timer_running) {
3984 +               del_timer(&arm_state->suspend_timer);
3985 +               arm_state->suspend_timer_running = 0;
3986 +       }
3987 +}
3988 +
3989 +static inline int
3990 +need_resume(VCHIQ_STATE_T *state)
3991 +{
3992 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3993 +       return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
3994 +                       (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
3995 +                       vchiq_videocore_wanted(state);
3996 +}
3997 +
3998 +static int
3999 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
4000 +{
4001 +       int status = VCHIQ_SUCCESS;
4002 +       const unsigned long timeout_val =
4003 +                               msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
4004 +       int resume_count = 0;
4005 +
4006 +       /* Allow any threads which were blocked by the last force suspend to
4007 +        * complete if they haven't already.  Only give this one shot; if
4008 +        * blocked_count is incremented after blocked_blocker is completed
4009 +        * (which only happens when blocked_count hits 0) then those threads
4010 +        * will have to wait until next time around */
4011 +       if (arm_state->blocked_count) {
4012 +               reinit_completion(&arm_state->blocked_blocker);
4013 +               write_unlock_bh(&arm_state->susp_res_lock);
4014 +               vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
4015 +                       "blocked clients", __func__);
4016 +               if (wait_for_completion_interruptible_timeout(
4017 +                               &arm_state->blocked_blocker, timeout_val)
4018 +                                       <= 0) {
4019 +                       vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4020 +                               "previously blocked clients failed" , __func__);
4021 +                       status = VCHIQ_ERROR;
4022 +                       write_lock_bh(&arm_state->susp_res_lock);
4023 +                       goto out;
4024 +               }
4025 +               vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
4026 +                       "clients resumed", __func__);
4027 +               write_lock_bh(&arm_state->susp_res_lock);
4028 +       }
4029 +
4030 +       /* We need to wait for resume to complete if it's in process */
4031 +       while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
4032 +                       arm_state->vc_resume_state > VC_RESUME_IDLE) {
4033 +               if (resume_count > 1) {
4034 +                       status = VCHIQ_ERROR;
4035 +                       vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4036 +                               "many times for resume" , __func__);
4037 +                       goto out;
4038 +               }
4039 +               write_unlock_bh(&arm_state->susp_res_lock);
4040 +               vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4041 +                       __func__);
4042 +               if (wait_for_completion_interruptible_timeout(
4043 +                               &arm_state->vc_resume_complete, timeout_val)
4044 +                                       <= 0) {
4045 +                       vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4046 +                               "resume failed (%s)", __func__,
4047 +                               resume_state_names[arm_state->vc_resume_state +
4048 +                                                       VC_RESUME_NUM_OFFSET]);
4049 +                       status = VCHIQ_ERROR;
4050 +                       write_lock_bh(&arm_state->susp_res_lock);
4051 +                       goto out;
4052 +               }
4053 +               vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4054 +               write_lock_bh(&arm_state->susp_res_lock);
4055 +               resume_count++;
4056 +       }
4057 +       reinit_completion(&arm_state->resume_blocker);
4058 +       arm_state->resume_blocked = 1;
4059 +
4060 +out:
4061 +       return status;
4062 +}
4063 +
4064 +static inline void
4065 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4066 +{
4067 +       complete_all(&arm_state->resume_blocker);
4068 +       arm_state->resume_blocked = 0;
4069 +}
4070 +
4071 +/* Initiate suspend via slot handler. Should be called with the write lock
4072 + * held */
4073 +VCHIQ_STATUS_T
4074 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4075 +{
4076 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
4077 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4078 +
4079 +       if (!arm_state)
4080 +               goto out;
4081 +
4082 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4083 +       status = VCHIQ_SUCCESS;
4084 +
4085 +
4086 +       switch (arm_state->vc_suspend_state) {
4087 +       case VC_SUSPEND_REQUESTED:
4088 +               vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4089 +                       "requested", __func__);
4090 +               break;
4091 +       case VC_SUSPEND_IN_PROGRESS:
4092 +               vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4093 +                       "progress", __func__);
4094 +               break;
4095 +
4096 +       default:
4097 +               /* We don't expect to be in other states, so log but continue
4098 +                * anyway */
4099 +               vchiq_log_error(vchiq_susp_log_level,
4100 +                       "%s unexpected suspend state %s", __func__,
4101 +                       suspend_state_names[arm_state->vc_suspend_state +
4102 +                                               VC_SUSPEND_NUM_OFFSET]);
4103 +               /* fall through */
4104 +       case VC_SUSPEND_REJECTED:
4105 +       case VC_SUSPEND_FAILED:
4106 +               /* Ensure any idle state actions have been run */
4107 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4108 +               /* fall through */
4109 +       case VC_SUSPEND_IDLE:
4110 +               vchiq_log_info(vchiq_susp_log_level,
4111 +                       "%s: suspending", __func__);
4112 +               set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4113 +               /* kick the slot handler thread to initiate suspend */
4114 +               request_poll(state, NULL, 0);
4115 +               break;
4116 +       }
4117 +
4118 +out:
4119 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4120 +       return status;
4121 +}
4122 +
4123 +void
4124 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4125 +{
4126 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4127 +       int susp = 0;
4128 +
4129 +       if (!arm_state)
4130 +               goto out;
4131 +
4132 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4133 +
4134 +       write_lock_bh(&arm_state->susp_res_lock);
4135 +       if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4136 +                       arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4137 +               set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4138 +               susp = 1;
4139 +       }
4140 +       write_unlock_bh(&arm_state->susp_res_lock);
4141 +
4142 +       if (susp)
4143 +               vchiq_platform_suspend(state);
4144 +
4145 +out:
4146 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4147 +       return;
4148 +}
4149 +
4150 +
4151 +static void
4152 +output_timeout_error(VCHIQ_STATE_T *state)
4153 +{
4154 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4155 +       char service_err[50] = "";
4156 +       int vc_use_count = arm_state->videocore_use_count;
4157 +       int active_services = state->unused_service;
4158 +       int i;
4159 +
4160 +       if (!arm_state->videocore_use_count) {
4161 +               snprintf(service_err, 50, " Videocore usecount is 0");
4162 +               goto output_msg;
4163 +       }
4164 +       for (i = 0; i < active_services; i++) {
4165 +               VCHIQ_SERVICE_T *service_ptr = state->services[i];
4166 +               if (service_ptr && service_ptr->service_use_count &&
4167 +                       (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4168 +                       snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4169 +                               "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4170 +                                       service_ptr->base.fourcc),
4171 +                                service_ptr->client_id,
4172 +                                service_ptr->service_use_count,
4173 +                                service_ptr->service_use_count ==
4174 +                                        vc_use_count ? "" : " (+ more)");
4175 +                       break;
4176 +               }
4177 +       }
4178 +
4179 +output_msg:
4180 +       vchiq_log_error(vchiq_susp_log_level,
4181 +               "timed out waiting for vc suspend (%d).%s",
4182 +                arm_state->autosuspend_override, service_err);
4183 +
4184 +}
4185 +
4186 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4187 +** We don't actually force suspend, since videocore may get into a bad state
4188 +** if we force suspend at a bad time.  Instead, we wait for autosuspend to
4189 +** determine a good point to suspend.  If this doesn't happen within 100ms we
4190 +** report failure.
4191 +**
4192 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4193 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4194 +*/
4195 +VCHIQ_STATUS_T
4196 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4197 +{
4198 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4199 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
4200 +       long rc = 0;
4201 +       int repeat = -1;
4202 +
4203 +       if (!arm_state)
4204 +               goto out;
4205 +
4206 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4207 +
4208 +       write_lock_bh(&arm_state->susp_res_lock);
4209 +
4210 +       status = block_resume(arm_state);
4211 +       if (status != VCHIQ_SUCCESS)
4212 +               goto unlock;
4213 +       if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4214 +               /* Already suspended - just block resume and exit */
4215 +               vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4216 +                       __func__);
4217 +               status = VCHIQ_SUCCESS;
4218 +               goto unlock;
4219 +       } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4220 +               /* initiate suspend immediately in the case that we're waiting
4221 +                * for the timeout */
4222 +               stop_suspend_timer(arm_state);
4223 +               if (!vchiq_videocore_wanted(state)) {
4224 +                       vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4225 +                               "idle, initiating suspend", __func__);
4226 +                       status = vchiq_arm_vcsuspend(state);
4227 +               } else if (arm_state->autosuspend_override <
4228 +                                               FORCE_SUSPEND_FAIL_MAX) {
4229 +                       vchiq_log_info(vchiq_susp_log_level, "%s letting "
4230 +                               "videocore go idle", __func__);
4231 +                       status = VCHIQ_SUCCESS;
4232 +               } else {
4233 +                       vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4234 +                               "many times - attempting suspend", __func__);
4235 +                       status = vchiq_arm_vcsuspend(state);
4236 +               }
4237 +       } else {
4238 +               vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4239 +                       "in progress - wait for completion", __func__);
4240 +               status = VCHIQ_SUCCESS;
4241 +       }
4242 +
4243 +       /* Wait for suspend to happen due to system idle (not forced..) */
4244 +       if (status != VCHIQ_SUCCESS)
4245 +               goto unblock_resume;
4246 +
4247 +       do {
4248 +               write_unlock_bh(&arm_state->susp_res_lock);
4249 +
4250 +               rc = wait_for_completion_interruptible_timeout(
4251 +                               &arm_state->vc_suspend_complete,
4252 +                               msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4253 +
4254 +               write_lock_bh(&arm_state->susp_res_lock);
4255 +               if (rc < 0) {
4256 +                       vchiq_log_warning(vchiq_susp_log_level, "%s "
4257 +                               "interrupted waiting for suspend", __func__);
4258 +                       status = VCHIQ_ERROR;
4259 +                       goto unblock_resume;
4260 +               } else if (rc == 0) {
4261 +                       if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4262 +                               /* Repeat timeout once if in progress */
4263 +                               if (repeat < 0) {
4264 +                                       repeat = 1;
4265 +                                       continue;
4266 +                               }
4267 +                       }
4268 +                       arm_state->autosuspend_override++;
4269 +                       output_timeout_error(state);
4270 +
4271 +                       status = VCHIQ_RETRY;
4272 +                       goto unblock_resume;
4273 +               }
4274 +       } while (0 < (repeat--));
4275 +
4276 +       /* Check and report state in case we need to abort ARM suspend */
4277 +       if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4278 +               status = VCHIQ_RETRY;
4279 +               vchiq_log_error(vchiq_susp_log_level,
4280 +                       "%s videocore suspend failed (state %s)", __func__,
4281 +                       suspend_state_names[arm_state->vc_suspend_state +
4282 +                                               VC_SUSPEND_NUM_OFFSET]);
4283 +               /* Reset the state only if it's still in an error state.
4284 +                * Something could have already initiated another suspend. */
4285 +               if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4286 +                       set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4287 +
4288 +               goto unblock_resume;
4289 +       }
4290 +
4291 +       /* successfully suspended - unlock and exit */
4292 +       goto unlock;
4293 +
4294 +unblock_resume:
4295 +       /* all error states need to unblock resume before exit */
4296 +       unblock_resume(arm_state);
4297 +
4298 +unlock:
4299 +       write_unlock_bh(&arm_state->susp_res_lock);
4300 +
4301 +out:
4302 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4303 +       return status;
4304 +}
4305 +
4306 +void
4307 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4308 +{
4309 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4310 +
4311 +       if (!arm_state)
4312 +               goto out;
4313 +
4314 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4315 +
4316 +       write_lock_bh(&arm_state->susp_res_lock);
4317 +       if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4318 +                       arm_state->first_connect &&
4319 +                       !vchiq_videocore_wanted(state)) {
4320 +               vchiq_arm_vcsuspend(state);
4321 +       }
4322 +       write_unlock_bh(&arm_state->susp_res_lock);
4323 +
4324 +out:
4325 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4326 +       return;
4327 +}
4328 +
4329 +
4330 +int
4331 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4332 +{
4333 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4334 +       int resume = 0;
4335 +       int ret = -1;
4336 +
4337 +       if (!arm_state)
4338 +               goto out;
4339 +
4340 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4341 +
4342 +       write_lock_bh(&arm_state->susp_res_lock);
4343 +       unblock_resume(arm_state);
4344 +       resume = vchiq_check_resume(state);
4345 +       write_unlock_bh(&arm_state->susp_res_lock);
4346 +
4347 +       if (resume) {
4348 +               if (wait_for_completion_interruptible(
4349 +                       &arm_state->vc_resume_complete) < 0) {
4350 +                       vchiq_log_error(vchiq_susp_log_level,
4351 +                               "%s interrupted", __func__);
4352 +                       /* failed, cannot accurately derive suspend
4353 +                        * state, so exit early. */
4354 +                       goto out;
4355 +               }
4356 +       }
4357 +
4358 +       read_lock_bh(&arm_state->susp_res_lock);
4359 +       if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4360 +               vchiq_log_info(vchiq_susp_log_level,
4361 +                               "%s: Videocore remains suspended", __func__);
4362 +       } else {
4363 +               vchiq_log_info(vchiq_susp_log_level,
4364 +                               "%s: Videocore resumed", __func__);
4365 +               ret = 0;
4366 +       }
4367 +       read_unlock_bh(&arm_state->susp_res_lock);
4368 +out:
4369 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4370 +       return ret;
4371 +}
4372 +
4373 +/* This function should be called with the write lock held */
4374 +int
4375 +vchiq_check_resume(VCHIQ_STATE_T *state)
4376 +{
4377 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4378 +       int resume = 0;
4379 +
4380 +       if (!arm_state)
4381 +               goto out;
4382 +
4383 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4384 +
4385 +       if (need_resume(state)) {
4386 +               set_resume_state(arm_state, VC_RESUME_REQUESTED);
4387 +               request_poll(state, NULL, 0);
4388 +               resume = 1;
4389 +       }
4390 +
4391 +out:
4392 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4393 +       return resume;
4394 +}
4395 +
4396 +void
4397 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4398 +{
4399 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4400 +       int res = 0;
4401 +
4402 +       if (!arm_state)
4403 +               goto out;
4404 +
4405 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4406 +
4407 +       write_lock_bh(&arm_state->susp_res_lock);
4408 +       if (arm_state->wake_address == 0) {
4409 +               vchiq_log_info(vchiq_susp_log_level,
4410 +                                       "%s: already awake", __func__);
4411 +               goto unlock;
4412 +       }
4413 +       if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4414 +               vchiq_log_info(vchiq_susp_log_level,
4415 +                                       "%s: already resuming", __func__);
4416 +               goto unlock;
4417 +       }
4418 +
4419 +       if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4420 +               set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4421 +               res = 1;
4422 +       } else
4423 +               vchiq_log_trace(vchiq_susp_log_level,
4424 +                               "%s: not resuming (resume state %s)", __func__,
4425 +                               resume_state_names[arm_state->vc_resume_state +
4426 +                                                       VC_RESUME_NUM_OFFSET]);
4427 +
4428 +unlock:
4429 +       write_unlock_bh(&arm_state->susp_res_lock);
4430 +
4431 +       if (res)
4432 +               vchiq_platform_resume(state);
4433 +
4434 +out:
4435 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4436 +       return;
4437 +
4438 +}
4439 +
4440 +
4441 +
4442 +VCHIQ_STATUS_T
4443 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4444 +               enum USE_TYPE_E use_type)
4445 +{
4446 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4447 +       VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4448 +       char entity[16];
4449 +       int *entity_uc;
4450 +       int local_uc, local_entity_uc;
4451 +
4452 +       if (!arm_state)
4453 +               goto out;
4454 +
4455 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4456 +
4457 +       if (use_type == USE_TYPE_VCHIQ) {
4458 +               sprintf(entity, "VCHIQ:   ");
4459 +               entity_uc = &arm_state->peer_use_count;
4460 +       } else if (service) {
4461 +               sprintf(entity, "%c%c%c%c:%03d",
4462 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4463 +                       service->client_id);
4464 +               entity_uc = &service->service_use_count;
4465 +       } else {
4466 +               vchiq_log_error(vchiq_susp_log_level, "%s null service "
4467 +                               "ptr", __func__);
4468 +               ret = VCHIQ_ERROR;
4469 +               goto out;
4470 +       }
4471 +
4472 +       write_lock_bh(&arm_state->susp_res_lock);
4473 +       while (arm_state->resume_blocked) {
4474 +               /* If we call 'use' while force suspend is waiting for suspend,
4475 +                * then we're about to block the thread which the force is
4476 +                * waiting to complete, so we're bound to just time out. In this
4477 +                * case, set the suspend state such that the wait will be
4478 +                * canceled, so we can complete as quickly as possible. */
4479 +               if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4480 +                               VC_SUSPEND_IDLE) {
4481 +                       set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4482 +                       break;
4483 +               }
4484 +               /* If suspend is already in progress then we need to block */
4485 +               if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4486 +                       /* Indicate that there are threads waiting on the resume
4487 +                        * blocker.  These need to be allowed to complete before
4488 +                        * a _second_ call to force suspend can complete,
4489 +                        * otherwise low priority threads might never actually
4490 +                        * continue */
4491 +                       arm_state->blocked_count++;
4492 +                       write_unlock_bh(&arm_state->susp_res_lock);
4493 +                       vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4494 +                               "blocked - waiting...", __func__, entity);
4495 +                       if (wait_for_completion_killable(
4496 +                                       &arm_state->resume_blocker) != 0) {
4497 +                               vchiq_log_error(vchiq_susp_log_level, "%s %s "
4498 +                                       "wait for resume blocker interrupted",
4499 +                                       __func__, entity);
4500 +                               ret = VCHIQ_ERROR;
4501 +                               write_lock_bh(&arm_state->susp_res_lock);
4502 +                               arm_state->blocked_count--;
4503 +                               write_unlock_bh(&arm_state->susp_res_lock);
4504 +                               goto out;
4505 +                       }
4506 +                       vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4507 +                               "unblocked", __func__, entity);
4508 +                       write_lock_bh(&arm_state->susp_res_lock);
4509 +                       if (--arm_state->blocked_count == 0)
4510 +                               complete_all(&arm_state->blocked_blocker);
4511 +               }
4512 +       }
4513 +
4514 +       stop_suspend_timer(arm_state);
4515 +
4516 +       local_uc = ++arm_state->videocore_use_count;
4517 +       local_entity_uc = ++(*entity_uc);
4518 +
4519 +       /* If there's a pending request which hasn't yet been serviced then
4520 +        * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
4521 +        * vc_resume_complete will block until we either resume or fail to
4522 +        * suspend */
4523 +       if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4524 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4525 +
4526 +       if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4527 +               set_resume_state(arm_state, VC_RESUME_REQUESTED);
4528 +               vchiq_log_info(vchiq_susp_log_level,
4529 +                       "%s %s count %d, state count %d",
4530 +                       __func__, entity, local_entity_uc, local_uc);
4531 +               request_poll(state, NULL, 0);
4532 +       } else
4533 +               vchiq_log_trace(vchiq_susp_log_level,
4534 +                       "%s %s count %d, state count %d",
4535 +                       __func__, entity, *entity_uc, local_uc);
4536 +
4537 +
4538 +       write_unlock_bh(&arm_state->susp_res_lock);
4539 +
4540 +       /* Completion is in a done state when we're not suspended, so this won't
4541 +        * block for the non-suspended case. */
4542 +       if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4543 +               vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4544 +                       __func__, entity);
4545 +               if (wait_for_completion_killable(
4546 +                               &arm_state->vc_resume_complete) != 0) {
4547 +                       vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4548 +                               "resume interrupted", __func__, entity);
4549 +                       ret = VCHIQ_ERROR;
4550 +                       goto out;
4551 +               }
4552 +               vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4553 +                       entity);
4554 +       }
4555 +
4556 +       if (ret == VCHIQ_SUCCESS) {
4557 +               VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4558 +               long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4559 +               while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4560 +                       /* Send the use notify to videocore */
4561 +                       status = vchiq_send_remote_use_active(state);
4562 +                       if (status == VCHIQ_SUCCESS)
4563 +                               ack_cnt--;
4564 +                       else
4565 +                               atomic_add(ack_cnt,
4566 +                                       &arm_state->ka_use_ack_count);
4567 +               }
4568 +       }
4569 +
4570 +out:
4571 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4572 +       return ret;
4573 +}
4574 +
4575 +VCHIQ_STATUS_T
4576 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4577 +{
4578 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4579 +       VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4580 +       char entity[16];
4581 +       int *entity_uc;
4582 +       int local_uc, local_entity_uc;
4583 +
4584 +       if (!arm_state)
4585 +               goto out;
4586 +
4587 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4588 +
4589 +       if (service) {
4590 +               sprintf(entity, "%c%c%c%c:%03d",
4591 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4592 +                       service->client_id);
4593 +               entity_uc = &service->service_use_count;
4594 +       } else {
4595 +               sprintf(entity, "PEER:   ");
4596 +               entity_uc = &arm_state->peer_use_count;
4597 +       }
4598 +
4599 +       write_lock_bh(&arm_state->susp_res_lock);
4600 +       if (!arm_state->videocore_use_count || !(*entity_uc)) {
4601 +               /* Don't use BUG_ON - don't allow user thread to crash kernel */
4602 +               WARN_ON(!arm_state->videocore_use_count);
4603 +               WARN_ON(!(*entity_uc));
4604 +               ret = VCHIQ_ERROR;
4605 +               goto unlock;
4606 +       }
4607 +       local_uc = --arm_state->videocore_use_count;
4608 +       local_entity_uc = --(*entity_uc);
4609 +
4610 +       if (!vchiq_videocore_wanted(state)) {
4611 +               if (vchiq_platform_use_suspend_timer() &&
4612 +                               !arm_state->resume_blocked) {
4613 +                       /* Only use the timer if we're not trying to force
4614 +                        * suspend (=> resume_blocked) */
4615 +                       start_suspend_timer(arm_state);
4616 +               } else {
4617 +                       vchiq_log_info(vchiq_susp_log_level,
4618 +                               "%s %s count %d, state count %d - suspending",
4619 +                               __func__, entity, *entity_uc,
4620 +                               arm_state->videocore_use_count);
4621 +                       vchiq_arm_vcsuspend(state);
4622 +               }
4623 +       } else
4624 +               vchiq_log_trace(vchiq_susp_log_level,
4625 +                       "%s %s count %d, state count %d",
4626 +                       __func__, entity, *entity_uc,
4627 +                       arm_state->videocore_use_count);
4628 +
4629 +unlock:
4630 +       write_unlock_bh(&arm_state->susp_res_lock);
4631 +
4632 +out:
4633 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4634 +       return ret;
4635 +}
4636 +
4637 +void
4638 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4639 +{
4640 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4641 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4642 +       atomic_inc(&arm_state->ka_use_count);
4643 +       complete(&arm_state->ka_evt);
4644 +}
4645 +
4646 +void
4647 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4648 +{
4649 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4650 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4651 +       atomic_inc(&arm_state->ka_release_count);
4652 +       complete(&arm_state->ka_evt);
4653 +}
4654 +
4655 +VCHIQ_STATUS_T
4656 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4657 +{
4658 +       return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4659 +}
4660 +
4661 +VCHIQ_STATUS_T
4662 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4663 +{
4664 +       return vchiq_release_internal(service->state, service);
4665 +}
4666 +
4667 +static void suspend_timer_callback(unsigned long context)
4668 +{
4669 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4670 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4671 +       if (!arm_state)
4672 +               goto out;
4673 +       vchiq_log_info(vchiq_susp_log_level,
4674 +               "%s - suspend timer expired - check suspend", __func__);
4675 +       vchiq_check_suspend(state);
4676 +out:
4677 +       return;
4678 +}
4679 +
4680 +VCHIQ_STATUS_T
4681 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4682 +{
4683 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4684 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4685 +       if (service) {
4686 +               ret = vchiq_use_internal(service->state, service,
4687 +                               USE_TYPE_SERVICE_NO_RESUME);
4688 +               unlock_service(service);
4689 +       }
4690 +       return ret;
4691 +}
4692 +
4693 +VCHIQ_STATUS_T
4694 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4695 +{
4696 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4697 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4698 +       if (service) {
4699 +               ret = vchiq_use_internal(service->state, service,
4700 +                               USE_TYPE_SERVICE);
4701 +               unlock_service(service);
4702 +       }
4703 +       return ret;
4704 +}
4705 +
4706 +VCHIQ_STATUS_T
4707 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4708 +{
4709 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4710 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4711 +       if (service) {
4712 +               ret = vchiq_release_internal(service->state, service);
4713 +               unlock_service(service);
4714 +       }
4715 +       return ret;
4716 +}
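+/*
+ * Illustrative usage sketch, not taken from the original patch: a kernel-side
+ * client would typically bracket VideoCore activity with the exported calls
+ * above so the per-service use count stays balanced and autosuspend can
+ * engage once the count returns to zero:
+ *
+ *     if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
+ *             // ...exchange messages on 'handle'...
+ *             vchiq_release_service(handle);
+ *     }
+ */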
4717 +
4718 +void
4719 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4720 +{
4721 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4722 +       int i, j = 0;
4723 +       /* Only dump 64 services */
4724 +       static const int local_max_services = 64;
4725 +       /* If there's more than 64 services, only dump ones with
4726 +        * non-zero counts */
4727 +       int only_nonzero = 0;
4728 +       static const char *nz = "<-- preventing suspend";
4729 +
4730 +       enum vc_suspend_status vc_suspend_state;
4731 +       enum vc_resume_status  vc_resume_state;
4732 +       int peer_count;
4733 +       int vc_use_count;
4734 +       int active_services;
4735 +       struct service_data_struct {
4736 +               int fourcc;
4737 +               int clientid;
4738 +               int use_count;
4739 +       } service_data[local_max_services];
4740 +
4741 +       if (!arm_state)
4742 +               return;
4743 +
4744 +       read_lock_bh(&arm_state->susp_res_lock);
4745 +       vc_suspend_state = arm_state->vc_suspend_state;
4746 +       vc_resume_state  = arm_state->vc_resume_state;
4747 +       peer_count = arm_state->peer_use_count;
4748 +       vc_use_count = arm_state->videocore_use_count;
4749 +       active_services = state->unused_service;
4750 +       if (active_services > local_max_services)
4751 +               only_nonzero = 1;
4752 +
4753 +       for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4754 +               VCHIQ_SERVICE_T *service_ptr = state->services[i];
4755 +               if (!service_ptr)
4756 +                       continue;
4757 +
4758 +               if (only_nonzero && !service_ptr->service_use_count)
4759 +                       continue;
4760 +
4761 +               if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
4762 +                       service_data[j].fourcc = service_ptr->base.fourcc;
4763 +                       service_data[j].clientid = service_ptr->client_id;
4764 +                       service_data[j++].use_count = service_ptr->
4765 +                                                       service_use_count;
4766 +               }
4767 +       }
4768 +
4769 +       read_unlock_bh(&arm_state->susp_res_lock);
4770 +
4771 +       vchiq_log_warning(vchiq_susp_log_level,
4772 +               "-- Videcore suspend state: %s --",
4773 +               suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
4774 +       vchiq_log_warning(vchiq_susp_log_level,
4775 +               "-- Videcore resume state: %s --",
4776 +               resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
4777 +
4778 +       if (only_nonzero)
4779 +               vchiq_log_warning(vchiq_susp_log_level, "Too many active "
4780 +                       "services (%d).  Only dumping up to first %d services "
4781 +                       "with non-zero use-count", active_services,
4782 +                       local_max_services);
4783 +
4784 +       for (i = 0; i < j; i++) {
4785 +               vchiq_log_warning(vchiq_susp_log_level,
4786 +                       "----- %c%c%c%c:%d service count %d %s",
4787 +                       VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
4788 +                       service_data[i].clientid,
4789 +                       service_data[i].use_count,
4790 +                       service_data[i].use_count ? nz : "");
4791 +       }
4792 +       vchiq_log_warning(vchiq_susp_log_level,
4793 +               "----- VCHIQ use count count %d", peer_count);
4794 +       vchiq_log_warning(vchiq_susp_log_level,
4795 +               "--- Overall vchiq instance use count %d", vc_use_count);
4796 +
4797 +       vchiq_dump_platform_use_state(state);
4798 +}
4799 +
4800 +VCHIQ_STATUS_T
4801 +vchiq_check_service(VCHIQ_SERVICE_T *service)
4802 +{
4803 +       VCHIQ_ARM_STATE_T *arm_state;
4804 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4805 +
4806 +       if (!service || !service->state)
4807 +               goto out;
4808 +
4809 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4810 +
4811 +       arm_state = vchiq_platform_get_arm_state(service->state);
4812 +
4813 +       read_lock_bh(&arm_state->susp_res_lock);
4814 +       if (service->service_use_count)
4815 +               ret = VCHIQ_SUCCESS;
4816 +       read_unlock_bh(&arm_state->susp_res_lock);
4817 +
4818 +       if (ret == VCHIQ_ERROR) {
4819 +               vchiq_log_error(vchiq_susp_log_level,
4820 +                       "%s ERROR - %c%c%c%c:%d service count %d, "
4821 +                       "state count %d, videocore suspend state %s", __func__,
4822 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4823 +                       service->client_id, service->service_use_count,
4824 +                       arm_state->videocore_use_count,
4825 +                       suspend_state_names[arm_state->vc_suspend_state +
4826 +                                               VC_SUSPEND_NUM_OFFSET]);
4827 +               vchiq_dump_service_use_state(service->state);
4828 +       }
4829 +out:
4830 +       return ret;
4831 +}
4832 +
4833 +/* stub functions */
4834 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
4835 +{
4836 +       (void)state;
4837 +}
4838 +
4839 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
4840 +       VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
4841 +{
4842 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4843 +       vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
4844 +               get_conn_state_name(oldstate), get_conn_state_name(newstate));
4845 +       if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
4846 +               write_lock_bh(&arm_state->susp_res_lock);
4847 +               if (!arm_state->first_connect) {
4848 +                       char threadname[10];
4849 +                       arm_state->first_connect = 1;
4850 +                       write_unlock_bh(&arm_state->susp_res_lock);
4851 +                       snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
4852 +                               state->id);
4853 +                       arm_state->ka_thread = kthread_create(
4854 +                               &vchiq_keepalive_thread_func,
4855 +                               (void *)state,
4856 +                               threadname);
4857 +                       if (arm_state->ka_thread == NULL) {
4858 +                               vchiq_log_error(vchiq_susp_log_level,
4859 +                                       "vchiq: FATAL: couldn't create thread %s",
4860 +                                       threadname);
4861 +                       } else {
4862 +                               wake_up_process(arm_state->ka_thread);
4863 +                       }
4864 +               } else
4865 +                       write_unlock_bh(&arm_state->susp_res_lock);
4866 +       }
4867 +}
4868 +
4869 +
4870 +/****************************************************************************
4871 +*
4872 +*   vchiq_init - called when the module is loaded.
4873 +*
4874 +***************************************************************************/
4875 +
4876 +static int __init
4877 +vchiq_init(void)
4878 +{
4879 +       int err;
4880 +       void *ptr_err;
4881 +
4882 +       /* create proc entries */
4883 +       err = vchiq_proc_init();
4884 +       if (err != 0)
4885 +               goto failed_proc_init;
4886 +
4887 +       err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
4888 +       if (err != 0) {
4889 +               vchiq_log_error(vchiq_arm_log_level,
4890 +                       "Unable to allocate device number");
4891 +               goto failed_alloc_chrdev;
4892 +       }
4893 +       cdev_init(&vchiq_cdev, &vchiq_fops);
4894 +       vchiq_cdev.owner = THIS_MODULE;
4895 +       err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
4896 +       if (err != 0) {
4897 +               vchiq_log_error(vchiq_arm_log_level,
4898 +                       "Unable to register device");
4899 +               goto failed_cdev_add;
4900 +       }
4901 +
4902 +       /* create sysfs entries */
4903 +       vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
4904 +       ptr_err = vchiq_class;
4905 +       if (IS_ERR(ptr_err))
4906 +               goto failed_class_create;
4907 +
4908 +       vchiq_dev = device_create(vchiq_class, NULL,
4909 +               vchiq_devid, NULL, "vchiq");
4910 +       ptr_err = vchiq_dev;
4911 +       if (IS_ERR(ptr_err))
4912 +               goto failed_device_create;
4913 +
4914 +       err = vchiq_platform_init(&g_state);
4915 +       if (err != 0)
4916 +               goto failed_platform_init;
4917 +
4918 +       vchiq_log_info(vchiq_arm_log_level,
4919 +               "vchiq: initialised - version %d (min %d), device %d.%d",
4920 +               VCHIQ_VERSION, VCHIQ_VERSION_MIN,
4921 +               MAJOR(vchiq_devid), MINOR(vchiq_devid));
4922 +
4923 +       return 0;
4924 +
4925 +failed_platform_init:
4926 +       device_destroy(vchiq_class, vchiq_devid);
4927 +failed_device_create:
4928 +       class_destroy(vchiq_class);
4929 +failed_class_create:
4930 +       cdev_del(&vchiq_cdev);
4931 +       err = PTR_ERR(ptr_err);
4932 +failed_cdev_add:
4933 +       unregister_chrdev_region(vchiq_devid, 1);
4934 +failed_alloc_chrdev:
4935 +       vchiq_proc_deinit();
4936 +failed_proc_init:
4937 +       vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
4938 +       return err;
4939 +}
4940 +
4941 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4942 +{
4943 +       VCHIQ_SERVICE_T *service;
4944 +       int use_count = 0, i;
4945 +       i = 0;
4946 +       while ((service = next_service_by_instance(instance->state,
4947 +               instance, &i)) != NULL) {
4948 +               use_count += service->service_use_count;
4949 +               unlock_service(service);
4950 +       }
4951 +       return use_count;
4952 +}
4953 +
4954 +/* read the per-process use-count */
4955 +static int proc_read_use_count(char *page, char **start,
4956 +                              off_t off, int count,
4957 +                              int *eof, void *data)
4958 +{
4959 +       VCHIQ_INSTANCE_T instance = data;
4960 +       int len, use_count;
4961 +
4962 +       use_count = vchiq_instance_get_use_count(instance);
4963 +       len = snprintf(page+off, count, "%d\n", use_count);
4964 +
4965 +       return len;
4966 +}
4967 +
4968 +/* add an instance (process) to the proc entries */
4969 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
4970 +{
4971 +#if 1
4972 +       return 0;
4973 +#else
4974 +       char pidstr[32];
4975 +       struct proc_dir_entry *top, *use_count;
4976 +       struct proc_dir_entry *clients = vchiq_clients_top();
4977 +       int pid = instance->pid;
4978 +
4979 +       snprintf(pidstr, sizeof(pidstr), "%d", pid);
4980 +       top = proc_mkdir(pidstr, clients);
4981 +       if (!top)
4982 +               goto fail_top;
4983 +
4984 +       use_count = create_proc_read_entry("use_count",
4985 +                                          0444, top,
4986 +                                          proc_read_use_count,
4987 +                                          instance);
4988 +       if (!use_count)
4989 +               goto fail_use_count;
4990 +
4991 +       instance->proc_entry = top;
4992 +
4993 +       return 0;
4994 +
4995 +fail_use_count:
4996 +       remove_proc_entry(top->name, clients);
4997 +fail_top:
4998 +       return -ENOMEM;
4999 +#endif
5000 +}
5001 +
5002 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
5003 +{
5004 +#if 0
5005 +       struct proc_dir_entry *clients = vchiq_clients_top();
5006 +       remove_proc_entry("use_count", instance->proc_entry);
5007 +       remove_proc_entry(instance->proc_entry->name, clients);
5008 +#endif
5009 +}
5010 +
5011 +/****************************************************************************
5012 +*
5013 +*   vchiq_exit - called when the module is unloaded.
5014 +*
5015 +***************************************************************************/
5016 +
5017 +static void __exit
5018 +vchiq_exit(void)
5019 +{
5020 +       vchiq_platform_exit(&g_state);
5021 +       device_destroy(vchiq_class, vchiq_devid);
5022 +       class_destroy(vchiq_class);
5023 +       cdev_del(&vchiq_cdev);
5024 +       unregister_chrdev_region(vchiq_devid, 1);
5025 +}
5026 +
5027 +module_init(vchiq_init);
5028 +module_exit(vchiq_exit);
5029 +MODULE_LICENSE("GPL");
5030 +MODULE_AUTHOR("Broadcom Corporation");
5031 --- /dev/null
5032 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5033 @@ -0,0 +1,212 @@
5034 +/**
5035 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5036 + *
5037 + * Redistribution and use in source and binary forms, with or without
5038 + * modification, are permitted provided that the following conditions
5039 + * are met:
5040 + * 1. Redistributions of source code must retain the above copyright
5041 + *    notice, this list of conditions, and the following disclaimer,
5042 + *    without modification.
5043 + * 2. Redistributions in binary form must reproduce the above copyright
5044 + *    notice, this list of conditions and the following disclaimer in the
5045 + *    documentation and/or other materials provided with the distribution.
5046 + * 3. The names of the above-listed copyright holders may not be used
5047 + *    to endorse or promote products derived from this software without
5048 + *    specific prior written permission.
5049 + *
5050 + * ALTERNATIVELY, this software may be distributed under the terms of the
5051 + * GNU General Public License ("GPL") version 2, as published by the Free
5052 + * Software Foundation.
5053 + *
5054 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5055 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5056 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5057 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5058 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5059 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5060 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5061 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5062 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5063 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5064 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5065 + */
5066 +
5067 +#ifndef VCHIQ_ARM_H
5068 +#define VCHIQ_ARM_H
5069 +
5070 +#include <linux/mutex.h>
5071 +#include <linux/semaphore.h>
5072 +#include <linux/atomic.h>
5073 +#include "vchiq_core.h"
5074 +
5075 +
5076 +enum vc_suspend_status {
5077 +       VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5078 +       VC_SUSPEND_REJECTED = -2,  /* Videocore rejected suspend request */
5079 +       VC_SUSPEND_FAILED = -1,    /* Videocore suspend failed */
5080 +       VC_SUSPEND_IDLE = 0,       /* VC active, no suspend actions */
5081 +       VC_SUSPEND_REQUESTED,      /* User has requested suspend */
5082 +       VC_SUSPEND_IN_PROGRESS,    /* Slot handler has recvd suspend request */
5083 +       VC_SUSPEND_SUSPENDED       /* Videocore suspend succeeded */
5084 +};
5085 +
5086 +enum vc_resume_status {
5087 +       VC_RESUME_FAILED = -1, /* Videocore resume failed */
5088 +       VC_RESUME_IDLE = 0,    /* VC suspended, no resume actions */
5089 +       VC_RESUME_REQUESTED,   /* User has requested resume */
5090 +       VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5091 +       VC_RESUME_RESUMED      /* Videocore resumed successfully (active) */
5092 +};
5093 +
5094 +
5095 +enum USE_TYPE_E {
5096 +       USE_TYPE_SERVICE,
5097 +       USE_TYPE_SERVICE_NO_RESUME,
5098 +       USE_TYPE_VCHIQ
5099 +};
5100 +
5101 +
5102 +
5103 +typedef struct vchiq_arm_state_struct {
5104 +       /* Keepalive-related data */
5105 +       struct task_struct *ka_thread;
5106 +       struct completion ka_evt;
5107 +       atomic_t ka_use_count;
5108 +       atomic_t ka_use_ack_count;
5109 +       atomic_t ka_release_count;
5110 +
5111 +       struct completion vc_suspend_complete;
5112 +       struct completion vc_resume_complete;
5113 +
5114 +       rwlock_t susp_res_lock;
5115 +       enum vc_suspend_status vc_suspend_state;
5116 +       enum vc_resume_status vc_resume_state;
5117 +
5118 +       unsigned int wake_address;
5119 +
5120 +       struct timer_list suspend_timer;
5121 +       int suspend_timer_timeout;
5122 +       int suspend_timer_running;
5123 +
5124 +       /* Global use count for videocore.
5125 +       ** This is equal to the sum of the use counts for all services.  When
5126 +       ** this hits zero the videocore suspend procedure will be initiated.
5127 +       */
5128 +       int videocore_use_count;
5129 +
5130 +       /* Use count to track requests from videocore peer.
5131 +       ** This use count is not associated with a service, so needs to be
5132 +       ** tracked separately with the state.
5133 +       */
5134 +       int peer_use_count;
5135 +
5136 +       /* Flag to indicate whether resume is blocked.  This happens when the
5137 +       ** ARM is suspending
5138 +       */
5139 +       struct completion resume_blocker;
5140 +       int resume_blocked;
5141 +       struct completion blocked_blocker;
5142 +       int blocked_count;
5143 +
5144 +       int autosuspend_override;
5145 +
5146 +       /* Flag to indicate that the first vchiq connect has made it through.
5147 +       ** This means that both sides should be fully ready, and we should
5148 +       ** be able to suspend after this point.
5149 +       */
5150 +       int first_connect;
5151 +
5152 +       unsigned long long suspend_start_time;
5153 +       unsigned long long sleep_start_time;
5154 +       unsigned long long resume_start_time;
5155 +       unsigned long long last_wake_time;
5156 +
5157 +} VCHIQ_ARM_STATE_T;
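
The comments in this structure sketch the ARM-side power model: videocore_use_count aggregates the per-service use counts, peer_use_count tracks uses requested by the VideoCore itself, and suspend is only considered once both drop to zero. As a purely illustrative sketch (the real policy lives in vchiq_arm.c behind vchiq_videocore_wanted() and the suspend timer, neither shown here, and the helper name below is invented), a "still wanted" test over these fields could look like:

    /* Illustrative only - not part of the patch. */
    static int videocore_still_wanted(VCHIQ_ARM_STATE_T *arm_state)
    {
            int wanted;

            read_lock(&arm_state->susp_res_lock);
            /* Wanted while any service or the remote peer holds a use count,
            ** or while autosuspend has been overridden. */
            wanted = (arm_state->videocore_use_count != 0) ||
                    (arm_state->peer_use_count != 0) ||
                    (arm_state->autosuspend_override != 0);
            read_unlock(&arm_state->susp_res_lock);

            return wanted;
    }
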
5158 +
5159 +extern int vchiq_arm_log_level;
5160 +extern int vchiq_susp_log_level;
5161 +
5162 +extern int __init
5163 +vchiq_platform_init(VCHIQ_STATE_T *state);
5164 +
5165 +extern void __exit
5166 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5167 +
5168 +extern VCHIQ_STATE_T *
5169 +vchiq_get_state(void);
5170 +
5171 +extern VCHIQ_STATUS_T
5172 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5173 +
5174 +extern VCHIQ_STATUS_T
5175 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5176 +
5177 +extern int
5178 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5179 +
5180 +extern VCHIQ_STATUS_T
5181 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5182 +
5183 +extern VCHIQ_STATUS_T
5184 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5185 +
5186 +extern int
5187 +vchiq_check_resume(VCHIQ_STATE_T *state);
5188 +
5189 +extern void
5190 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5191 +
5192 +extern VCHIQ_STATUS_T
5193 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5194 +
5195 +extern VCHIQ_STATUS_T
5196 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5197 +
5198 +extern VCHIQ_STATUS_T
5199 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5200 +
5201 +extern VCHIQ_STATUS_T
5202 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5203 +
5204 +extern int
5205 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5206 +
5207 +extern int
5208 +vchiq_platform_use_suspend_timer(void);
5209 +
5210 +extern void
5211 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5212 +
5213 +extern void
5214 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5215 +
5216 +extern VCHIQ_ARM_STATE_T*
5217 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5218 +
5219 +extern int
5220 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5221 +
5222 +extern VCHIQ_STATUS_T
5223 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5224 +               enum USE_TYPE_E use_type);
5225 +extern VCHIQ_STATUS_T
5226 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5227 +
5228 +void
5229 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5230 +       enum vc_suspend_status new_state);
5231 +
5232 +void
5233 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5234 +       enum vc_resume_status new_state);
5235 +
5236 +void
5237 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5238 +
5239 +extern int vchiq_proc_init(void);
5240 +extern void vchiq_proc_deinit(void);
5241 +extern struct proc_dir_entry *vchiq_proc_top(void);
5242 +extern struct proc_dir_entry *vchiq_clients_top(void);
5243 +
5244 +
5245 +#endif /* VCHIQ_ARM_H */
5246 --- /dev/null
5247 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5248 @@ -0,0 +1,37 @@
5249 +/**
5250 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5251 + *
5252 + * Redistribution and use in source and binary forms, with or without
5253 + * modification, are permitted provided that the following conditions
5254 + * are met:
5255 + * 1. Redistributions of source code must retain the above copyright
5256 + *    notice, this list of conditions, and the following disclaimer,
5257 + *    without modification.
5258 + * 2. Redistributions in binary form must reproduce the above copyright
5259 + *    notice, this list of conditions and the following disclaimer in the
5260 + *    documentation and/or other materials provided with the distribution.
5261 + * 3. The names of the above-listed copyright holders may not be used
5262 + *    to endorse or promote products derived from this software without
5263 + *    specific prior written permission.
5264 + *
5265 + * ALTERNATIVELY, this software may be distributed under the terms of the
5266 + * GNU General Public License ("GPL") version 2, as published by the Free
5267 + * Software Foundation.
5268 + *
5269 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5270 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5271 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5272 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5273 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5274 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5275 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5276 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5277 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5278 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5279 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5280 + */
5281 +
5282 +const char *vchiq_get_build_hostname(void);
5283 +const char *vchiq_get_build_version(void);
5284 +const char *vchiq_get_build_time(void);
5285 +const char *vchiq_get_build_date(void);
5286 --- /dev/null
5287 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5288 @@ -0,0 +1,60 @@
5289 +/**
5290 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5291 + *
5292 + * Redistribution and use in source and binary forms, with or without
5293 + * modification, are permitted provided that the following conditions
5294 + * are met:
5295 + * 1. Redistributions of source code must retain the above copyright
5296 + *    notice, this list of conditions, and the following disclaimer,
5297 + *    without modification.
5298 + * 2. Redistributions in binary form must reproduce the above copyright
5299 + *    notice, this list of conditions and the following disclaimer in the
5300 + *    documentation and/or other materials provided with the distribution.
5301 + * 3. The names of the above-listed copyright holders may not be used
5302 + *    to endorse or promote products derived from this software without
5303 + *    specific prior written permission.
5304 + *
5305 + * ALTERNATIVELY, this software may be distributed under the terms of the
5306 + * GNU General Public License ("GPL") version 2, as published by the Free
5307 + * Software Foundation.
5308 + *
5309 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5310 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5311 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5312 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5313 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5314 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5315 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5316 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5317 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5318 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5319 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5320 + */
5321 +
5322 +#ifndef VCHIQ_CFG_H
5323 +#define VCHIQ_CFG_H
5324 +
5325 +#define VCHIQ_MAGIC              VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5326 +/* The version of VCHIQ - change with any non-trivial change */
5327 +#define VCHIQ_VERSION            6
5328 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5329 +** incompatible change */
5330 +#define VCHIQ_VERSION_MIN        3
5331 +
5332 +#define VCHIQ_MAX_STATES         1
5333 +#define VCHIQ_MAX_SERVICES       4096
5334 +#define VCHIQ_MAX_SLOTS          128
5335 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5336 +
5337 +#define VCHIQ_NUM_CURRENT_BULKS        32
5338 +#define VCHIQ_NUM_SERVICE_BULKS        4
5339 +
5340 +#ifndef VCHIQ_ENABLE_DEBUG
5341 +#define VCHIQ_ENABLE_DEBUG             1
5342 +#endif
5343 +
5344 +#ifndef VCHIQ_ENABLE_STATS
5345 +#define VCHIQ_ENABLE_STATS             1
5346 +#endif
5347 +
5348 +#endif /* VCHIQ_CFG_H */
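
VCHIQ_VERSION and VCHIQ_VERSION_MIN define a compatibility window: an endpoint can interoperate with any peer whose version falls inside it. As a hedged illustration (the helper name is invented; the patch itself carries version/version_min in the OPEN payload and negotiates during OPEN/OPENACK handling in vchiq_core.c below), an overlap check between two endpoints could be written as:

    /* Illustrative only: endpoints are compatible if their version windows
     * overlap, i.e. each side's version is at least the other's minimum. */
    static int vchiq_versions_compatible(short peer_version,
            short peer_version_min)
    {
            return (peer_version >= VCHIQ_VERSION_MIN) &&
                    (VCHIQ_VERSION >= peer_version_min);
    }
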
5349 --- /dev/null
5350 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5351 @@ -0,0 +1,119 @@
5352 +/**
5353 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5354 + *
5355 + * Redistribution and use in source and binary forms, with or without
5356 + * modification, are permitted provided that the following conditions
5357 + * are met:
5358 + * 1. Redistributions of source code must retain the above copyright
5359 + *    notice, this list of conditions, and the following disclaimer,
5360 + *    without modification.
5361 + * 2. Redistributions in binary form must reproduce the above copyright
5362 + *    notice, this list of conditions and the following disclaimer in the
5363 + *    documentation and/or other materials provided with the distribution.
5364 + * 3. The names of the above-listed copyright holders may not be used
5365 + *    to endorse or promote products derived from this software without
5366 + *    specific prior written permission.
5367 + *
5368 + * ALTERNATIVELY, this software may be distributed under the terms of the
5369 + * GNU General Public License ("GPL") version 2, as published by the Free
5370 + * Software Foundation.
5371 + *
5372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5373 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5374 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5375 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5376 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5377 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5378 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5379 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5380 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5381 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5382 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5383 + */
5384 +
5385 +#include "vchiq_connected.h"
5386 +#include "vchiq_core.h"
5387 +#include <linux/module.h>
5388 +#include <linux/mutex.h>
5389 +
5390 +#define  MAX_CALLBACKS  10
5391 +
5392 +static   int                        g_connected;
5393 +static   int                        g_num_deferred_callbacks;
5394 +static   VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5395 +static   int                        g_once_init;
5396 +static   struct mutex               g_connected_mutex;
5397 +
5398 +/****************************************************************************
5399 +*
5400 +* Function to initialize our lock.
5401 +*
5402 +***************************************************************************/
5403 +
5404 +static void connected_init(void)
5405 +{
5406 +       if (!g_once_init) {
5407 +               mutex_init(&g_connected_mutex);
5408 +               g_once_init = 1;
5409 +       }
5410 +}
5411 +
5412 +/****************************************************************************
5413 +*
5414 +* This function is used to defer initialization until the vchiq stack is
5415 +* initialized. If the stack is already initialized, then the callback will
5416 +* be made immediately, otherwise it will be deferred until
5417 +* vchiq_call_connected_callbacks is called.
5418 +*
5419 +***************************************************************************/
5420 +
5421 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5422 +{
5423 +       connected_init();
5424 +
5425 +       if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5426 +               return;
5427 +
5428 +       if (g_connected)
5429 +               /* We're already connected. Call the callback immediately. */
5430 +
5431 +               callback();
5432 +       else {
5433 +               if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5434 +                       vchiq_log_error(vchiq_core_log_level,
5435 +                               "There are already %d callbacks registered - "
5436 +                               "please increase MAX_CALLBACKS",
5437 +                               g_num_deferred_callbacks);
5438 +               else {
5439 +                       g_deferred_callback[g_num_deferred_callbacks] =
5440 +                               callback;
5441 +                       g_num_deferred_callbacks++;
5442 +               }
5443 +       }
5444 +       mutex_unlock(&g_connected_mutex);
5445 +}
5446 +
5447 +/****************************************************************************
5448 +*
5449 +* This function is called by the vchiq stack once it has been connected to
5450 +* the videocore and clients can start to use the stack.
5451 +*
5452 +***************************************************************************/
5453 +
5454 +void vchiq_call_connected_callbacks(void)
5455 +{
5456 +       int i;
5457 +
5458 +       connected_init();
5459 +
5460 +       if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5461 +               return;
5462 +
5463 +       for (i = 0; i < g_num_deferred_callbacks; i++)
5464 +               g_deferred_callback[i]();
5465 +
5466 +       g_num_deferred_callbacks = 0;
5467 +       g_connected = 1;
5468 +       mutex_unlock(&g_connected_mutex);
5469 +}
5470 +EXPORT_SYMBOL(vchiq_add_connected_callback);
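
Taken together, these two functions implement a small run-once-connected registry: a caller registers a callback that either fires immediately (if the stack is already connected) or is deferred until vchiq_call_connected_callbacks() runs. A hypothetical client that defers its own setup might use the exported entry point like this (the function names and message are invented for illustration):

    /* Illustrative only - hypothetical user of the connected-callback API. */
    #include <linux/module.h>
    #include "vchiq_connected.h"

    static void my_service_connected(void)
    {
            /* Called once vchiq is connected; safe to open services here. */
            pr_info("vchiq connected - starting my service\n");
    }

    static int __init my_client_init(void)
    {
            /* Fires immediately if already connected, otherwise deferred. */
            vchiq_add_connected_callback(my_service_connected);
            return 0;
    }
    module_init(my_client_init);
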
5471 --- /dev/null
5472 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5473 @@ -0,0 +1,50 @@
5474 +/**
5475 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5476 + *
5477 + * Redistribution and use in source and binary forms, with or without
5478 + * modification, are permitted provided that the following conditions
5479 + * are met:
5480 + * 1. Redistributions of source code must retain the above copyright
5481 + *    notice, this list of conditions, and the following disclaimer,
5482 + *    without modification.
5483 + * 2. Redistributions in binary form must reproduce the above copyright
5484 + *    notice, this list of conditions and the following disclaimer in the
5485 + *    documentation and/or other materials provided with the distribution.
5486 + * 3. The names of the above-listed copyright holders may not be used
5487 + *    to endorse or promote products derived from this software without
5488 + *    specific prior written permission.
5489 + *
5490 + * ALTERNATIVELY, this software may be distributed under the terms of the
5491 + * GNU General Public License ("GPL") version 2, as published by the Free
5492 + * Software Foundation.
5493 + *
5494 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5495 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5496 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5497 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5498 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5499 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5500 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5501 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5502 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5503 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5504 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5505 + */
5506 +
5507 +#ifndef VCHIQ_CONNECTED_H
5508 +#define VCHIQ_CONNECTED_H
5509 +
5510 +/* ---- Include Files ----------------------------------------------------- */
5511 +
5512 +/* ---- Constants and Types ---------------------------------------------- */
5513 +
5514 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5515 +
5516 +/* ---- Variable Externs ------------------------------------------------- */
5517 +
5518 +/* ---- Function Prototypes ---------------------------------------------- */
5519 +
5520 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5521 +void vchiq_call_connected_callbacks(void);
5522 +
5523 +#endif /* VCHIQ_CONNECTED_H */
5524 --- /dev/null
5525 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5526 @@ -0,0 +1,3824 @@
5527 +/**
5528 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5529 + *
5530 + * Redistribution and use in source and binary forms, with or without
5531 + * modification, are permitted provided that the following conditions
5532 + * are met:
5533 + * 1. Redistributions of source code must retain the above copyright
5534 + *    notice, this list of conditions, and the following disclaimer,
5535 + *    without modification.
5536 + * 2. Redistributions in binary form must reproduce the above copyright
5537 + *    notice, this list of conditions and the following disclaimer in the
5538 + *    documentation and/or other materials provided with the distribution.
5539 + * 3. The names of the above-listed copyright holders may not be used
5540 + *    to endorse or promote products derived from this software without
5541 + *    specific prior written permission.
5542 + *
5543 + * ALTERNATIVELY, this software may be distributed under the terms of the
5544 + * GNU General Public License ("GPL") version 2, as published by the Free
5545 + * Software Foundation.
5546 + *
5547 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5548 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5549 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5550 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5551 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5552 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5553 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5554 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5555 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5556 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5557 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5558 + */
5559 +
5560 +#include "vchiq_core.h"
5561 +
5562 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5563 +
5564 +#define HANDLE_STATE_SHIFT 12
5565 +
5566 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5567 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5568 +#define SLOT_INDEX_FROM_DATA(state, data) \
5569 +       (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5570 +       VCHIQ_SLOT_SIZE)
5571 +#define SLOT_INDEX_FROM_INFO(state, info) \
5572 +       ((unsigned int)(info - state->slot_info))
5573 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5574 +       ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5575 +
5576 +
5577 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5578 +
5579 +
5580 +struct vchiq_open_payload {
5581 +       int fourcc;
5582 +       int client_id;
5583 +       short version;
5584 +       short version_min;
5585 +};
5586 +
5587 +struct vchiq_openack_payload {
5588 +       short version;
5589 +};
5590 +
5591 +/* we require this for consistency between endpoints */
5592 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5593 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5594 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5595 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5596 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5597 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5598 +
5599 +/* Run time control of log level, based on KERN_XXX level. */
5600 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5601 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5602 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5603 +
5604 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5605 +
5606 +static DEFINE_SPINLOCK(service_spinlock);
5607 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5608 +DEFINE_SPINLOCK(quota_spinlock);
5609 +
5610 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5611 +static unsigned int handle_seq;
5612 +
5613 +static const char *const srvstate_names[] = {
5614 +       "FREE",
5615 +       "HIDDEN",
5616 +       "LISTENING",
5617 +       "OPENING",
5618 +       "OPEN",
5619 +       "OPENSYNC",
5620 +       "CLOSESENT",
5621 +       "CLOSERECVD",
5622 +       "CLOSEWAIT",
5623 +       "CLOSED"
5624 +};
5625 +
5626 +static const char *const reason_names[] = {
5627 +       "SERVICE_OPENED",
5628 +       "SERVICE_CLOSED",
5629 +       "MESSAGE_AVAILABLE",
5630 +       "BULK_TRANSMIT_DONE",
5631 +       "BULK_RECEIVE_DONE",
5632 +       "BULK_TRANSMIT_ABORTED",
5633 +       "BULK_RECEIVE_ABORTED"
5634 +};
5635 +
5636 +static const char *const conn_state_names[] = {
5637 +       "DISCONNECTED",
5638 +       "CONNECTING",
5639 +       "CONNECTED",
5640 +       "PAUSING",
5641 +       "PAUSE_SENT",
5642 +       "PAUSED",
5643 +       "RESUMING",
5644 +       "PAUSE_TIMEOUT",
5645 +       "RESUME_TIMEOUT"
5646 +};
5647 +
5648 +
5649 +static void
5650 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5651 +
5652 +static const char *msg_type_str(unsigned int msg_type)
5653 +{
5654 +       switch (msg_type) {
5655 +       case VCHIQ_MSG_PADDING:       return "PADDING";
5656 +       case VCHIQ_MSG_CONNECT:       return "CONNECT";
5657 +       case VCHIQ_MSG_OPEN:          return "OPEN";
5658 +       case VCHIQ_MSG_OPENACK:       return "OPENACK";
5659 +       case VCHIQ_MSG_CLOSE:         return "CLOSE";
5660 +       case VCHIQ_MSG_DATA:          return "DATA";
5661 +       case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
5662 +       case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
5663 +       case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
5664 +       case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
5665 +       case VCHIQ_MSG_PAUSE:         return "PAUSE";
5666 +       case VCHIQ_MSG_RESUME:        return "RESUME";
5667 +       case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
5668 +       case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
5669 +       case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
5670 +       }
5671 +       return "???";
5672 +}
5673 +
5674 +static inline void
5675 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5676 +{
5677 +       vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5678 +               service->state->id, service->localport,
5679 +               srvstate_names[service->srvstate],
5680 +               srvstate_names[newstate]);
5681 +       service->srvstate = newstate;
5682 +}
5683 +
5684 +VCHIQ_SERVICE_T *
5685 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5686 +{
5687 +       VCHIQ_SERVICE_T *service;
5688 +
5689 +       spin_lock(&service_spinlock);
5690 +       service = handle_to_service(handle);
5691 +       if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5692 +               (service->handle == handle)) {
5693 +               BUG_ON(service->ref_count == 0);
5694 +               service->ref_count++;
5695 +       } else
5696 +               service = NULL;
5697 +       spin_unlock(&service_spinlock);
5698 +
5699 +       if (!service)
5700 +               vchiq_log_info(vchiq_core_log_level,
5701 +                       "Invalid service handle 0x%x", handle);
5702 +
5703 +       return service;
5704 +}
5705 +
5706 +VCHIQ_SERVICE_T *
5707 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5708 +{
5709 +       VCHIQ_SERVICE_T *service = NULL;
5710 +       if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5711 +               spin_lock(&service_spinlock);
5712 +               service = state->services[localport];
5713 +               if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5714 +                       BUG_ON(service->ref_count == 0);
5715 +                       service->ref_count++;
5716 +               } else
5717 +                       service = NULL;
5718 +               spin_unlock(&service_spinlock);
5719 +       }
5720 +
5721 +       if (!service)
5722 +               vchiq_log_info(vchiq_core_log_level,
5723 +                       "Invalid port %d", localport);
5724 +
5725 +       return service;
5726 +}
5727 +
5728 +VCHIQ_SERVICE_T *
5729 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5730 +       VCHIQ_SERVICE_HANDLE_T handle) {
5731 +       VCHIQ_SERVICE_T *service;
5732 +
5733 +       spin_lock(&service_spinlock);
5734 +       service = handle_to_service(handle);
5735 +       if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5736 +               (service->handle == handle) &&
5737 +               (service->instance == instance)) {
5738 +               BUG_ON(service->ref_count == 0);
5739 +               service->ref_count++;
5740 +       } else
5741 +               service = NULL;
5742 +       spin_unlock(&service_spinlock);
5743 +
5744 +       if (!service)
5745 +               vchiq_log_info(vchiq_core_log_level,
5746 +                       "Invalid service handle 0x%x", handle);
5747 +
5748 +       return service;
5749 +}
5750 +
5751 +VCHIQ_SERVICE_T *
5752 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5753 +       int *pidx)
5754 +{
5755 +       VCHIQ_SERVICE_T *service = NULL;
5756 +       int idx = *pidx;
5757 +
5758 +       spin_lock(&service_spinlock);
5759 +       while (idx < state->unused_service) {
5760 +               VCHIQ_SERVICE_T *srv = state->services[idx++];
5761 +               if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5762 +                       (srv->instance == instance)) {
5763 +                       service = srv;
5764 +                       BUG_ON(service->ref_count == 0);
5765 +                       service->ref_count++;
5766 +                       break;
5767 +               }
5768 +       }
5769 +       spin_unlock(&service_spinlock);
5770 +
5771 +       *pidx = idx;
5772 +
5773 +       return service;
5774 +}
5775 +
5776 +void
5777 +lock_service(VCHIQ_SERVICE_T *service)
5778 +{
5779 +       spin_lock(&service_spinlock);
5780 +       BUG_ON(!service || (service->ref_count == 0));
5781 +       if (service)
5782 +               service->ref_count++;
5783 +       spin_unlock(&service_spinlock);
5784 +}
5785 +
5786 +void
5787 +unlock_service(VCHIQ_SERVICE_T *service)
5788 +{
5789 +       VCHIQ_STATE_T *state = service->state;
5790 +       spin_lock(&service_spinlock);
5791 +       BUG_ON(!service || (service->ref_count == 0));
5792 +       if (service && service->ref_count) {
5793 +               service->ref_count--;
5794 +               if (!service->ref_count) {
5795 +                       BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
5796 +                       state->services[service->localport] = NULL;
5797 +               } else
5798 +                       service = NULL;
5799 +       }
5800 +       spin_unlock(&service_spinlock);
5801 +
5802 +       if (service && service->userdata_term)
5803 +               service->userdata_term(service->base.userdata);
5804 +
5805 +       kfree(service);
5806 +}
5807 +
5808 +int
5809 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
5810 +{
5811 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5812 +       int id;
5813 +
5814 +       id = service ? service->client_id : 0;
5815 +       if (service)
5816 +               unlock_service(service);
5817 +
5818 +       return id;
5819 +}
5820 +
5821 +void *
5822 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
5823 +{
5824 +       VCHIQ_SERVICE_T *service = handle_to_service(handle);
5825 +
5826 +       return service ? service->base.userdata : NULL;
5827 +}
5828 +
5829 +int
5830 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
5831 +{
5832 +       VCHIQ_SERVICE_T *service = handle_to_service(handle);
5833 +
5834 +       return service ? service->base.fourcc : 0;
5835 +}
5836 +
5837 +static void
5838 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
5839 +{
5840 +       VCHIQ_STATE_T *state = service->state;
5841 +       VCHIQ_SERVICE_QUOTA_T *service_quota;
5842 +
5843 +       service->closing = 1;
5844 +
5845 +       /* Synchronise with other threads. */
5846 +       mutex_lock(&state->recycle_mutex);
5847 +       mutex_unlock(&state->recycle_mutex);
5848 +       if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
5849 +               /* If we're pausing then the slot_mutex is held until resume
5850 +                * by the slot handler.  Therefore don't try to acquire this
5851 +                * mutex if we're the slot handler and in the pause sent state.
5852 +                * We don't need to in this case anyway. */
5853 +               mutex_lock(&state->slot_mutex);
5854 +               mutex_unlock(&state->slot_mutex);
5855 +       }
5856 +
5857 +       /* Unblock any sending thread. */
5858 +       service_quota = &state->service_quotas[service->localport];
5859 +       up(&service_quota->quota_event);
5860 +}
5861 +
5862 +static void
5863 +mark_service_closing(VCHIQ_SERVICE_T *service)
5864 +{
5865 +       mark_service_closing_internal(service, 0);
5866 +}
5867 +
5868 +static inline VCHIQ_STATUS_T
5869 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
5870 +       VCHIQ_HEADER_T *header, void *bulk_userdata)
5871 +{
5872 +       VCHIQ_STATUS_T status;
5873 +       vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
5874 +               service->state->id, service->localport, reason_names[reason],
5875 +               (unsigned int)header, (unsigned int)bulk_userdata);
5876 +       status = service->base.callback(reason, header, service->handle,
5877 +               bulk_userdata);
5878 +       if (status == VCHIQ_ERROR) {
5879 +               vchiq_log_warning(vchiq_core_log_level,
5880 +                       "%d: ignoring ERROR from callback to service %x",
5881 +                       service->state->id, service->handle);
5882 +               status = VCHIQ_SUCCESS;
5883 +       }
5884 +       return status;
5885 +}
5886 +
5887 +inline void
5888 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
5889 +{
5890 +       VCHIQ_CONNSTATE_T oldstate = state->conn_state;
5891 +       vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
5892 +               conn_state_names[oldstate],
5893 +               conn_state_names[newstate]);
5894 +       state->conn_state = newstate;
5895 +       vchiq_platform_conn_state_changed(state, oldstate, newstate);
5896 +}
5897 +
5898 +static inline void
5899 +remote_event_create(REMOTE_EVENT_T *event)
5900 +{
5901 +       event->armed = 0;
5902 +       /* Don't clear the 'fired' flag because it may already have been set
5903 +       ** by the other side. */
5904 +       sema_init(event->event, 0);
5905 +}
5906 +
5907 +static inline void
5908 +remote_event_destroy(REMOTE_EVENT_T *event)
5909 +{
5910 +       (void)event;
5911 +}
5912 +
5913 +static inline int
5914 +remote_event_wait(REMOTE_EVENT_T *event)
5915 +{
5916 +       if (!event->fired) {
5917 +               event->armed = 1;
5918 +               dsb();
5919 +               if (!event->fired) {
5920 +                       if (down_interruptible(event->event) != 0) {
5921 +                               event->armed = 0;
5922 +                               return 0;
5923 +                       }
5924 +               }
5925 +               event->armed = 0;
5926 +               wmb();
5927 +       }
5928 +
5929 +       event->fired = 0;
5930 +       return 1;
5931 +}
5932 +
5933 +static inline void
5934 +remote_event_signal_local(REMOTE_EVENT_T *event)
5935 +{
5936 +       event->armed = 0;
5937 +       up(event->event);
5938 +}
5939 +
5940 +static inline void
5941 +remote_event_poll(REMOTE_EVENT_T *event)
5942 +{
5943 +       if (event->fired && event->armed)
5944 +               remote_event_signal_local(event);
5945 +}
5946 +
5947 +void
5948 +remote_event_pollall(VCHIQ_STATE_T *state)
5949 +{
5950 +       remote_event_poll(&state->local->sync_trigger);
5951 +       remote_event_poll(&state->local->sync_release);
5952 +       remote_event_poll(&state->local->trigger);
5953 +       remote_event_poll(&state->local->recycle);
5954 +}
5955 +
5956 +/* Round up message sizes so that any space at the end of a slot is always big
5957 +** enough for a header. This relies on header size being a power of two, which
5958 +** has been verified earlier by a static assertion. */
5959 +
5960 +static inline unsigned int
5961 +calc_stride(unsigned int size)
5962 +{
5963 +       /* Allow room for the header */
5964 +       size += sizeof(VCHIQ_HEADER_T);
5965 +
5966 +       /* Round up */
5967 +       return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
5968 +               - 1);
5969 +}
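
A quick worked example of the rounding above, using sizeof(VCHIQ_HEADER_T) == 8 as guaranteed by the static asserts at the top of this file:

    /* calc_stride(5)  -> (5  + 8 + 7) & ~7 = 16
     * calc_stride(24) -> (24 + 8 + 7) & ~7 = 32
     * Every message therefore occupies a multiple of the header size, so any
     * space left at the end of a slot can always hold a padding header. */
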
5970 +
5971 +/* Called by the slot handler thread */
5972 +static VCHIQ_SERVICE_T *
5973 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
5974 +{
5975 +       int i;
5976 +
5977 +       WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
5978 +
5979 +       for (i = 0; i < state->unused_service; i++) {
5980 +               VCHIQ_SERVICE_T *service = state->services[i];
5981 +               if (service &&
5982 +                       (service->public_fourcc == fourcc) &&
5983 +                       ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
5984 +                       ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
5985 +                       (service->remoteport == VCHIQ_PORT_FREE)))) {
5986 +                       lock_service(service);
5987 +                       return service;
5988 +               }
5989 +       }
5990 +
5991 +       return NULL;
5992 +}
5993 +
5994 +/* Called by the slot handler thread */
5995 +static VCHIQ_SERVICE_T *
5996 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
5997 +{
5998 +       int i;
5999 +       for (i = 0; i < state->unused_service; i++) {
6000 +               VCHIQ_SERVICE_T *service = state->services[i];
6001 +               if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
6002 +                       && (service->remoteport == port)) {
6003 +                       lock_service(service);
6004 +                       return service;
6005 +               }
6006 +       }
6007 +       return NULL;
6008 +}
6009 +
6010 +inline void
6011 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
6012 +{
6013 +       uint32_t value;
6014 +
6015 +       if (service) {
6016 +               do {
6017 +                       value = atomic_read(&service->poll_flags);
6018 +               } while (atomic_cmpxchg(&service->poll_flags, value,
6019 +                       value | (1 << poll_type)) != value);
6020 +
6021 +               do {
6022 +                       value = atomic_read(&state->poll_services[
6023 +                               service->localport>>5]);
6024 +               } while (atomic_cmpxchg(
6025 +                       &state->poll_services[service->localport>>5],
6026 +                       value, value | (1 << (service->localport & 0x1f)))
6027 +                       != value);
6028 +       }
6029 +
6030 +       state->poll_needed = 1;
6031 +       wmb();
6032 +
6033 +       /* ... and ensure the slot handler runs. */
6034 +       remote_event_signal_local(&state->local->trigger);
6035 +}
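
To make the bit manipulation above concrete: each 32-bit entry of poll_services covers 32 local ports, while the per-service poll reason is recorded in poll_flags. The cmpxchg loops keep both updates safe against concurrent callers without taking a lock, and the final remote_event_signal_local() wakes the slot handler so it notices poll_needed.

    /* Example: a service on localport 37
     *   word index = 37 >> 5   = 1
     *   bit        = 37 & 0x1f = 5   -> poll_services[1] |= (1 << 5)
     */
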
6036 +
6037 +/* Called from queue_message, by the slot handler and application threads,
6038 +** with slot_mutex held */
6039 +static VCHIQ_HEADER_T *
6040 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6041 +{
6042 +       VCHIQ_SHARED_STATE_T *local = state->local;
6043 +       int tx_pos = state->local_tx_pos;
6044 +       int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6045 +
6046 +       if (space > slot_space) {
6047 +               VCHIQ_HEADER_T *header;
6048 +               /* Fill the remaining space with padding */
6049 +               WARN_ON(state->tx_data == NULL);
6050 +               header = (VCHIQ_HEADER_T *)
6051 +                       (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6052 +               header->msgid = VCHIQ_MSGID_PADDING;
6053 +               header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6054 +
6055 +               tx_pos += slot_space;
6056 +       }
6057 +
6058 +       /* If necessary, get the next slot. */
6059 +       if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6060 +               int slot_index;
6061 +
6062 +               /* If there is no free slot... */
6063 +
6064 +               if (down_trylock(&state->slot_available_event) != 0) {
6065 +                       /* ...wait for one. */
6066 +
6067 +                       VCHIQ_STATS_INC(state, slot_stalls);
6068 +
6069 +                       /* But first, flush through the last slot. */
6070 +                       state->local_tx_pos = tx_pos;
6071 +                       local->tx_pos = tx_pos;
6072 +                       remote_event_signal(&state->remote->trigger);
6073 +
6074 +                       if (!is_blocking ||
6075 +                               (down_interruptible(
6076 +                               &state->slot_available_event) != 0))
6077 +                               return NULL; /* No space available */
6078 +               }
6079 +
6080 +               BUG_ON(tx_pos ==
6081 +                       (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6082 +
6083 +               slot_index = local->slot_queue[
6084 +                       SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6085 +                       VCHIQ_SLOT_QUEUE_MASK];
6086 +               state->tx_data =
6087 +                       (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6088 +       }
6089 +
6090 +       state->local_tx_pos = tx_pos + space;
6091 +
6092 +       return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6093 +}
6094 +
6095 +/* Called by the recycle thread. */
6096 +static void
6097 +process_free_queue(VCHIQ_STATE_T *state)
6098 +{
6099 +       VCHIQ_SHARED_STATE_T *local = state->local;
6100 +       BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6101 +       int slot_queue_available;
6102 +
6103 +       /* Use a read memory barrier to ensure that any state that may have
6104 +       ** been modified by another thread is not masked by stale prefetched
6105 +       ** values. */
6106 +       rmb();
6107 +
6108 +       /* Find slots which have been freed by the other side, and return them
6109 +       ** to the available queue. */
6110 +       slot_queue_available = state->slot_queue_available;
6111 +
6112 +       while (slot_queue_available != local->slot_queue_recycle) {
6113 +               unsigned int pos;
6114 +               int slot_index = local->slot_queue[slot_queue_available++ &
6115 +                       VCHIQ_SLOT_QUEUE_MASK];
6116 +               char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6117 +               int data_found = 0;
6118 +
6119 +               vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6120 +                       state->id, slot_index, (unsigned int)data,
6121 +                       local->slot_queue_recycle, slot_queue_available);
6122 +
6123 +               /* Initialise the bitmask for services which have used this
6124 +               ** slot */
6125 +               BITSET_ZERO(service_found);
6126 +
6127 +               pos = 0;
6128 +
6129 +               while (pos < VCHIQ_SLOT_SIZE) {
6130 +                       VCHIQ_HEADER_T *header =
6131 +                               (VCHIQ_HEADER_T *)(data + pos);
6132 +                       int msgid = header->msgid;
6133 +                       if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6134 +                               int port = VCHIQ_MSG_SRCPORT(msgid);
6135 +                               VCHIQ_SERVICE_QUOTA_T *service_quota =
6136 +                                       &state->service_quotas[port];
6137 +                               int count;
6138 +                               spin_lock(&quota_spinlock);
6139 +                               count = service_quota->message_use_count;
6140 +                               if (count > 0)
6141 +                                       service_quota->message_use_count =
6142 +                                               count - 1;
6143 +                               spin_unlock(&quota_spinlock);
6144 +
6145 +                               if (count == service_quota->message_quota)
6146 +                                       /* Signal the service that it
6147 +                                       ** has dropped below its quota
6148 +                                       */
6149 +                                       up(&service_quota->quota_event);
6150 +                               else if (count == 0) {
6151 +                                       vchiq_log_error(vchiq_core_log_level,
6152 +                                               "service %d "
6153 +                                               "message_use_count=%d "
6154 +                                               "(header %x, msgid %x, "
6155 +                                               "header->msgid %x, "
6156 +                                               "header->size %x)",
6157 +                                               port,
6158 +                                               service_quota->
6159 +                                                       message_use_count,
6160 +                                               (unsigned int)header, msgid,
6161 +                                               header->msgid,
6162 +                                               header->size);
6163 +                                       WARN(1, "invalid message use count\n");
6164 +                               }
6165 +                               if (!BITSET_IS_SET(service_found, port)) {
6166 +                                       /* Set the found bit for this service */
6167 +                                       BITSET_SET(service_found, port);
6168 +
6169 +                                       spin_lock(&quota_spinlock);
6170 +                                       count = service_quota->slot_use_count;
6171 +                                       if (count > 0)
6172 +                                               service_quota->slot_use_count =
6173 +                                                       count - 1;
6174 +                                       spin_unlock(&quota_spinlock);
6175 +
6176 +                                       if (count > 0) {
6177 +                                               /* Signal the service in case
6178 +                                               ** it has dropped below its
6179 +                                               ** quota */
6180 +                                               up(&service_quota->quota_event);
6181 +                                               vchiq_log_trace(
6182 +                                                       vchiq_core_log_level,
6183 +                                                       "%d: pfq:%d %x@%x - "
6184 +                                                       "slot_use->%d",
6185 +                                                       state->id, port,
6186 +                                                       header->size,
6187 +                                                       (unsigned int)header,
6188 +                                                       count - 1);
6189 +                                       } else {
6190 +                                               vchiq_log_error(
6191 +                                                       vchiq_core_log_level,
6192 +                                                               "service %d "
6193 +                                                               "slot_use_count"
6194 +                                                               "=%d (header %x"
6195 +                                                               ", msgid %x, "
6196 +                                                               "header->msgid"
6197 +                                                               " %x, header->"
6198 +                                                               "size %x)",
6199 +                                                       port, count,
6200 +                                                       (unsigned int)header,
6201 +                                                       msgid,
6202 +                                                       header->msgid,
6203 +                                                       header->size);
6204 +                                               WARN(1, "bad slot use count\n");
6205 +                                       }
6206 +                               }
6207 +
6208 +                               data_found = 1;
6209 +                       }
6210 +
6211 +                       pos += calc_stride(header->size);
6212 +                       if (pos > VCHIQ_SLOT_SIZE) {
6213 +                               vchiq_log_error(vchiq_core_log_level,
6214 +                                       "pfq - pos %x: header %x, msgid %x, "
6215 +                                       "header->msgid %x, header->size %x",
6216 +                                       pos, (unsigned int)header, msgid,
6217 +                                       header->msgid, header->size);
6218 +                               WARN(1, "invalid slot position\n");
6219 +                       }
6220 +               }
6221 +
6222 +               if (data_found) {
6223 +                       int count;
6224 +                       spin_lock(&quota_spinlock);
6225 +                       count = state->data_use_count;
6226 +                       if (count > 0)
6227 +                               state->data_use_count =
6228 +                                       count - 1;
6229 +                       spin_unlock(&quota_spinlock);
6230 +                       if (count == state->data_quota)
6231 +                               up(&state->data_quota_event);
6232 +               }
6233 +
6234 +               state->slot_queue_available = slot_queue_available;
6235 +               up(&state->slot_available_event);
6236 +       }
6237 +}
6238 +
6239 +/* Called by the slot handler and application threads */
6240 +static VCHIQ_STATUS_T
6241 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6242 +       int msgid, const VCHIQ_ELEMENT_T *elements,
6243 +       int count, int size, int is_blocking)
6244 +{
6245 +       VCHIQ_SHARED_STATE_T *local;
6246 +       VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6247 +       VCHIQ_HEADER_T *header;
6248 +       int type = VCHIQ_MSG_TYPE(msgid);
6249 +
6250 +       unsigned int stride;
6251 +
6252 +       local = state->local;
6253 +
6254 +       stride = calc_stride(size);
6255 +
6256 +       WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6257 +
6258 +       if ((type != VCHIQ_MSG_RESUME) &&
6259 +               (mutex_lock_interruptible(&state->slot_mutex) != 0))
6260 +               return VCHIQ_RETRY;
6261 +
6262 +       if (type == VCHIQ_MSG_DATA) {
6263 +               int tx_end_index;
6264 +
6265 +               BUG_ON(!service);
6266 +
6267 +               if (service->closing) {
6268 +                       /* The service has been closed */
6269 +                       mutex_unlock(&state->slot_mutex);
6270 +                       return VCHIQ_ERROR;
6271 +               }
6272 +
6273 +               service_quota = &state->service_quotas[service->localport];
6274 +
6275 +               spin_lock(&quota_spinlock);
6276 +
6277 +               /* Ensure this service doesn't use more than its quota of
6278 +               ** messages or slots */
6279 +               tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6280 +                       state->local_tx_pos + stride - 1);
6281 +
6282 +               /* Ensure data messages don't use more than their quota of
6283 +               ** slots */
6284 +               while ((tx_end_index != state->previous_data_index) &&
6285 +                       (state->data_use_count == state->data_quota)) {
6286 +                       VCHIQ_STATS_INC(state, data_stalls);
6287 +                       spin_unlock(&quota_spinlock);
6288 +                       mutex_unlock(&state->slot_mutex);
6289 +
6290 +                       if (down_interruptible(&state->data_quota_event)
6291 +                               != 0)
6292 +                               return VCHIQ_RETRY;
6293 +
6294 +                       mutex_lock(&state->slot_mutex);
6295 +                       spin_lock(&quota_spinlock);
6296 +                       tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6297 +                               state->local_tx_pos + stride - 1);
6298 +                       if ((tx_end_index == state->previous_data_index) ||
6299 +                               (state->data_use_count < state->data_quota)) {
6300 +                               /* Pass the signal on to other waiters */
6301 +                               up(&state->data_quota_event);
6302 +                               break;
6303 +                       }
6304 +               }
6305 +
6306 +               while ((service_quota->message_use_count ==
6307 +                               service_quota->message_quota) ||
6308 +                       ((tx_end_index != service_quota->previous_tx_index) &&
6309 +                       (service_quota->slot_use_count ==
6310 +                               service_quota->slot_quota))) {
6311 +                       spin_unlock(&quota_spinlock);
6312 +                       vchiq_log_trace(vchiq_core_log_level,
6313 +                               "%d: qm:%d %s,%x - quota stall "
6314 +                               "(msg %d, slot %d)",
6315 +                               state->id, service->localport,
6316 +                               msg_type_str(type), size,
6317 +                               service_quota->message_use_count,
6318 +                               service_quota->slot_use_count);
6319 +                       VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6320 +                       mutex_unlock(&state->slot_mutex);
6321 +                       if (down_interruptible(&service_quota->quota_event)
6322 +                               != 0)
6323 +                               return VCHIQ_RETRY;
6324 +                       if (service->closing)
6325 +                               return VCHIQ_ERROR;
6326 +                       if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6327 +                               return VCHIQ_RETRY;
6328 +                       if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6329 +                               /* The service has been closed */
6330 +                               mutex_unlock(&state->slot_mutex);
6331 +                               return VCHIQ_ERROR;
6332 +                       }
6333 +                       spin_lock(&quota_spinlock);
6334 +                       tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6335 +                               state->local_tx_pos + stride - 1);
6336 +               }
6337 +
6338 +               spin_unlock(&quota_spinlock);
6339 +       }
6340 +
6341 +       header = reserve_space(state, stride, is_blocking);
6342 +
6343 +       if (!header) {
6344 +               if (service)
6345 +                       VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6346 +               mutex_unlock(&state->slot_mutex);
6347 +               return VCHIQ_RETRY;
6348 +       }
6349 +
6350 +       if (type == VCHIQ_MSG_DATA) {
6351 +               int i, pos;
6352 +               int tx_end_index;
6353 +               int slot_use_count;
6354 +
6355 +               vchiq_log_info(vchiq_core_log_level,
6356 +                       "%d: qm %s@%x,%x (%d->%d)",
6357 +                       state->id,
6358 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6359 +                       (unsigned int)header, size,
6360 +                       VCHIQ_MSG_SRCPORT(msgid),
6361 +                       VCHIQ_MSG_DSTPORT(msgid));
6362 +
6363 +               BUG_ON(!service);
6364 +
6365 +               for (i = 0, pos = 0; i < (unsigned int)count;
6366 +                       pos += elements[i++].size)
6367 +                       if (elements[i].size) {
6368 +                               if (vchiq_copy_from_user
6369 +                                       (header->data + pos, elements[i].data,
6370 +                                       (size_t) elements[i].size) !=
6371 +                                       VCHIQ_SUCCESS) {
6372 +                                       mutex_unlock(&state->slot_mutex);
6373 +                                       VCHIQ_SERVICE_STATS_INC(service,
6374 +                                               error_count);
6375 +                                       return VCHIQ_ERROR;
6376 +                               }
6377 +                               if (i == 0) {
6378 +                                       if (vchiq_core_msg_log_level >=
6379 +                                               VCHIQ_LOG_INFO)
6380 +                                               vchiq_log_dump_mem("Sent", 0,
6381 +                                                       header->data + pos,
6382 +                                                       min(64u,
6383 +                                                       elements[0].size));
6384 +                               }
6385 +                       }
6386 +
6387 +               spin_lock(&quota_spinlock);
6388 +               service_quota->message_use_count++;
6389 +
6390 +               tx_end_index =
6391 +                       SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6392 +
6393 +               /* If this transmission can't fit in the last slot used by any
6394 +               ** service, the data_use_count must be increased. */
6395 +               if (tx_end_index != state->previous_data_index) {
6396 +                       state->previous_data_index = tx_end_index;
6397 +                       state->data_use_count++;
6398 +               }
6399 +
6400 +               /* If this isn't the same slot last used by this service,
6401 +               ** the service's slot_use_count must be increased. */
6402 +               if (tx_end_index != service_quota->previous_tx_index) {
6403 +                       service_quota->previous_tx_index = tx_end_index;
6404 +                       slot_use_count = ++service_quota->slot_use_count;
6405 +               } else {
6406 +                       slot_use_count = 0;
6407 +               }
6408 +
6409 +               spin_unlock(&quota_spinlock);
6410 +
6411 +               if (slot_use_count)
6412 +                       vchiq_log_trace(vchiq_core_log_level,
6413 +                               "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6414 +                               state->id, service->localport,
6415 +                               msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6416 +                               slot_use_count, header);
6417 +
6418 +               VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6419 +               VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6420 +       } else {
6421 +               vchiq_log_info(vchiq_core_log_level,
6422 +                       "%d: qm %s@%x,%x (%d->%d)", state->id,
6423 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6424 +                       (unsigned int)header, size,
6425 +                       VCHIQ_MSG_SRCPORT(msgid),
6426 +                       VCHIQ_MSG_DSTPORT(msgid));
6427 +               if (size != 0) {
6428 +                       WARN_ON(!((count == 1) && (size == elements[0].size)));
6429 +                       memcpy(header->data, elements[0].data,
6430 +                               elements[0].size);
6431 +               }
6432 +               VCHIQ_STATS_INC(state, ctrl_tx_count);
6433 +       }
6434 +
6435 +       header->msgid = msgid;
6436 +       header->size = size;
6437 +
6438 +       {
6439 +               int svc_fourcc;
6440 +
6441 +               svc_fourcc = service
6442 +                       ? service->base.fourcc
6443 +                       : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6444 +
6445 +               vchiq_log_info(vchiq_core_msg_log_level,
6446 +                       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6447 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6448 +                       VCHIQ_MSG_TYPE(msgid),
6449 +                       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6450 +                       VCHIQ_MSG_SRCPORT(msgid),
6451 +                       VCHIQ_MSG_DSTPORT(msgid),
6452 +                       size);
6453 +       }
6454 +
6455 +       /* Make sure the new header is visible to the peer. */
6456 +       wmb();
6457 +
6458 +       /* Make the new tx_pos visible to the peer. */
6459 +       local->tx_pos = state->local_tx_pos;
6460 +       wmb();
6461 +
6462 +       if (service && (type == VCHIQ_MSG_CLOSE))
6463 +               vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6464 +
6465 +       if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6466 +               mutex_unlock(&state->slot_mutex);
6467 +
6468 +       remote_event_signal(&state->remote->trigger);
6469 +
6470 +       return VCHIQ_SUCCESS;
6471 +}
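/*
 * Illustrative sketch, not part of the patch: the data-quota stall loop in
 * queue_message() above drops slot_mutex, sleeps on a counting semaphore,
 * and on wake-up re-posts ("passes on") the signal if the quota condition
 * has cleared, so every stalled writer eventually runs.  The userspace
 * analogue below uses POSIX semaphores; names such as data_quota_event and
 * data_use_count are reused purely for readability.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t data_quota_event;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int data_use_count = 2;          /* pretend both slots are in use */
static const int data_quota = 2;

static void *writer(void *arg)
{
        pthread_mutex_lock(&lock);
        while (data_use_count == data_quota) {
                pthread_mutex_unlock(&lock);
                sem_wait(&data_quota_event);      /* stall, like down_interruptible() */
                pthread_mutex_lock(&lock);
                if (data_use_count < data_quota)
                        sem_post(&data_quota_event); /* pass the signal on */
        }
        data_use_count++;                         /* claim a slot */
        pthread_mutex_unlock(&lock);
        printf("writer %ld queued a message\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t w1, w2;

        sem_init(&data_quota_event, 0, 0);
        pthread_create(&w1, NULL, writer, (void *)1L);
        pthread_create(&w2, NULL, writer, (void *)2L);

        /* A recycler frees the slots and signals once, as release_slot() does. */
        pthread_mutex_lock(&lock);
        data_use_count = 0;
        pthread_mutex_unlock(&lock);
        sem_post(&data_quota_event);

        pthread_join(w1, NULL);
        pthread_join(w2, NULL);
        return 0;
}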
6472 +
6473 +/* Called by the slot handler and application threads */
6474 +static VCHIQ_STATUS_T
6475 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6476 +       int msgid, const VCHIQ_ELEMENT_T *elements,
6477 +       int count, int size, int is_blocking)
6478 +{
6479 +       VCHIQ_SHARED_STATE_T *local;
6480 +       VCHIQ_HEADER_T *header;
6481 +
6482 +       local = state->local;
6483 +
6484 +       if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6485 +               (mutex_lock_interruptible(&state->sync_mutex) != 0))
6486 +               return VCHIQ_RETRY;
6487 +
6488 +       remote_event_wait(&local->sync_release);
6489 +
6490 +       rmb();
6491 +
6492 +       header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6493 +               local->slot_sync);
6494 +
6495 +       {
6496 +               int oldmsgid = header->msgid;
6497 +               if (oldmsgid != VCHIQ_MSGID_PADDING)
6498 +                       vchiq_log_error(vchiq_core_log_level,
6499 +                               "%d: qms - msgid %x, not PADDING",
6500 +                               state->id, oldmsgid);
6501 +       }
6502 +
6503 +       if (service) {
6504 +               int i, pos;
6505 +
6506 +               vchiq_log_info(vchiq_sync_log_level,
6507 +                       "%d: qms %s@%x,%x (%d->%d)", state->id,
6508 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6509 +                       (unsigned int)header, size,
6510 +                       VCHIQ_MSG_SRCPORT(msgid),
6511 +                       VCHIQ_MSG_DSTPORT(msgid));
6512 +
6513 +               for (i = 0, pos = 0; i < (unsigned int)count;
6514 +                       pos += elements[i++].size)
6515 +                       if (elements[i].size) {
6516 +                               if (vchiq_copy_from_user
6517 +                                       (header->data + pos, elements[i].data,
6518 +                                       (size_t) elements[i].size) !=
6519 +                                       VCHIQ_SUCCESS) {
6520 +                                       mutex_unlock(&state->sync_mutex);
6521 +                                       VCHIQ_SERVICE_STATS_INC(service,
6522 +                                               error_count);
6523 +                                       return VCHIQ_ERROR;
6524 +                               }
6525 +                               if (i == 0) {
6526 +                                       if (vchiq_sync_log_level >=
6527 +                                               VCHIQ_LOG_TRACE)
6528 +                                               vchiq_log_dump_mem("Sent Sync",
6529 +                                                       0, header->data + pos,
6530 +                                                       min(64u,
6531 +                                                       elements[0].size));
6532 +                               }
6533 +                       }
6534 +
6535 +               VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6536 +               VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6537 +       } else {
6538 +               vchiq_log_info(vchiq_sync_log_level,
6539 +                       "%d: qms %s@%x,%x (%d->%d)", state->id,
6540 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6541 +                       (unsigned int)header, size,
6542 +                       VCHIQ_MSG_SRCPORT(msgid),
6543 +                       VCHIQ_MSG_DSTPORT(msgid));
6544 +               if (size != 0) {
6545 +                       WARN_ON(!((count == 1) && (size == elements[0].size)));
6546 +                       memcpy(header->data, elements[0].data,
6547 +                               elements[0].size);
6548 +               }
6549 +               VCHIQ_STATS_INC(state, ctrl_tx_count);
6550 +       }
6551 +
6552 +       header->size = size;
6553 +       header->msgid = msgid;
6554 +
6555 +       if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6556 +               int svc_fourcc;
6557 +
6558 +               svc_fourcc = service
6559 +                       ? service->base.fourcc
6560 +                       : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6561 +
6562 +               vchiq_log_trace(vchiq_sync_log_level,
6563 +                       "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6564 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6565 +                       VCHIQ_MSG_TYPE(msgid),
6566 +                       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6567 +                       VCHIQ_MSG_SRCPORT(msgid),
6568 +                       VCHIQ_MSG_DSTPORT(msgid),
6569 +                       size);
6570 +       }
6571 +
6572 +       /* Make sure the new header is visible to the peer. */
6573 +       wmb();
6574 +
6575 +       remote_event_signal(&state->remote->sync_trigger);
6576 +
6577 +       if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6578 +               mutex_unlock(&state->sync_mutex);
6579 +
6580 +       return VCHIQ_SUCCESS;
6581 +}
6582 +
6583 +static inline void
6584 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6585 +{
6586 +       slot->use_count++;
6587 +}
6588 +
6589 +static void
6590 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6591 +       VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6592 +{
6593 +       int release_count;
6594 +
6595 +       mutex_lock(&state->recycle_mutex);
6596 +
6597 +       if (header) {
6598 +               int msgid = header->msgid;
6599 +               if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6600 +                       (service && service->closing)) {
6601 +                       mutex_unlock(&state->recycle_mutex);
6602 +                       return;
6603 +               }
6604 +
6605 +               /* Rewrite the message header to prevent a double
6606 +               ** release */
6607 +               header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6608 +       }
6609 +
6610 +       release_count = slot_info->release_count;
6611 +       slot_info->release_count = ++release_count;
6612 +
6613 +       if (release_count == slot_info->use_count) {
6614 +               int slot_queue_recycle;
6615 +               /* Add to the freed queue */
6616 +
6617 +               /* A read barrier is necessary here to prevent speculative
6618 +               ** fetches of remote->slot_queue_recycle from overtaking the
6619 +               ** mutex. */
6620 +               rmb();
6621 +
6622 +               slot_queue_recycle = state->remote->slot_queue_recycle;
6623 +               state->remote->slot_queue[slot_queue_recycle &
6624 +                       VCHIQ_SLOT_QUEUE_MASK] =
6625 +                       SLOT_INDEX_FROM_INFO(state, slot_info);
6626 +               state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6627 +               vchiq_log_info(vchiq_core_log_level,
6628 +                       "%d: release_slot %d - recycle->%x",
6629 +                       state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6630 +                       state->remote->slot_queue_recycle);
6631 +
6632 +               /* A write barrier is necessary, but remote_event_signal
6633 +               ** contains one. */
6634 +               remote_event_signal(&state->remote->recycle);
6635 +       }
6636 +
6637 +       mutex_unlock(&state->recycle_mutex);
6638 +}
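/*
 * Illustrative sketch, not part of the patch: a slot is recycled only when
 * its release_count catches up with its use_count.  parse_rx_slots() starts
 * each incoming slot at use_count = 1, claim_slot() adds one per claimed
 * message, and release_slot() adds one per release; the final end-of-slot
 * release makes the two counts equal.  The struct below reuses the field
 * names for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct slot_info { int use_count; int release_count; };

static bool release_one(struct slot_info *s)
{
        s->release_count++;
        return s->release_count == s->use_count;  /* true => push on recycle queue */
}

int main(void)
{
        struct slot_info slot = { .use_count = 1, .release_count = 0 };
        int i;

        slot.use_count += 3;            /* three claimed DATA messages in the slot */

        for (i = 0; i < 3; i++)
                printf("msg release %d -> recycle? %d\n", i, release_one(&slot));

        /* the end-of-slot release performed by parse_rx_slots() */
        printf("slot release   -> recycle? %d\n", release_one(&slot));
        return 0;
}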
6639 +
6640 +/* Called by the slot handler - don't hold the bulk mutex */
6641 +static VCHIQ_STATUS_T
6642 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6643 +       int retry_poll)
6644 +{
6645 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6646 +
6647 +       vchiq_log_trace(vchiq_core_log_level,
6648 +               "%d: nb:%d %cx - p=%x rn=%x r=%x",
6649 +               service->state->id, service->localport,
6650 +               (queue == &service->bulk_tx) ? 't' : 'r',
6651 +               queue->process, queue->remote_notify, queue->remove);
6652 +
6653 +       if (service->state->is_master) {
6654 +               while (queue->remote_notify != queue->process) {
6655 +                       VCHIQ_BULK_T *bulk =
6656 +                               &queue->bulks[BULK_INDEX(queue->remote_notify)];
6657 +                       int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6658 +                               VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6659 +                       int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6660 +                               service->remoteport);
6661 +                       VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6662 +                       /* Only reply to non-dummy bulk requests */
6663 +                       if (bulk->remote_data) {
6664 +                               status = queue_message(service->state, NULL,
6665 +                                       msgid, &element, 1, 4, 0);
6666 +                               if (status != VCHIQ_SUCCESS)
6667 +                                       break;
6668 +                       }
6669 +                       queue->remote_notify++;
6670 +               }
6671 +       } else {
6672 +               queue->remote_notify = queue->process;
6673 +       }
6674 +
6675 +       if (status == VCHIQ_SUCCESS) {
6676 +               while (queue->remove != queue->remote_notify) {
6677 +                       VCHIQ_BULK_T *bulk =
6678 +                               &queue->bulks[BULK_INDEX(queue->remove)];
6679 +
6680 +                       /* Only generate callbacks for non-dummy bulk
6681 +                       ** requests, and non-terminated services */
6682 +                       if (bulk->data && service->instance) {
6683 +                               if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6684 +                                       if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6685 +                                               VCHIQ_SERVICE_STATS_INC(service,
6686 +                                                       bulk_tx_count);
6687 +                                               VCHIQ_SERVICE_STATS_ADD(service,
6688 +                                                       bulk_tx_bytes,
6689 +                                                       bulk->actual);
6690 +                                       } else {
6691 +                                               VCHIQ_SERVICE_STATS_INC(service,
6692 +                                                       bulk_rx_count);
6693 +                                               VCHIQ_SERVICE_STATS_ADD(service,
6694 +                                                       bulk_rx_bytes,
6695 +                                                       bulk->actual);
6696 +                                       }
6697 +                               } else {
6698 +                                       VCHIQ_SERVICE_STATS_INC(service,
6699 +                                               bulk_aborted_count);
6700 +                               }
6701 +                               if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6702 +                                       struct bulk_waiter *waiter;
6703 +                                       spin_lock(&bulk_waiter_spinlock);
6704 +                                       waiter = bulk->userdata;
6705 +                                       if (waiter) {
6706 +                                               waiter->actual = bulk->actual;
6707 +                                               up(&waiter->event);
6708 +                                       }
6709 +                                       spin_unlock(&bulk_waiter_spinlock);
6710 +                               } else if (bulk->mode ==
6711 +                                       VCHIQ_BULK_MODE_CALLBACK) {
6712 +                                       VCHIQ_REASON_T reason = (bulk->dir ==
6713 +                                               VCHIQ_BULK_TRANSMIT) ?
6714 +                                               ((bulk->actual ==
6715 +                                               VCHIQ_BULK_ACTUAL_ABORTED) ?
6716 +                                               VCHIQ_BULK_TRANSMIT_ABORTED :
6717 +                                               VCHIQ_BULK_TRANSMIT_DONE) :
6718 +                                               ((bulk->actual ==
6719 +                                               VCHIQ_BULK_ACTUAL_ABORTED) ?
6720 +                                               VCHIQ_BULK_RECEIVE_ABORTED :
6721 +                                               VCHIQ_BULK_RECEIVE_DONE);
6722 +                                       status = make_service_callback(service,
6723 +                                               reason, NULL, bulk->userdata);
6724 +                                       if (status == VCHIQ_RETRY)
6725 +                                               break;
6726 +                               }
6727 +                       }
6728 +
6729 +                       queue->remove++;
6730 +                       up(&service->bulk_remove_event);
6731 +               }
6732 +               if (!retry_poll)
6733 +                       status = VCHIQ_SUCCESS;
6734 +       }
6735 +
6736 +       if (status == VCHIQ_RETRY)
6737 +               request_poll(service->state, service,
6738 +                       (queue == &service->bulk_tx) ?
6739 +                       VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6740 +
6741 +       return status;
6742 +}
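/*
 * Illustrative sketch, not part of the patch: the nested conditional in
 * notify_bulks() picks one of four callback reasons from the transfer
 * direction and whether the transfer was aborted.  The enums below mirror
 * the VCHIQ names but are local stand-ins so the table can be printed
 * standalone.
 */
#include <stdio.h>

enum dir    { BULK_TRANSMIT, BULK_RECEIVE };
enum reason { TRANSMIT_DONE, TRANSMIT_ABORTED, RECEIVE_DONE, RECEIVE_ABORTED };

static enum reason bulk_reason(enum dir dir, int aborted)
{
        if (dir == BULK_TRANSMIT)
                return aborted ? TRANSMIT_ABORTED : TRANSMIT_DONE;
        return aborted ? RECEIVE_ABORTED : RECEIVE_DONE;
}

int main(void)
{
        static const char *names[] = {
                "TRANSMIT_DONE", "TRANSMIT_ABORTED",
                "RECEIVE_DONE", "RECEIVE_ABORTED"
        };
        enum dir d;
        int aborted;

        for (d = BULK_TRANSMIT; d <= BULK_RECEIVE; d++)
                for (aborted = 0; aborted <= 1; aborted++)
                        printf("dir=%d aborted=%d -> %s\n",
                                d, aborted, names[bulk_reason(d, aborted)]);
        return 0;
}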
6743 +
6744 +/* Called by the slot handler thread */
6745 +static void
6746 +poll_services(VCHIQ_STATE_T *state)
6747 +{
6748 +       int group, i;
6749 +
6750 +       for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6751 +               uint32_t flags;
6752 +               flags = atomic_xchg(&state->poll_services[group], 0);
6753 +               for (i = 0; flags; i++) {
6754 +                       if (flags & (1 << i)) {
6755 +                               VCHIQ_SERVICE_T *service =
6756 +                                       find_service_by_port(state,
6757 +                                               (group<<5) + i);
6758 +                               uint32_t service_flags;
6759 +                               flags &= ~(1 << i);
6760 +                               if (!service)
6761 +                                       continue;
6762 +                               service_flags =
6763 +                                       atomic_xchg(&service->poll_flags, 0);
6764 +                               if (service_flags &
6765 +                                       (1 << VCHIQ_POLL_REMOVE)) {
6766 +                                       vchiq_log_info(vchiq_core_log_level,
6767 +                                               "%d: ps - remove %d<->%d",
6768 +                                               state->id, service->localport,
6769 +                                               service->remoteport);
6770 +
6771 +                                       /* Make it look like a client, because
6772 +                                          it must be removed and not left in
6773 +                                          the LISTENING state. */
6774 +                                       service->public_fourcc =
6775 +                                               VCHIQ_FOURCC_INVALID;
6776 +
6777 +                                       if (vchiq_close_service_internal(
6778 +                                               service, 0/*!close_recvd*/) !=
6779 +                                               VCHIQ_SUCCESS)
6780 +                                               request_poll(state, service,
6781 +                                                       VCHIQ_POLL_REMOVE);
6782 +                               } else if (service_flags &
6783 +                                       (1 << VCHIQ_POLL_TERMINATE)) {
6784 +                                       vchiq_log_info(vchiq_core_log_level,
6785 +                                               "%d: ps - terminate %d<->%d",
6786 +                                               state->id, service->localport,
6787 +                                               service->remoteport);
6788 +                                       if (vchiq_close_service_internal(
6789 +                                               service, 0/*!close_recvd*/) !=
6790 +                                               VCHIQ_SUCCESS)
6791 +                                               request_poll(state, service,
6792 +                                                       VCHIQ_POLL_TERMINATE);
6793 +                               }
6794 +                               if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
6795 +                                       notify_bulks(service,
6796 +                                               &service->bulk_tx,
6797 +                                               1/*retry_poll*/);
6798 +                               if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
6799 +                                       notify_bulks(service,
6800 +                                               &service->bulk_rx,
6801 +                                               1/*retry_poll*/);
6802 +                               unlock_service(service);
6803 +                       }
6804 +               }
6805 +       }
6806 +}
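/*
 * Illustrative sketch, not part of the patch: poll_services() snapshots and
 * clears one 32-bit group of poll flags with an atomic exchange, then walks
 * the set bits; bit i of group g names local port (g << 5) + i.  The C11
 * analogue below uses atomic_exchange; the flag layout matches, the names
 * are stand-ins.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t poll_group;

int main(void)
{
        uint32_t flags;
        int group = 1, i;

        atomic_store(&poll_group, (1u << 0) | (1u << 3) | (1u << 17));

        flags = atomic_exchange(&poll_group, 0);   /* snapshot and clear */
        for (i = 0; flags; i++) {
                if (flags & (1u << i)) {
                        flags &= ~(1u << i);
                        printf("poll work for local port %d\n", (group << 5) + i);
                }
        }
        return 0;
}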
6807 +
6808 +/* Called by the slot handler or application threads, holding the bulk mutex. */
6809 +static int
6810 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6811 +{
6812 +       VCHIQ_STATE_T *state = service->state;
6813 +       int resolved = 0;
6814 +       int rc;
6815 +
6816 +       while ((queue->process != queue->local_insert) &&
6817 +               (queue->process != queue->remote_insert)) {
6818 +               VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6819 +
6820 +               vchiq_log_trace(vchiq_core_log_level,
6821 +                       "%d: rb:%d %cx - li=%x ri=%x p=%x",
6822 +                       state->id, service->localport,
6823 +                       (queue == &service->bulk_tx) ? 't' : 'r',
6824 +                       queue->local_insert, queue->remote_insert,
6825 +                       queue->process);
6826 +
6827 +               WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
6828 +               WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
6829 +
6830 +               rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
6831 +               if (rc != 0)
6832 +                       break;
6833 +
6834 +               vchiq_transfer_bulk(bulk);
6835 +               mutex_unlock(&state->bulk_transfer_mutex);
6836 +
6837 +               if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
6838 +                       const char *header = (queue == &service->bulk_tx) ?
6839 +                               "Send Bulk to" : "Recv Bulk from";
6840 +                       if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
6841 +                               vchiq_log_info(vchiq_core_msg_log_level,
6842 +                                       "%s %c%c%c%c d:%d len:%d %x<->%x",
6843 +                                       header,
6844 +                                       VCHIQ_FOURCC_AS_4CHARS(
6845 +                                               service->base.fourcc),
6846 +                                       service->remoteport,
6847 +                                       bulk->size,
6848 +                                       (unsigned int)bulk->data,
6849 +                                       (unsigned int)bulk->remote_data);
6850 +                       else
6851 +                               vchiq_log_info(vchiq_core_msg_log_level,
6852 +                                       "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
6853 +                                       " rx len:%d %x<->%x",
6854 +                                       header,
6855 +                                       VCHIQ_FOURCC_AS_4CHARS(
6856 +                                               service->base.fourcc),
6857 +                                       service->remoteport,
6858 +                                       bulk->size,
6859 +                                       bulk->remote_size,
6860 +                                       (unsigned int)bulk->data,
6861 +                                       (unsigned int)bulk->remote_data);
6862 +               }
6863 +
6864 +               vchiq_complete_bulk(bulk);
6865 +               queue->process++;
6866 +               resolved++;
6867 +       }
6868 +       return resolved;
6869 +}
6870 +
6871 +/* Called with the bulk_mutex held */
6872 +static void
6873 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6874 +{
6875 +       int is_tx = (queue == &service->bulk_tx);
6876 +       vchiq_log_trace(vchiq_core_log_level,
6877 +               "%d: aob:%d %cx - li=%x ri=%x p=%x",
6878 +               service->state->id, service->localport, is_tx ? 't' : 'r',
6879 +               queue->local_insert, queue->remote_insert, queue->process);
6880 +
6881 +       WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
6882 +       WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
6883 +
6884 +       while ((queue->process != queue->local_insert) ||
6885 +               (queue->process != queue->remote_insert)) {
6886 +               VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6887 +
6888 +               if (queue->process == queue->remote_insert) {
6889 +                       /* fabricate a matching dummy bulk */
6890 +                       bulk->remote_data = NULL;
6891 +                       bulk->remote_size = 0;
6892 +                       queue->remote_insert++;
6893 +               }
6894 +
6895 +               if (queue->process != queue->local_insert) {
6896 +                       vchiq_complete_bulk(bulk);
6897 +
6898 +                       vchiq_log_info(vchiq_core_msg_log_level,
6899 +                               "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
6900 +                               "rx len:%d",
6901 +                               is_tx ? "Send Bulk to" : "Recv Bulk from",
6902 +                               VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
6903 +                               service->remoteport,
6904 +                               bulk->size,
6905 +                               bulk->remote_size);
6906 +               } else {
6907 +                       /* fabricate a matching dummy bulk */
6908 +                       bulk->data = NULL;
6909 +                       bulk->size = 0;
6910 +                       bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
6911 +                       bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
6912 +                               VCHIQ_BULK_RECEIVE;
6913 +                       queue->local_insert++;
6914 +               }
6915 +
6916 +               queue->process++;
6917 +       }
6918 +}
6919 +
6920 +/* Called from the slot handler thread */
6921 +static void
6922 +pause_bulks(VCHIQ_STATE_T *state)
6923 +{
6924 +       if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
6925 +               WARN_ON_ONCE(1);
6926 +               atomic_set(&pause_bulks_count, 1);
6927 +               return;
6928 +       }
6929 +
6930 +       /* Block bulk transfers from all services */
6931 +       mutex_lock(&state->bulk_transfer_mutex);
6932 +}
6933 +
6934 +/* Called from the slot handler thread */
6935 +static void
6936 +resume_bulks(VCHIQ_STATE_T *state)
6937 +{
6938 +       int i;
6939 +       if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
6940 +               WARN_ON_ONCE(1);
6941 +               atomic_set(&pause_bulks_count, 0);
6942 +               return;
6943 +       }
6944 +
6945 +       /* Allow bulk transfers from all services */
6946 +       mutex_unlock(&state->bulk_transfer_mutex);
6947 +
6948 +       if (state->deferred_bulks == 0)
6949 +               return;
6950 +
6951 +       /* Deal with any bulks which had to be deferred due to being in
6952 +        * paused state.  Don't try to match up to number of deferred bulks
6953 +        * in case we've had something come and close the service in the
6954 +        * interim - just process all bulk queues for all services */
6955 +       vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
6956 +               __func__, state->deferred_bulks);
6957 +
6958 +       for (i = 0; i < state->unused_service; i++) {
6959 +               VCHIQ_SERVICE_T *service = state->services[i];
6960 +               int resolved_rx = 0;
6961 +               int resolved_tx = 0;
6962 +               if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
6963 +                       continue;
6964 +
6965 +               mutex_lock(&service->bulk_mutex);
6966 +               resolved_rx = resolve_bulks(service, &service->bulk_rx);
6967 +               resolved_tx = resolve_bulks(service, &service->bulk_tx);
6968 +               mutex_unlock(&service->bulk_mutex);
6969 +               if (resolved_rx)
6970 +                       notify_bulks(service, &service->bulk_rx, 1);
6971 +               if (resolved_tx)
6972 +                       notify_bulks(service, &service->bulk_tx, 1);
6973 +       }
6974 +       state->deferred_bulks = 0;
6975 +}
6976 +
6977 +static int
6978 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
6979 +{
6980 +       VCHIQ_SERVICE_T *service = NULL;
6981 +       int msgid, size;
6982 +       int type;
6983 +       unsigned int localport, remoteport;
6984 +
6985 +       msgid = header->msgid;
6986 +       size = header->size;
6987 +       type = VCHIQ_MSG_TYPE(msgid);
6988 +       localport = VCHIQ_MSG_DSTPORT(msgid);
6989 +       remoteport = VCHIQ_MSG_SRCPORT(msgid);
6990 +       if (size >= sizeof(struct vchiq_open_payload)) {
6991 +               const struct vchiq_open_payload *payload =
6992 +                       (struct vchiq_open_payload *)header->data;
6993 +               unsigned int fourcc;
6994 +
6995 +               fourcc = payload->fourcc;
6996 +               vchiq_log_info(vchiq_core_log_level,
6997 +                       "%d: prs OPEN@%x (%d->'%c%c%c%c')",
6998 +                       state->id, (unsigned int)header,
6999 +                       localport,
7000 +                       VCHIQ_FOURCC_AS_4CHARS(fourcc));
7001 +
7002 +               service = get_listening_service(state, fourcc);
7003 +
7004 +               if (service) {
7005 +                       /* A matching service exists */
7006 +                       short version = payload->version;
7007 +                       short version_min = payload->version_min;
7008 +                       if ((service->version < version_min) ||
7009 +                               (version < service->version_min)) {
7010 +                               /* Version mismatch */
7011 +                               vchiq_loud_error_header();
7012 +                               vchiq_loud_error("%d: service %d (%c%c%c%c) "
7013 +                                       "version mismatch - local (%d, min %d)"
7014 +                                       " vs. remote (%d, min %d)",
7015 +                                       state->id, service->localport,
7016 +                                       VCHIQ_FOURCC_AS_4CHARS(fourcc),
7017 +                                       service->version, service->version_min,
7018 +                                       version, version_min);
7019 +                               vchiq_loud_error_footer();
7020 +                               unlock_service(service);
7021 +                               service = NULL;
7022 +                               goto fail_open;
7023 +                       }
7024 +                       service->peer_version = version;
7025 +
7026 +                       if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
7027 +                               struct vchiq_openack_payload ack_payload = {
7028 +                                       service->version
7029 +                               };
7030 +                               VCHIQ_ELEMENT_T body = {
7031 +                                       &ack_payload,
7032 +                                       sizeof(ack_payload)
7033 +                               };
7034 +
7035 +                               /* Acknowledge the OPEN */
7036 +                               if (service->sync) {
7037 +                                       if (queue_message_sync(state, NULL,
7038 +                                               VCHIQ_MAKE_MSG(
7039 +                                                       VCHIQ_MSG_OPENACK,
7040 +                                                       service->localport,
7041 +                                                       remoteport),
7042 +                                               &body, 1, sizeof(ack_payload),
7043 +                                               0) == VCHIQ_RETRY)
7044 +                                               goto bail_not_ready;
7045 +                               } else {
7046 +                                       if (queue_message(state, NULL,
7047 +                                               VCHIQ_MAKE_MSG(
7048 +                                                       VCHIQ_MSG_OPENACK,
7049 +                                                       service->localport,
7050 +                                                       remoteport),
7051 +                                               &body, 1, sizeof(ack_payload),
7052 +                                               0) == VCHIQ_RETRY)
7053 +                                               goto bail_not_ready;
7054 +                               }
7055 +
7056 +                               /* The service is now open */
7057 +                               vchiq_set_service_state(service,
7058 +                                       service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7059 +                                       : VCHIQ_SRVSTATE_OPEN);
7060 +                       }
7061 +
7062 +                       service->remoteport = remoteport;
7063 +                       service->client_id = ((int *)header->data)[1];
7064 +                       if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7065 +                               NULL, NULL) == VCHIQ_RETRY) {
7066 +                               /* Bail out if not ready */
7067 +                               service->remoteport = VCHIQ_PORT_FREE;
7068 +                               goto bail_not_ready;
7069 +                       }
7070 +
7071 +                       /* Success - the message has been dealt with */
7072 +                       unlock_service(service);
7073 +                       return 1;
7074 +               }
7075 +       }
7076 +
7077 +fail_open:
7078 +       /* No available service, or an invalid request - send a CLOSE */
7079 +       if (queue_message(state, NULL,
7080 +               VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7081 +               NULL, 0, 0, 0) == VCHIQ_RETRY)
7082 +               goto bail_not_ready;
7083 +
7084 +       return 1;
7085 +
7086 +bail_not_ready:
7087 +       if (service)
7088 +               unlock_service(service);
7089 +
7090 +       return 0;
7091 +}
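/*
 * Illustrative sketch, not part of the patch: parse_open() accepts an OPEN
 * only if each side's advertised version is at least the other side's
 * minimum.  A standalone restatement of that check, with hypothetical
 * argument names:
 */
#include <stdio.h>

static int versions_compatible(short local_ver, short local_min,
                               short remote_ver, short remote_min)
{
        /* Mirrors: (service->version < version_min) ||
         *          (version < service->version_min)  => mismatch */
        return !(local_ver < remote_min || remote_ver < local_min);
}

int main(void)
{
        printf("local 3 (min 2) vs remote 4 (min 3): %s\n",
                versions_compatible(3, 2, 4, 3) ? "open" : "reject");
        printf("local 3 (min 2) vs remote 1 (min 1): %s\n",
                versions_compatible(3, 2, 1, 1) ? "open" : "reject");
        return 0;
}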
7092 +
7093 +/* Called by the slot handler thread */
7094 +static void
7095 +parse_rx_slots(VCHIQ_STATE_T *state)
7096 +{
7097 +       VCHIQ_SHARED_STATE_T *remote = state->remote;
7098 +       VCHIQ_SERVICE_T *service = NULL;
7099 +       int tx_pos;
7100 +       DEBUG_INITIALISE(state->local)
7101 +
7102 +       tx_pos = remote->tx_pos;
7103 +
7104 +       while (state->rx_pos != tx_pos) {
7105 +               VCHIQ_HEADER_T *header;
7106 +               int msgid, size;
7107 +               int type;
7108 +               unsigned int localport, remoteport;
7109 +
7110 +               DEBUG_TRACE(PARSE_LINE);
7111 +               if (!state->rx_data) {
7112 +                       int rx_index;
7113 +                       WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7114 +                       rx_index = remote->slot_queue[
7115 +                               SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7116 +                               VCHIQ_SLOT_QUEUE_MASK];
7117 +                       state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7118 +                               rx_index);
7119 +                       state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7120 +
7121 +                       /* Initialise use_count to one, and increment
7122 +                       ** release_count at the end of the slot to avoid
7123 +                       ** releasing the slot prematurely. */
7124 +                       state->rx_info->use_count = 1;
7125 +                       state->rx_info->release_count = 0;
7126 +               }
7127 +
7128 +               header = (VCHIQ_HEADER_T *)(state->rx_data +
7129 +                       (state->rx_pos & VCHIQ_SLOT_MASK));
7130 +               DEBUG_VALUE(PARSE_HEADER, (int)header);
7131 +               msgid = header->msgid;
7132 +               DEBUG_VALUE(PARSE_MSGID, msgid);
7133 +               size = header->size;
7134 +               type = VCHIQ_MSG_TYPE(msgid);
7135 +               localport = VCHIQ_MSG_DSTPORT(msgid);
7136 +               remoteport = VCHIQ_MSG_SRCPORT(msgid);
7137 +
7138 +               if (type != VCHIQ_MSG_DATA)
7139 +                       VCHIQ_STATS_INC(state, ctrl_rx_count);
7140 +
7141 +               switch (type) {
7142 +               case VCHIQ_MSG_OPENACK:
7143 +               case VCHIQ_MSG_CLOSE:
7144 +               case VCHIQ_MSG_DATA:
7145 +               case VCHIQ_MSG_BULK_RX:
7146 +               case VCHIQ_MSG_BULK_TX:
7147 +               case VCHIQ_MSG_BULK_RX_DONE:
7148 +               case VCHIQ_MSG_BULK_TX_DONE:
7149 +                       service = find_service_by_port(state, localport);
7150 +                       if ((!service || service->remoteport != remoteport) &&
7151 +                               (localport == 0) &&
7152 +                               (type == VCHIQ_MSG_CLOSE)) {
7153 +                               /* This could be a CLOSE from a client which
7154 +                                  hadn't yet received the OPENACK - look for
7155 +                                  the connected service */
7156 +                               if (service)
7157 +                                       unlock_service(service);
7158 +                               service = get_connected_service(state,
7159 +                                       remoteport);
7160 +                               if (service)
7161 +                                       vchiq_log_warning(vchiq_core_log_level,
7162 +                                               "%d: prs %s@%x (%d->%d) - "
7163 +                                               "found connected service %d",
7164 +                                               state->id, msg_type_str(type),
7165 +                                               (unsigned int)header,
7166 +                                               remoteport, localport,
7167 +                                               service->localport);
7168 +                       }
7169 +
7170 +                       if (!service) {
7171 +                               vchiq_log_error(vchiq_core_log_level,
7172 +                                       "%d: prs %s@%x (%d->%d) - "
7173 +                                       "invalid/closed service %d",
7174 +                                       state->id, msg_type_str(type),
7175 +                                       (unsigned int)header,
7176 +                                       remoteport, localport, localport);
7177 +                               goto skip_message;
7178 +                       }
7179 +                       break;
7180 +               default:
7181 +                       break;
7182 +               }
7183 +
7184 +               if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7185 +                       int svc_fourcc;
7186 +
7187 +                       svc_fourcc = service
7188 +                               ? service->base.fourcc
7189 +                               : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7190 +                       vchiq_log_info(vchiq_core_msg_log_level,
7191 +                               "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7192 +                               "len:%d",
7193 +                               msg_type_str(type), type,
7194 +                               VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7195 +                               remoteport, localport, size);
7196 +                       if (size > 0)
7197 +                               vchiq_log_dump_mem("Rcvd", 0, header->data,
7198 +                                       min(64, size));
7199 +               }
7200 +
7201 +               if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7202 +                       > VCHIQ_SLOT_SIZE) {
7203 +                       vchiq_log_error(vchiq_core_log_level,
7204 +                               "header %x (msgid %x) - size %x too big for "
7205 +                               "slot",
7206 +                               (unsigned int)header, (unsigned int)msgid,
7207 +                               (unsigned int)size);
7208 +                       WARN(1, "oversized for slot\n");
7209 +               }
7210 +
7211 +               switch (type) {
7212 +               case VCHIQ_MSG_OPEN:
7213 +                       WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7214 +                       if (!parse_open(state, header))
7215 +                               goto bail_not_ready;
7216 +                       break;
7217 +               case VCHIQ_MSG_OPENACK:
7218 +                       if (size >= sizeof(struct vchiq_openack_payload)) {
7219 +                               const struct vchiq_openack_payload *payload =
7220 +                                       (struct vchiq_openack_payload *)
7221 +                                       header->data;
7222 +                               service->peer_version = payload->version;
7223 +                       }
7224 +                       vchiq_log_info(vchiq_core_log_level,
7225 +                               "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7226 +                               state->id, (unsigned int)header, size,
7227 +                               remoteport, localport, service->peer_version);
7228 +                       if (service->srvstate ==
7229 +                               VCHIQ_SRVSTATE_OPENING) {
7230 +                               service->remoteport = remoteport;
7231 +                               vchiq_set_service_state(service,
7232 +                                       VCHIQ_SRVSTATE_OPEN);
7233 +                               up(&service->remove_event);
7234 +                       } else
7235 +                               vchiq_log_error(vchiq_core_log_level,
7236 +                                       "OPENACK received in state %s",
7237 +                                       srvstate_names[service->srvstate]);
7238 +                       break;
7239 +               case VCHIQ_MSG_CLOSE:
7240 +                       WARN_ON(size != 0); /* There should be no data */
7241 +
7242 +                       vchiq_log_info(vchiq_core_log_level,
7243 +                               "%d: prs CLOSE@%x (%d->%d)",
7244 +                               state->id, (unsigned int)header,
7245 +                               remoteport, localport);
7246 +
7247 +                       mark_service_closing_internal(service, 1);
7248 +
7249 +                       if (vchiq_close_service_internal(service,
7250 +                               1/*close_recvd*/) == VCHIQ_RETRY)
7251 +                               goto bail_not_ready;
7252 +
7253 +                       vchiq_log_info(vchiq_core_log_level,
7254 +                               "Close Service %c%c%c%c s:%u d:%d",
7255 +                               VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7256 +                               service->localport,
7257 +                               service->remoteport);
7258 +                       break;
7259 +               case VCHIQ_MSG_DATA:
7260 +                       vchiq_log_trace(vchiq_core_log_level,
7261 +                               "%d: prs DATA@%x,%x (%d->%d)",
7262 +                               state->id, (unsigned int)header, size,
7263 +                               remoteport, localport);
7264 +
7265 +                       if ((service->remoteport == remoteport)
7266 +                               && (service->srvstate ==
7267 +                               VCHIQ_SRVSTATE_OPEN)) {
7268 +                               header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7269 +                               claim_slot(state->rx_info);
7270 +                               DEBUG_TRACE(PARSE_LINE);
7271 +                               if (make_service_callback(service,
7272 +                                       VCHIQ_MESSAGE_AVAILABLE, header,
7273 +                                       NULL) == VCHIQ_RETRY) {
7274 +                                       DEBUG_TRACE(PARSE_LINE);
7275 +                                       goto bail_not_ready;
7276 +                               }
7277 +                               VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7278 +                               VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7279 +                                       size);
7280 +                       } else {
7281 +                               VCHIQ_STATS_INC(state, error_count);
7282 +                       }
7283 +                       break;
7284 +               case VCHIQ_MSG_CONNECT:
7285 +                       vchiq_log_info(vchiq_core_log_level,
7286 +                               "%d: prs CONNECT@%x",
7287 +                               state->id, (unsigned int)header);
7288 +                       up(&state->connect);
7289 +                       break;
7290 +               case VCHIQ_MSG_BULK_RX:
7291 +               case VCHIQ_MSG_BULK_TX: {
7292 +                       VCHIQ_BULK_QUEUE_T *queue;
7293 +                       WARN_ON(!state->is_master);
7294 +                       queue = (type == VCHIQ_MSG_BULK_RX) ?
7295 +                               &service->bulk_tx : &service->bulk_rx;
7296 +                       if ((service->remoteport == remoteport)
7297 +                               && (service->srvstate ==
7298 +                               VCHIQ_SRVSTATE_OPEN)) {
7299 +                               VCHIQ_BULK_T *bulk;
7300 +                               int resolved = 0;
7301 +
7302 +                               DEBUG_TRACE(PARSE_LINE);
7303 +                               if (mutex_lock_interruptible(
7304 +                                       &service->bulk_mutex) != 0) {
7305 +                                       DEBUG_TRACE(PARSE_LINE);
7306 +                                       goto bail_not_ready;
7307 +                               }
7308 +
7309 +                               WARN_ON(!(queue->remote_insert < queue->remove +
7310 +                                       VCHIQ_NUM_SERVICE_BULKS));
7311 +                               bulk = &queue->bulks[
7312 +                                       BULK_INDEX(queue->remote_insert)];
7313 +                               bulk->remote_data =
7314 +                                       (void *)((int *)header->data)[0];
7315 +                               bulk->remote_size = ((int *)header->data)[1];
7316 +                               wmb();
7317 +
7318 +                               vchiq_log_info(vchiq_core_log_level,
7319 +                                       "%d: prs %s@%x (%d->%d) %x@%x",
7320 +                                       state->id, msg_type_str(type),
7321 +                                       (unsigned int)header,
7322 +                                       remoteport, localport,
7323 +                                       bulk->remote_size,
7324 +                                       (unsigned int)bulk->remote_data);
7325 +
7326 +                               queue->remote_insert++;
7327 +
7328 +                               if (atomic_read(&pause_bulks_count)) {
7329 +                                       state->deferred_bulks++;
7330 +                                       vchiq_log_info(vchiq_core_log_level,
7331 +                                               "%s: deferring bulk (%d)",
7332 +                                               __func__,
7333 +                                               state->deferred_bulks);
7334 +                                       if (state->conn_state !=
7335 +                                               VCHIQ_CONNSTATE_PAUSE_SENT)
7336 +                                               vchiq_log_error(
7337 +                                                       vchiq_core_log_level,
7338 +                                                       "%s: bulks paused in "
7339 +                                                       "unexpected state %s",
7340 +                                                       __func__,
7341 +                                                       conn_state_names[
7342 +                                                       state->conn_state]);
7343 +                               } else if (state->conn_state ==
7344 +                                       VCHIQ_CONNSTATE_CONNECTED) {
7345 +                                       DEBUG_TRACE(PARSE_LINE);
7346 +                                       resolved = resolve_bulks(service,
7347 +                                               queue);
7348 +                               }
7349 +
7350 +                               mutex_unlock(&service->bulk_mutex);
7351 +                               if (resolved)
7352 +                                       notify_bulks(service, queue,
7353 +                                               1/*retry_poll*/);
7354 +                       }
7355 +               } break;
7356 +               case VCHIQ_MSG_BULK_RX_DONE:
7357 +               case VCHIQ_MSG_BULK_TX_DONE:
7358 +                       WARN_ON(state->is_master);
7359 +                       if ((service->remoteport == remoteport)
7360 +                               && (service->srvstate !=
7361 +                               VCHIQ_SRVSTATE_FREE)) {
7362 +                               VCHIQ_BULK_QUEUE_T *queue;
7363 +                               VCHIQ_BULK_T *bulk;
7364 +
7365 +                               queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7366 +                                       &service->bulk_rx : &service->bulk_tx;
7367 +
7368 +                               DEBUG_TRACE(PARSE_LINE);
7369 +                               if (mutex_lock_interruptible(
7370 +                                       &service->bulk_mutex) != 0) {
7371 +                                       DEBUG_TRACE(PARSE_LINE);
7372 +                                       goto bail_not_ready;
7373 +                               }
7374 +                               if ((int)(queue->remote_insert -
7375 +                                       queue->local_insert) >= 0) {
7376 +                                       vchiq_log_error(vchiq_core_log_level,
7377 +                                               "%d: prs %s@%x (%d->%d) "
7378 +                                               "unexpected (ri=%d,li=%d)",
7379 +                                               state->id, msg_type_str(type),
7380 +                                               (unsigned int)header,
7381 +                                               remoteport, localport,
7382 +                                               queue->remote_insert,
7383 +                                               queue->local_insert);
7384 +                                       mutex_unlock(&service->bulk_mutex);
7385 +                                       break;
7386 +                               }
7387 +
7388 +                               BUG_ON(queue->process == queue->local_insert);
7389 +                               BUG_ON(queue->process != queue->remote_insert);
7390 +
7391 +                               bulk = &queue->bulks[
7392 +                                       BULK_INDEX(queue->remote_insert)];
7393 +                               bulk->actual = *(int *)header->data;
7394 +                               queue->remote_insert++;
7395 +
7396 +                               vchiq_log_info(vchiq_core_log_level,
7397 +                                       "%d: prs %s@%x (%d->%d) %x@%x",
7398 +                                       state->id, msg_type_str(type),
7399 +                                       (unsigned int)header,
7400 +                                       remoteport, localport,
7401 +                                       bulk->actual, (unsigned int)bulk->data);
7402 +
7403 +                               vchiq_log_trace(vchiq_core_log_level,
7404 +                                       "%d: prs:%d %cx li=%x ri=%x p=%x",
7405 +                                       state->id, localport,
7406 +                                       (type == VCHIQ_MSG_BULK_RX_DONE) ?
7407 +                                               'r' : 't',
7408 +                                       queue->local_insert,
7409 +                                       queue->remote_insert, queue->process);
7410 +
7411 +                               DEBUG_TRACE(PARSE_LINE);
7412 +                               WARN_ON(queue->process == queue->local_insert);
7413 +                               vchiq_complete_bulk(bulk);
7414 +                               queue->process++;
7415 +                               mutex_unlock(&service->bulk_mutex);
7416 +                               DEBUG_TRACE(PARSE_LINE);
7417 +                               notify_bulks(service, queue, 1/*retry_poll*/);
7418 +                               DEBUG_TRACE(PARSE_LINE);
7419 +                       }
7420 +                       break;
7421 +               case VCHIQ_MSG_PADDING:
7422 +                       vchiq_log_trace(vchiq_core_log_level,
7423 +                               "%d: prs PADDING@%x,%x",
7424 +                               state->id, (unsigned int)header, size);
7425 +                       break;
7426 +               case VCHIQ_MSG_PAUSE:
7427 +                       /* If initiated, signal the application thread */
7428 +                       vchiq_log_trace(vchiq_core_log_level,
7429 +                               "%d: prs PAUSE@%x,%x",
7430 +                               state->id, (unsigned int)header, size);
7431 +                       if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7432 +                               vchiq_log_error(vchiq_core_log_level,
7433 +                                       "%d: PAUSE received in state PAUSED",
7434 +                                       state->id);
7435 +                               break;
7436 +                       }
7437 +                       if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7438 +                               /* Send a PAUSE in response */
7439 +                               if (queue_message(state, NULL,
7440 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7441 +                                       NULL, 0, 0, 0) == VCHIQ_RETRY)
7442 +                                       goto bail_not_ready;
7443 +                               if (state->is_master)
7444 +                                       pause_bulks(state);
7445 +                       }
7446 +                       /* At this point slot_mutex is held */
7447 +                       vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7448 +                       vchiq_platform_paused(state);
7449 +                       break;
7450 +               case VCHIQ_MSG_RESUME:
7451 +                       vchiq_log_trace(vchiq_core_log_level,
7452 +                               "%d: prs RESUME@%x,%x",
7453 +                               state->id, (unsigned int)header, size);
7454 +                       /* Release the slot mutex */
7455 +                       mutex_unlock(&state->slot_mutex);
7456 +                       if (state->is_master)
7457 +                               resume_bulks(state);
7458 +                       vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7459 +                       vchiq_platform_resumed(state);
7460 +                       break;
7461 +
7462 +               case VCHIQ_MSG_REMOTE_USE:
7463 +                       vchiq_on_remote_use(state);
7464 +                       break;
7465 +               case VCHIQ_MSG_REMOTE_RELEASE:
7466 +                       vchiq_on_remote_release(state);
7467 +                       break;
7468 +               case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7469 +                       vchiq_on_remote_use_active(state);
7470 +                       break;
7471 +
7472 +               default:
7473 +                       vchiq_log_error(vchiq_core_log_level,
7474 +                               "%d: prs invalid msgid %x@%x,%x",
7475 +                               state->id, msgid, (unsigned int)header, size);
7476 +                       WARN(1, "invalid message\n");
7477 +                       break;
7478 +               }
7479 +
7480 +skip_message:
7481 +               if (service) {
7482 +                       unlock_service(service);
7483 +                       service = NULL;
7484 +               }
7485 +
7486 +               state->rx_pos += calc_stride(size);
7487 +
7488 +               DEBUG_TRACE(PARSE_LINE);
7489 +               /* Perform some housekeeping when the end of the slot is
7490 +               ** reached. */
7491 +               if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7492 +                       /* Remove the extra reference count. */
7493 +                       release_slot(state, state->rx_info, NULL, NULL);
7494 +                       state->rx_data = NULL;
7495 +               }
7496 +       }
7497 +
7498 +bail_not_ready:
7499 +       if (service)
7500 +               unlock_service(service);
7501 +}
7502 +
7503 +/* Called by the slot handler thread */
7504 +static int
7505 +slot_handler_func(void *v)
7506 +{
7507 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7508 +       VCHIQ_SHARED_STATE_T *local = state->local;
7509 +       DEBUG_INITIALISE(local)
7510 +
7511 +       while (1) {
7512 +               DEBUG_COUNT(SLOT_HANDLER_COUNT);
7513 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
7514 +               remote_event_wait(&local->trigger);
7515 +
7516 +               rmb();
7517 +
7518 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
7519 +               if (state->poll_needed) {
7520 +                       /* Check if we need to suspend - may change our
7521 +                        * conn_state */
7522 +                       vchiq_platform_check_suspend(state);
7523 +
7524 +                       state->poll_needed = 0;
7525 +
7526 +                       /* Handle service polling and other rare conditions here
7527 +                       ** out of the mainline code */
7528 +                       switch (state->conn_state) {
7529 +                       case VCHIQ_CONNSTATE_CONNECTED:
7530 +                               /* Poll the services as requested */
7531 +                               poll_services(state);
7532 +                               break;
7533 +
7534 +                       case VCHIQ_CONNSTATE_PAUSING:
7535 +                               if (state->is_master)
7536 +                                       pause_bulks(state);
7537 +                               if (queue_message(state, NULL,
7538 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7539 +                                       NULL, 0, 0, 0) != VCHIQ_RETRY) {
7540 +                                       vchiq_set_conn_state(state,
7541 +                                               VCHIQ_CONNSTATE_PAUSE_SENT);
7542 +                               } else {
7543 +                                       if (state->is_master)
7544 +                                               resume_bulks(state);
7545 +                                       /* Retry later */
7546 +                                       state->poll_needed = 1;
7547 +                               }
7548 +                               break;
7549 +
7550 +                       case VCHIQ_CONNSTATE_PAUSED:
7551 +                               vchiq_platform_resume(state);
7552 +                               break;
7553 +
7554 +                       case VCHIQ_CONNSTATE_RESUMING:
7555 +                               if (queue_message(state, NULL,
7556 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7557 +                                       NULL, 0, 0, 0) != VCHIQ_RETRY) {
7558 +                                       if (state->is_master)
7559 +                                               resume_bulks(state);
7560 +                                       vchiq_set_conn_state(state,
7561 +                                               VCHIQ_CONNSTATE_CONNECTED);
7562 +                                       vchiq_platform_resumed(state);
7563 +                               } else {
7564 +                                       /* This should really be impossible,
7565 +                                       ** since the PAUSE should have flushed
7566 +                                       ** through outstanding messages. */
7567 +                                       vchiq_log_error(vchiq_core_log_level,
7568 +                                               "Failed to send RESUME "
7569 +                                               "message");
7570 +                                       BUG();
7571 +                               }
7572 +                               break;
7573 +
7574 +                       case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7575 +                       case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7576 +                               vchiq_platform_handle_timeout(state);
7577 +                               break;
7578 +                       default:
7579 +                               break;
7580 +                       }
7581 +
7582 +
7583 +               }
7584 +
7585 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
7586 +               parse_rx_slots(state);
7587 +       }
7588 +       return 0;
7589 +}
7590 +
7591 +
7592 +/* Called by the recycle thread */
7593 +static int
7594 +recycle_func(void *v)
7595 +{
7596 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7597 +       VCHIQ_SHARED_STATE_T *local = state->local;
7598 +
7599 +       while (1) {
7600 +               remote_event_wait(&local->recycle);
7601 +
7602 +               process_free_queue(state);
7603 +       }
7604 +       return 0;
7605 +}
7606 +
7607 +
7608 +/* Called by the sync thread */
7609 +static int
7610 +sync_func(void *v)
7611 +{
7612 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7613 +       VCHIQ_SHARED_STATE_T *local = state->local;
7614 +       VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7615 +               state->remote->slot_sync);
7616 +
7617 +       while (1) {
7618 +               VCHIQ_SERVICE_T *service;
7619 +               int msgid, size;
7620 +               int type;
7621 +               unsigned int localport, remoteport;
7622 +
7623 +               remote_event_wait(&local->sync_trigger);
7624 +
7625 +               rmb();
7626 +
7627 +               msgid = header->msgid;
7628 +               size = header->size;
7629 +               type = VCHIQ_MSG_TYPE(msgid);
7630 +               localport = VCHIQ_MSG_DSTPORT(msgid);
7631 +               remoteport = VCHIQ_MSG_SRCPORT(msgid);
7632 +
7633 +               service = find_service_by_port(state, localport);
7634 +
7635 +               if (!service) {
7636 +                       vchiq_log_error(vchiq_sync_log_level,
7637 +                               "%d: sf %s@%x (%d->%d) - "
7638 +                               "invalid/closed service %d",
7639 +                               state->id, msg_type_str(type),
7640 +                               (unsigned int)header,
7641 +                               remoteport, localport, localport);
7642 +                       release_message_sync(state, header);
7643 +                       continue;
7644 +               }
7645 +
7646 +               if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7647 +                       int svc_fourcc;
7648 +
7649 +                       svc_fourcc = service
7650 +                               ? service->base.fourcc
7651 +                               : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7652 +                       vchiq_log_trace(vchiq_sync_log_level,
7653 +                               "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7654 +                               msg_type_str(type),
7655 +                               VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7656 +                               remoteport, localport, size);
7657 +                       if (size > 0)
7658 +                               vchiq_log_dump_mem("Rcvd", 0, header->data,
7659 +                                       min(64, size));
7660 +               }
7661 +
7662 +               switch (type) {
7663 +               case VCHIQ_MSG_OPENACK:
7664 +                       if (size >= sizeof(struct vchiq_openack_payload)) {
7665 +                               const struct vchiq_openack_payload *payload =
7666 +                                       (struct vchiq_openack_payload *)
7667 +                                       header->data;
7668 +                               service->peer_version = payload->version;
7669 +                       }
7670 +                       vchiq_log_info(vchiq_sync_log_level,
7671 +                               "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7672 +                               state->id, (unsigned int)header, size,
7673 +                               remoteport, localport, service->peer_version);
7674 +                       if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7675 +                               service->remoteport = remoteport;
7676 +                               vchiq_set_service_state(service,
7677 +                                       VCHIQ_SRVSTATE_OPENSYNC);
7678 +                               up(&service->remove_event);
7679 +                       }
7680 +                       release_message_sync(state, header);
7681 +                       break;
7682 +
7683 +               case VCHIQ_MSG_DATA:
7684 +                       vchiq_log_trace(vchiq_sync_log_level,
7685 +                               "%d: sf DATA@%x,%x (%d->%d)",
7686 +                               state->id, (unsigned int)header, size,
7687 +                               remoteport, localport);
7688 +
7689 +                       if ((service->remoteport == remoteport) &&
7690 +                               (service->srvstate ==
7691 +                               VCHIQ_SRVSTATE_OPENSYNC)) {
7692 +                               if (make_service_callback(service,
7693 +                                       VCHIQ_MESSAGE_AVAILABLE, header,
7694 +                                       NULL) == VCHIQ_RETRY)
7695 +                                       vchiq_log_error(vchiq_sync_log_level,
7696 +                                               "synchronous callback to "
7697 +                                               "service %d returns "
7698 +                                               "VCHIQ_RETRY",
7699 +                                               localport);
7700 +                       }
7701 +                       break;
7702 +
7703 +               default:
7704 +                       vchiq_log_error(vchiq_sync_log_level,
7705 +                               "%d: sf unexpected msgid %x@%x,%x",
7706 +                               state->id, msgid, (unsigned int)header, size);
7707 +                       release_message_sync(state, header);
7708 +                       break;
7709 +               }
7710 +
7711 +               unlock_service(service);
7712 +       }
7713 +
7714 +       return 0;
7715 +}
7716 +
7717 +
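+/* The bulk-handling code above and below suggests each VCHIQ_BULK_QUEUE_T
+** is a small ring of VCHIQ_NUM_SERVICE_BULKS entries addressed by
+** free-running counters (BULK_INDEX() masks a counter into the bulks[]
+** array). local_insert appears to count bulks queued by this side,
+** remote_insert the completions reported by the peer in BULK_*_DONE
+** messages, process the completions delivered to the local client, and
+** remove the entries released for reuse. */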
7718 +static void
7719 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7720 +{
7721 +       queue->local_insert = 0;
7722 +       queue->remote_insert = 0;
7723 +       queue->process = 0;
7724 +       queue->remote_notify = 0;
7725 +       queue->remove = 0;
7726 +}
7727 +
7728 +
7729 +inline const char *
7730 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7731 +{
7732 +       return conn_state_names[conn_state];
7733 +}
7734 +
7735 +
7736 +VCHIQ_SLOT_ZERO_T *
7737 +vchiq_init_slots(void *mem_base, int mem_size)
7738 +{
7739 +       int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7740 +       VCHIQ_SLOT_ZERO_T *slot_zero =
7741 +               (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7742 +       int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7743 +       int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7744 +
7745 +       /* Ensure there is enough memory to run an absolutely minimal system */
7746 +       num_slots -= first_data_slot;
7747 +
7748 +       if (num_slots < 4) {
7749 +               vchiq_log_error(vchiq_core_log_level,
7750 +                       "vchiq_init_slots - insufficient memory %x bytes",
7751 +                       mem_size);
7752 +               return NULL;
7753 +       }
7754 +
7755 +       memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7756 +
7757 +       slot_zero->magic = VCHIQ_MAGIC;
7758 +       slot_zero->version = VCHIQ_VERSION;
7759 +       slot_zero->version_min = VCHIQ_VERSION_MIN;
7760 +       slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7761 +       slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7762 +       slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7763 +       slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7764 +
7765 +       slot_zero->master.slot_sync = first_data_slot;
7766 +       slot_zero->master.slot_first = first_data_slot + 1;
7767 +       slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
7768 +       slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
7769 +       slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
7770 +       slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
7771 +
7772 +       return slot_zero;
7773 +}
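+
+/* Worked example, with figures assumed purely for illustration: given a
+** slot-aligned 64 KiB region and a 4 KiB VCHIQ_SLOT_SIZE, num_slots starts
+** at 16. If slot zero occupies a single slot (VCHIQ_SLOT_ZERO_SLOTS == 1),
+** 15 data slots remain; the master is given slot 1 as its sync slot and
+** slots 2..7 for data, and the slave is given slot 8 as its sync slot and
+** slots 9..15 for data. */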
7774 +
7775 +VCHIQ_STATUS_T
7776 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
7777 +                int is_master)
7778 +{
7779 +       VCHIQ_SHARED_STATE_T *local;
7780 +       VCHIQ_SHARED_STATE_T *remote;
7781 +       VCHIQ_STATUS_T status;
7782 +       char threadname[10];
7783 +       static int id;
7784 +       int i;
7785 +
7786 +       vchiq_log_warning(vchiq_core_log_level,
7787 +               "%s: slot_zero = 0x%08lx, is_master = %d",
7788 +               __func__, (unsigned long)slot_zero, is_master);
7789 +
7790 +       /* Check the input configuration */
7791 +
7792 +       if (slot_zero->magic != VCHIQ_MAGIC) {
7793 +               vchiq_loud_error_header();
7794 +               vchiq_loud_error("Invalid VCHIQ magic value found.");
7795 +               vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
7796 +                       (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
7797 +               vchiq_loud_error_footer();
7798 +               return VCHIQ_ERROR;
7799 +       }
7800 +
7801 +       if (slot_zero->version < VCHIQ_VERSION_MIN) {
7802 +               vchiq_loud_error_header();
7803 +               vchiq_loud_error("Incompatible VCHIQ versions found.");
7804 +               vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
7805 +                       "(minimum %d)",
7806 +                       (unsigned int)slot_zero, slot_zero->version,
7807 +                       VCHIQ_VERSION_MIN);
7808 +               vchiq_loud_error("Restart with a newer VideoCore image.");
7809 +               vchiq_loud_error_footer();
7810 +               return VCHIQ_ERROR;
7811 +       }
7812 +
7813 +       if (VCHIQ_VERSION < slot_zero->version_min) {
7814 +               vchiq_loud_error_header();
7815 +               vchiq_loud_error("Incompatible VCHIQ versions found.");
7816 +               vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
7817 +                       "minimum %d)",
7818 +                       (unsigned int)slot_zero, VCHIQ_VERSION,
7819 +                       slot_zero->version_min);
7820 +               vchiq_loud_error("Restart with a newer kernel.");
7821 +               vchiq_loud_error_footer();
7822 +               return VCHIQ_ERROR;
7823 +       }
7824 +
7825 +       if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
7826 +                (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
7827 +                (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
7828 +                (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
7829 +               vchiq_loud_error_header();
7830 +               if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
7831 +                       vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
7832 +                               "(expected %x)",
7833 +                               (unsigned int)slot_zero,
7834 +                               slot_zero->slot_zero_size,
7835 +                               sizeof(VCHIQ_SLOT_ZERO_T));
7836 +               if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
7837 +                       vchiq_loud_error("slot_zero=%x: slot_size=%d "
7838 +                               "(expected %d)",
7839 +                               (unsigned int)slot_zero, slot_zero->slot_size,
7840 +                               VCHIQ_SLOT_SIZE);
7841 +               if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
7842 +                       vchiq_loud_error("slot_zero=%x: max_slots=%d "
7843 +                               "(expected %d)",
7844 +                               (unsigned int)slot_zero, slot_zero->max_slots,
7845 +                               VCHIQ_MAX_SLOTS);
7846 +               if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
7847 +                       vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
7848 +                               "(expected %d)",
7849 +                               (unsigned int)slot_zero,
7850 +                               slot_zero->max_slots_per_side,
7851 +                               VCHIQ_MAX_SLOTS_PER_SIDE);
7852 +               vchiq_loud_error_footer();
7853 +               return VCHIQ_ERROR;
7854 +       }
7855 +
7856 +       if (is_master) {
7857 +               local = &slot_zero->master;
7858 +               remote = &slot_zero->slave;
7859 +       } else {
7860 +               local = &slot_zero->slave;
7861 +               remote = &slot_zero->master;
7862 +       }
7863 +
7864 +       if (local->initialised) {
7865 +               vchiq_loud_error_header();
7866 +               if (remote->initialised)
7867 +                       vchiq_loud_error("local state has already been "
7868 +                               "initialised");
7869 +               else
7870 +                       vchiq_loud_error("master/slave mismatch - two %ss",
7871 +                               is_master ? "master" : "slave");
7872 +               vchiq_loud_error_footer();
7873 +               return VCHIQ_ERROR;
7874 +       }
7875 +
7876 +       memset(state, 0, sizeof(VCHIQ_STATE_T));
7877 +
7878 +       state->id = id++;
7879 +       state->is_master = is_master;
7880 +
7881 +       /*
7882 +               initialize shared state pointers
7883 +        */
7884 +
7885 +       state->local = local;
7886 +       state->remote = remote;
7887 +       state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
7888 +
7889 +       /*
7890 +               initialize events and mutexes
7891 +        */
7892 +
7893 +       sema_init(&state->connect, 0);
7894 +       mutex_init(&state->mutex);
7895 +       sema_init(&state->trigger_event, 0);
7896 +       sema_init(&state->recycle_event, 0);
7897 +       sema_init(&state->sync_trigger_event, 0);
7898 +       sema_init(&state->sync_release_event, 0);
7899 +
7900 +       mutex_init(&state->slot_mutex);
7901 +       mutex_init(&state->recycle_mutex);
7902 +       mutex_init(&state->sync_mutex);
7903 +       mutex_init(&state->bulk_transfer_mutex);
7904 +
7905 +       sema_init(&state->slot_available_event, 0);
7906 +       sema_init(&state->slot_remove_event, 0);
7907 +       sema_init(&state->data_quota_event, 0);
7908 +
7909 +       state->slot_queue_available = 0;
7910 +
7911 +       for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
7912 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
7913 +                       &state->service_quotas[i];
7914 +               sema_init(&service_quota->quota_event, 0);
7915 +       }
7916 +
7917 +       for (i = local->slot_first; i <= local->slot_last; i++) {
7918 +               local->slot_queue[state->slot_queue_available++] = i;
7919 +               up(&state->slot_available_event);
7920 +       }
7921 +
7922 +       state->default_slot_quota = state->slot_queue_available/2;
7923 +       state->default_message_quota =
7924 +               min((unsigned short)(state->default_slot_quota * 256),
7925 +               (unsigned short)~0);
7926 +
7927 +       state->previous_data_index = -1;
7928 +       state->data_use_count = 0;
7929 +       state->data_quota = state->slot_queue_available - 1;
7930 +
7931 +       local->trigger.event = &state->trigger_event;
7932 +       remote_event_create(&local->trigger);
7933 +       local->tx_pos = 0;
7934 +
7935 +       local->recycle.event = &state->recycle_event;
7936 +       remote_event_create(&local->recycle);
7937 +       local->slot_queue_recycle = state->slot_queue_available;
7938 +
7939 +       local->sync_trigger.event = &state->sync_trigger_event;
7940 +       remote_event_create(&local->sync_trigger);
7941 +
7942 +       local->sync_release.event = &state->sync_release_event;
7943 +       remote_event_create(&local->sync_release);
7944 +
7945 +       /* At start-of-day, the slot is empty and available */
7946 +       ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
7947 +               = VCHIQ_MSGID_PADDING;
7948 +       remote_event_signal_local(&local->sync_release);
7949 +
7950 +       local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
7951 +
7952 +       status = vchiq_platform_init_state(state);
7953 +
7954 +       /*
7955 +               bring up slot handler thread
7956 +        */
7957 +       snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
7958 +       state->slot_handler_thread = kthread_create(&slot_handler_func,
7959 +               (void *)state,
7960 +               threadname);
7961 +
7962 +       if (state->slot_handler_thread == NULL) {
7963 +               vchiq_loud_error_header();
7964 +               vchiq_loud_error("couldn't create thread %s", threadname);
7965 +               vchiq_loud_error_footer();
7966 +               return VCHIQ_ERROR;
7967 +       }
7968 +       set_user_nice(state->slot_handler_thread, -19);
7969 +       wake_up_process(state->slot_handler_thread);
7970 +
7971 +       snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
7972 +       state->recycle_thread = kthread_create(&recycle_func,
7973 +               (void *)state,
7974 +               threadname);
7975 +       if (state->recycle_thread == NULL) {
7976 +               vchiq_loud_error_header();
7977 +               vchiq_loud_error("couldn't create thread %s", threadname);
7978 +               vchiq_loud_error_footer();
7979 +               return VCHIQ_ERROR;
7980 +       }
7981 +       set_user_nice(state->recycle_thread, -19);
7982 +       wake_up_process(state->recycle_thread);
7983 +
7984 +       snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
7985 +       state->sync_thread = kthread_create(&sync_func,
7986 +               (void *)state,
7987 +               threadname);
7988 +       if (state->sync_thread == NULL) {
7989 +               vchiq_loud_error_header();
7990 +               vchiq_loud_error("couldn't create thread %s", threadname);
7991 +               vchiq_loud_error_footer();
7992 +               return VCHIQ_ERROR;
7993 +       }
7994 +       set_user_nice(state->sync_thread, -20);
7995 +       wake_up_process(state->sync_thread);
7996 +
7997 +       BUG_ON(state->id >= VCHIQ_MAX_STATES);
7998 +       vchiq_states[state->id] = state;
7999 +
8000 +       /* Indicate readiness to the other side */
8001 +       local->initialised = 1;
8002 +
8003 +       return status;
8004 +}
8005 +
8006 +/* Called from an application thread when a client or server service is created. */
8007 +VCHIQ_SERVICE_T *
8008 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
8009 +       const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
8010 +       VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
8011 +{
8012 +       VCHIQ_SERVICE_T *service;
8013 +
8014 +       service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
8015 +       if (service) {
8016 +               service->base.fourcc   = params->fourcc;
8017 +               service->base.callback = params->callback;
8018 +               service->base.userdata = params->userdata;
8019 +               service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
8020 +               service->ref_count     = 1;
8021 +               service->srvstate      = VCHIQ_SRVSTATE_FREE;
8022 +               service->userdata_term = userdata_term;
8023 +               service->localport     = VCHIQ_PORT_FREE;
8024 +               service->remoteport    = VCHIQ_PORT_FREE;
8025 +
8026 +               service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
8027 +                       VCHIQ_FOURCC_INVALID : params->fourcc;
8028 +               service->client_id     = 0;
8029 +               service->auto_close    = 1;
8030 +               service->sync          = 0;
8031 +               service->closing       = 0;
8032 +               atomic_set(&service->poll_flags, 0);
8033 +               service->version       = params->version;
8034 +               service->version_min   = params->version_min;
8035 +               service->state         = state;
8036 +               service->instance      = instance;
8037 +               service->service_use_count = 0;
8038 +               init_bulk_queue(&service->bulk_tx);
8039 +               init_bulk_queue(&service->bulk_rx);
8040 +               sema_init(&service->remove_event, 0);
8041 +               sema_init(&service->bulk_remove_event, 0);
8042 +               mutex_init(&service->bulk_mutex);
8043 +               memset(&service->stats, 0, sizeof(service->stats));
8044 +       } else {
8045 +               vchiq_log_error(vchiq_core_log_level,
8046 +                       "Out of memory");
8047 +       }
8048 +
8049 +       if (service) {
8050 +               VCHIQ_SERVICE_T **pservice = NULL;
8051 +               int i;
8052 +
8053 +               /* Although it is perfectly possible to use service_spinlock
8054 +               ** to protect the creation of services, it is overkill as it
8055 +               ** disables interrupts while the array is searched.
8056 +               ** The only danger is of another thread trying to create a
8057 +               ** service - service deletion is safe.
8058 +               ** Therefore it is preferable to use state->mutex which,
8059 +               ** although slower to claim, doesn't block interrupts while
8060 +               ** it is held.
8061 +               */
8062 +
8063 +               mutex_lock(&state->mutex);
8064 +
8065 +               /* Prepare to use a previously unused service */
8066 +               if (state->unused_service < VCHIQ_MAX_SERVICES)
8067 +                       pservice = &state->services[state->unused_service];
8068 +
8069 +               if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8070 +                       for (i = 0; i < state->unused_service; i++) {
8071 +                               VCHIQ_SERVICE_T *srv = state->services[i];
8072 +                               if (!srv) {
8073 +                                       pservice = &state->services[i];
8074 +                                       break;
8075 +                               }
8076 +                       }
8077 +               } else {
8078 +                       for (i = (state->unused_service - 1); i >= 0; i--) {
8079 +                               VCHIQ_SERVICE_T *srv = state->services[i];
8080 +                               if (!srv)
8081 +                                       pservice = &state->services[i];
8082 +                               else if ((srv->public_fourcc == params->fourcc)
8083 +                                       && ((srv->instance != instance) ||
8084 +                                       (srv->base.callback !=
8085 +                                       params->callback))) {
8086 +                                       /* Another server is using this fourcc
8087 +                                       ** with a different instance/callback. */
8088 +                                       pservice = NULL;
8089 +                                       break;
8090 +                               }
8091 +                       }
8092 +               }
8093 +
8094 +               if (pservice) {
8095 +                       service->localport = (pservice - state->services);
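+                       /* The handle apparently packs an allocation sequence
+                       ** number (handle_seq, advanced by
+                       ** VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES each time)
+                       ** together with the state id and local port, so a
+                       ** recycled port produces a different handle value. */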
8096 +                       if (!handle_seq)
8097 +                               handle_seq = VCHIQ_MAX_STATES *
8098 +                                        VCHIQ_MAX_SERVICES;
8099 +                       service->handle = handle_seq |
8100 +                               (state->id * VCHIQ_MAX_SERVICES) |
8101 +                               service->localport;
8102 +                       handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8103 +                       *pservice = service;
8104 +                       if (pservice == &state->services[state->unused_service])
8105 +                               state->unused_service++;
8106 +               }
8107 +
8108 +               mutex_unlock(&state->mutex);
8109 +
8110 +               if (!pservice) {
8111 +                       kfree(service);
8112 +                       service = NULL;
8113 +               }
8114 +       }
8115 +
8116 +       if (service) {
8117 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
8118 +                       &state->service_quotas[service->localport];
8119 +               service_quota->slot_quota = state->default_slot_quota;
8120 +               service_quota->message_quota = state->default_message_quota;
8121 +               if (service_quota->slot_use_count == 0)
8122 +                       service_quota->previous_tx_index =
8123 +                               SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8124 +                               - 1;
8125 +
8126 +               /* Bring this service online */
8127 +               vchiq_set_service_state(service, srvstate);
8128 +
8129 +               vchiq_log_info(vchiq_core_msg_log_level,
8130 +                       "%s Service %c%c%c%c SrcPort:%d",
8131 +                       (srvstate == VCHIQ_SRVSTATE_OPENING)
8132 +                       ? "Open" : "Add",
8133 +                       VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8134 +                       service->localport);
8135 +       }
8136 +
8137 +       /* Don't unlock the service - leave it with a ref_count of 1. */
8138 +
8139 +       return service;
8140 +}
8141 +
8142 +VCHIQ_STATUS_T
8143 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8144 +{
8145 +       struct vchiq_open_payload payload = {
8146 +               service->base.fourcc,
8147 +               client_id,
8148 +               service->version,
8149 +               service->version_min
8150 +       };
8151 +       VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8152 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8153 +
8154 +       service->client_id = client_id;
8155 +       vchiq_use_service_internal(service);
8156 +       status = queue_message(service->state, NULL,
8157 +               VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8158 +               &body, 1, sizeof(payload), 1);
8159 +       if (status == VCHIQ_SUCCESS) {
8160 +               if (down_interruptible(&service->remove_event) != 0) {
8161 +                       status = VCHIQ_RETRY;
8162 +                       vchiq_release_service_internal(service);
8163 +               } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8164 +                       (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8165 +                       if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8166 +                               vchiq_log_error(vchiq_core_log_level,
8167 +                                       "%d: osi - srvstate = %s (ref %d)",
8168 +                                       service->state->id,
8169 +                                       srvstate_names[service->srvstate],
8170 +                                       service->ref_count);
8171 +                       status = VCHIQ_ERROR;
8172 +                       VCHIQ_SERVICE_STATS_INC(service, error_count);
8173 +                       vchiq_release_service_internal(service);
8174 +               }
8175 +       }
8176 +       return status;
8177 +}
8178 +
8179 +static void
8180 +release_service_messages(VCHIQ_SERVICE_T *service)
8181 +{
8182 +       VCHIQ_STATE_T *state = service->state;
8183 +       int slot_last = state->remote->slot_last;
8184 +       int i;
8185 +
8186 +       /* Release any claimed messages */
8187 +       for (i = state->remote->slot_first; i <= slot_last; i++) {
8188 +               VCHIQ_SLOT_INFO_T *slot_info =
8189 +                       SLOT_INFO_FROM_INDEX(state, i);
8190 +               if (slot_info->release_count != slot_info->use_count) {
8191 +                       char *data =
8192 +                               (char *)SLOT_DATA_FROM_INDEX(state, i);
8193 +                       unsigned int pos, end;
8194 +
8195 +                       end = VCHIQ_SLOT_SIZE;
8196 +                       if (data == state->rx_data)
8197 +                               /* This buffer is still being read from - stop
8198 +                               ** at the current read position */
8199 +                               end = state->rx_pos & VCHIQ_SLOT_MASK;
8200 +
8201 +                       pos = 0;
8202 +
8203 +                       while (pos < end) {
8204 +                               VCHIQ_HEADER_T *header =
8205 +                                       (VCHIQ_HEADER_T *)(data + pos);
8206 +                               int msgid = header->msgid;
8207 +                               int port = VCHIQ_MSG_DSTPORT(msgid);
8208 +                               if ((port == service->localport) &&
8209 +                                       (msgid & VCHIQ_MSGID_CLAIMED)) {
8210 +                                       vchiq_log_info(vchiq_core_log_level,
8211 +                                               "  fsi - hdr %x",
8212 +                                               (unsigned int)header);
8213 +                                       release_slot(state, slot_info, header,
8214 +                                               NULL);
8215 +                               }
8216 +                               pos += calc_stride(header->size);
8217 +                               if (pos > VCHIQ_SLOT_SIZE) {
8218 +                                       vchiq_log_error(vchiq_core_log_level,
8219 +                                               "fsi - pos %x: header %x, "
8220 +                                               "msgid %x, header->msgid %x, "
8221 +                                               "header->size %x",
8222 +                                               pos, (unsigned int)header,
8223 +                                               msgid, header->msgid,
8224 +                                               header->size);
8225 +                                       WARN(1, "invalid slot position\n");
8226 +                               }
8227 +                       }
8228 +               }
8229 +       }
8230 +}
8231 +
8232 +static int
8233 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8234 +{
8235 +       VCHIQ_STATUS_T status;
8236 +
8237 +       /* Abort any outstanding bulk transfers */
8238 +       if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8239 +               return 0;
8240 +       abort_outstanding_bulks(service, &service->bulk_tx);
8241 +       abort_outstanding_bulks(service, &service->bulk_rx);
8242 +       mutex_unlock(&service->bulk_mutex);
8243 +
8244 +       status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8245 +       if (status == VCHIQ_SUCCESS)
8246 +               status = notify_bulks(service, &service->bulk_rx,
8247 +                       0/*!retry_poll*/);
8248 +       return (status == VCHIQ_SUCCESS);
8249 +}
8250 +
8251 +static VCHIQ_STATUS_T
8252 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8253 +{
8254 +       VCHIQ_STATUS_T status;
8255 +       int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8256 +       int newstate;
8257 +
8258 +       switch (service->srvstate) {
8259 +       case VCHIQ_SRVSTATE_OPEN:
8260 +       case VCHIQ_SRVSTATE_CLOSESENT:
8261 +       case VCHIQ_SRVSTATE_CLOSERECVD:
8262 +               if (is_server) {
8263 +                       if (service->auto_close) {
8264 +                               service->client_id = 0;
8265 +                               service->remoteport = VCHIQ_PORT_FREE;
8266 +                               newstate = VCHIQ_SRVSTATE_LISTENING;
8267 +                       } else
8268 +                               newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8269 +               } else
8270 +                       newstate = VCHIQ_SRVSTATE_CLOSED;
8271 +               vchiq_set_service_state(service, newstate);
8272 +               break;
8273 +       case VCHIQ_SRVSTATE_LISTENING:
8274 +               break;
8275 +       default:
8276 +               vchiq_log_error(vchiq_core_log_level,
8277 +                       "close_service_complete(%x) called in state %s",
8278 +                       service->handle, srvstate_names[service->srvstate]);
8279 +               WARN(1, "close_service_complete in unexpected state\n");
8280 +               return VCHIQ_ERROR;
8281 +       }
8282 +
8283 +       status = make_service_callback(service,
8284 +               VCHIQ_SERVICE_CLOSED, NULL, NULL);
8285 +
8286 +       if (status != VCHIQ_RETRY) {
8287 +               int uc = service->service_use_count;
8288 +               int i;
8289 +               /* Complete the close process */
8290 +               for (i = 0; i < uc; i++)
8291 +                       /* cater for cases where close is forced and the
8292 +                       ** client may not close all its handles */
8293 +                       vchiq_release_service_internal(service);
8294 +
8295 +               service->client_id = 0;
8296 +               service->remoteport = VCHIQ_PORT_FREE;
8297 +
8298 +               if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8299 +                       vchiq_free_service_internal(service);
8300 +               else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8301 +                       if (is_server)
8302 +                               service->closing = 0;
8303 +
8304 +                       up(&service->remove_event);
8305 +               }
8306 +       } else
8307 +               vchiq_set_service_state(service, failstate);
8308 +
8309 +       return status;
8310 +}
8311 +
8312 +/* Called by the slot handler */
8313 +VCHIQ_STATUS_T
8314 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8315 +{
8316 +       VCHIQ_STATE_T *state = service->state;
8317 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8318 +       int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8319 +
8320 +       vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8321 +               service->state->id, service->localport, close_recvd,
8322 +               srvstate_names[service->srvstate]);
8323 +
8324 +       switch (service->srvstate) {
8325 +       case VCHIQ_SRVSTATE_CLOSED:
8326 +       case VCHIQ_SRVSTATE_HIDDEN:
8327 +       case VCHIQ_SRVSTATE_LISTENING:
8328 +       case VCHIQ_SRVSTATE_CLOSEWAIT:
8329 +               if (close_recvd)
8330 +                       vchiq_log_error(vchiq_core_log_level,
8331 +                               "vchiq_close_service_internal(1) called "
8332 +                               "in state %s",
8333 +                               srvstate_names[service->srvstate]);
8334 +               else if (is_server) {
8335 +                       if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8336 +                               status = VCHIQ_ERROR;
8337 +                       } else {
8338 +                               service->client_id = 0;
8339 +                               service->remoteport = VCHIQ_PORT_FREE;
8340 +                               if (service->srvstate ==
8341 +                                       VCHIQ_SRVSTATE_CLOSEWAIT)
8342 +                                       vchiq_set_service_state(service,
8343 +                                               VCHIQ_SRVSTATE_LISTENING);
8344 +                       }
8345 +                       up(&service->remove_event);
8346 +               } else
8347 +                       vchiq_free_service_internal(service);
8348 +               break;
8349 +       case VCHIQ_SRVSTATE_OPENING:
8350 +               if (close_recvd) {
8351 +                       /* The open was rejected - tell the user */
8352 +                       vchiq_set_service_state(service,
8353 +                               VCHIQ_SRVSTATE_CLOSEWAIT);
8354 +                       up(&service->remove_event);
8355 +               } else {
8356 +                       /* Shutdown mid-open - let the other side know */
8357 +                       status = queue_message(state, service,
8358 +                               VCHIQ_MAKE_MSG
8359 +                               (VCHIQ_MSG_CLOSE,
8360 +                               service->localport,
8361 +                               VCHIQ_MSG_DSTPORT(service->remoteport)),
8362 +                               NULL, 0, 0, 0);
8363 +               }
8364 +               break;
8365 +
8366 +       case VCHIQ_SRVSTATE_OPENSYNC:
8367 +               mutex_lock(&state->sync_mutex);
8368 +               /* Drop through */
8369 +
8370 +       case VCHIQ_SRVSTATE_OPEN:
8371 +               if (state->is_master || close_recvd) {
8372 +                       if (!do_abort_bulks(service))
8373 +                               status = VCHIQ_RETRY;
8374 +               }
8375 +
8376 +               release_service_messages(service);
8377 +
8378 +               if (status == VCHIQ_SUCCESS)
8379 +                       status = queue_message(state, service,
8380 +                               VCHIQ_MAKE_MSG
8381 +                               (VCHIQ_MSG_CLOSE,
8382 +                               service->localport,
8383 +                               VCHIQ_MSG_DSTPORT(service->remoteport)),
8384 +                               NULL, 0, 0, 0);
8385 +
8386 +               if (status == VCHIQ_SUCCESS) {
8387 +                       if (!close_recvd)
8388 +                               break;
8389 +               } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8390 +                       mutex_unlock(&state->sync_mutex);
8391 +                       break;
8392 +               } else
8393 +                       break;
8394 +
8395 +               status = close_service_complete(service,
8396 +                               VCHIQ_SRVSTATE_CLOSERECVD);
8397 +               break;
8398 +
8399 +       case VCHIQ_SRVSTATE_CLOSESENT:
8400 +               if (!close_recvd)
8401 +                       /* This happens when a process is killed mid-close */
8402 +                       break;
8403 +
8404 +               if (!state->is_master) {
8405 +                       if (!do_abort_bulks(service)) {
8406 +                               status = VCHIQ_RETRY;
8407 +                               break;
8408 +                       }
8409 +               }
8410 +
8411 +               if (status == VCHIQ_SUCCESS)
8412 +                       status = close_service_complete(service,
8413 +                               VCHIQ_SRVSTATE_CLOSERECVD);
8414 +               break;
8415 +
8416 +       case VCHIQ_SRVSTATE_CLOSERECVD:
8417 +               if (!close_recvd && is_server)
8418 +                       /* Force into LISTENING mode */
8419 +                       vchiq_set_service_state(service,
8420 +                               VCHIQ_SRVSTATE_LISTENING);
8421 +               status = close_service_complete(service,
8422 +                       VCHIQ_SRVSTATE_CLOSERECVD);
8423 +               break;
8424 +
8425 +       default:
8426 +               vchiq_log_error(vchiq_core_log_level,
8427 +                       "vchiq_close_service_internal(%d) called in state %s",
8428 +                       close_recvd, srvstate_names[service->srvstate]);
8429 +               break;
8430 +       }
8431 +
8432 +       return status;
8433 +}
8434 +
8435 +/* Called from the application process upon process death */
8436 +void
8437 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8438 +{
8439 +       VCHIQ_STATE_T *state = service->state;
8440 +
8441 +       vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8442 +               state->id, service->localport, service->remoteport);
8443 +
8444 +       mark_service_closing(service);
8445 +
8446 +       /* Mark the service for removal by the slot handler */
8447 +       request_poll(state, service, VCHIQ_POLL_REMOVE);
8448 +}
8449 +
8450 +/* Called from the slot handler */
8451 +void
8452 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8453 +{
8454 +       VCHIQ_STATE_T *state = service->state;
8455 +
8456 +       vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8457 +               state->id, service->localport);
8458 +
8459 +       switch (service->srvstate) {
8460 +       case VCHIQ_SRVSTATE_OPENING:
8461 +       case VCHIQ_SRVSTATE_CLOSED:
8462 +       case VCHIQ_SRVSTATE_HIDDEN:
8463 +       case VCHIQ_SRVSTATE_LISTENING:
8464 +       case VCHIQ_SRVSTATE_CLOSEWAIT:
8465 +               break;
8466 +       default:
8467 +               vchiq_log_error(vchiq_core_log_level,
8468 +                       "%d: fsi - (%d) in state %s",
8469 +                       state->id, service->localport,
8470 +                       srvstate_names[service->srvstate]);
8471 +               return;
8472 +       }
8473 +
8474 +       vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8475 +
8476 +       up(&service->remove_event);
8477 +
8478 +       /* Release the initial lock */
8479 +       unlock_service(service);
8480 +}
8481 +
8482 +VCHIQ_STATUS_T
8483 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8484 +{
8485 +       VCHIQ_SERVICE_T *service;
8486 +       int i;
8487 +
8488 +       /* Find all services registered to this client and enable them. */
8489 +       i = 0;
8490 +       while ((service = next_service_by_instance(state, instance,
8491 +               &i)) != NULL) {
8492 +               if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8493 +                       vchiq_set_service_state(service,
8494 +                               VCHIQ_SRVSTATE_LISTENING);
8495 +               unlock_service(service);
8496 +       }
8497 +
8498 +       if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8499 +               if (queue_message(state, NULL,
8500 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8501 +                       0, 1) == VCHIQ_RETRY)
8502 +                       return VCHIQ_RETRY;
8503 +
8504 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8505 +       }
8506 +
8507 +       if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8508 +               if (down_interruptible(&state->connect) != 0)
8509 +                       return VCHIQ_RETRY;
8510 +
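+               /* Once the connect semaphore has been taken, mark the
+               ** connection established and raise the semaphore again so
+               ** that any other callers blocked on it are also released. */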
8511 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8512 +               up(&state->connect);
8513 +       }
8514 +
8515 +       return VCHIQ_SUCCESS;
8516 +}
8517 +
8518 +VCHIQ_STATUS_T
8519 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8520 +{
8521 +       VCHIQ_SERVICE_T *service;
8522 +       int i;
8523 +
8524 +       /* Find all services registered to this client and remove them. */
8525 +       i = 0;
8526 +       while ((service = next_service_by_instance(state, instance,
8527 +               &i)) != NULL) {
8528 +               (void)vchiq_remove_service(service->handle);
8529 +               unlock_service(service);
8530 +       }
8531 +
8532 +       return VCHIQ_SUCCESS;
8533 +}
8534 +
8535 +VCHIQ_STATUS_T
8536 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8537 +{
8538 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8539 +
8540 +       switch (state->conn_state) {
8541 +       case VCHIQ_CONNSTATE_CONNECTED:
8542 +               /* Request a pause */
8543 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8544 +               request_poll(state, NULL, 0);
8545 +               break;
8546 +       default:
8547 +               vchiq_log_error(vchiq_core_log_level,
8548 +                       "vchiq_pause_internal in state %s\n",
8549 +                       conn_state_names[state->conn_state]);
8550 +               status = VCHIQ_ERROR;
8551 +               VCHIQ_STATS_INC(state, error_count);
8552 +               break;
8553 +       }
8554 +
8555 +       return status;
8556 +}
8557 +
8558 +VCHIQ_STATUS_T
8559 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8560 +{
8561 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8562 +
8563 +       if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8564 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8565 +               request_poll(state, NULL, 0);
8566 +       } else {
8567 +               status = VCHIQ_ERROR;
8568 +               VCHIQ_STATS_INC(state, error_count);
8569 +       }
8570 +
8571 +       return status;
8572 +}
8573 +
8574 +VCHIQ_STATUS_T
8575 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8576 +{
8577 +       /* Unregister the service */
8578 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8579 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8580 +
8581 +       if (!service)
8582 +               return VCHIQ_ERROR;
8583 +
8584 +       vchiq_log_info(vchiq_core_log_level,
8585 +               "%d: close_service:%d",
8586 +               service->state->id, service->localport);
8587 +
8588 +       if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8589 +               (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8590 +               (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8591 +               unlock_service(service);
8592 +               return VCHIQ_ERROR;
8593 +       }
8594 +
8595 +       mark_service_closing(service);
8596 +
8597 +       if (current == service->state->slot_handler_thread) {
8598 +               status = vchiq_close_service_internal(service,
8599 +                       0/*!close_recvd*/);
8600 +               BUG_ON(status == VCHIQ_RETRY);
8601 +       } else {
8602 +               /* Mark the service for termination by the slot handler */
8603 +               request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8604 +       }
8605 +
8606 +       while (1) {
8607 +               if (down_interruptible(&service->remove_event) != 0) {
8608 +                       status = VCHIQ_RETRY;
8609 +                       break;
8610 +               }
8611 +
8612 +               if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8613 +                       (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8614 +                       (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8615 +                       break;
8616 +
8617 +               vchiq_log_warning(vchiq_core_log_level,
8618 +                       "%d: close_service:%d - waiting in state %s",
8619 +                       service->state->id, service->localport,
8620 +                       srvstate_names[service->srvstate]);
8621 +       }
8622 +
8623 +       if ((status == VCHIQ_SUCCESS) &&
8624 +               (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8625 +               (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8626 +               status = VCHIQ_ERROR;
8627 +
8628 +       unlock_service(service);
8629 +
8630 +       return status;
8631 +}
8632 +
8633 +VCHIQ_STATUS_T
8634 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8635 +{
8636 +       /* Unregister the service */
8637 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8638 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8639 +
8640 +       if (!service)
8641 +               return VCHIQ_ERROR;
8642 +
8643 +       vchiq_log_info(vchiq_core_log_level,
8644 +               "%d: remove_service:%d",
8645 +               service->state->id, service->localport);
8646 +
8647 +       if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8648 +               unlock_service(service);
8649 +               return VCHIQ_ERROR;
8650 +       }
8651 +
8652 +       mark_service_closing(service);
8653 +
8654 +       if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8655 +               (current == service->state->slot_handler_thread)) {
8656 +               /* Make it look like a client, because it must be removed and
8657 +                  not left in the LISTENING state. */
8658 +               service->public_fourcc = VCHIQ_FOURCC_INVALID;
8659 +
8660 +               status = vchiq_close_service_internal(service,
8661 +                       0/*!close_recvd*/);
8662 +               BUG_ON(status == VCHIQ_RETRY);
8663 +       } else {
8664 +               /* Mark the service for removal by the slot handler */
8665 +               request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8666 +       }
8667 +       while (1) {
8668 +               if (down_interruptible(&service->remove_event) != 0) {
8669 +                       status = VCHIQ_RETRY;
8670 +                       break;
8671 +               }
8672 +
8673 +               if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8674 +                       (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8675 +                       break;
8676 +
8677 +               vchiq_log_warning(vchiq_core_log_level,
8678 +                       "%d: remove_service:%d - waiting in state %s",
8679 +                       service->state->id, service->localport,
8680 +                       srvstate_names[service->srvstate]);
8681 +       }
8682 +
8683 +       if ((status == VCHIQ_SUCCESS) &&
8684 +               (service->srvstate != VCHIQ_SRVSTATE_FREE))
8685 +               status = VCHIQ_ERROR;
8686 +
8687 +       unlock_service(service);
8688 +
8689 +       return status;
8690 +}
8691 +
8692 +
8693 +/* This function may be called by kernel threads or user threads.
8694 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8695 + * received and the call should be retried after being returned to user
8696 + * context.
8697 + * When called in blocking mode, the userdata field points to a bulk_waiter
8698 + * structure.
8699 + */
8700 +VCHIQ_STATUS_T
8701 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8702 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8703 +       VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8704 +{
8705 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8706 +       VCHIQ_BULK_QUEUE_T *queue;
8707 +       VCHIQ_BULK_T *bulk;
8708 +       VCHIQ_STATE_T *state;
8709 +       struct bulk_waiter *bulk_waiter = NULL;
8710 +       const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8711 +       const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8712 +               VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8713 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8714 +
8715 +       if (!service ||
8716 +                (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8717 +                ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8718 +                (vchiq_check_service(service) != VCHIQ_SUCCESS))
8719 +               goto error_exit;
8720 +
8721 +       switch (mode) {
8722 +       case VCHIQ_BULK_MODE_NOCALLBACK:
8723 +       case VCHIQ_BULK_MODE_CALLBACK:
8724 +               break;
8725 +       case VCHIQ_BULK_MODE_BLOCKING:
8726 +               bulk_waiter = (struct bulk_waiter *)userdata;
8727 +               sema_init(&bulk_waiter->event, 0);
8728 +               bulk_waiter->actual = 0;
8729 +               bulk_waiter->bulk = NULL;
8730 +               break;
8731 +       case VCHIQ_BULK_MODE_WAITING:
8732 +               bulk_waiter = (struct bulk_waiter *)userdata;
8733 +               bulk = bulk_waiter->bulk;
8734 +               goto waiting;
8735 +       default:
8736 +               goto error_exit;
8737 +       }
8738 +
8739 +       state = service->state;
8740 +
8741 +       queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8742 +               &service->bulk_tx : &service->bulk_rx;
8743 +
8744 +       if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8745 +               status = VCHIQ_RETRY;
8746 +               goto error_exit;
8747 +       }
8748 +
8749 +       if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8750 +               VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8751 +               do {
8752 +                       mutex_unlock(&service->bulk_mutex);
8753 +                       if (down_interruptible(&service->bulk_remove_event)
8754 +                               != 0) {
8755 +                               status = VCHIQ_RETRY;
8756 +                               goto error_exit;
8757 +                       }
8758 +                       if (mutex_lock_interruptible(&service->bulk_mutex)
8759 +                               != 0) {
8760 +                               status = VCHIQ_RETRY;
8761 +                               goto error_exit;
8762 +                       }
8763 +               } while (queue->local_insert == queue->remove +
8764 +                               VCHIQ_NUM_SERVICE_BULKS);
8765 +       }
8766 +
8767 +       bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
8768 +
8769 +       bulk->mode = mode;
8770 +       bulk->dir = dir;
8771 +       bulk->userdata = userdata;
8772 +       bulk->size = size;
8773 +       bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
8774 +
8775 +       if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
8776 +               VCHIQ_SUCCESS)
8777 +               goto unlock_error_exit;
8778 +
8779 +       wmb();
8780 +
8781 +       vchiq_log_info(vchiq_core_log_level,
8782 +               "%d: bt (%d->%d) %cx %x@%x %x",
8783 +               state->id,
8784 +               service->localport, service->remoteport, dir_char,
8785 +               size, (unsigned int)bulk->data, (unsigned int)userdata);
8786 +
8787 +       if (state->is_master) {
8788 +               queue->local_insert++;
8789 +               if (resolve_bulks(service, queue))
8790 +                       request_poll(state, service,
8791 +                               (dir == VCHIQ_BULK_TRANSMIT) ?
8792 +                               VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
8793 +       } else {
8794 +               int payload[2] = { (int)bulk->data, bulk->size };
8795 +               VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
8796 +
8797 +               status = queue_message(state, NULL,
8798 +                       VCHIQ_MAKE_MSG(dir_msgtype,
8799 +                               service->localport, service->remoteport),
8800 +                       &element, 1, sizeof(payload), 1);
8801 +               if (status != VCHIQ_SUCCESS) {
8802 +                       vchiq_complete_bulk(bulk);
8803 +                       goto unlock_error_exit;
8804 +               }
8805 +               queue->local_insert++;
8806 +       }
8807 +
8808 +       mutex_unlock(&service->bulk_mutex);
8809 +
8810 +       vchiq_log_trace(vchiq_core_log_level,
8811 +               "%d: bt:%d %cx li=%x ri=%x p=%x",
8812 +               state->id,
8813 +               service->localport, dir_char,
8814 +               queue->local_insert, queue->remote_insert, queue->process);
8815 +
8816 +waiting:
8817 +       unlock_service(service);
8818 +
8819 +       status = VCHIQ_SUCCESS;
8820 +
8821 +       if (bulk_waiter) {
8822 +               bulk_waiter->bulk = bulk;
8823 +               if (down_interruptible(&bulk_waiter->event) != 0)
8824 +                       status = VCHIQ_RETRY;
8825 +               else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
8826 +                       status = VCHIQ_ERROR;
8827 +       }
8828 +
8829 +       return status;
8830 +
8831 +unlock_error_exit:
8832 +       mutex_unlock(&service->bulk_mutex);
8833 +
8834 +error_exit:
8835 +       if (service)
8836 +               unlock_service(service);
8837 +       return status;
8838 +}
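A minimal caller sketch of the blocking mode documented in the comment above (illustrative only; example_blocking_transmit and its arguments are hypothetical and not part of the patch):

/* Illustrative sketch only -- assumes the vchiq headers added by this patch. */
static VCHIQ_STATUS_T example_blocking_transmit(VCHIQ_SERVICE_HANDLE_T handle,
        void *data, int size)
{
        struct bulk_waiter waiter;
        VCHIQ_STATUS_T status;

        /* With VCHIQ_BULK_MODE_BLOCKING the userdata argument must point at a
        ** struct bulk_waiter; the core initialises waiter.event and up()s it
        ** from the completion path. */
        status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID, data,
                size, &waiter, VCHIQ_BULK_MODE_BLOCKING, VCHIQ_BULK_TRANSMIT);

        /* VCHIQ_RETRY means the wait was interrupted by a signal. A user
        ** thread returns to its own context and repeats the call with
        ** VCHIQ_BULK_MODE_WAITING, passing the same bulk_waiter, so a waiter
        ** on the stack is only safe if the caller does not return before the
        ** transfer has completed. */
        return status;
}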
8839 +
8840 +VCHIQ_STATUS_T
8841 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
8842 +       const VCHIQ_ELEMENT_T *elements, unsigned int count)
8843 +{
8844 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8845 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8846 +
8847 +       unsigned int size = 0;
8848 +       unsigned int i;
8849 +
8850 +       if (!service ||
8851 +               (vchiq_check_service(service) != VCHIQ_SUCCESS))
8852 +               goto error_exit;
8853 +
8854 +       for (i = 0; i < (unsigned int)count; i++) {
8855 +               if (elements[i].size) {
8856 +                       if (elements[i].data == NULL) {
8857 +                               VCHIQ_SERVICE_STATS_INC(service, error_count);
8858 +                               goto error_exit;
8859 +                       }
8860 +                       size += elements[i].size;
8861 +               }
8862 +       }
8863 +
8864 +       if (size > VCHIQ_MAX_MSG_SIZE) {
8865 +               VCHIQ_SERVICE_STATS_INC(service, error_count);
8866 +               goto error_exit;
8867 +       }
8868 +
8869 +       switch (service->srvstate) {
8870 +       case VCHIQ_SRVSTATE_OPEN:
8871 +               status = queue_message(service->state, service,
8872 +                               VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8873 +                                       service->localport,
8874 +                                       service->remoteport),
8875 +                               elements, count, size, 1);
8876 +               break;
8877 +       case VCHIQ_SRVSTATE_OPENSYNC:
8878 +               status = queue_message_sync(service->state, service,
8879 +                               VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8880 +                                       service->localport,
8881 +                                       service->remoteport),
8882 +                               elements, count, size, 1);
8883 +               break;
8884 +       default:
8885 +               status = VCHIQ_ERROR;
8886 +               break;
8887 +       }
8888 +
8889 +error_exit:
8890 +       if (service)
8891 +               unlock_service(service);
8892 +
8893 +       return status;
8894 +}
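A hypothetical usage sketch for vchiq_queue_message (not part of the patch; example_send and its arguments are made up). Each VCHIQ_ELEMENT_T supplies a data pointer and a size, and the elements are copied back to back into a single message, so a fixed header and a variable payload can be sent without assembling them in an intermediate buffer:

/* Illustrative sketch only. */
static VCHIQ_STATUS_T example_send(VCHIQ_SERVICE_HANDLE_T handle,
        const void *payload, unsigned int payload_size)
{
        struct { int cmd; int len; } hdr = { 1, payload_size };
        VCHIQ_ELEMENT_T elements[2] = {
                { &hdr, sizeof(hdr) },          /* element 0: fixed header     */
                { payload, payload_size },      /* element 1: variable payload */
        };

        /* Rejected with VCHIQ_ERROR if the combined size exceeds
        ** VCHIQ_MAX_MSG_SIZE or the service is not open. */
        return vchiq_queue_message(handle, elements, 2);
}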
8895 +
8896 +void
8897 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
8898 +{
8899 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8900 +       VCHIQ_SHARED_STATE_T *remote;
8901 +       VCHIQ_STATE_T *state;
8902 +       int slot_index;
8903 +
8904 +       if (!service)
8905 +               return;
8906 +
8907 +       state = service->state;
8908 +       remote = state->remote;
8909 +
8910 +       slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
8911 +
8912 +       if ((slot_index >= remote->slot_first) &&
8913 +               (slot_index <= remote->slot_last)) {
8914 +               int msgid = header->msgid;
8915 +               if (msgid & VCHIQ_MSGID_CLAIMED) {
8916 +                       VCHIQ_SLOT_INFO_T *slot_info =
8917 +                               SLOT_INFO_FROM_INDEX(state, slot_index);
8918 +
8919 +                       release_slot(state, slot_info, header, service);
8920 +               }
8921 +       } else if (slot_index == remote->slot_sync)
8922 +               release_message_sync(state, header);
8923 +
8924 +       unlock_service(service);
8925 +}
8926 +
8927 +static void
8928 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8929 +{
8930 +       header->msgid = VCHIQ_MSGID_PADDING;
8931 +       wmb();
8932 +       remote_event_signal(&state->remote->sync_release);
8933 +}
8934 +
8935 +VCHIQ_STATUS_T
8936 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
8937 +{
8938 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8939 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8940 +
8941 +       if (!service ||
8942 +               (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
8943 +               !peer_version)
8944 +               goto exit;
8945 +       *peer_version = service->peer_version;
8946 +       status = VCHIQ_SUCCESS;
8947 +
8948 +exit:
8949 +       if (service)
8950 +               unlock_service(service);
8951 +       return status;
8952 +}
8953 +
8954 +VCHIQ_STATUS_T
8955 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
8956 +       int config_size, VCHIQ_CONFIG_T *pconfig)
8957 +{
8958 +       VCHIQ_CONFIG_T config;
8959 +
8960 +       (void)instance;
8961 +
8962 +       config.max_msg_size           = VCHIQ_MAX_MSG_SIZE;
8963 +       config.bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
8964 +       config.max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
8965 +       config.max_services           = VCHIQ_MAX_SERVICES;
8966 +       config.version                = VCHIQ_VERSION;
8967 +       config.version_min            = VCHIQ_VERSION_MIN;
8968 +
8969 +       if (config_size > sizeof(VCHIQ_CONFIG_T))
8970 +               return VCHIQ_ERROR;
8971 +
8972 +       memcpy(pconfig, &config,
8973 +               min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
8974 +
8975 +       return VCHIQ_SUCCESS;
8976 +}
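A short hypothetical sketch of using the reported configuration (illustrative only; example_msg_fits is made up). Rather than hard-coding the slot geometry, a caller can check a message against max_msg_size before queuing it:

/* Illustrative sketch only. */
static int example_msg_fits(VCHIQ_INSTANCE_T instance, unsigned int msg_size)
{
        VCHIQ_CONFIG_T config;

        if (vchiq_get_config(instance, sizeof(config), &config) != VCHIQ_SUCCESS)
                return 0;

        /* vchiq_queue_message rejects anything larger than this. */
        return msg_size <= config.max_msg_size;
}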
8977 +
8978 +VCHIQ_STATUS_T
8979 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
8980 +       VCHIQ_SERVICE_OPTION_T option, int value)
8981 +{
8982 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8983 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8984 +
8985 +       if (service) {
8986 +               switch (option) {
8987 +               case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
8988 +                       service->auto_close = value;
8989 +                       status = VCHIQ_SUCCESS;
8990 +                       break;
8991 +
8992 +               case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
8993 +                       VCHIQ_SERVICE_QUOTA_T *service_quota =
8994 +                               &service->state->service_quotas[
8995 +                                       service->localport];
8996 +                       if (value == 0)
8997 +                               value = service->state->default_slot_quota;
8998 +                       if ((value >= service_quota->slot_use_count) &&
8999 +                                (value < (unsigned short)~0)) {
9000 +                               service_quota->slot_quota = value;
9001 +                               if ((value >= service_quota->slot_use_count) &&
9002 +                                       (service_quota->message_quota >=
9003 +                                        service_quota->message_use_count)) {
9004 +                                       /* Signal the service that it may have
9005 +                                       ** dropped below its quota */
9006 +                                       up(&service_quota->quota_event);
9007 +                               }
9008 +                               status = VCHIQ_SUCCESS;
9009 +                       }
9010 +               } break;
9011 +
9012 +               case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
9013 +                       VCHIQ_SERVICE_QUOTA_T *service_quota =
9014 +                               &service->state->service_quotas[
9015 +                                       service->localport];
9016 +                       if (value == 0)
9017 +                               value = service->state->default_message_quota;
9018 +                       if ((value >= service_quota->message_use_count) &&
9019 +                                (value < (unsigned short)~0)) {
9020 +                               service_quota->message_quota = value;
9021 +                               if ((value >=
9022 +                                       service_quota->message_use_count) &&
9023 +                                       (service_quota->slot_quota >=
9024 +                                       service_quota->slot_use_count))
9025 +                                       /* Signal the service that it may have
9026 +                                       ** dropped below its quota */
9027 +                                       up(&service_quota->quota_event);
9028 +                               status = VCHIQ_SUCCESS;
9029 +                       }
9030 +               } break;
9031 +
9032 +               case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
9033 +                       if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9034 +                               (service->srvstate ==
9035 +                               VCHIQ_SRVSTATE_LISTENING)) {
9036 +                               service->sync = value;
9037 +                               status = VCHIQ_SUCCESS;
9038 +                       }
9039 +                       break;
9040 +
9041 +               default:
9042 +                       break;
9043 +               }
9044 +               unlock_service(service);
9045 +       }
9046 +
9047 +       return status;
9048 +}
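A hypothetical one-line usage sketch for the option interface above (not part of the patch; the handle is assumed to refer to a service that is still HIDDEN or LISTENING, the only states in which the synchronous option may be changed):

/* Illustrative sketch only. */
if (vchiq_set_service_option(handle, VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
                1) != VCHIQ_SUCCESS)
        vchiq_log_warning(vchiq_core_log_level,
                "could not make service synchronous");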
9049 +
9050 +void
9051 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9052 +       VCHIQ_SHARED_STATE_T *shared, const char *label)
9053 +{
9054 +       static const char *const debug_names[] = {
9055 +               "<entries>",
9056 +               "SLOT_HANDLER_COUNT",
9057 +               "SLOT_HANDLER_LINE",
9058 +               "PARSE_LINE",
9059 +               "PARSE_HEADER",
9060 +               "PARSE_MSGID",
9061 +               "AWAIT_COMPLETION_LINE",
9062 +               "DEQUEUE_MESSAGE_LINE",
9063 +               "SERVICE_CALLBACK_LINE",
9064 +               "MSG_QUEUE_FULL_COUNT",
9065 +               "COMPLETION_QUEUE_FULL_COUNT"
9066 +       };
9067 +       int i;
9068 +
9069 +       char buf[80];
9070 +       int len;
9071 +       len = snprintf(buf, sizeof(buf),
9072 +               "  %s: slots %d-%d tx_pos=%x recycle=%x",
9073 +               label, shared->slot_first, shared->slot_last,
9074 +               shared->tx_pos, shared->slot_queue_recycle);
9075 +       vchiq_dump(dump_context, buf, len + 1);
9076 +
9077 +       len = snprintf(buf, sizeof(buf),
9078 +               "    Slots claimed:");
9079 +       vchiq_dump(dump_context, buf, len + 1);
9080 +
9081 +       for (i = shared->slot_first; i <= shared->slot_last; i++) {
9082 +               VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9083 +               if (slot_info.use_count != slot_info.release_count) {
9084 +                       len = snprintf(buf, sizeof(buf),
9085 +                               "      %d: %d/%d", i, slot_info.use_count,
9086 +                               slot_info.release_count);
9087 +                       vchiq_dump(dump_context, buf, len + 1);
9088 +               }
9089 +       }
9090 +
9091 +       for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9092 +               len = snprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
9093 +                       debug_names[i], shared->debug[i], shared->debug[i]);
9094 +               vchiq_dump(dump_context, buf, len + 1);
9095 +       }
9096 +}
9097 +
9098 +void
9099 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9100 +{
9101 +       char buf[80];
9102 +       int len;
9103 +       int i;
9104 +
9105 +       len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9106 +               conn_state_names[state->conn_state]);
9107 +       vchiq_dump(dump_context, buf, len + 1);
9108 +
9109 +       len = snprintf(buf, sizeof(buf),
9110 +               "  tx_pos=%x(@%x), rx_pos=%x(@%x)",
9111 +               state->local->tx_pos,
9112 +               (uint32_t)state->tx_data +
9113 +                       (state->local_tx_pos & VCHIQ_SLOT_MASK),
9114 +               state->rx_pos,
9115 +               (uint32_t)state->rx_data +
9116 +                       (state->rx_pos & VCHIQ_SLOT_MASK));
9117 +       vchiq_dump(dump_context, buf, len + 1);
9118 +
9119 +       len = snprintf(buf, sizeof(buf),
9120 +               "  Version: %d (min %d)",
9121 +               VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9122 +       vchiq_dump(dump_context, buf, len + 1);
9123 +
9124 +       if (VCHIQ_ENABLE_STATS) {
9125 +               len = snprintf(buf, sizeof(buf),
9126 +                       "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9127 +                       "error_count=%d",
9128 +                       state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9129 +                       state->stats.error_count);
9130 +               vchiq_dump(dump_context, buf, len + 1);
9131 +       }
9132 +
9133 +       len = snprintf(buf, sizeof(buf),
9134 +               "  Slots: %d available (%d data), %d recyclable, %d stalls "
9135 +               "(%d data)",
9136 +               ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9137 +                       state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9138 +               state->data_quota - state->data_use_count,
9139 +               state->local->slot_queue_recycle - state->slot_queue_available,
9140 +               state->stats.slot_stalls, state->stats.data_stalls);
9141 +       vchiq_dump(dump_context, buf, len + 1);
9142 +
9143 +       vchiq_dump_platform_state(dump_context);
9144 +
9145 +       vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9146 +       vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9147 +
9148 +       vchiq_dump_platform_instances(dump_context);
9149 +
9150 +       for (i = 0; i < state->unused_service; i++) {
9151 +               VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9152 +
9153 +               if (service) {
9154 +                       vchiq_dump_service_state(dump_context, service);
9155 +                       unlock_service(service);
9156 +               }
9157 +       }
9158 +}
9159 +
9160 +void
9161 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9162 +{
9163 +       char buf[80];
9164 +       int len;
9165 +
9166 +       len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9167 +               service->localport, srvstate_names[service->srvstate],
9168 +               service->ref_count - 1); /* Don't include the lock just taken */
9169 +
9170 +       if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9171 +               char remoteport[30];
9172 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
9173 +                       &service->state->service_quotas[service->localport];
9174 +               int fourcc = service->base.fourcc;
9175 +               int tx_pending, rx_pending;
9176 +               if (service->remoteport != VCHIQ_PORT_FREE) {
9177 +                       int len2 = snprintf(remoteport, sizeof(remoteport),
9178 +                               "%d", service->remoteport);
9179 +                       if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9180 +                               snprintf(remoteport + len2,
9181 +                                       sizeof(remoteport) - len2,
9182 +                                       " (client %x)", service->client_id);
9183 +               } else
9184 +                       strcpy(remoteport, "n/a");
9185 +
9186 +               len += snprintf(buf + len, sizeof(buf) - len,
9187 +                       " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9188 +                       VCHIQ_FOURCC_AS_4CHARS(fourcc),
9189 +                       remoteport,
9190 +                       service_quota->message_use_count,
9191 +                       service_quota->message_quota,
9192 +                       service_quota->slot_use_count,
9193 +                       service_quota->slot_quota);
9194 +
9195 +               vchiq_dump(dump_context, buf, len + 1);
9196 +
9197 +               tx_pending = service->bulk_tx.local_insert -
9198 +                       service->bulk_tx.remote_insert;
9199 +
9200 +               rx_pending = service->bulk_rx.local_insert -
9201 +                       service->bulk_rx.remote_insert;
9202 +
9203 +               len = snprintf(buf, sizeof(buf),
9204 +                       "  Bulk: tx_pending=%d (size %d),"
9205 +                       " rx_pending=%d (size %d)",
9206 +                       tx_pending,
9207 +                       tx_pending ? service->bulk_tx.bulks[
9208 +                       BULK_INDEX(service->bulk_tx.remove)].size : 0,
9209 +                       rx_pending,
9210 +                       rx_pending ? service->bulk_rx.bulks[
9211 +                       BULK_INDEX(service->bulk_rx.remove)].size : 0);
9212 +
9213 +               if (VCHIQ_ENABLE_STATS) {
9214 +                       vchiq_dump(dump_context, buf, len + 1);
9215 +
9216 +                       len = snprintf(buf, sizeof(buf),
9217 +                               "  Ctrl: tx_count=%d, tx_bytes=%llu, "
9218 +                               "rx_count=%d, rx_bytes=%llu",
9219 +                               service->stats.ctrl_tx_count,
9220 +                               service->stats.ctrl_tx_bytes,
9221 +                               service->stats.ctrl_rx_count,
9222 +                               service->stats.ctrl_rx_bytes);
9223 +                       vchiq_dump(dump_context, buf, len + 1);
9224 +
9225 +                       len = snprintf(buf, sizeof(buf),
9226 +                               "  Bulk: tx_count=%d, tx_bytes=%llu, "
9227 +                               "rx_count=%d, rx_bytes=%llu",
9228 +                               service->stats.bulk_tx_count,
9229 +                               service->stats.bulk_tx_bytes,
9230 +                               service->stats.bulk_rx_count,
9231 +                               service->stats.bulk_rx_bytes);
9232 +                       vchiq_dump(dump_context, buf, len + 1);
9233 +
9234 +                       len = snprintf(buf, sizeof(buf),
9235 +                               "  %d quota stalls, %d slot stalls, "
9236 +                               "%d bulk stalls, %d aborted, %d errors",
9237 +                               service->stats.quota_stalls,
9238 +                               service->stats.slot_stalls,
9239 +                               service->stats.bulk_stalls,
9240 +                               service->stats.bulk_aborted_count,
9241 +                               service->stats.error_count);
9242 +                }
9243 +       }
9244 +
9245 +       vchiq_dump(dump_context, buf, len + 1);
9246 +
9247 +       if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9248 +               vchiq_dump_platform_service_state(dump_context, service);
9249 +}
9250 +
9251 +
9252 +void
9253 +vchiq_loud_error_header(void)
9254 +{
9255 +       vchiq_log_error(vchiq_core_log_level,
9256 +               "============================================================"
9257 +               "================");
9258 +       vchiq_log_error(vchiq_core_log_level,
9259 +               "============================================================"
9260 +               "================");
9261 +       vchiq_log_error(vchiq_core_log_level, "=====");
9262 +}
9263 +
9264 +void
9265 +vchiq_loud_error_footer(void)
9266 +{
9267 +       vchiq_log_error(vchiq_core_log_level, "=====");
9268 +       vchiq_log_error(vchiq_core_log_level,
9269 +               "============================================================"
9270 +               "================");
9271 +       vchiq_log_error(vchiq_core_log_level,
9272 +               "============================================================"
9273 +               "================");
9274 +}
9275 +
9276 +
9277 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9278 +{
9279 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
9280 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9281 +               status = queue_message(state, NULL,
9282 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9283 +                       NULL, 0, 0, 0);
9284 +       return status;
9285 +}
9286 +
9287 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9288 +{
9289 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
9290 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9291 +               status = queue_message(state, NULL,
9292 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9293 +                       NULL, 0, 0, 0);
9294 +       return status;
9295 +}
9296 +
9297 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9298 +{
9299 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
9300 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9301 +               status = queue_message(state, NULL,
9302 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9303 +                       NULL, 0, 0, 0);
9304 +       return status;
9305 +}
9306 +
9307 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9308 +       size_t numBytes)
9309 +{
9310 +       const uint8_t  *mem = (const uint8_t *)voidMem;
9311 +       size_t          offset;
9312 +       char            lineBuf[100];
9313 +       char           *s;
9314 +
9315 +       while (numBytes > 0) {
9316 +               s = lineBuf;
9317 +
9318 +               for (offset = 0; offset < 16; offset++) {
9319 +                       if (offset < numBytes)
9320 +                               s += snprintf(s, 4, "%02x ", mem[offset]);
9321 +                       else
9322 +                               s += snprintf(s, 4, "   ");
9323 +               }
9324 +
9325 +               for (offset = 0; offset < 16; offset++) {
9326 +                       if (offset < numBytes) {
9327 +                               uint8_t ch = mem[offset];
9328 +
9329 +                               if ((ch < ' ') || (ch > '~'))
9330 +                                       ch = '.';
9331 +                               *s++ = (char)ch;
9332 +                       }
9333 +               }
9334 +               *s++ = '\0';
9335 +
9336 +               if ((label != NULL) && (*label != '\0'))
9337 +                       vchiq_log_trace(VCHIQ_LOG_TRACE,
9338 +                               "%s: %08x: %s", label, addr, lineBuf);
9339 +               else
9340 +                       vchiq_log_trace(VCHIQ_LOG_TRACE,
9341 +                               "%08x: %s", addr, lineBuf);
9342 +
9343 +               addr += 16;
9344 +               mem += 16;
9345 +               if (numBytes > 16)
9346 +                       numBytes -= 16;
9347 +               else
9348 +                       numBytes = 0;
9349 +       }
9350 +}
9351 --- /dev/null
9352 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9353 @@ -0,0 +1,706 @@
9354 +/**
9355 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9356 + *
9357 + * Redistribution and use in source and binary forms, with or without
9358 + * modification, are permitted provided that the following conditions
9359 + * are met:
9360 + * 1. Redistributions of source code must retain the above copyright
9361 + *    notice, this list of conditions, and the following disclaimer,
9362 + *    without modification.
9363 + * 2. Redistributions in binary form must reproduce the above copyright
9364 + *    notice, this list of conditions and the following disclaimer in the
9365 + *    documentation and/or other materials provided with the distribution.
9366 + * 3. The names of the above-listed copyright holders may not be used
9367 + *    to endorse or promote products derived from this software without
9368 + *    specific prior written permission.
9369 + *
9370 + * ALTERNATIVELY, this software may be distributed under the terms of the
9371 + * GNU General Public License ("GPL") version 2, as published by the Free
9372 + * Software Foundation.
9373 + *
9374 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9375 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9376 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9377 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9378 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9379 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9380 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9381 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9382 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9383 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9384 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9385 + */
9386 +
9387 +#ifndef VCHIQ_CORE_H
9388 +#define VCHIQ_CORE_H
9389 +
9390 +#include <linux/mutex.h>
9391 +#include <linux/semaphore.h>
9392 +#include <linux/kthread.h>
9393 +
9394 +#include "vchiq_cfg.h"
9395 +
9396 +#include "vchiq.h"
9397 +
9398 +/* Run time control of log level, based on KERN_XXX level. */
9399 +#define VCHIQ_LOG_DEFAULT  4
9400 +#define VCHIQ_LOG_ERROR    3
9401 +#define VCHIQ_LOG_WARNING  4
9402 +#define VCHIQ_LOG_INFO     6
9403 +#define VCHIQ_LOG_TRACE    7
9404 +
9405 +#define VCHIQ_LOG_PREFIX   KERN_INFO "vchiq: "
9406 +
9407 +#ifndef vchiq_log_error
9408 +#define vchiq_log_error(cat, fmt, ...) \
9409 +       do { if (cat >= VCHIQ_LOG_ERROR) \
9410 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9411 +#endif
9412 +#ifndef vchiq_log_warning
9413 +#define vchiq_log_warning(cat, fmt, ...) \
9414 +       do { if (cat >= VCHIQ_LOG_WARNING) \
9415 +                printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9416 +#endif
9417 +#ifndef vchiq_log_info
9418 +#define vchiq_log_info(cat, fmt, ...) \
9419 +       do { if (cat >= VCHIQ_LOG_INFO) \
9420 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9421 +#endif
9422 +#ifndef vchiq_log_trace
9423 +#define vchiq_log_trace(cat, fmt, ...) \
9424 +       do { if (cat >= VCHIQ_LOG_TRACE) \
9425 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9426 +#endif
9427 +
9428 +#define vchiq_loud_error(...) \
9429 +       vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9430 +
9431 +#ifndef vchiq_static_assert
9432 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9433 +       extern int vchiq_static_assert[(cond) ? 1 : -1]
9434 +#endif
9435 +
9436 +#define IS_POW2(x) ((x) && (((x) & ((x) - 1)) == 0))
9437 +
9438 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9439 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9440 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9441 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9442 +
9443 +#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
9444 +#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9445 +#define VCHIQ_SLOT_ZERO_SLOTS  ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9446 +       VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9447 +
9448 +#define VCHIQ_MSG_PADDING            0  /* -                                 */
9449 +#define VCHIQ_MSG_CONNECT            1  /* -                                 */
9450 +#define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
9451 +#define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
9452 +#define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
9453 +#define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
9454 +#define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
9455 +#define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
9456 +#define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
9457 +#define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
9458 +#define VCHIQ_MSG_PAUSE             10  /* -                                 */
9459 +#define VCHIQ_MSG_RESUME            11  /* -                                 */
9460 +#define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
9461 +#define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
9462 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
9463 +
9464 +#define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
9465 +#define VCHIQ_PORT_FREE                0x1000
9466 +#define VCHIQ_PORT_IS_VALID(port)      (port < VCHIQ_PORT_FREE)
9467 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9468 +       ((type<<24) | (srcport<<12) | (dstport<<0))
9469 +#define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)msgid >> 24)
9470 +#define VCHIQ_MSG_SRCPORT(msgid) \
9471 +       (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9472 +#define VCHIQ_MSG_DSTPORT(msgid) \
9473 +       ((unsigned short)msgid & 0xfff)
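A worked example of the packing these macros perform (illustrative only, not part of the header):

/*
 * VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7)
 *         == (5 << 24) | (3 << 12) | (7 << 0) == 0x05003007
 *
 * VCHIQ_MSG_TYPE(0x05003007)    == 5  (VCHIQ_MSG_DATA)
 * VCHIQ_MSG_SRCPORT(0x05003007) == 3
 * VCHIQ_MSG_DSTPORT(0x05003007) == 7
 */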
9474 +
9475 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9476 +       ((fourcc) >> 24) & 0xff, \
9477 +       ((fourcc) >> 16) & 0xff, \
9478 +       ((fourcc) >>  8) & 0xff, \
9479 +       (fourcc) & 0xff
9480 +
9481 +/* Ensure the fields are wide enough */
9482 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9483 +       == 0);
9484 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9485 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9486 +       (unsigned int)VCHIQ_PORT_FREE);
9487 +
9488 +#define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9489 +#define VCHIQ_MSGID_CLAIMED            0x40000000
9490 +
9491 +#define VCHIQ_FOURCC_INVALID           0x00000000
9492 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc)  (fourcc != VCHIQ_FOURCC_INVALID)
9493 +
9494 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9495 +
9496 +typedef uint32_t BITSET_T;
9497 +
9498 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9499 +
9500 +#define BITSET_SIZE(b)        ((b + 31) >> 5)
9501 +#define BITSET_WORD(b)        (b >> 5)
9502 +#define BITSET_BIT(b)         (1 << (b & 31))
9503 +#define BITSET_ZERO(bs)       memset(bs, 0, sizeof(bs))
9504 +#define BITSET_IS_SET(bs, b)  (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9505 +#define BITSET_SET(bs, b)     (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9506 +#define BITSET_CLR(bs, b)     (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
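A small usage sketch for the bit-set macros (illustrative only; the busy array and example_bitset_usage are hypothetical):

/* Illustrative sketch only. */
static void example_bitset_usage(void)
{
        BITSET_T busy[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

        BITSET_ZERO(busy);              /* clear every word of the set          */
        BITSET_SET(busy, 42);           /* word 42 >> 5 == 1, bit 42 & 31 == 10 */
        if (BITSET_IS_SET(busy, 42))
                BITSET_CLR(busy, 42);
}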
9507 +
9508 +#if VCHIQ_ENABLE_STATS
9509 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9510 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9511 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9512 +       (service->stats. stat += addend)
9513 +#else
9514 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9515 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9516 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9517 +#endif
9518 +
9519 +enum {
9520 +       DEBUG_ENTRIES,
9521 +#if VCHIQ_ENABLE_DEBUG
9522 +       DEBUG_SLOT_HANDLER_COUNT,
9523 +       DEBUG_SLOT_HANDLER_LINE,
9524 +       DEBUG_PARSE_LINE,
9525 +       DEBUG_PARSE_HEADER,
9526 +       DEBUG_PARSE_MSGID,
9527 +       DEBUG_AWAIT_COMPLETION_LINE,
9528 +       DEBUG_DEQUEUE_MESSAGE_LINE,
9529 +       DEBUG_SERVICE_CALLBACK_LINE,
9530 +       DEBUG_MSG_QUEUE_FULL_COUNT,
9531 +       DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9532 +#endif
9533 +       DEBUG_MAX
9534 +};
9535 +
9536 +#if VCHIQ_ENABLE_DEBUG
9537 +
9538 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9539 +#define DEBUG_TRACE(d) \
9540 +       do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9541 +#define DEBUG_VALUE(d, v) \
9542 +       do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9543 +#define DEBUG_COUNT(d) \
9544 +       do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9545 +
9546 +#else /* VCHIQ_ENABLE_DEBUG */
9547 +
9548 +#define DEBUG_INITIALISE(local)
9549 +#define DEBUG_TRACE(d)
9550 +#define DEBUG_VALUE(d, v)
9551 +#define DEBUG_COUNT(d)
9552 +
9553 +#endif /* VCHIQ_ENABLE_DEBUG */
9554 +
9555 +typedef enum {
9556 +       VCHIQ_CONNSTATE_DISCONNECTED,
9557 +       VCHIQ_CONNSTATE_CONNECTING,
9558 +       VCHIQ_CONNSTATE_CONNECTED,
9559 +       VCHIQ_CONNSTATE_PAUSING,
9560 +       VCHIQ_CONNSTATE_PAUSE_SENT,
9561 +       VCHIQ_CONNSTATE_PAUSED,
9562 +       VCHIQ_CONNSTATE_RESUMING,
9563 +       VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9564 +       VCHIQ_CONNSTATE_RESUME_TIMEOUT
9565 +} VCHIQ_CONNSTATE_T;
9566 +
9567 +enum {
9568 +       VCHIQ_SRVSTATE_FREE,
9569 +       VCHIQ_SRVSTATE_HIDDEN,
9570 +       VCHIQ_SRVSTATE_LISTENING,
9571 +       VCHIQ_SRVSTATE_OPENING,
9572 +       VCHIQ_SRVSTATE_OPEN,
9573 +       VCHIQ_SRVSTATE_OPENSYNC,
9574 +       VCHIQ_SRVSTATE_CLOSESENT,
9575 +       VCHIQ_SRVSTATE_CLOSERECVD,
9576 +       VCHIQ_SRVSTATE_CLOSEWAIT,
9577 +       VCHIQ_SRVSTATE_CLOSED
9578 +};
9579 +
9580 +enum {
9581 +       VCHIQ_POLL_TERMINATE,
9582 +       VCHIQ_POLL_REMOVE,
9583 +       VCHIQ_POLL_TXNOTIFY,
9584 +       VCHIQ_POLL_RXNOTIFY,
9585 +       VCHIQ_POLL_COUNT
9586 +};
9587 +
9588 +typedef enum {
9589 +       VCHIQ_BULK_TRANSMIT,
9590 +       VCHIQ_BULK_RECEIVE
9591 +} VCHIQ_BULK_DIR_T;
9592 +
9593 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9594 +
9595 +typedef struct vchiq_bulk_struct {
9596 +       short mode;
9597 +       short dir;
9598 +       void *userdata;
9599 +       VCHI_MEM_HANDLE_T handle;
9600 +       void *data;
9601 +       int size;
9602 +       void *remote_data;
9603 +       int remote_size;
9604 +       int actual;
9605 +} VCHIQ_BULK_T;
9606 +
9607 +typedef struct vchiq_bulk_queue_struct {
9608 +       int local_insert;  /* Where to insert the next local bulk */
9609 +       int remote_insert; /* Where to insert the next remote bulk (master) */
9610 +       int process;       /* Bulk to transfer next */
9611 +       int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9612 +       int remove;        /* Bulk to notify the local client of, and remove,
9613 +                          ** next */
9614 +       VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9615 +} VCHIQ_BULK_QUEUE_T;
9616 +
9617 +typedef struct remote_event_struct {
9618 +       int armed;
9619 +       int fired;
9620 +       struct semaphore *event;
9621 +} REMOTE_EVENT_T;
9622 +
9623 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9624 +
9625 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9626 +
9627 +typedef struct vchiq_slot_struct {
9628 +       char data[VCHIQ_SLOT_SIZE];
9629 +} VCHIQ_SLOT_T;
9630 +
9631 +typedef struct vchiq_slot_info_struct {
9632 +       /* Use two counters rather than one to avoid the need for a mutex. */
9633 +       short use_count;
9634 +       short release_count;
9635 +} VCHIQ_SLOT_INFO_T;
9636 +
9637 +typedef struct vchiq_service_struct {
9638 +       VCHIQ_SERVICE_BASE_T base;
9639 +       VCHIQ_SERVICE_HANDLE_T handle;
9640 +       unsigned int ref_count;
9641 +       int srvstate;
9642 +       VCHIQ_USERDATA_TERM_T userdata_term;
9643 +       unsigned int localport;
9644 +       unsigned int remoteport;
9645 +       int public_fourcc;
9646 +       int client_id;
9647 +       char auto_close;
9648 +       char sync;
9649 +       char closing;
9650 +       atomic_t poll_flags;
9651 +       short version;
9652 +       short version_min;
9653 +       short peer_version;
9654 +
9655 +       VCHIQ_STATE_T *state;
9656 +       VCHIQ_INSTANCE_T instance;
9657 +
9658 +       int service_use_count;
9659 +
9660 +       VCHIQ_BULK_QUEUE_T bulk_tx;
9661 +       VCHIQ_BULK_QUEUE_T bulk_rx;
9662 +
9663 +       struct semaphore remove_event;
9664 +       struct semaphore bulk_remove_event;
9665 +       struct mutex bulk_mutex;
9666 +
9667 +       struct service_stats_struct {
9668 +               int quota_stalls;
9669 +               int slot_stalls;
9670 +               int bulk_stalls;
9671 +               int error_count;
9672 +               int ctrl_tx_count;
9673 +               int ctrl_rx_count;
9674 +               int bulk_tx_count;
9675 +               int bulk_rx_count;
9676 +               int bulk_aborted_count;
9677 +               uint64_t ctrl_tx_bytes;
9678 +               uint64_t ctrl_rx_bytes;
9679 +               uint64_t bulk_tx_bytes;
9680 +               uint64_t bulk_rx_bytes;
9681 +       } stats;
9682 +} VCHIQ_SERVICE_T;
9683 +
9684 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9685 +       statically allocated, since for accounting reasons a service's slot
9686 +       usage is carried over between users of the same port number.
9687 + */
9688 +typedef struct vchiq_service_quota_struct {
9689 +       unsigned short slot_quota;
9690 +       unsigned short slot_use_count;
9691 +       unsigned short message_quota;
9692 +       unsigned short message_use_count;
9693 +       struct semaphore quota_event;
9694 +       int previous_tx_index;
9695 +} VCHIQ_SERVICE_QUOTA_T;
9696 +
9697 +typedef struct vchiq_shared_state_struct {
9698 +
9699 +       /* A non-zero value here indicates that the content is valid. */
9700 +       int initialised;
9701 +
9702 +       /* The first and last (inclusive) slots allocated to the owner. */
9703 +       int slot_first;
9704 +       int slot_last;
9705 +
9706 +       /* The slot allocated to synchronous messages from the owner. */
9707 +       int slot_sync;
9708 +
9709 +       /* Signalling this event indicates that the owner's slot handler
9710 +       ** thread should run. */
9711 +       REMOTE_EVENT_T trigger;
9712 +
9713 +       /* Indicates the byte position within the stream where the next message
9714 +       ** will be written. The least significant bits are an index into the
9715 +       ** slot. The next bits are the index of the slot in slot_queue. */
9716 +       int tx_pos;
9717 +
9718 +       /* This event should be signalled when a slot is recycled. */
9719 +       REMOTE_EVENT_T recycle;
9720 +
9721 +       /* The slot_queue index where the next recycled slot will be written. */
9722 +       int slot_queue_recycle;
9723 +
9724 +       /* This event should be signalled when a synchronous message is sent. */
9725 +       REMOTE_EVENT_T sync_trigger;
9726 +
9727 +       /* This event should be signalled when a synchronous message has been
9728 +       ** released. */
9729 +       REMOTE_EVENT_T sync_release;
9730 +
9731 +       /* A circular buffer of slot indexes. */
9732 +       int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9733 +
9734 +       /* Debugging state */
9735 +       int debug[DEBUG_MAX];
9736 +} VCHIQ_SHARED_STATE_T;
9737 +
9738 +typedef struct vchiq_slot_zero_struct {
9739 +       int magic;
9740 +       short version;
9741 +       short version_min;
9742 +       int slot_zero_size;
9743 +       int slot_size;
9744 +       int max_slots;
9745 +       int max_slots_per_side;
9746 +       int platform_data[2];
9747 +       VCHIQ_SHARED_STATE_T master;
9748 +       VCHIQ_SHARED_STATE_T slave;
9749 +       VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9750 +} VCHIQ_SLOT_ZERO_T;
9751 +
9752 +struct vchiq_state_struct {
9753 +       int id;
9754 +       int initialised;
9755 +       VCHIQ_CONNSTATE_T conn_state;
9756 +       int is_master;
9757 +
9758 +       VCHIQ_SHARED_STATE_T *local;
9759 +       VCHIQ_SHARED_STATE_T *remote;
9760 +       VCHIQ_SLOT_T *slot_data;
9761 +
9762 +       unsigned short default_slot_quota;
9763 +       unsigned short default_message_quota;
9764 +
9765 +       /* Event indicating connect message received */
9766 +       struct semaphore connect;
9767 +
9768 +       /* Mutex protecting services */
9769 +       struct mutex mutex;
9770 +       VCHIQ_INSTANCE_T *instance;
9771 +
9772 +       /* Processes incoming messages */
9773 +       struct task_struct *slot_handler_thread;
9774 +
9775 +       /* Processes recycled slots */
9776 +       struct task_struct *recycle_thread;
9777 +
9778 +       /* Processes synchronous messages */
9779 +       struct task_struct *sync_thread;
9780 +
9781 +       /* Local implementation of the trigger remote event */
9782 +       struct semaphore trigger_event;
9783 +
9784 +       /* Local implementation of the recycle remote event */
9785 +       struct semaphore recycle_event;
9786 +
9787 +       /* Local implementation of the sync trigger remote event */
9788 +       struct semaphore sync_trigger_event;
9789 +
9790 +       /* Local implementation of the sync release remote event */
9791 +       struct semaphore sync_release_event;
9792 +
9793 +       char *tx_data;
9794 +       char *rx_data;
9795 +       VCHIQ_SLOT_INFO_T *rx_info;
9796 +
9797 +       struct mutex slot_mutex;
9798 +
9799 +       struct mutex recycle_mutex;
9800 +
9801 +       struct mutex sync_mutex;
9802 +
9803 +       struct mutex bulk_transfer_mutex;
9804 +
9805 +       /* Indicates the byte position within the stream from where the next
9806 +       ** message will be read. The least significant bits are an index into
9807 +       ** the slot. The next bits are the index of the slot in
9808 +       ** remote->slot_queue. (A worked example follows this structure.) */
9809 +       int rx_pos;
9810 +
9811 +       /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
9812 +               from remote->tx_pos. */
9813 +       int local_tx_pos;
9814 +
9815 +       /* The slot_queue index of the slot to become available next. */
9816 +       int slot_queue_available;
9817 +
9818 +       /* A flag to indicate if any poll has been requested */
9819 +       int poll_needed;
9820 +
9821 +       /* The index of the previous slot used for data messages. */
9822 +       int previous_data_index;
9823 +
9824 +       /* The number of slots occupied by data messages. */
9825 +       unsigned short data_use_count;
9826 +
9827 +       /* The maximum number of slots to be occupied by data messages. */
9828 +       unsigned short data_quota;
9829 +
9830 +       /* An array of bit sets indicating which services must be polled. */
9831 +       atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
9832 +
9833 +       /* The number of the first unused service */
9834 +       int unused_service;
9835 +
9836 +       /* Signalled when a free slot becomes available. */
9837 +       struct semaphore slot_available_event;
9838 +
9839 +       struct semaphore slot_remove_event;
9840 +
9841 +       /* Signalled when a free data slot becomes available. */
9842 +       struct semaphore data_quota_event;
9843 +
9844 +       /* Incremented when there are bulk transfers which cannot be processed
9845 +        * whilst paused and must be processed on resume */
9846 +       int deferred_bulks;
9847 +
9848 +       struct state_stats_struct {
9849 +               int slot_stalls;
9850 +               int data_stalls;
9851 +               int ctrl_tx_count;
9852 +               int ctrl_rx_count;
9853 +               int error_count;
9854 +       } stats;
9855 +
9856 +       VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
9857 +       VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
9858 +       VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
9859 +
9860 +       VCHIQ_PLATFORM_STATE_T platform_state;
9861 +};
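A worked example of the stream-position encoding used by tx_pos, local_tx_pos and rx_pos (illustrative only, and assuming the usual VCHIQ_SLOT_SIZE of 4096, whose definition in vchiq_cfg.h is not shown here):

/*
 * position 0x3020:
 *   offset within the slot:  0x3020 & VCHIQ_SLOT_MASK == 0x020
 *   slot_queue index:        0x3020 / VCHIQ_SLOT_SIZE == 3
 *
 * i.e. the next message starts 0x20 bytes into the slot whose number is
 * stored in slot_queue[3 & VCHIQ_SLOT_QUEUE_MASK].
 */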
9862 +
9863 +struct bulk_waiter {
9864 +       VCHIQ_BULK_T *bulk;
9865 +       struct semaphore event;
9866 +       int actual;
9867 +};
9868 +
9869 +extern spinlock_t bulk_waiter_spinlock;
9870 +
9871 +extern int vchiq_core_log_level;
9872 +extern int vchiq_core_msg_log_level;
9873 +extern int vchiq_sync_log_level;
9874 +
9875 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
9876 +
9877 +extern const char *
9878 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
9879 +
9880 +extern VCHIQ_SLOT_ZERO_T *
9881 +vchiq_init_slots(void *mem_base, int mem_size);
9882 +
9883 +extern VCHIQ_STATUS_T
9884 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
9885 +       int is_master);
9886 +
9887 +extern VCHIQ_STATUS_T
9888 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9889 +
9890 +extern VCHIQ_SERVICE_T *
9891 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9892 +       const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9893 +       VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
9894 +
9895 +extern VCHIQ_STATUS_T
9896 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
9897 +
9898 +extern VCHIQ_STATUS_T
9899 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
9900 +
9901 +extern void
9902 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
9903 +
9904 +extern void
9905 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
9906 +
9907 +extern VCHIQ_STATUS_T
9908 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9909 +
9910 +extern VCHIQ_STATUS_T
9911 +vchiq_pause_internal(VCHIQ_STATE_T *state);
9912 +
9913 +extern VCHIQ_STATUS_T
9914 +vchiq_resume_internal(VCHIQ_STATE_T *state);
9915 +
9916 +extern void
9917 +remote_event_pollall(VCHIQ_STATE_T *state);
9918 +
9919 +extern VCHIQ_STATUS_T
9920 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9921 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9922 +       VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
9923 +
9924 +extern void
9925 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
9926 +
9927 +extern void
9928 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
9929 +
9930 +extern void
9931 +vchiq_loud_error_header(void);
9932 +
9933 +extern void
9934 +vchiq_loud_error_footer(void);
9935 +
9936 +extern void
9937 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
9938 +
9939 +static inline VCHIQ_SERVICE_T *
9940 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
9941 +{
9942 +       VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
9943 +               (VCHIQ_MAX_STATES - 1)];
9944 +       if (!state)
9945 +               return NULL;
9946 +
9947 +       return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
9948 +}
9949 +
9950 +extern VCHIQ_SERVICE_T *
9951 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
9952 +
9953 +extern VCHIQ_SERVICE_T *
9954 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
9955 +
9956 +extern VCHIQ_SERVICE_T *
9957 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
9958 +       VCHIQ_SERVICE_HANDLE_T handle);
9959 +
9960 +extern VCHIQ_SERVICE_T *
9961 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
9962 +       int *pidx);
9963 +
9964 +extern void
9965 +lock_service(VCHIQ_SERVICE_T *service);
9966 +
9967 +extern void
9968 +unlock_service(VCHIQ_SERVICE_T *service);
9969 +
9970 +/* The following functions are called from vchiq_core, and external
9971 +** implementations must be provided. */
9972 +
9973 +extern VCHIQ_STATUS_T
9974 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
9975 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
9976 +
9977 +extern void
9978 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
9979 +
9980 +extern void
9981 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
9982 +
9983 +extern VCHIQ_STATUS_T
9984 +vchiq_copy_from_user(void *dst, const void *src, int size);
9985 +
9986 +extern void
9987 +remote_event_signal(REMOTE_EVENT_T *event);
9988 +
9989 +void
9990 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
9991 +
9992 +extern void
9993 +vchiq_platform_paused(VCHIQ_STATE_T *state);
9994 +
9995 +extern VCHIQ_STATUS_T
9996 +vchiq_platform_resume(VCHIQ_STATE_T *state);
9997 +
9998 +extern void
9999 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
10000 +
10001 +extern void
10002 +vchiq_dump(void *dump_context, const char *str, int len);
10003 +
10004 +extern void
10005 +vchiq_dump_platform_state(void *dump_context);
10006 +
10007 +extern void
10008 +vchiq_dump_platform_instances(void *dump_context);
10009 +
10010 +extern void
10011 +vchiq_dump_platform_service_state(void *dump_context,
10012 +       VCHIQ_SERVICE_T *service);
10013 +
10014 +extern VCHIQ_STATUS_T
10015 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
10016 +
10017 +extern VCHIQ_STATUS_T
10018 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
10019 +
10020 +extern void
10021 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
10022 +
10023 +extern void
10024 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
10025 +
10026 +extern VCHIQ_STATUS_T
10027 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
10028 +
10029 +extern VCHIQ_STATUS_T
10030 +vchiq_check_service(VCHIQ_SERVICE_T *service);
10031 +
10032 +extern void
10033 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10034 +
10035 +extern VCHIQ_STATUS_T
10036 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10037 +
10038 +extern VCHIQ_STATUS_T
10039 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10040 +
10041 +extern VCHIQ_STATUS_T
10042 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10043 +
10044 +extern void
10045 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10046 +       VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10047 +
10048 +extern void
10049 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10050 +
10051 +extern void
10052 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10053 +
10054 +
10055 +extern void
10056 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10057 +       size_t numBytes);
10058 +
10059 +#endif
10060 --- /dev/null
10061 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10062 @@ -0,0 +1,87 @@
10063 +#!/usr/bin/perl -w
10064 +
10065 +use strict;
10066 +
10067 +#
10068 +# Generate a version from available information
10069 +#
10070 +
10071 +my $prefix = shift @ARGV;
10072 +my $root = shift @ARGV;
10073 +
10074 +
10075 +if ( not defined $root ) {
10076 +       die "usage: $0 prefix root-dir\n";
10077 +}
10078 +
10079 +if ( ! -d $root ) {
10080 +       die "root directory $root not found\n";
10081 +}
10082 +
10083 +my $version = "unknown";
10084 +my $tainted = "";
10085 +
10086 +if ( -d "$root/.git" ) {
10087 +       # attempt to work out git version. only do so
10088 +       # on a linux build host, as cygwin builds are
10089 +       # already slow enough
10090 +
10091 +       if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10092 +               if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10093 +                       $version = "no git version";
10094 +               }
10095 +               else {
10096 +                       $version = <F>;
10097 +                       $version =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
10098 +                       $version =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
10099 +               }
10100 +
10101 +               if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10102 +                       $tainted = <G>;
10103 +                       $tainted =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
10104 +                       $tainted =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
10105 +                       if (length $tainted) {
10106 +                               $version = join ' ', $version, "(tainted)";
10107 +                       }
10108 +                       else {
10109 +                               $version = join ' ', $version, "(clean)";
10110 +                       }
10111 +               }
10112 +       }
10113 +}
10114 +
10115 +my $hostname = `hostname`;
10116 +$hostname =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
10117 +$hostname =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
10118 +
10119 +
10120 +print STDERR "Version $version\n";
10121 +print <<EOF;
10122 +#include "${prefix}_build_info.h"
10123 +#include <linux/broadcom/vc_debug_sym.h>
10124 +
10125 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10126 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10127 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time,    __TIME__ );
10128 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date,    __DATE__ );
10129 +
10130 +const char *vchiq_get_build_hostname( void )
10131 +{
10132 +   return vchiq_build_hostname;
10133 +}
10134 +
10135 +const char *vchiq_get_build_version( void )
10136 +{
10137 +   return vchiq_build_version;
10138 +}
10139 +
10140 +const char *vchiq_get_build_date( void )
10141 +{
10142 +   return vchiq_build_date;
10143 +}
10144 +
10145 +const char *vchiq_get_build_time( void )
10146 +{
10147 +   return vchiq_build_time;
10148 +}
10149 +EOF
10150 --- /dev/null
10151 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10152 @@ -0,0 +1,188 @@
10153 +/**
10154 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10155 + *
10156 + * Redistribution and use in source and binary forms, with or without
10157 + * modification, are permitted provided that the following conditions
10158 + * are met:
10159 + * 1. Redistributions of source code must retain the above copyright
10160 + *    notice, this list of conditions, and the following disclaimer,
10161 + *    without modification.
10162 + * 2. Redistributions in binary form must reproduce the above copyright
10163 + *    notice, this list of conditions and the following disclaimer in the
10164 + *    documentation and/or other materials provided with the distribution.
10165 + * 3. The names of the above-listed copyright holders may not be used
10166 + *    to endorse or promote products derived from this software without
10167 + *    specific prior written permission.
10168 + *
10169 + * ALTERNATIVELY, this software may be distributed under the terms of the
10170 + * GNU General Public License ("GPL") version 2, as published by the Free
10171 + * Software Foundation.
10172 + *
10173 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10174 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10175 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10176 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10177 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10178 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10179 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10180 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10181 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10182 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10183 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10184 + */
10185 +
10186 +#ifndef VCHIQ_IF_H
10187 +#define VCHIQ_IF_H
10188 +
10189 +#include "interface/vchi/vchi_mh.h"
10190 +
10191 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10192 +
10193 +#define VCHIQ_SLOT_SIZE     4096
10194 +#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10195 +#define VCHIQ_CHANNEL_SIZE  VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10196 +
10197 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10198 +                       (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
10199 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10200 +#define VCHIQ_GET_SERVICE_FOURCC(service)   vchiq_get_service_fourcc(service)
10201 +
10202 +typedef enum {
10203 +       VCHIQ_SERVICE_OPENED,         /* service, -, -             */
10204 +       VCHIQ_SERVICE_CLOSED,         /* service, -, -             */
10205 +       VCHIQ_MESSAGE_AVAILABLE,      /* service, header, -        */
10206 +       VCHIQ_BULK_TRANSMIT_DONE,     /* service, -, bulk_userdata */
10207 +       VCHIQ_BULK_RECEIVE_DONE,      /* service, -, bulk_userdata */
10208 +       VCHIQ_BULK_TRANSMIT_ABORTED,  /* service, -, bulk_userdata */
10209 +       VCHIQ_BULK_RECEIVE_ABORTED    /* service, -, bulk_userdata */
10210 +} VCHIQ_REASON_T;
10211 +
10212 +typedef enum {
10213 +       VCHIQ_ERROR   = -1,
10214 +       VCHIQ_SUCCESS = 0,
10215 +       VCHIQ_RETRY   = 1
10216 +} VCHIQ_STATUS_T;
10217 +
10218 +typedef enum {
10219 +       VCHIQ_BULK_MODE_CALLBACK,
10220 +       VCHIQ_BULK_MODE_BLOCKING,
10221 +       VCHIQ_BULK_MODE_NOCALLBACK,
10222 +       VCHIQ_BULK_MODE_WAITING         /* Reserved for internal use */
10223 +} VCHIQ_BULK_MODE_T;
10224 +
10225 +typedef enum {
10226 +       VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10227 +       VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10228 +       VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10229 +       VCHIQ_SERVICE_OPTION_SYNCHRONOUS
10230 +} VCHIQ_SERVICE_OPTION_T;
10231 +
10232 +typedef struct vchiq_header_struct {
10233 +       /* The message identifier - opaque to applications. */
10234 +       int msgid;
10235 +
10236 +       /* Size of message data. */
10237 +       unsigned int size;
10238 +
10239 +       char data[0];           /* message */
10240 +} VCHIQ_HEADER_T;
10241 +
10242 +typedef struct {
10243 +       const void *data;
10244 +       unsigned int size;
10245 +} VCHIQ_ELEMENT_T;
10246 +
10247 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10248 +
10249 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10250 +       VCHIQ_SERVICE_HANDLE_T, void *);
10251 +
10252 +typedef struct vchiq_service_base_struct {
10253 +       int fourcc;
10254 +       VCHIQ_CALLBACK_T callback;
10255 +       void *userdata;
10256 +} VCHIQ_SERVICE_BASE_T;
10257 +
10258 +typedef struct vchiq_service_params_struct {
10259 +       int fourcc;
10260 +       VCHIQ_CALLBACK_T callback;
10261 +       void *userdata;
10262 +       short version;       /* Increment for non-trivial changes */
10263 +       short version_min;   /* Update for incompatible changes */
10264 +} VCHIQ_SERVICE_PARAMS_T;
10265 +
10266 +typedef struct vchiq_config_struct {
10267 +       unsigned int max_msg_size;
10268 +       unsigned int bulk_threshold; /* The message size above which it
10269 +                                       is better to use a bulk transfer
10270 +                                       (<= max_msg_size) */
10271 +       unsigned int max_outstanding_bulks;
10272 +       unsigned int max_services;
10273 +       short version;      /* The version of VCHIQ */
10274 +       short version_min;  /* The minimum compatible version of VCHIQ */
10275 +} VCHIQ_CONFIG_T;
10276 +
10277 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10278 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10279 +
10280 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10281 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10282 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10283 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10284 +       const VCHIQ_SERVICE_PARAMS_T *params,
10285 +       VCHIQ_SERVICE_HANDLE_T *pservice);
10286 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10287 +       const VCHIQ_SERVICE_PARAMS_T *params,
10288 +       VCHIQ_SERVICE_HANDLE_T *pservice);
10289 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10290 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10291 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10292 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10293 +       VCHIQ_SERVICE_HANDLE_T service);
10294 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10295 +
10296 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
10297 +       const VCHIQ_ELEMENT_T *elements, unsigned int count);
10298 +extern void           vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
10299 +       VCHIQ_HEADER_T *header);
10300 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10301 +       const void *data, unsigned int size, void *userdata);
10302 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10303 +       void *data, unsigned int size, void *userdata);
10304 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
10305 +       VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10306 +       const void *offset, unsigned int size, void *userdata);
10307 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
10308 +       VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10309 +       void *offset, unsigned int size, void *userdata);
10310 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10311 +       const void *data, unsigned int size, void *userdata,
10312 +       VCHIQ_BULK_MODE_T mode);
10313 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10314 +       void *data, unsigned int size, void *userdata,
10315 +       VCHIQ_BULK_MODE_T mode);
10316 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
10317 +       VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
10318 +       void *userdata, VCHIQ_BULK_MODE_T mode);
10319 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
10320 +       VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
10321 +       void *userdata, VCHIQ_BULK_MODE_T mode);
10322 +extern int   vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
10323 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
10324 +extern int   vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
10325 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
10326 +       int config_size, VCHIQ_CONFIG_T *pconfig);
10327 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
10328 +       VCHIQ_SERVICE_OPTION_T option, int value);
10329 +
10330 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
10331 +       VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
10332 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
10333 +
10334 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
10335 +       void *ptr, size_t num_bytes);
10336 +
10337 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
10338 +      short *peer_version);
10339 +
10340 +#endif /* VCHIQ_IF_H */
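For orientation, the following is a minimal, hedged sketch of how an in-kernel client might drive the API declared above. The "DEMO" fourcc, the payload and the error codes are illustrative assumptions, not part of this patch; a real service would also handle VCHIQ_RETRY and the bulk-completion reasons in its callback.

/* Illustrative kernel-side client sketch -- names and payload are made up. */
static VCHIQ_STATUS_T demo_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header);	/* hand the slot space back */
	return VCHIQ_SUCCESS;
}

static int demo_send_hello(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T handle;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O'),
		.callback    = demo_callback,
		.userdata    = NULL,
		.version     = 1,
		.version_min = 1,
	};
	VCHIQ_ELEMENT_T element = { "hello", 5 };

	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
		return -ENXIO;
	if (vchiq_connect(instance) != VCHIQ_SUCCESS ||
	    vchiq_open_service(instance, &params, &handle) != VCHIQ_SUCCESS) {
		vchiq_shutdown(instance);
		return -ENXIO;
	}
	vchiq_queue_message(handle, &element, 1);
	vchiq_close_service(handle);
	vchiq_shutdown(instance);
	return 0;
}

The same params structure works with vchiq_add_service() for a listening (server-side) endpoint; the difference is that vchiq_open_service() requires vchiq_connect() to have succeeded first.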
10341 --- /dev/null
10342 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10343 @@ -0,0 +1,129 @@
10344 +/**
10345 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10346 + *
10347 + * Redistribution and use in source and binary forms, with or without
10348 + * modification, are permitted provided that the following conditions
10349 + * are met:
10350 + * 1. Redistributions of source code must retain the above copyright
10351 + *    notice, this list of conditions, and the following disclaimer,
10352 + *    without modification.
10353 + * 2. Redistributions in binary form must reproduce the above copyright
10354 + *    notice, this list of conditions and the following disclaimer in the
10355 + *    documentation and/or other materials provided with the distribution.
10356 + * 3. The names of the above-listed copyright holders may not be used
10357 + *    to endorse or promote products derived from this software without
10358 + *    specific prior written permission.
10359 + *
10360 + * ALTERNATIVELY, this software may be distributed under the terms of the
10361 + * GNU General Public License ("GPL") version 2, as published by the Free
10362 + * Software Foundation.
10363 + *
10364 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10365 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10366 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10367 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10368 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10369 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10370 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10371 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10372 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10373 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10374 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10375 + */
10376 +
10377 +#ifndef VCHIQ_IOCTLS_H
10378 +#define VCHIQ_IOCTLS_H
10379 +
10380 +#include <linux/ioctl.h>
10381 +#include "vchiq_if.h"
10382 +
10383 +#define VCHIQ_IOC_MAGIC 0xc4
10384 +#define VCHIQ_INVALID_HANDLE (~0)
10385 +
10386 +typedef struct {
10387 +       VCHIQ_SERVICE_PARAMS_T params;
10388 +       int is_open;
10389 +       int is_vchi;
10390 +       unsigned int handle;       /* OUT */
10391 +} VCHIQ_CREATE_SERVICE_T;
10392 +
10393 +typedef struct {
10394 +       unsigned int handle;
10395 +       unsigned int count;
10396 +       const VCHIQ_ELEMENT_T *elements;
10397 +} VCHIQ_QUEUE_MESSAGE_T;
10398 +
10399 +typedef struct {
10400 +       unsigned int handle;
10401 +       void *data;
10402 +       unsigned int size;
10403 +       void *userdata;
10404 +       VCHIQ_BULK_MODE_T mode;
10405 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
10406 +
10407 +typedef struct {
10408 +       VCHIQ_REASON_T reason;
10409 +       VCHIQ_HEADER_T *header;
10410 +       void *service_userdata;
10411 +       void *bulk_userdata;
10412 +} VCHIQ_COMPLETION_DATA_T;
10413 +
10414 +typedef struct {
10415 +       unsigned int count;
10416 +       VCHIQ_COMPLETION_DATA_T *buf;
10417 +       unsigned int msgbufsize;
10418 +       unsigned int msgbufcount; /* IN/OUT */
10419 +       void **msgbufs;
10420 +} VCHIQ_AWAIT_COMPLETION_T;
10421 +
10422 +typedef struct {
10423 +       unsigned int handle;
10424 +       int blocking;
10425 +       unsigned int bufsize;
10426 +       void *buf;
10427 +} VCHIQ_DEQUEUE_MESSAGE_T;
10428 +
10429 +typedef struct {
10430 +       unsigned int config_size;
10431 +       VCHIQ_CONFIG_T *pconfig;
10432 +} VCHIQ_GET_CONFIG_T;
10433 +
10434 +typedef struct {
10435 +       unsigned int handle;
10436 +       VCHIQ_SERVICE_OPTION_T option;
10437 +       int value;
10438 +} VCHIQ_SET_SERVICE_OPTION_T;
10439 +
10440 +typedef struct {
10441 +       void     *virt_addr;
10442 +       size_t    num_bytes;
10443 +} VCHIQ_DUMP_MEM_T;
10444 +
10445 +#define VCHIQ_IOC_CONNECT              _IO(VCHIQ_IOC_MAGIC,   0)
10446 +#define VCHIQ_IOC_SHUTDOWN             _IO(VCHIQ_IOC_MAGIC,   1)
10447 +#define VCHIQ_IOC_CREATE_SERVICE \
10448 +       _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
10449 +#define VCHIQ_IOC_REMOVE_SERVICE       _IO(VCHIQ_IOC_MAGIC,   3)
10450 +#define VCHIQ_IOC_QUEUE_MESSAGE \
10451 +       _IOW(VCHIQ_IOC_MAGIC,  4, VCHIQ_QUEUE_MESSAGE_T)
10452 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
10453 +       _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
10454 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
10455 +       _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
10456 +#define VCHIQ_IOC_AWAIT_COMPLETION \
10457 +       _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
10458 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
10459 +       _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
10460 +#define VCHIQ_IOC_GET_CLIENT_ID        _IO(VCHIQ_IOC_MAGIC,   9)
10461 +#define VCHIQ_IOC_GET_CONFIG \
10462 +       _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
10463 +#define VCHIQ_IOC_CLOSE_SERVICE        _IO(VCHIQ_IOC_MAGIC,   11)
10464 +#define VCHIQ_IOC_USE_SERVICE          _IO(VCHIQ_IOC_MAGIC,   12)
10465 +#define VCHIQ_IOC_RELEASE_SERVICE      _IO(VCHIQ_IOC_MAGIC,   13)
10466 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
10467 +       _IOW(VCHIQ_IOC_MAGIC,  14, VCHIQ_SET_SERVICE_OPTION_T)
10468 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
10469 +       _IOW(VCHIQ_IOC_MAGIC,  15, VCHIQ_DUMP_MEM_T)
10470 +#define VCHIQ_IOC_MAX                  15
10471 +
10472 +#endif
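For reference, a hedged user-space sketch of the GET_CONFIG ioctl defined above. The /dev/vchiq node name is an assumption about how the character device registered elsewhere in this patch ends up exposed, and the includes are only what a stand-alone build would plausibly need.

/* Illustrative user-space sketch; the device node name is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"

int main(void)
{
	VCHIQ_CONFIG_T config;
	VCHIQ_GET_CONFIG_T arg = { sizeof(config), &config };
	int fd = open("/dev/vchiq", O_RDWR);

	if (fd < 0 || ioctl(fd, VCHIQ_IOC_GET_CONFIG, &arg) != 0) {
		perror("vchiq");
		return 1;
	}
	printf("max_msg_size=%u, version %d (min %d)\n",
	       config.max_msg_size, config.version, config.version_min);
	close(fd);
	return 0;
}

The config_size field tells the driver how large the caller's structure is, which allows VCHIQ_CONFIG_T to grow without breaking older callers.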
10473 --- /dev/null
10474 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10475 @@ -0,0 +1,456 @@
10476 +/**
10477 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10478 + *
10479 + * Redistribution and use in source and binary forms, with or without
10480 + * modification, are permitted provided that the following conditions
10481 + * are met:
10482 + * 1. Redistributions of source code must retain the above copyright
10483 + *    notice, this list of conditions, and the following disclaimer,
10484 + *    without modification.
10485 + * 2. Redistributions in binary form must reproduce the above copyright
10486 + *    notice, this list of conditions and the following disclaimer in the
10487 + *    documentation and/or other materials provided with the distribution.
10488 + * 3. The names of the above-listed copyright holders may not be used
10489 + *    to endorse or promote products derived from this software without
10490 + *    specific prior written permission.
10491 + *
10492 + * ALTERNATIVELY, this software may be distributed under the terms of the
10493 + * GNU General Public License ("GPL") version 2, as published by the Free
10494 + * Software Foundation.
10495 + *
10496 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10497 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10498 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10499 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10500 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10501 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10502 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10503 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10504 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10505 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10506 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10507 + */
10508 +
10509 +/* ---- Include Files ---------------------------------------------------- */
10510 +
10511 +#include <linux/kernel.h>
10512 +#include <linux/module.h>
10513 +#include <linux/mutex.h>
10514 +
10515 +#include "vchiq_core.h"
10516 +#include "vchiq_arm.h"
10517 +
10518 +/* ---- Public Variables ------------------------------------------------- */
10519 +
10520 +/* ---- Private Constants and Types -------------------------------------- */
10521 +
10522 +struct bulk_waiter_node {
10523 +       struct bulk_waiter bulk_waiter;
10524 +       int pid;
10525 +       struct list_head list;
10526 +};
10527 +
10528 +struct vchiq_instance_struct {
10529 +       VCHIQ_STATE_T *state;
10530 +
10531 +       int connected;
10532 +
10533 +       struct list_head bulk_waiter_list;
10534 +       struct mutex bulk_waiter_list_mutex;
10535 +};
10536 +
10537 +static VCHIQ_STATUS_T
10538 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10539 +       unsigned int size, VCHIQ_BULK_DIR_T dir);
10540 +
10541 +/****************************************************************************
10542 +*
10543 +*   vchiq_initialise
10544 +*
10545 +***************************************************************************/
10546 +#define VCHIQ_INIT_RETRIES 10
10547 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
10548 +{
10549 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
10550 +       VCHIQ_STATE_T *state;
10551 +       VCHIQ_INSTANCE_T instance = NULL;
10552 +       int i;
10553 +
10554 +       vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
10555 +
10556 +       /* VideoCore may not be ready due to boot-up timing.  It may never be
10557 +        * ready if the kernel and firmware are mismatched, so don't block forever. */
10558 +       for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
10559 +               state = vchiq_get_state();
10560 +               if (state)
10561 +                       break;
10562 +               udelay(500);
10563 +       }
10564 +       if (i == VCHIQ_INIT_RETRIES) {
10565 +               vchiq_log_error(vchiq_core_log_level,
10566 +                       "%s: videocore not initialized\n", __func__);
10567 +               goto failed;
10568 +       } else if (i > 0) {
10569 +               vchiq_log_warning(vchiq_core_log_level,
10570 +                       "%s: videocore initialized after %d retries\n", __func__, i);
10571 +       }
10572 +
10573 +       instance = kzalloc(sizeof(*instance), GFP_KERNEL);
10574 +       if (!instance) {
10575 +               vchiq_log_error(vchiq_core_log_level,
10576 +                       "%s: error allocating vchiq instance\n", __func__);
10577 +               goto failed;
10578 +       }
10579 +
10580 +       instance->connected = 0;
10581 +       instance->state = state;
10582 +       mutex_init(&instance->bulk_waiter_list_mutex);
10583 +       INIT_LIST_HEAD(&instance->bulk_waiter_list);
10584 +
10585 +       *instanceOut = instance;
10586 +
10587 +       status = VCHIQ_SUCCESS;
10588 +
10589 +failed:
10590 +       vchiq_log_trace(vchiq_core_log_level,
10591 +               "%s(%p): returning %d", __func__, instance, status);
10592 +
10593 +       return status;
10594 +}
10595 +EXPORT_SYMBOL(vchiq_initialise);
10596 +
10597 +/****************************************************************************
10598 +*
10599 +*   vchiq_shutdown
10600 +*
10601 +***************************************************************************/
10602 +
10603 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
10604 +{
10605 +       VCHIQ_STATUS_T status;
10606 +       VCHIQ_STATE_T *state = instance->state;
10607 +
10608 +       vchiq_log_trace(vchiq_core_log_level,
10609 +               "%s(%p) called", __func__, instance);
10610 +
10611 +       if (mutex_lock_interruptible(&state->mutex) != 0)
10612 +               return VCHIQ_RETRY;
10613 +
10614 +       /* Remove all services */
10615 +       status = vchiq_shutdown_internal(state, instance);
10616 +
10617 +       mutex_unlock(&state->mutex);
10618 +
10619 +       vchiq_log_trace(vchiq_core_log_level,
10620 +               "%s(%p): returning %d", __func__, instance, status);
10621 +
10622 +       if (status == VCHIQ_SUCCESS) {
10623 +               struct list_head *pos, *next;
10624 +               list_for_each_safe(pos, next,
10625 +                               &instance->bulk_waiter_list) {
10626 +                       struct bulk_waiter_node *waiter;
10627 +                       waiter = list_entry(pos,
10628 +                                       struct bulk_waiter_node,
10629 +                                       list);
10630 +                       list_del(pos);
10631 +                       vchiq_log_info(vchiq_arm_log_level,
10632 +                                       "bulk_waiter - cleaned up %x "
10633 +                                       "for pid %d",
10634 +                                       (unsigned int)waiter, waiter->pid);
10635 +                       kfree(waiter);
10636 +               }
10637 +               kfree(instance);
10638 +       }
10639 +
10640 +       return status;
10641 +}
10642 +EXPORT_SYMBOL(vchiq_shutdown);
10643 +
10644 +/****************************************************************************
10645 +*
10646 +*   vchiq_is_connected
10647 +*
10648 +***************************************************************************/
10649 +
10650 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
10651 +{
10652 +       return instance->connected;
10653 +}
10654 +
10655 +/****************************************************************************
10656 +*
10657 +*   vchiq_connect
10658 +*
10659 +***************************************************************************/
10660 +
10661 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
10662 +{
10663 +       VCHIQ_STATUS_T status;
10664 +       VCHIQ_STATE_T *state = instance->state;
10665 +
10666 +       vchiq_log_trace(vchiq_core_log_level,
10667 +               "%s(%p) called", __func__, instance);
10668 +
10669 +       if (mutex_lock_interruptible(&state->mutex) != 0) {
10670 +               vchiq_log_trace(vchiq_core_log_level,
10671 +                       "%s: call to mutex_lock failed", __func__);
10672 +               status = VCHIQ_RETRY;
10673 +               goto failed;
10674 +       }
10675 +       status = vchiq_connect_internal(state, instance);
10676 +
10677 +       if (status == VCHIQ_SUCCESS)
10678 +               instance->connected = 1;
10679 +
10680 +       mutex_unlock(&state->mutex);
10681 +
10682 +failed:
10683 +       vchiq_log_trace(vchiq_core_log_level,
10684 +               "%s(%p): returning %d", __func__, instance, status);
10685 +
10686 +       return status;
10687 +}
10688 +EXPORT_SYMBOL(vchiq_connect);
10689 +
10690 +/****************************************************************************
10691 +*
10692 +*   vchiq_add_service
10693 +*
10694 +***************************************************************************/
10695 +
10696 +VCHIQ_STATUS_T vchiq_add_service(
10697 +       VCHIQ_INSTANCE_T              instance,
10698 +       const VCHIQ_SERVICE_PARAMS_T *params,
10699 +       VCHIQ_SERVICE_HANDLE_T       *phandle)
10700 +{
10701 +       VCHIQ_STATUS_T status;
10702 +       VCHIQ_STATE_T *state = instance->state;
10703 +       VCHIQ_SERVICE_T *service = NULL;
10704 +       int srvstate;
10705 +
10706 +       vchiq_log_trace(vchiq_core_log_level,
10707 +               "%s(%p) called", __func__, instance);
10708 +
10709 +       *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10710 +
10711 +       srvstate = vchiq_is_connected(instance)
10712 +               ? VCHIQ_SRVSTATE_LISTENING
10713 +               : VCHIQ_SRVSTATE_HIDDEN;
10714 +
10715 +       service = vchiq_add_service_internal(
10716 +               state,
10717 +               params,
10718 +               srvstate,
10719 +               instance,
10720 +               NULL);
10721 +
10722 +       if (service) {
10723 +               *phandle = service->handle;
10724 +               status = VCHIQ_SUCCESS;
10725 +       } else
10726 +               status = VCHIQ_ERROR;
10727 +
10728 +       vchiq_log_trace(vchiq_core_log_level,
10729 +               "%s(%p): returning %d", __func__, instance, status);
10730 +
10731 +       return status;
10732 +}
10733 +EXPORT_SYMBOL(vchiq_add_service);
10734 +
10735 +/****************************************************************************
10736 +*
10737 +*   vchiq_open_service
10738 +*
10739 +***************************************************************************/
10740 +
10741 +VCHIQ_STATUS_T vchiq_open_service(
10742 +       VCHIQ_INSTANCE_T              instance,
10743 +       const VCHIQ_SERVICE_PARAMS_T *params,
10744 +       VCHIQ_SERVICE_HANDLE_T       *phandle)
10745 +{
10746 +       VCHIQ_STATUS_T   status = VCHIQ_ERROR;
10747 +       VCHIQ_STATE_T   *state = instance->state;
10748 +       VCHIQ_SERVICE_T *service = NULL;
10749 +
10750 +       vchiq_log_trace(vchiq_core_log_level,
10751 +               "%s(%p) called", __func__, instance);
10752 +
10753 +       *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10754 +
10755 +       if (!vchiq_is_connected(instance))
10756 +               goto failed;
10757 +
10758 +       service = vchiq_add_service_internal(state,
10759 +               params,
10760 +               VCHIQ_SRVSTATE_OPENING,
10761 +               instance,
10762 +               NULL);
10763 +
10764 +       if (service) {
10765 +               status = vchiq_open_service_internal(service, current->pid);
10766 +               if (status == VCHIQ_SUCCESS)
10767 +                       *phandle = service->handle;
10768 +               else
10769 +                       vchiq_remove_service(service->handle);
10770 +       }
10771 +
10772 +failed:
10773 +       vchiq_log_trace(vchiq_core_log_level,
10774 +               "%s(%p): returning %d", __func__, instance, status);
10775 +
10776 +       return status;
10777 +}
10778 +EXPORT_SYMBOL(vchiq_open_service);
10779 +
10780 +VCHIQ_STATUS_T
10781 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
10782 +       const void *data, unsigned int size, void *userdata)
10783 +{
10784 +       return vchiq_bulk_transfer(handle,
10785 +               VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10786 +               VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
10787 +}
10788 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
10789 +
10790 +VCHIQ_STATUS_T
10791 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10792 +       unsigned int size, void *userdata)
10793 +{
10794 +       return vchiq_bulk_transfer(handle,
10795 +               VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10796 +               VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
10797 +}
10798 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
10799 +
10800 +VCHIQ_STATUS_T
10801 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
10802 +       unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10803 +{
10804 +       VCHIQ_STATUS_T status;
10805 +
10806 +       switch (mode) {
10807 +       case VCHIQ_BULK_MODE_NOCALLBACK:
10808 +       case VCHIQ_BULK_MODE_CALLBACK:
10809 +               status = vchiq_bulk_transfer(handle,
10810 +                       VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10811 +                       mode, VCHIQ_BULK_TRANSMIT);
10812 +               break;
10813 +       case VCHIQ_BULK_MODE_BLOCKING:
10814 +               status = vchiq_blocking_bulk_transfer(handle,
10815 +                       (void *)data, size, VCHIQ_BULK_TRANSMIT);
10816 +               break;
10817 +       default:
10818 +               return VCHIQ_ERROR;
10819 +       }
10820 +
10821 +       return status;
10822 +}
10823 +EXPORT_SYMBOL(vchiq_bulk_transmit);
10824 +
10825 +VCHIQ_STATUS_T
10826 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10827 +       unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10828 +{
10829 +       VCHIQ_STATUS_T status;
10830 +
10831 +       switch (mode) {
10832 +       case VCHIQ_BULK_MODE_NOCALLBACK:
10833 +       case VCHIQ_BULK_MODE_CALLBACK:
10834 +               status = vchiq_bulk_transfer(handle,
10835 +                       VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10836 +                       mode, VCHIQ_BULK_RECEIVE);
10837 +               break;
10838 +       case VCHIQ_BULK_MODE_BLOCKING:
10839 +               status = vchiq_blocking_bulk_transfer(handle,
10840 +                       (void *)data, size, VCHIQ_BULK_RECEIVE);
10841 +               break;
10842 +       default:
10843 +               return VCHIQ_ERROR;
10844 +       }
10845 +
10846 +       return status;
10847 +}
10848 +EXPORT_SYMBOL(vchiq_bulk_receive);
10849 +
10850 +static VCHIQ_STATUS_T
10851 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10852 +       unsigned int size, VCHIQ_BULK_DIR_T dir)
10853 +{
10854 +       VCHIQ_INSTANCE_T instance;
10855 +       VCHIQ_SERVICE_T *service;
10856 +       VCHIQ_STATUS_T status;
10857 +       struct bulk_waiter_node *waiter = NULL;
10858 +       struct list_head *pos;
10859 +
10860 +       service = find_service_by_handle(handle);
10861 +       if (!service)
10862 +               return VCHIQ_ERROR;
10863 +
10864 +       instance = service->instance;
10865 +
10866 +       unlock_service(service);
10867 +
10868 +       mutex_lock(&instance->bulk_waiter_list_mutex);
10869 +       list_for_each(pos, &instance->bulk_waiter_list) {
10870 +               if (list_entry(pos, struct bulk_waiter_node,
10871 +                               list)->pid == current->pid) {
10872 +                       waiter = list_entry(pos,
10873 +                               struct bulk_waiter_node,
10874 +                               list);
10875 +                       list_del(pos);
10876 +                       break;
10877 +               }
10878 +       }
10879 +       mutex_unlock(&instance->bulk_waiter_list_mutex);
10880 +
10881 +       if (waiter) {
10882 +               VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10883 +               if (bulk) {
10884 +                       /* This thread has an outstanding bulk transfer. */
10885 +                       if ((bulk->data != data) ||
10886 +                               (bulk->size != size)) {
10887 +                               /* This is not a retry of the previous one.
10888 +                               ** Cancel the signal when the transfer
10889 +                               ** completes. */
10890 +                               spin_lock(&bulk_waiter_spinlock);
10891 +                               bulk->userdata = NULL;
10892 +                               spin_unlock(&bulk_waiter_spinlock);
10893 +                       }
10894 +               }
10895 +       }
10896 +
10897 +       if (!waiter) {
10898 +               waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
10899 +               if (!waiter) {
10900 +                       vchiq_log_error(vchiq_core_log_level,
10901 +                               "%s - out of memory", __func__);
10902 +                       return VCHIQ_ERROR;
10903 +               }
10904 +       }
10905 +
10906 +       status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
10907 +               data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
10908 +               dir);
10909 +       if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
10910 +               !waiter->bulk_waiter.bulk) {
10911 +               VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10912 +               if (bulk) {
10913 +                       /* Cancel the signal when the transfer
10914 +                        ** completes. */
10915 +                       spin_lock(&bulk_waiter_spinlock);
10916 +                       bulk->userdata = NULL;
10917 +                       spin_unlock(&bulk_waiter_spinlock);
10918 +               }
10919 +               kfree(waiter);
10920 +       } else {
10921 +               waiter->pid = current->pid;
10922 +               mutex_lock(&instance->bulk_waiter_list_mutex);
10923 +               list_add(&waiter->list, &instance->bulk_waiter_list);
10924 +               mutex_unlock(&instance->bulk_waiter_list_mutex);
10925 +               vchiq_log_info(vchiq_arm_log_level,
10926 +                               "saved bulk_waiter %x for pid %d",
10927 +                               (unsigned int)waiter, current->pid);
10928 +       }
10929 +
10930 +       return status;
10931 +}
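The three bulk modes exported from this file differ only in how completion is reported: CALLBACK and NOCALLBACK go straight to vchiq_bulk_transfer(), while BLOCKING routes through vchiq_blocking_bulk_transfer(), which waits on a bulk_waiter and keeps it on a per-pid list so an interrupted transfer can be retried. A hedged sketch of a blocking transmit from another kernel module follows; the handle and buffer are assumed to come from an earlier vchiq_open_service() call.

/* Sketch only: 'handle' and 'buf' are assumed to exist already. */
static int demo_bulk_send(VCHIQ_SERVICE_HANDLE_T handle,
	const void *buf, unsigned int len)
{
	/* Returns once the transfer has completed (or could not be queued). */
	VCHIQ_STATUS_T status = vchiq_bulk_transmit(handle, buf, len, NULL,
		VCHIQ_BULK_MODE_BLOCKING);

	if (status == VCHIQ_RETRY)
		return -ERESTARTSYS;	/* interrupted by a signal; caller may retry */
	return (status == VCHIQ_SUCCESS) ? 0 : -EIO;
}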
10932 --- /dev/null
10933 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
10934 @@ -0,0 +1,71 @@
10935 +/**
10936 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10937 + *
10938 + * Redistribution and use in source and binary forms, with or without
10939 + * modification, are permitted provided that the following conditions
10940 + * are met:
10941 + * 1. Redistributions of source code must retain the above copyright
10942 + *    notice, this list of conditions, and the following disclaimer,
10943 + *    without modification.
10944 + * 2. Redistributions in binary form must reproduce the above copyright
10945 + *    notice, this list of conditions and the following disclaimer in the
10946 + *    documentation and/or other materials provided with the distribution.
10947 + * 3. The names of the above-listed copyright holders may not be used
10948 + *    to endorse or promote products derived from this software without
10949 + *    specific prior written permission.
10950 + *
10951 + * ALTERNATIVELY, this software may be distributed under the terms of the
10952 + * GNU General Public License ("GPL") version 2, as published by the Free
10953 + * Software Foundation.
10954 + *
10955 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10956 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10957 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10958 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10959 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10960 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10961 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10962 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10963 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10964 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10965 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10966 + */
10967 +
10968 +#ifndef VCHIQ_MEMDRV_H
10969 +#define VCHIQ_MEMDRV_H
10970 +
10971 +/* ---- Include Files ----------------------------------------------------- */
10972 +
10973 +#include <linux/kernel.h>
10974 +#include "vchiq_if.h"
10975 +
10976 +/* ---- Constants and Types ---------------------------------------------- */
10977 +
10978 +typedef struct {
10979 +        void                   *armSharedMemVirt;
10980 +        dma_addr_t              armSharedMemPhys;
10981 +        size_t                  armSharedMemSize;
10982 +
10983 +        void                   *vcSharedMemVirt;
10984 +        dma_addr_t              vcSharedMemPhys;
10985 +        size_t                  vcSharedMemSize;
10986 +} VCHIQ_SHARED_MEM_INFO_T;
10987 +
10988 +/* ---- Variable Externs ------------------------------------------------- */
10989 +
10990 +/* ---- Function Prototypes ---------------------------------------------- */
10991 +
10992 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
10993 +
10994 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
10995 +
10996 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
10997 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
10998 +
10999 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
11000 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
11001 +
11002 +VCHIQ_STATUS_T vchiq_userdrv_resume(
11003 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
11004 +
11005 +#endif
11006 --- /dev/null
11007 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11008 @@ -0,0 +1,58 @@
11009 +/**
11010 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11011 + *
11012 + * Redistribution and use in source and binary forms, with or without
11013 + * modification, are permitted provided that the following conditions
11014 + * are met:
11015 + * 1. Redistributions of source code must retain the above copyright
11016 + *    notice, this list of conditions, and the following disclaimer,
11017 + *    without modification.
11018 + * 2. Redistributions in binary form must reproduce the above copyright
11019 + *    notice, this list of conditions and the following disclaimer in the
11020 + *    documentation and/or other materials provided with the distribution.
11021 + * 3. The names of the above-listed copyright holders may not be used
11022 + *    to endorse or promote products derived from this software without
11023 + *    specific prior written permission.
11024 + *
11025 + * ALTERNATIVELY, this software may be distributed under the terms of the
11026 + * GNU General Public License ("GPL") version 2, as published by the Free
11027 + * Software Foundation.
11028 + *
11029 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11030 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11031 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11032 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11033 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11034 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11035 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11036 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11037 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11038 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11039 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11040 + */
11041 +
11042 +#ifndef VCHIQ_PAGELIST_H
11043 +#define VCHIQ_PAGELIST_H
11044 +
11045 +#ifndef PAGE_SIZE
11046 +#define PAGE_SIZE 4096
11047 +#endif
11048 +#define CACHE_LINE_SIZE 32
11049 +#define PAGELIST_WRITE 0
11050 +#define PAGELIST_READ 1
11051 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11052 +
11053 +typedef struct pagelist_struct {
11054 +       unsigned long length;
11055 +       unsigned short type;
11056 +       unsigned short offset;
11057 +       unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11058 +                                  pages at consecutive addresses. */
11059 +} PAGELIST_T;
11060 +
11061 +typedef struct fragments_struct {
11062 +       char headbuf[CACHE_LINE_SIZE];
11063 +       char tailbuf[CACHE_LINE_SIZE];
11064 +} FRAGMENTS_T;
11065 +
11066 +#endif /* VCHIQ_PAGELIST_H */
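A short decoding sketch for the addrs[] run-length encoding described in the comment above, assuming the 4096-byte pages defined in this header so the low 12 bits are free to carry the count of following pages; the decoding is inferred from that comment, not spelled out by the header itself.

/* Illustrative decode of one addrs[] entry, assuming 4 KiB pages. */
static void pagelist_walk_entry(unsigned long entry)
{
	unsigned long base = entry & ~((unsigned long)PAGE_SIZE - 1);
	unsigned long run  = entry & (PAGE_SIZE - 1);	/* pages following 'base' */
	unsigned long i;

	for (i = 0; i <= run; i++)	/* run == 0 means a single page */
		pr_info("page at %#lx\n", base + i * PAGE_SIZE);
}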
11067 --- /dev/null
11068 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11069 @@ -0,0 +1,253 @@
11070 +/**
11071 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11072 + *
11073 + * Redistribution and use in source and binary forms, with or without
11074 + * modification, are permitted provided that the following conditions
11075 + * are met:
11076 + * 1. Redistributions of source code must retain the above copyright
11077 + *    notice, this list of conditions, and the following disclaimer,
11078 + *    without modification.
11079 + * 2. Redistributions in binary form must reproduce the above copyright
11080 + *    notice, this list of conditions and the following disclaimer in the
11081 + *    documentation and/or other materials provided with the distribution.
11082 + * 3. The names of the above-listed copyright holders may not be used
11083 + *    to endorse or promote products derived from this software without
11084 + *    specific prior written permission.
11085 + *
11086 + * ALTERNATIVELY, this software may be distributed under the terms of the
11087 + * GNU General Public License ("GPL") version 2, as published by the Free
11088 + * Software Foundation.
11089 + *
11090 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11091 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11092 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11093 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11094 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11095 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11096 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11097 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11098 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11099 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11100 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11101 + */
11102 +
11103 +
11104 +#include <linux/proc_fs.h>
11105 +#include "vchiq_core.h"
11106 +#include "vchiq_arm.h"
11107 +
11108 +#if 1
11109 +
11110 +int vchiq_proc_init(void)
11111 +{
11112 +       return 0;
11113 +}
11114 +
11115 +void vchiq_proc_deinit(void)
11116 +{
11117 +}
11118 +
11119 +#else
11120 +
11121 +struct vchiq_proc_info {
11122 +       /* Global 'vc' proc entry used by all instances */
11123 +       struct proc_dir_entry *vc_cfg_dir;
11124 +
11125 +       /* one entry per client process */
11126 +       struct proc_dir_entry *clients;
11127 +
11128 +       /* log categories */
11129 +       struct proc_dir_entry *log_categories;
11130 +};
11131 +
11132 +static struct vchiq_proc_info proc_info;
11133 +
11134 +struct proc_dir_entry *vchiq_proc_top(void)
11135 +{
11136 +       BUG_ON(proc_info.vc_cfg_dir == NULL);
11137 +       return proc_info.vc_cfg_dir;
11138 +}
11139 +
11140 +/****************************************************************************
11141 +*
11142 +*   log category entries
11143 +*
11144 +***************************************************************************/
11145 +#define PROC_WRITE_BUF_SIZE 256
11146 +
11147 +#define VCHIQ_LOG_ERROR_STR   "error"
11148 +#define VCHIQ_LOG_WARNING_STR "warning"
11149 +#define VCHIQ_LOG_INFO_STR    "info"
11150 +#define VCHIQ_LOG_TRACE_STR   "trace"
11151 +
11152 +static int log_cfg_read(char *buffer,
11153 +       char **start,
11154 +       off_t off,
11155 +       int count,
11156 +       int *eof,
11157 +       void *data)
11158 +{
11159 +       int len = 0;
11160 +       char *log_value = NULL;
11161 +
11162 +       switch (*((int *)data)) {
11163 +       case VCHIQ_LOG_ERROR:
11164 +               log_value = VCHIQ_LOG_ERROR_STR;
11165 +               break;
11166 +       case VCHIQ_LOG_WARNING:
11167 +               log_value = VCHIQ_LOG_WARNING_STR;
11168 +               break;
11169 +       case VCHIQ_LOG_INFO:
11170 +               log_value = VCHIQ_LOG_INFO_STR;
11171 +               break;
11172 +       case VCHIQ_LOG_TRACE:
11173 +               log_value = VCHIQ_LOG_TRACE_STR;
11174 +               break;
11175 +       default:
11176 +               break;
11177 +       }
11178 +
11179 +       len += sprintf(buffer + len,
11180 +               "%s\n",
11181 +               log_value ? log_value : "(null)");
11182 +
11183 +       return len;
11184 +}
11185 +
11186 +
11187 +static int log_cfg_write(struct file *file,
11188 +       const char __user *buffer,
11189 +       unsigned long count,
11190 +       void *data)
11191 +{
11192 +       int *log_module = data;
11193 +       char kbuf[PROC_WRITE_BUF_SIZE + 1];
11194 +
11195 +       (void)file;
11196 +
11197 +       memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
11198 +       if (count >= PROC_WRITE_BUF_SIZE)
11199 +               count = PROC_WRITE_BUF_SIZE;
11200 +
11201 +       if (copy_from_user(kbuf,
11202 +               buffer,
11203 +               count) != 0)
11204 +               return -EFAULT;
11205 +       kbuf[count - 1] = 0;
11206 +
11207 +       if (strncmp("error", kbuf, strlen("error")) == 0)
11208 +               *log_module = VCHIQ_LOG_ERROR;
11209 +       else if (strncmp("warning", kbuf, strlen("warning")) == 0)
11210 +               *log_module = VCHIQ_LOG_WARNING;
11211 +       else if (strncmp("info", kbuf, strlen("info")) == 0)
11212 +               *log_module = VCHIQ_LOG_INFO;
11213 +       else if (strncmp("trace", kbuf, strlen("trace")) == 0)
11214 +               *log_module = VCHIQ_LOG_TRACE;
11215 +       else
11216 +               *log_module = VCHIQ_LOG_DEFAULT;
11217 +
11218 +       return count;
11219 +}
11220 +
11221 +/* Log category proc entries */
11222 +struct vchiq_proc_log_entry {
11223 +       const char *name;
11224 +       int *plevel;
11225 +       struct proc_dir_entry *dir;
11226 +};
11227 +
11228 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
11229 +       { "core", &vchiq_core_log_level },
11230 +       { "msg",  &vchiq_core_msg_log_level },
11231 +       { "sync", &vchiq_sync_log_level },
11232 +       { "susp", &vchiq_susp_log_level },
11233 +       { "arm",  &vchiq_arm_log_level },
11234 +};
11235 +static int n_log_entries =
11236 +       sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
11237 +
11238 +/* create an entry under /proc/vc/log for each log category */
11239 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
11240 +{
11241 +       struct proc_dir_entry *dir;
11242 +       size_t i;
11243 +       int ret = 0;
11244 +       dir = proc_mkdir("log", proc_info.vc_cfg_dir);
11245 +       if (!dir)
11246 +               return -ENOMEM;
11247 +       proc_info.log_categories = dir;
11248 +
11249 +       for (i = 0; i < n_log_entries; i++) {
11250 +               dir = create_proc_entry(vchiq_proc_log_entries[i].name,
11251 +                                       0644,
11252 +                                       proc_info.log_categories);
11253 +               if (!dir) {
11254 +                       ret = -ENOMEM;
11255 +                       break;
11256 +               }
11257 +
11258 +               dir->read_proc = &log_cfg_read;
11259 +               dir->write_proc = &log_cfg_write;
11260 +               dir->data = (void *)vchiq_proc_log_entries[i].plevel;
11261 +
11262 +               vchiq_proc_log_entries[i].dir = dir;
11263 +       }
11264 +       return ret;
11265 +}
11266 +
11267 +
11268 +int vchiq_proc_init(void)
11269 +{
11270 +       BUG_ON(proc_info.vc_cfg_dir != NULL);
11271 +
11272 +       proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
11273 +       if (proc_info.vc_cfg_dir == NULL)
11274 +               goto fail;
11275 +
11276 +       proc_info.clients = proc_mkdir("clients",
11277 +                               proc_info.vc_cfg_dir);
11278 +       if (!proc_info.clients)
11279 +               goto fail;
11280 +
11281 +       if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
11282 +               goto fail;
11283 +
11284 +       return 0;
11285 +
11286 +fail:
11287 +       vchiq_proc_deinit();
11288 +       vchiq_log_error(vchiq_arm_log_level,
11289 +               "%s: failed to create proc directory",
11290 +               __func__);
11291 +
11292 +       return -ENOMEM;
11293 +}
11294 +
11295 +/* remove all the proc entries */
11296 +void vchiq_proc_deinit(void)
11297 +{
11298 +       /* log category entries */
11299 +       if (proc_info.log_categories) {
11300 +               size_t i;
11301 +               for (i = 0; i < n_log_entries; i++)
11302 +                       if (vchiq_proc_log_entries[i].dir)
11303 +                               remove_proc_entry(
11304 +                                       vchiq_proc_log_entries[i].name,
11305 +                                       proc_info.log_categories);
11306 +
11307 +               remove_proc_entry(proc_info.log_categories->name,
11308 +                                 proc_info.vc_cfg_dir);
11309 +       }
11310 +       if (proc_info.clients)
11311 +               remove_proc_entry(proc_info.clients->name,
11312 +                                 proc_info.vc_cfg_dir);
11313 +       if (proc_info.vc_cfg_dir)
11314 +               remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
11315 +}
11316 +
11317 +struct proc_dir_entry *vchiq_clients_top(void)
11318 +{
11319 +       return proc_info.clients;
11320 +}
11321 +
11322 +#endif
11323 --- /dev/null
11324 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11325 @@ -0,0 +1,828 @@
11326 +/**
11327 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11328 + *
11329 + * Redistribution and use in source and binary forms, with or without
11330 + * modification, are permitted provided that the following conditions
11331 + * are met:
11332 + * 1. Redistributions of source code must retain the above copyright
11333 + *    notice, this list of conditions, and the following disclaimer,
11334 + *    without modification.
11335 + * 2. Redistributions in binary form must reproduce the above copyright
11336 + *    notice, this list of conditions and the following disclaimer in the
11337 + *    documentation and/or other materials provided with the distribution.
11338 + * 3. The names of the above-listed copyright holders may not be used
11339 + *    to endorse or promote products derived from this software without
11340 + *    specific prior written permission.
11341 + *
11342 + * ALTERNATIVELY, this software may be distributed under the terms of the
11343 + * GNU General Public License ("GPL") version 2, as published by the Free
11344 + * Software Foundation.
11345 + *
11346 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11347 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11348 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11349 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11350 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11351 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11352 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11353 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11354 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11355 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11356 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11357 + */
11358 +#include <linux/module.h>
11359 +#include <linux/types.h>
11360 +
11361 +#include "interface/vchi/vchi.h"
11362 +#include "vchiq.h"
11363 +#include "vchiq_core.h"
11364 +
11365 +#include "vchiq_util.h"
11366 +
11367 +#include <stddef.h>
11368 +
11369 +#define vchiq_status_to_vchi(status) ((int32_t)status)
11370 +
11371 +typedef struct {
11372 +       VCHIQ_SERVICE_HANDLE_T handle;
11373 +
11374 +       VCHIU_QUEUE_T queue;
11375 +
11376 +       VCHI_CALLBACK_T callback;
11377 +       void *callback_param;
11378 +} SHIM_SERVICE_T;
11379 +
11380 +/* ----------------------------------------------------------------------
11381 + * return pointer to the mphi message driver function table
11382 + * -------------------------------------------------------------------- */
11383 +const VCHI_MESSAGE_DRIVER_T *
11384 +vchi_mphi_message_driver_func_table(void)
11385 +{
11386 +       return NULL;
11387 +}
11388 +
11389 +/* ----------------------------------------------------------------------
11390 + * return a pointer to the 'single' connection driver fops
11391 + * -------------------------------------------------------------------- */
11392 +const VCHI_CONNECTION_API_T *
11393 +single_get_func_table(void)
11394 +{
11395 +       return NULL;
11396 +}
11397 +
11398 +VCHI_CONNECTION_T *vchi_create_connection(
11399 +       const VCHI_CONNECTION_API_T *function_table,
11400 +       const VCHI_MESSAGE_DRIVER_T *low_level)
11401 +{
11402 +       (void)function_table;
11403 +       (void)low_level;
11404 +       return NULL;
11405 +}
11406 +
11407 +/***********************************************************
11408 + * Name: vchi_msg_peek
11409 + *
11410 + * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
11411 + *             void **data,
11412 + *             uint32_t *msg_size,
11413 + *
11414 + *
11415 + *             VCHI_FLAGS_T flags
11416 + *
11417 + * Description: Routine to return a pointer to the current message, allowing
11418 + *              in-place processing. Remove the message with vchi_msg_remove
11419 + *              once you are finished with it.
11420 + *
11421 + * Returns: int32_t - success == 0
11422 + *
11423 + ***********************************************************/
11424 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11425 +       void **data,
11426 +       uint32_t *msg_size,
11427 +       VCHI_FLAGS_T flags)
11428 +{
11429 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11430 +       VCHIQ_HEADER_T *header;
11431 +
11432 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
11433 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11434 +
11435 +       if (flags == VCHI_FLAGS_NONE)
11436 +               if (vchiu_queue_is_empty(&service->queue))
11437 +                       return -1;
11438 +
11439 +       header = vchiu_queue_peek(&service->queue);
11440 +
11441 +       *data = header->data;
11442 +       *msg_size = header->size;
11443 +
11444 +       return 0;
11445 +}
11446 +EXPORT_SYMBOL(vchi_msg_peek);
11447 +
11448 +/***********************************************************
11449 + * Name: vchi_msg_remove
11450 + *
11451 + * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
11452 + *
11453 + * Description: Routine to remove a message (after it has been read with
11454 + *              vchi_msg_peek)
11455 + *
11456 + * Returns: int32_t - success == 0
11457 + *
11458 + ***********************************************************/
11459 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11460 +{
11461 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11462 +       VCHIQ_HEADER_T *header;
11463 +
11464 +       header = vchiu_queue_pop(&service->queue);
11465 +
11466 +       vchiq_release_message(service->handle, header);
11467 +
11468 +       return 0;
11469 +}
11470 +EXPORT_SYMBOL(vchi_msg_remove);
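+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the intended
+ * peek/remove pattern from a kernel client.  Here "svc" is assumed to be a
+ * handle obtained from vchi_service_open() and consume() is a hypothetical
+ * helper; with VCHI_FLAGS_NONE the peek returns -1 if no message is queued.
+ *
+ *        void *data;
+ *        uint32_t size;
+ *
+ *        if (vchi_msg_peek(svc, &data, &size, VCHI_FLAGS_NONE) == 0) {
+ *                consume(data, size);
+ *                vchi_msg_remove(svc);
+ *        }
+ */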
11471 +
11472 +/***********************************************************
11473 + * Name: vchi_msg_queue
11474 + *
11475 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11476 + *             const void *data,
11477 + *             uint32_t data_size,
11478 + *             VCHI_FLAGS_T flags,
11479 + *             void *msg_handle,
11480 + *
11481 + * Description: Thin wrapper to queue a message onto a connection
11482 + *
11483 + * Returns: int32_t - success == 0
11484 + *
11485 + ***********************************************************/
11486 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11487 +       const void *data,
11488 +       uint32_t data_size,
11489 +       VCHI_FLAGS_T flags,
11490 +       void *msg_handle)
11491 +{
11492 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11493 +       VCHIQ_ELEMENT_T element = {data, data_size};
11494 +       VCHIQ_STATUS_T status;
11495 +
11496 +       (void)msg_handle;
11497 +
11498 +       WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11499 +
11500 +       status = vchiq_queue_message(service->handle, &element, 1);
11501 +
11502 +       /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11503 +       ** implement a retry mechanism since this function is supposed
11504 +       ** to block until queued
11505 +       */
11506 +       while (status == VCHIQ_RETRY) {
11507 +               msleep(1);
11508 +               status = vchiq_queue_message(service->handle, &element, 1);
11509 +       }
11510 +
11511 +       return vchiq_status_to_vchi(status);
11512 +}
11513 +EXPORT_SYMBOL(vchi_msg_queue);
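+
+/*
+ * Usage sketch (illustrative only): queue a small request and let the retry
+ * loop above absorb VCHIQ_RETRY.  The flags must be
+ * VCHI_FLAGS_BLOCK_UNTIL_QUEUED and msg_handle is unused, so NULL is fine;
+ * "svc" and the request layout are placeholders of this sketch.
+ *
+ *        struct { uint32_t cmd; uint32_t arg; } req = { 1, 0 };
+ *
+ *        if (vchi_msg_queue(svc, &req, sizeof(req),
+ *                        VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL) != 0)
+ *                pr_err("vchi_msg_queue failed\n");
+ */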
11514 +
11515 +/***********************************************************
11516 + * Name: vchi_bulk_queue_receive
11517 + *
11518 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11519 + *             void *data_dst,
11520 + *             uint32_t data_size,
11521 + *             VCHI_FLAGS_T flags,
11522 + *             void *bulk_handle
11523 + *
11524 + * Description: Routine to set up a receive buffer
11525 + *
11526 + * Returns: int32_t - success == 0
11527 + *
11528 + ***********************************************************/
11529 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11530 +       void *data_dst,
11531 +       uint32_t data_size,
11532 +       VCHI_FLAGS_T flags,
11533 +       void *bulk_handle)
11534 +{
11535 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11536 +       VCHIQ_BULK_MODE_T mode;
11537 +       VCHIQ_STATUS_T status;
11538 +
11539 +       switch ((int)flags) {
11540 +       case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11541 +               | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11542 +               WARN_ON(!service->callback);
11543 +               mode = VCHIQ_BULK_MODE_CALLBACK;
11544 +               break;
11545 +       case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11546 +               mode = VCHIQ_BULK_MODE_BLOCKING;
11547 +               break;
11548 +       case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11549 +       case VCHI_FLAGS_NONE:
11550 +               mode = VCHIQ_BULK_MODE_NOCALLBACK;
11551 +               break;
11552 +       default:
11553 +               WARN(1, "unsupported flags\n");
11554 +               return vchiq_status_to_vchi(VCHIQ_ERROR);
11555 +       }
11556 +
11557 +       status = vchiq_bulk_receive(service->handle, data_dst, data_size,
11558 +               bulk_handle, mode);
11559 +
11560 +       /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
11561 +       ** implement a retry mechanism since this function is supposed
11562 +       ** to block until queued
11563 +       */
11564 +       while (status == VCHIQ_RETRY) {
11565 +               msleep(1);
11566 +               status = vchiq_bulk_receive(service->handle, data_dst,
11567 +                       data_size, bulk_handle, mode);
11568 +       }
11569 +
11570 +       return vchiq_status_to_vchi(status);
11571 +}
11572 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
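+
+/*
+ * Usage sketch (illustrative only): a blocking bulk receive into a
+ * kmalloc'd buffer.  VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE selects
+ * VCHIQ_BULK_MODE_BLOCKING above, so the call returns only once the data
+ * has arrived; "svc" is a placeholder service handle.
+ *
+ *        void *buf = kmalloc(4096, GFP_KERNEL);
+ *
+ *        if (!buf)
+ *                return -ENOMEM;
+ *        if (vchi_bulk_queue_receive(svc, buf, 4096,
+ *                        VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE, NULL) != 0)
+ *                pr_err("bulk receive failed\n");
+ */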
11573 +
11574 +/***********************************************************
11575 + * Name: vchi_bulk_queue_transmit
11576 + *
11577 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11578 + *             const void *data_src,
11579 + *             uint32_t data_size,
11580 + *             VCHI_FLAGS_T flags,
11581 + *             void *bulk_handle
11582 + *
11583 + * Description: Routine to transmit some data
11584 + *
11585 + * Returns: int32_t - success == 0
11586 + *
11587 + ***********************************************************/
11588 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
11589 +       const void *data_src,
11590 +       uint32_t data_size,
11591 +       VCHI_FLAGS_T flags,
11592 +       void *bulk_handle)
11593 +{
11594 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11595 +       VCHIQ_BULK_MODE_T mode;
11596 +       VCHIQ_STATUS_T status;
11597 +
11598 +       switch ((int)flags) {
11599 +       case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11600 +               | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11601 +               WARN_ON(!service->callback);
11602 +               mode = VCHIQ_BULK_MODE_CALLBACK;
11603 +               break;
11604 +       case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
11605 +       case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11606 +               mode = VCHIQ_BULK_MODE_BLOCKING;
11607 +               break;
11608 +       case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11609 +       case VCHI_FLAGS_NONE:
11610 +               mode = VCHIQ_BULK_MODE_NOCALLBACK;
11611 +               break;
11612 +       default:
11613 +               WARN(1, "unsupported flags\n");
11614 +               return vchiq_status_to_vchi(VCHIQ_ERROR);
11615 +       }
11616 +
11617 +       status = vchiq_bulk_transmit(service->handle, data_src, data_size,
11618 +               bulk_handle, mode);
11619 +
11620 +       /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
11621 +       ** implement a retry mechanism since this function is supposed
11622 +       ** to block until queued
11623 +       */
11624 +       while (status == VCHIQ_RETRY) {
11625 +               msleep(1);
11626 +               status = vchiq_bulk_transmit(service->handle, data_src,
11627 +                       data_size, bulk_handle, mode);
11628 +       }
11629 +
11630 +       return vchiq_status_to_vchi(status);
11631 +}
11632 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
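+
+/*
+ * Usage sketch (illustrative only): a transmit that completes through the
+ * service callback.  The flag combination below selects
+ * VCHIQ_BULK_MODE_CALLBACK, so the callback registered at open time later
+ * sees VCHI_CALLBACK_BULK_SENT with the bulk_handle value ("ctx") as its
+ * argument; "svc", buf, len and ctx are placeholders.
+ *
+ *        vchi_bulk_queue_transmit(svc, buf, len,
+ *                VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ *                VCHI_FLAGS_BLOCK_UNTIL_QUEUED, ctx);
+ */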
11633 +
11634 +/***********************************************************
11635 + * Name: vchi_msg_dequeue
11636 + *
11637 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11638 + *             void *data,
11639 + *             uint32_t max_data_size_to_read,
11640 + *             uint32_t *actual_msg_size,
11641 + *             VCHI_FLAGS_T flags
11642 + *
11643 + * Description: Routine to dequeue a message into the supplied buffer
11644 + *
11645 + * Returns: int32_t - success == 0
11646 + *
11647 + ***********************************************************/
11648 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
11649 +       void *data,
11650 +       uint32_t max_data_size_to_read,
11651 +       uint32_t *actual_msg_size,
11652 +       VCHI_FLAGS_T flags)
11653 +{
11654 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11655 +       VCHIQ_HEADER_T *header;
11656 +
11657 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
11658 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11659 +
11660 +       if (flags == VCHI_FLAGS_NONE)
11661 +               if (vchiu_queue_is_empty(&service->queue))
11662 +                       return -1;
11663 +
11664 +       header = vchiu_queue_pop(&service->queue);
11665 +
11666 +       memcpy(data, header->data, header->size < max_data_size_to_read ?
11667 +               header->size : max_data_size_to_read);
11668 +
11669 +       *actual_msg_size = header->size;
11670 +
11671 +       vchiq_release_message(service->handle, header);
11672 +
11673 +       return 0;
11674 +}
11675 +EXPORT_SYMBOL(vchi_msg_dequeue);
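+
+/*
+ * Usage sketch (illustrative only): copy the next message into a local
+ * buffer.  actual_msg_size always reports the full message size, so a
+ * value larger than the buffer means the copy above was truncated;
+ * "svc" and handle_reply() are placeholders.
+ *
+ *        uint8_t buf[64];
+ *        uint32_t len;
+ *
+ *        if (vchi_msg_dequeue(svc, buf, sizeof(buf), &len,
+ *                        VCHI_FLAGS_NONE) == 0 && len <= sizeof(buf))
+ *                handle_reply(buf, len);
+ */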
11676 +
11677 +/***********************************************************
11678 + * Name: vchi_msg_queuev
11679 + *
11680 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11681 + *             VCHI_MSG_VECTOR_T *vector,
11682 + *             uint32_t count,
11683 + *             VCHI_FLAGS_T flags,
11684 + *             void *msg_handle
11685 + *
11686 + * Description: Thin wrapper to queue a vectored message onto a connection
11687 + *
11688 + * Returns: int32_t - success == 0
11689 + *
11690 + ***********************************************************/
11691 +
11692 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
11693 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
11694 +       offsetof(VCHIQ_ELEMENT_T, data));
11695 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
11696 +       offsetof(VCHIQ_ELEMENT_T, size));
11697 +
11698 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
11699 +       VCHI_MSG_VECTOR_T *vector,
11700 +       uint32_t count,
11701 +       VCHI_FLAGS_T flags,
11702 +       void *msg_handle)
11703 +{
11704 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11705 +
11706 +       (void)msg_handle;
11707 +
11708 +       WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11709 +
11710 +       return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
11711 +               (const VCHIQ_ELEMENT_T *)vector, count));
11712 +}
11713 +EXPORT_SYMBOL(vchi_msg_queuev);
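+
+/*
+ * Usage sketch (illustrative only): send one message made of two fragments
+ * (a header followed by a payload) without first copying them into a
+ * single buffer.  The static asserts above guarantee the vector can be
+ * passed straight through to vchiq_queue_message(); hdr, payload and
+ * payload_len are placeholders.
+ *
+ *        VCHI_MSG_VECTOR_T vec[2] = {
+ *                { .vec_base = &hdr,    .vec_len = sizeof(hdr) },
+ *                { .vec_base = payload, .vec_len = payload_len },
+ *        };
+ *
+ *        vchi_msg_queuev(svc, vec, 2, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+ */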
11714 +
11715 +/***********************************************************
11716 + * Name: vchi_held_msg_release
11717 + *
11718 + * Arguments:  VCHI_HELD_MSG_T *message
11719 + *
11720 + * Description: Routine to release a held message (after it has been read with
11721 + *              vchi_msg_hold)
11722 + *
11723 + * Returns: int32_t - success == 0
11724 + *
11725 + ***********************************************************/
11726 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
11727 +{
11728 +       vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
11729 +               (VCHIQ_HEADER_T *)message->message);
11730 +
11731 +       return 0;
11732 +}
11733 +EXPORT_SYMBOL(vchi_held_msg_release);
11734 +
11735 +/***********************************************************
11736 + * Name: vchi_msg_hold
11737 + *
11738 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11739 + *             void **data,
11740 + *             uint32_t *msg_size,
11741 + *             VCHI_FLAGS_T flags,
11742 + *             VCHI_HELD_MSG_T *message_handle
11743 + *
11744 + * Description: Routine to return a pointer to the current message (to allow
11745 + *              in-place processing). The message is dequeued - don't forget
11746 + *              to release the message using vchi_held_msg_release when you're
11747 + *              finished.
11748 + *
11749 + * Returns: int32_t - success == 0
11750 + *
11751 + ***********************************************************/
11752 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
11753 +       void **data,
11754 +       uint32_t *msg_size,
11755 +       VCHI_FLAGS_T flags,
11756 +       VCHI_HELD_MSG_T *message_handle)
11757 +{
11758 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11759 +       VCHIQ_HEADER_T *header;
11760 +
11761 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
11762 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11763 +
11764 +       if (flags == VCHI_FLAGS_NONE)
11765 +               if (vchiu_queue_is_empty(&service->queue))
11766 +                       return -1;
11767 +
11768 +       header = vchiu_queue_pop(&service->queue);
11769 +
11770 +       *data = header->data;
11771 +       *msg_size = header->size;
11772 +
11773 +       message_handle->service =
11774 +               (struct opaque_vchi_service_t *)service->handle;
11775 +       message_handle->message = header;
11776 +
11777 +       return 0;
11778 +}
11779 +EXPORT_SYMBOL(vchi_msg_hold);
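+
+/*
+ * Usage sketch (illustrative only): hold a message beyond the current call
+ * chain and release it explicitly.  Unlike vchi_msg_peek(), the message is
+ * popped from the shim queue here, so it must be returned with
+ * vchi_held_msg_release() rather than vchi_msg_remove();
+ * process_later() is a hypothetical consumer.
+ *
+ *        VCHI_HELD_MSG_T held;
+ *        void *data;
+ *        uint32_t size;
+ *
+ *        if (vchi_msg_hold(svc, &data, &size, VCHI_FLAGS_NONE, &held) == 0) {
+ *                process_later(data, size);
+ *                vchi_held_msg_release(&held);
+ *        }
+ */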
11780 +
11781 +/***********************************************************
11782 + * Name: vchi_initialise
11783 + *
11784 + * Arguments: VCHI_INSTANCE_T *instance_handle
11787 + *
11788 + * Description: Initialises the hardware but does not transmit anything.
11789 + *              When run as a Host App this will be called twice, hence the
11790 + *              need to malloc the state information.
11791 + *
11792 + * Returns: 0 if successful, failure otherwise
11793 + *
11794 + ***********************************************************/
11795 +
11796 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
11797 +{
11798 +       VCHIQ_INSTANCE_T instance;
11799 +       VCHIQ_STATUS_T status;
11800 +
11801 +       status = vchiq_initialise(&instance);
11802 +
11803 +       *instance_handle = (VCHI_INSTANCE_T)instance;
11804 +
11805 +       return vchiq_status_to_vchi(status);
11806 +}
11807 +EXPORT_SYMBOL(vchi_initialise);
11808 +
11809 +/***********************************************************
11810 + * Name: vchi_connect
11811 + *
11812 + * Arguments: VCHI_CONNECTION_T **connections
11813 + *            const uint32_t num_connections
11814 + *            VCHI_INSTANCE_T instance_handle
11815 + *
11816 + * Description: Starts the command service on each connection,
11817 + *              causing INIT messages to be pinged back and forth
11818 + *
11819 + * Returns: 0 if successful, failure otherwise
11820 + *
11821 + ***********************************************************/
11822 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
11823 +       const uint32_t num_connections,
11824 +       VCHI_INSTANCE_T instance_handle)
11825 +{
11826 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11827 +
11828 +       (void)connections;
11829 +       (void)num_connections;
11830 +
11831 +       return vchiq_connect(instance);
11832 +}
11833 +EXPORT_SYMBOL(vchi_connect);
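+
+/*
+ * Usage sketch (illustrative only): the bring-up sequence for a kernel
+ * client.  In this shim the connection table is ignored, so NULL/0 may be
+ * passed to vchi_connect(); error handling is reduced to a bare return.
+ *
+ *        VCHI_INSTANCE_T vchi;
+ *
+ *        if (vchi_initialise(&vchi) != 0)
+ *                return -EIO;
+ *        if (vchi_connect(NULL, 0, vchi) != 0)
+ *                return -EIO;
+ */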
11834 +
11835 +
11836 +/***********************************************************
11837 + * Name: vchi_disconnect
11838 + *
11839 + * Arguments: VCHI_INSTANCE_T instance_handle
11840 + *
11841 + * Description: Stops the command service on each connection,
11842 + *              causing DE-INIT messages to be pinged back and forth
11843 + *
11844 + * Returns: 0 if successful, failure otherwise
11845 + *
11846 + ***********************************************************/
11847 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
11848 +{
11849 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11850 +       return vchiq_status_to_vchi(vchiq_shutdown(instance));
11851 +}
11852 +EXPORT_SYMBOL(vchi_disconnect);
11853 +
11854 +
11855 +/***********************************************************
11856 + * Name: vchi_service_open
11857 + * Name: vchi_service_create
11858 + *
11859 + * Arguments: VCHI_INSTANCE_T *instance_handle
11860 + *            SERVICE_CREATION_T *setup,
11861 + *            VCHI_SERVICE_HANDLE_T *handle
11862 + *
11863 + * Description: Routine to open a service
11864 + *
11865 + * Returns: int32_t - success == 0
11866 + *
11867 + ***********************************************************/
11868 +
11869 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
11870 +       VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
11871 +{
11872 +       SHIM_SERVICE_T *service =
11873 +               (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
11874 +
11875 +        if (!service->callback)
11876 +               goto release;
11877 +
11878 +       switch (reason) {
11879 +       case VCHIQ_MESSAGE_AVAILABLE:
11880 +               vchiu_queue_push(&service->queue, header);
11881 +
11882 +               service->callback(service->callback_param,
11883 +                                 VCHI_CALLBACK_MSG_AVAILABLE, NULL);
11884 +
11885 +               goto done;
11887 +
11888 +       case VCHIQ_BULK_TRANSMIT_DONE:
11889 +               service->callback(service->callback_param,
11890 +                                 VCHI_CALLBACK_BULK_SENT, bulk_user);
11891 +               break;
11892 +
11893 +       case VCHIQ_BULK_RECEIVE_DONE:
11894 +               service->callback(service->callback_param,
11895 +                                 VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
11896 +               break;
11897 +
11898 +       case VCHIQ_SERVICE_CLOSED:
11899 +               service->callback(service->callback_param,
11900 +                                 VCHI_CALLBACK_SERVICE_CLOSED, NULL);
11901 +               break;
11902 +
11903 +       case VCHIQ_SERVICE_OPENED:
11904 +               /* No equivalent VCHI reason */
11905 +               break;
11906 +
11907 +       case VCHIQ_BULK_TRANSMIT_ABORTED:
11908 +               service->callback(service->callback_param,
11909 +                                 VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
11910 +                                 bulk_user);
11911 +               break;
11912 +
11913 +       case VCHIQ_BULK_RECEIVE_ABORTED:
11914 +               service->callback(service->callback_param,
11915 +                                 VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
11916 +                                 bulk_user);
11917 +               break;
11918 +
11919 +       default:
11920 +               WARN(1, "not supported\n");
11921 +               break;
11922 +       }
11923 +
11924 +release:
11925 +        vchiq_release_message(service->handle, header);
11926 +done:
11927 +       return VCHIQ_SUCCESS;
11928 +}
11929 +
11930 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
11931 +       SERVICE_CREATION_T *setup)
11932 +{
11933 +       SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
11934 +
11935 +       (void)instance;
11936 +
11937 +       if (service) {
11938 +               if (vchiu_queue_init(&service->queue, 64)) {
11939 +                       service->callback = setup->callback;
11940 +                       service->callback_param = setup->callback_param;
11941 +               } else {
11942 +                       kfree(service);
11943 +                       service = NULL;
11944 +               }
11945 +       }
11946 +
11947 +       return service;
11948 +}
11949 +
11950 +static void service_free(SHIM_SERVICE_T *service)
11951 +{
11952 +       if (service) {
11953 +               vchiu_queue_delete(&service->queue);
11954 +               kfree(service);
11955 +       }
11956 +}
11957 +
11958 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
11959 +       SERVICE_CREATION_T *setup,
11960 +       VCHI_SERVICE_HANDLE_T *handle)
11961 +{
11962 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11963 +       SHIM_SERVICE_T *service = service_alloc(instance, setup);
11964 +       if (service) {
11965 +               VCHIQ_SERVICE_PARAMS_T params;
11966 +               VCHIQ_STATUS_T status;
11967 +
11968 +               memset(&params, 0, sizeof(params));
11969 +               params.fourcc = setup->service_id;
11970 +               params.callback = shim_callback;
11971 +               params.userdata = service;
11972 +               params.version = setup->version.version;
11973 +               params.version_min = setup->version.version_min;
11974 +
11975 +               status = vchiq_open_service(instance, &params,
11976 +                       &service->handle);
11977 +               if (status != VCHIQ_SUCCESS) {
11978 +                       service_free(service);
11979 +                       service = NULL;
11980 +               }
11981 +       }
11982 +
11983 +       *handle = (VCHI_SERVICE_HANDLE_T)service;
11984 +
11985 +       return (service != NULL) ? 0 : -1;
11986 +}
11987 +EXPORT_SYMBOL(vchi_service_open);
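+
+/*
+ * Usage sketch (illustrative only): open a service once the instance is
+ * connected.  MY_FOURCC, my_vchi_callback and my_state are placeholders,
+ * and the version numbers are arbitrary; the callback is later invoked
+ * with the reasons translated by shim_callback() above
+ * (VCHI_CALLBACK_MSG_AVAILABLE and friends).
+ *
+ *        SERVICE_CREATION_T setup;
+ *        VCHI_SERVICE_HANDLE_T svc;
+ *
+ *        memset(&setup, 0, sizeof(setup));
+ *        setup.service_id = MY_FOURCC;
+ *        setup.callback = my_vchi_callback;
+ *        setup.callback_param = my_state;
+ *        setup.version.version = 1;
+ *        setup.version.version_min = 1;
+ *
+ *        if (vchi_service_open(vchi, &setup, &svc) != 0)
+ *                return -ENODEV;
+ */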
11988 +
11989 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
11990 +       SERVICE_CREATION_T *setup,
11991 +       VCHI_SERVICE_HANDLE_T *handle)
11992 +{
11993 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11994 +       SHIM_SERVICE_T *service = service_alloc(instance, setup);
11995 +       if (service) {
11996 +               VCHIQ_SERVICE_PARAMS_T params;
11997 +               VCHIQ_STATUS_T status;
11998 +
11999 +               memset(&params, 0, sizeof(params));
12000 +               params.fourcc = setup->service_id;
12001 +               params.callback = shim_callback;
12002 +               params.userdata = service;
12003 +               params.version = setup->version.version;
12004 +               params.version_min = setup->version.version_min;
12005 +               status = vchiq_add_service(instance, &params, &service->handle);
12006 +
12007 +               if (status != VCHIQ_SUCCESS) {
12008 +                       service_free(service);
12009 +                       service = NULL;
12010 +               }
12011 +       }
12012 +
12013 +       *handle = (VCHI_SERVICE_HANDLE_T)service;
12014 +
12015 +       return (service != NULL) ? 0 : -1;
12016 +}
12017 +EXPORT_SYMBOL(vchi_service_create);
12018 +
12019 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
12020 +{
12021 +       int32_t ret = -1;
12022 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12023 +       if (service) {
12024 +               VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
12025 +               if (status == VCHIQ_SUCCESS) {
12026 +                       service_free(service);
12027 +                       service = NULL;
12028 +               }
12029 +
12030 +               ret = vchiq_status_to_vchi(status);
12031 +       }
12032 +       return ret;
12033 +}
12034 +EXPORT_SYMBOL(vchi_service_close);
12035 +
12036 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
12037 +{
12038 +       int32_t ret = -1;
12039 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12040 +       if (service) {
12041 +               VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
12042 +               if (status == VCHIQ_SUCCESS) {
12043 +                       service_free(service);
12044 +                       service = NULL;
12045 +               }
12046 +
12047 +               ret = vchiq_status_to_vchi(status);
12048 +       }
12049 +       return ret;
12050 +}
12051 +EXPORT_SYMBOL(vchi_service_destroy);
12052 +
12053 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle,
12054 +       short *peer_version)
12055 +{
12056 +       int32_t ret = -1;
12057 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12058 +
12059 +       if (service)
12060 +               ret = vchiq_status_to_vchi(
12061 +                       vchiq_get_peer_version(service->handle, peer_version));
12062 +       return ret;
12063 +}
12064 +EXPORT_SYMBOL(vchi_get_peer_version);
12065 +
12066 +/* ----------------------------------------------------------------------
12067 + * read a uint32_t from buffer.
12068 + * network format is defined to be little endian
12069 + * -------------------------------------------------------------------- */
12070 +uint32_t
12071 +vchi_readbuf_uint32(const void *_ptr)
12072 +{
12073 +       const unsigned char *ptr = _ptr;
12074 +       return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12075 +}
12076 +
12077 +/* ----------------------------------------------------------------------
12078 + * write a uint32_t to buffer.
12079 + * network format is defined to be little endian
12080 + * -------------------------------------------------------------------- */
12081 +void
12082 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12083 +{
12084 +       unsigned char *ptr = _ptr;
12085 +       ptr[0] = (unsigned char)((value >> 0)  & 0xFF);
12086 +       ptr[1] = (unsigned char)((value >> 8)  & 0xFF);
12087 +       ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12088 +       ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12089 +}
12090 +
12091 +/* ----------------------------------------------------------------------
12092 + * read a uint16_t from buffer.
12093 + * network format is defined to be little endian
12094 + * -------------------------------------------------------------------- */
12095 +uint16_t
12096 +vchi_readbuf_uint16(const void *_ptr)
12097 +{
12098 +       const unsigned char *ptr = _ptr;
12099 +       return ptr[0] | (ptr[1] << 8);
12100 +}
12101 +
12102 +/* ----------------------------------------------------------------------
12103 + * write a uint16_t into the buffer.
12104 + * network format is defined to be little endian
12105 + * -------------------------------------------------------------------- */
12106 +void
12107 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12108 +{
12109 +       unsigned char *ptr = _ptr;
12110 +       ptr[0] = (value >> 0)  & 0xFF;
12111 +       ptr[1] = (value >> 8)  & 0xFF;
12112 +}
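+
+/*
+ * Usage sketch (illustrative only): the helpers above (de)serialise values
+ * in the little-endian wire format regardless of host endianness, e.g.
+ *
+ *        uint8_t hdr[6];
+ *
+ *        vchi_writebuf_uint32(&hdr[0], 0x12345678);
+ *        vchi_writebuf_uint16(&hdr[4], 0xabcd);
+ *
+ * leaves hdr holding 78 56 34 12 cd ab, and the matching readbuf helpers
+ * recover the original values on any host.
+ */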
12113 +
12114 +/***********************************************************
12115 + * Name: vchi_service_use
12116 + *
12117 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12118 + *
12119 + * Description: Routine to increment refcount on a service
12120 + *
12121 + * Returns: void
12122 + *
12123 + ***********************************************************/
12124 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12125 +{
12126 +       int32_t ret = -1;
12127 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12128 +       if (service)
12129 +               ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12130 +       return ret;
12131 +}
12132 +EXPORT_SYMBOL(vchi_service_use);
12133 +
12134 +/***********************************************************
12135 + * Name: vchi_service_release
12136 + *
12137 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12138 + *
12139 + * Description: Routine to decrement refcount on a service
12140 + *
12141 + * Returns: void
12142 + *
12143 + ***********************************************************/
12144 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12145 +{
12146 +       int32_t ret = -1;
12147 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12148 +       if (service)
12149 +               ret = vchiq_status_to_vchi(
12150 +                       vchiq_release_service(service->handle));
12151 +       return ret;
12152 +}
12153 +EXPORT_SYMBOL(vchi_service_release);
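+
+/*
+ * Usage sketch (illustrative only): bracket a burst of traffic with
+ * vchi_service_use()/vchi_service_release() so the use count of the
+ * underlying vchiq service is held only while it is needed; "svc" and
+ * "req" are placeholders.
+ *
+ *        vchi_service_use(svc);
+ *        vchi_msg_queue(svc, &req, sizeof(req),
+ *                VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+ *        vchi_service_release(svc);
+ */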
12154 --- /dev/null
12155 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12156 @@ -0,0 +1,151 @@
12157 +/**
12158 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12159 + *
12160 + * Redistribution and use in source and binary forms, with or without
12161 + * modification, are permitted provided that the following conditions
12162 + * are met:
12163 + * 1. Redistributions of source code must retain the above copyright
12164 + *    notice, this list of conditions, and the following disclaimer,
12165 + *    without modification.
12166 + * 2. Redistributions in binary form must reproduce the above copyright
12167 + *    notice, this list of conditions and the following disclaimer in the
12168 + *    documentation and/or other materials provided with the distribution.
12169 + * 3. The names of the above-listed copyright holders may not be used
12170 + *    to endorse or promote products derived from this software without
12171 + *    specific prior written permission.
12172 + *
12173 + * ALTERNATIVELY, this software may be distributed under the terms of the
12174 + * GNU General Public License ("GPL") version 2, as published by the Free
12175 + * Software Foundation.
12176 + *
12177 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12178 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12179 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12180 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12181 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12182 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12183 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12184 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12185 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12186 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12187 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12188 + */
12189 +
12190 +#include "vchiq_util.h"
12191 +
12192 +static inline int is_pow2(int i)
12193 +{
12194 +       return i && !(i & (i - 1));
12195 +}
12196 +
12197 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12198 +{
12199 +       WARN_ON(!is_pow2(size));
12200 +
12201 +       queue->size = size;
12202 +       queue->read = 0;
12203 +       queue->write = 0;
12204 +
12205 +       sema_init(&queue->pop, 0);
12206 +       sema_init(&queue->push, 0);
12207 +
12208 +       queue->storage = kcalloc(size, sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12209 +       if (queue->storage == NULL) {
12210 +               vchiu_queue_delete(queue);
12211 +               return 0;
12212 +       }
12213 +       return 1;
12214 +}
12215 +
12216 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12217 +{
12218 +       kfree(queue->storage);
12220 +}
12221 +
12222 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12223 +{
12224 +       return queue->read == queue->write;
12225 +}
12226 +
12227 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12228 +{
12229 +       return queue->write == queue->read + queue->size;
12230 +}
12231 +
12232 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12233 +{
12234 +       while (queue->write == queue->read + queue->size) {
12235 +               if (down_interruptible(&queue->pop) != 0) {
12236 +                       flush_signals(current);
12237 +               }
12238 +       }
12239 +
12240 +       /*
12241 +        * Write to queue->storage must be visible after read from
12242 +        * queue->read
12243 +        */
12244 +       smp_mb();
12245 +
12246 +       queue->storage[queue->write & (queue->size - 1)] = header;
12247 +
12248 +       /*
12249 +        * Write to queue->storage must be visible before write to
12250 +        * queue->write
12251 +        */
12252 +       smp_wmb();
12253 +
12254 +       queue->write++;
12255 +
12256 +       up(&queue->push);
12257 +}
12258 +
12259 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12260 +{
12261 +       while (queue->write == queue->read) {
12262 +               if (down_interruptible(&queue->push) != 0) {
12263 +                       flush_signals(current);
12264 +               }
12265 +       }
12266 +
12267 +       up(&queue->push); /* We haven't removed anything from the queue. */
12268 +
12269 +       /*
12270 +        * Read from queue->storage must be visible after read from
12271 +        * queue->write
12272 +        */
12273 +       smp_rmb();
12274 +
12275 +       return queue->storage[queue->read & (queue->size - 1)];
12276 +}
12277 +
12278 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12279 +{
12280 +       VCHIQ_HEADER_T *header;
12281 +
12282 +       while (queue->write == queue->read) {
12283 +               if (down_interruptible(&queue->push) != 0) {
12284 +                       flush_signals(current);
12285 +               }
12286 +       }
12287 +
12288 +       /*
12289 +        * Read from queue->storage must be visible after read from
12290 +        * queue->write
12291 +        */
12292 +       smp_rmb();
12293 +
12294 +       header = queue->storage[queue->read & (queue->size - 1)];
12295 +
12296 +       /*
12297 +        * Read from queue->storage must be visible before write to
12298 +        * queue->read
12299 +        */
12300 +       smp_mb();
12301 +
12302 +       queue->read++;
12303 +
12304 +       up(&queue->pop);
12305 +
12306 +       return header;
12307 +}
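+
+/*
+ * Usage sketch (illustrative only): the queue above is a power-of-two ring
+ * of VCHIQ_HEADER_T pointers with semaphore-based blocking; the shim uses
+ * it to hand messages from shim_callback() to the reading thread, and a
+ * pop blocks until something has been pushed.  incoming_header stands for
+ * a header received from the vchiq callback.
+ *
+ *        VCHIU_QUEUE_T q;
+ *        VCHIQ_HEADER_T *header;
+ *
+ *        if (!vchiu_queue_init(&q, 64))
+ *                return -ENOMEM;
+ *        vchiu_queue_push(&q, incoming_header);
+ *        header = vchiu_queue_pop(&q);
+ *        vchiu_queue_delete(&q);
+ */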
12308 --- /dev/null
12309 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12310 @@ -0,0 +1,81 @@
12311 +/**
12312 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12313 + *
12314 + * Redistribution and use in source and binary forms, with or without
12315 + * modification, are permitted provided that the following conditions
12316 + * are met:
12317 + * 1. Redistributions of source code must retain the above copyright
12318 + *    notice, this list of conditions, and the following disclaimer,
12319 + *    without modification.
12320 + * 2. Redistributions in binary form must reproduce the above copyright
12321 + *    notice, this list of conditions and the following disclaimer in the
12322 + *    documentation and/or other materials provided with the distribution.
12323 + * 3. The names of the above-listed copyright holders may not be used
12324 + *    to endorse or promote products derived from this software without
12325 + *    specific prior written permission.
12326 + *
12327 + * ALTERNATIVELY, this software may be distributed under the terms of the
12328 + * GNU General Public License ("GPL") version 2, as published by the Free
12329 + * Software Foundation.
12330 + *
12331 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12332 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12333 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12334 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12335 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12336 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12337 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12338 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12339 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12340 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12341 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12342 + */
12343 +
12344 +#ifndef VCHIQ_UTIL_H
12345 +#define VCHIQ_UTIL_H
12346 +
12347 +#include <linux/types.h>
12348 +#include <linux/semaphore.h>
12349 +#include <linux/mutex.h>
12350 +#include <linux/bitops.h>
12351 +#include <linux/kthread.h>
12352 +#include <linux/wait.h>
12353 +#include <linux/vmalloc.h>
12354 +#include <linux/jiffies.h>
12355 +#include <linux/delay.h>
12356 +#include <linux/string.h>
12358 +#include <linux/interrupt.h>
12359 +#include <linux/random.h>
12360 +#include <linux/sched.h>
12361 +#include <linux/ctype.h>
12362 +#include <linux/uaccess.h>
12363 +#include <linux/time.h>  /* for time_t */
12364 +#include <linux/slab.h>
12366 +
12367 +#include "vchiq_if.h"
12368 +
12369 +typedef struct {
12370 +       int size;
12371 +       int read;
12372 +       int write;
12373 +
12374 +       struct semaphore pop;
12375 +       struct semaphore push;
12376 +
12377 +       VCHIQ_HEADER_T **storage;
12378 +} VCHIU_QUEUE_T;
12379 +
12380 +extern int  vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12381 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12382 +
12383 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12384 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12385 +
12386 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12387 +
12388 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12389 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12390 +
12391 +#endif
12392 --- /dev/null
12393 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12394 @@ -0,0 +1,59 @@
12395 +/**
12396 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12397 + *
12398 + * Redistribution and use in source and binary forms, with or without
12399 + * modification, are permitted provided that the following conditions
12400 + * are met:
12401 + * 1. Redistributions of source code must retain the above copyright
12402 + *    notice, this list of conditions, and the following disclaimer,
12403 + *    without modification.
12404 + * 2. Redistributions in binary form must reproduce the above copyright
12405 + *    notice, this list of conditions and the following disclaimer in the
12406 + *    documentation and/or other materials provided with the distribution.
12407 + * 3. The names of the above-listed copyright holders may not be used
12408 + *    to endorse or promote products derived from this software without
12409 + *    specific prior written permission.
12410 + *
12411 + * ALTERNATIVELY, this software may be distributed under the terms of the
12412 + * GNU General Public License ("GPL") version 2, as published by the Free
12413 + * Software Foundation.
12414 + *
12415 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12416 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12417 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12418 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12419 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12420 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12421 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12422 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12423 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12424 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12425 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12426 + */
12427 +#include "vchiq_build_info.h"
12428 +#include <linux/broadcom/vc_debug_sym.h>
12429 +
12430 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_hostname, "dc4-arm-01");
12431 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)");
12432 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_time,    __TIME__);
12433 +VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_date,    __DATE__);
12434 +
12435 +const char *vchiq_get_build_hostname(void)
12436 +{
12437 +       return vchiq_build_hostname;
12438 +}
12439 +
12440 +const char *vchiq_get_build_version(void)
12441 +{
12442 +       return vchiq_build_version;
12443 +}
12444 +
12445 +const char *vchiq_get_build_date(void)
12446 +{
12447 +       return vchiq_build_date;
12448 +}
12449 +
12450 +const char *vchiq_get_build_time(void)
12451 +{
12452 +       return vchiq_build_time;
12453 +}