[14.07/openwrt.git] target/linux/brcm2708/patches-3.10/0005-bcm2708-vchiq-driver.patch
1 From 2fdd1c3ab61a7a58a3ef3bc5b3d1504202bbc108 Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 005/174] bcm2708 vchiq driver
5
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
7 ---
8  drivers/misc/Kconfig                               |    2 +
9  drivers/misc/Makefile                              |    1 +
10  drivers/misc/vc04_services/Kconfig                 |   10 +
11  drivers/misc/vc04_services/Makefile                |   18 +
12  .../interface/vchi/connections/connection.h        |  328 ++
13  .../interface/vchi/message_drivers/message.h       |  204 ++
14  drivers/misc/vc04_services/interface/vchi/vchi.h   |  373 ++
15  .../misc/vc04_services/interface/vchi/vchi_cfg.h   |  224 ++
16  .../interface/vchi/vchi_cfg_internal.h             |   71 +
17  .../vc04_services/interface/vchi/vchi_common.h     |  163 +
18  .../misc/vc04_services/interface/vchi/vchi_mh.h    |   42 +
19  .../misc/vc04_services/interface/vchiq_arm/vchiq.h |   41 +
20  .../vc04_services/interface/vchiq_arm/vchiq_2835.h |   42 +
21  .../interface/vchiq_arm/vchiq_2835_arm.c           |  538 +++
22  .../vc04_services/interface/vchiq_arm/vchiq_arm.c  | 2813 ++++++++++++++
23  .../vc04_services/interface/vchiq_arm/vchiq_arm.h  |  212 ++
24  .../interface/vchiq_arm/vchiq_build_info.h         |   37 +
25  .../vc04_services/interface/vchiq_arm/vchiq_cfg.h  |   60 +
26  .../interface/vchiq_arm/vchiq_connected.c          |  119 +
27  .../interface/vchiq_arm/vchiq_connected.h          |   51 +
28  .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3824 ++++++++++++++++++++
29  .../vc04_services/interface/vchiq_arm/vchiq_core.h |  706 ++++
30  .../interface/vchiq_arm/vchiq_genversion           |   89 +
31  .../vc04_services/interface/vchiq_arm/vchiq_if.h   |  188 +
32  .../interface/vchiq_arm/vchiq_ioctl.h              |  129 +
33  .../interface/vchiq_arm/vchiq_kern_lib.c           |  456 +++
34  .../interface/vchiq_arm/vchiq_memdrv.h             |   71 +
35  .../interface/vchiq_arm/vchiq_pagelist.h           |   58 +
36  .../vc04_services/interface/vchiq_arm/vchiq_proc.c |  254 ++
37  .../vc04_services/interface/vchiq_arm/vchiq_shim.c |  815 +++++
38  .../vc04_services/interface/vchiq_arm/vchiq_util.c |  151 +
39  .../vc04_services/interface/vchiq_arm/vchiq_util.h |   82 +
40  .../interface/vchiq_arm/vchiq_version.c            |   59 +
41  33 files changed, 12231 insertions(+)
42  create mode 100644 drivers/misc/vc04_services/Kconfig
43  create mode 100644 drivers/misc/vc04_services/Makefile
44  create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
45  create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
46  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
47  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
48  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
49  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
50  create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
51  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
52  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
53  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
54  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
55  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
56  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
57  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
58  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
59  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
60  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
61  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
62  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
63  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
64  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
65  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
66  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
67  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
68  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
69  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
70  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
71  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
72  create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
73
74 --- a/drivers/misc/Kconfig
75 +++ b/drivers/misc/Kconfig
76 @@ -536,4 +536,6 @@ source "drivers/misc/carma/Kconfig"
77  source "drivers/misc/altera-stapl/Kconfig"
78  source "drivers/misc/mei/Kconfig"
79  source "drivers/misc/vmw_vmci/Kconfig"
80 +source "drivers/misc/vc04_services/Kconfig"
81  endmenu
82 +
83 --- a/drivers/misc/Makefile
84 +++ b/drivers/misc/Makefile
85 @@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI)               += mei/
86  obj-$(CONFIG_VMWARE_VMCI)      += vmw_vmci/
87  obj-$(CONFIG_LATTICE_ECP3_CONFIG)      += lattice-ecp3-config.o
88  obj-$(CONFIG_SRAM)             += sram.o
89 +obj-y                          += vc04_services/
90 --- /dev/null
91 +++ b/drivers/misc/vc04_services/Kconfig
92 @@ -0,0 +1,10 @@
93 +config BCM2708_VCHIQ
94 +       tristate "Videocore VCHIQ"
95 +       depends on MACH_BCM2708
96 +       default y
97 +       help
98 +               Kernel to VideoCore communication interface for the
99 +               BCM2708 family of products.
100 +               Defaults to Y when the Broadcom Videocore services
101 +               are included in the build, N otherwise.
102 +
103 --- /dev/null
104 +++ b/drivers/misc/vc04_services/Makefile
105 @@ -0,0 +1,18 @@
106 +ifeq ($(CONFIG_MACH_BCM2708),y)
107 +
108 +obj-$(CONFIG_BCM2708_VCHIQ)    += vchiq.o
109 +
110 +vchiq-objs := \
111 +   interface/vchiq_arm/vchiq_core.o  \
112 +   interface/vchiq_arm/vchiq_arm.o \
113 +   interface/vchiq_arm/vchiq_kern_lib.o \
114 +   interface/vchiq_arm/vchiq_2835_arm.o \
115 +   interface/vchiq_arm/vchiq_proc.o \
116 +   interface/vchiq_arm/vchiq_shim.o \
117 +   interface/vchiq_arm/vchiq_util.o \
118 +   interface/vchiq_arm/vchiq_connected.o \
119 +
120 +EXTRA_CFLAGS += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
121 +
122 +endif
123 +
124 --- /dev/null
125 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
126 @@ -0,0 +1,328 @@
127 +/**
128 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
129 + *
130 + * Redistribution and use in source and binary forms, with or without
131 + * modification, are permitted provided that the following conditions
132 + * are met:
133 + * 1. Redistributions of source code must retain the above copyright
134 + *    notice, this list of conditions, and the following disclaimer,
135 + *    without modification.
136 + * 2. Redistributions in binary form must reproduce the above copyright
137 + *    notice, this list of conditions and the following disclaimer in the
138 + *    documentation and/or other materials provided with the distribution.
139 + * 3. The names of the above-listed copyright holders may not be used
140 + *    to endorse or promote products derived from this software without
141 + *    specific prior written permission.
142 + *
143 + * ALTERNATIVELY, this software may be distributed under the terms of the
144 + * GNU General Public License ("GPL") version 2, as published by the Free
145 + * Software Foundation.
146 + *
147 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
148 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
149 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
150 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
151 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
152 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
153 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
154 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
155 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
156 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
157 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
158 + */
159 +
160 +#ifndef CONNECTION_H_
161 +#define CONNECTION_H_
162 +
163 +#include <linux/kernel.h>
164 +#include <linux/types.h>
165 +#include <linux/semaphore.h>
166 +
167 +#include "interface/vchi/vchi_cfg_internal.h"
168 +#include "interface/vchi/vchi_common.h"
169 +#include "interface/vchi/message_drivers/message.h"
170 +
171 +/******************************************************************************
172 + Global defs
173 + *****************************************************************************/
174 +
175 +// Opaque handle for a connection / service pair
176 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
177 +
178 +// opaque handle to the connection state information
179 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
180 +
181 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
182 +
183 +
184 +/******************************************************************************
185 + API
186 + *****************************************************************************/
187 +
188 +// Routine to init a connection with a particular low level driver
189 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
190 +                                                             const VCHI_MESSAGE_DRIVER_T * driver );
191 +
192 +// Routine to control CRC enabling at a connection level
193 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
194 +                                                  VCHI_CRC_CONTROL_T control );
195 +
196 +// Routine to create a service
197 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
198 +                                                      int32_t service_id,
199 +                                                      uint32_t rx_fifo_size,
200 +                                                      uint32_t tx_fifo_size,
201 +                                                      int server,
202 +                                                      VCHI_CALLBACK_T callback,
203 +                                                      void *callback_param,
204 +                                                      int32_t want_crc,
205 +                                                      int32_t want_unaligned_bulk_rx,
206 +                                                      int32_t want_unaligned_bulk_tx,
207 +                                                      VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
208 +
209 +// Routine to close a service
210 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
211 +
212 +// Routine to queue a message
213 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
214 +                                                            const void *data,
215 +                                                            uint32_t data_size,
216 +                                                            VCHI_FLAGS_T flags,
217 +                                                            void *msg_handle );
218 +
219 +// scatter-gather (vector) message queueing
220 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
221 +                                                             VCHI_MSG_VECTOR_T *vector,
222 +                                                             uint32_t count,
223 +                                                             VCHI_FLAGS_T flags,
224 +                                                             void *msg_handle );
225 +
226 +// Routine to dequeue a message
227 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
228 +                                                              void *data,
229 +                                                              uint32_t max_data_size_to_read,
230 +                                                              uint32_t *actual_msg_size,
231 +                                                              VCHI_FLAGS_T flags );
232 +
233 +// Routine to peek at a message
234 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
235 +                                                           void **data,
236 +                                                           uint32_t *msg_size,
237 +                                                           VCHI_FLAGS_T flags );
238 +
239 +// Routine to hold a message
240 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
241 +                                                           void **data,
242 +                                                           uint32_t *msg_size,
243 +                                                           VCHI_FLAGS_T flags,
244 +                                                           void **message_handle );
245 +
246 +// Routine to initialise a received message iterator
247 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
248 +                                                                VCHI_MSG_ITER_T *iter,
249 +                                                                VCHI_FLAGS_T flags );
250 +
251 +// Routine to release a held message
252 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
253 +                                                       void *message_handle );
254 +
255 +// Routine to get info on a held message
256 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
257 +                                                    void *message_handle,
258 +                                                    void **data,
259 +                                                    int32_t *msg_size,
260 +                                                    uint32_t *tx_timestamp,
261 +                                                    uint32_t *rx_timestamp );
262 +
263 +// Routine to check whether the iterator has a next message
264 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
265 +                                                       const VCHI_MSG_ITER_T *iter );
266 +
267 +// Routine to advance the iterator
268 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
269 +                                                    VCHI_MSG_ITER_T *iter,
270 +                                                    void **data,
271 +                                                    uint32_t *msg_size );
272 +
273 +// Routine to remove the last message returned by the iterator
274 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
275 +                                                      VCHI_MSG_ITER_T *iter );
276 +
277 +// Routine to hold the last message returned by the iterator
278 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
279 +                                                    VCHI_MSG_ITER_T *iter,
280 +                                                    void **msg_handle );
281 +
282 +// Routine to transmit bulk data
283 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
284 +                                                          const void *data_src,
285 +                                                          uint32_t data_size,
286 +                                                          VCHI_FLAGS_T flags,
287 +                                                          void *bulk_handle );
288 +
289 +// Routine to receive data
290 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
291 +                                                         void *data_dst,
292 +                                                         uint32_t data_size,
293 +                                                         VCHI_FLAGS_T flags,
294 +                                                         void *bulk_handle );
295 +
296 +// Routine to report if a server is available
297 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
298 +
299 +// Routine to report the number of RX slots available
300 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
301 +
302 +// Routine to report the RX slot size
303 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
304 +
305 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
306 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
307 +                                                     int32_t service,
308 +                                                     uint32_t length,
309 +                                                     MESSAGE_TX_CHANNEL_T channel,
310 +                                                     uint32_t channel_params,
311 +                                                     uint32_t data_length,
312 +                                                     uint32_t data_offset);
313 +
314 +// Callback to inform a service that an Xon or Xoff message has been received
315 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
316 +
317 +// Callback to inform a service that a server available reply message has been received
318 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
319 +
320 +// Callback to indicate that bulk auxiliary messages have arrived
321 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
322 +
323 +// Callback to indicate that bulk auxiliary messages have arrived
324 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
325 +
326 +// Callback with all the connection info you require
327 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
328 +
329 +// Callback to inform of a disconnect
330 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
331 +
332 +// Callback to inform of a power control request
333 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
334 +
335 +// allocate memory suitably aligned for this connection
336 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
337 +
338 +// free memory allocated by buffer_allocate
339 +typedef void   (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
340 +
341 +
342 +/******************************************************************************
343 + System driver struct
344 + *****************************************************************************/
345 +
346 +struct opaque_vchi_connection_api_t
347 +{
348 +   // Routine to init the connection
349 +   VCHI_CONNECTION_INIT_T                      init;
350 +
351 +   // Connection-level CRC control
352 +   VCHI_CONNECTION_CRC_CONTROL_T               crc_control;
353 +
354 +   // Routine to connect to or create service
355 +   VCHI_CONNECTION_SERVICE_CONNECT_T           service_connect;
356 +
357 +   // Routine to disconnect from a service
358 +   VCHI_CONNECTION_SERVICE_DISCONNECT_T        service_disconnect;
359 +
360 +   // Routine to queue a message
361 +   VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T     service_queue_msg;
362 +
363 +   // scatter-gather (vector) message queue
364 +   VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T    service_queue_msgv;
365 +
366 +   // Routine to dequeue a message
367 +   VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T   service_dequeue_msg;
368 +
369 +   // Routine to peek at a message
370 +   VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T      service_peek_msg;
371 +
372 +   // Routine to hold a message
373 +   VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T      service_hold_msg;
374 +
375 +   // Routine to initialise a received message iterator
376 +   VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
377 +
378 +   // Routine to release a message
379 +   VCHI_CONNECTION_HELD_MSG_RELEASE_T          held_msg_release;
380 +
381 +   // Routine to get information on a held message
382 +   VCHI_CONNECTION_HELD_MSG_INFO_T             held_msg_info;
383 +
384 +   // Routine to check for next message on iterator
385 +   VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T         msg_iter_has_next;
386 +
387 +   // Routine to get next message on iterator
388 +   VCHI_CONNECTION_MSG_ITER_NEXT_T             msg_iter_next;
389 +
390 +   // Routine to remove the last message returned by iterator
391 +   VCHI_CONNECTION_MSG_ITER_REMOVE_T           msg_iter_remove;
392 +
393 +   // Routine to hold the last message returned by iterator
394 +   VCHI_CONNECTION_MSG_ITER_HOLD_T             msg_iter_hold;
395 +
396 +   // Routine to transmit bulk data
397 +   VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T       bulk_queue_transmit;
398 +
399 +   // Routine to receive data
400 +   VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T        bulk_queue_receive;
401 +
402 +   // Routine to report the available servers
403 +   VCHI_CONNECTION_SERVER_PRESENT              server_present;
404 +
405 +   // Routine to report the number of RX slots available
406 +   VCHI_CONNECTION_RX_SLOTS_AVAILABLE          connection_rx_slots_available;
407 +
408 +   // Routine to report the RX slot size
409 +   VCHI_CONNECTION_RX_SLOT_SIZE                connection_rx_slot_size;
410 +
411 +   // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
412 +   VCHI_CONNECTION_RX_BULK_BUFFER_ADDED        rx_bulk_buffer_added;
413 +
414 +   // Callback to inform a service that an Xon or Xoff message has been received
415 +   VCHI_CONNECTION_FLOW_CONTROL                flow_control;
416 +
417 +   // Callback to inform a service that a server available reply message has been received
418 +   VCHI_CONNECTION_SERVER_AVAILABLE_REPLY      server_available_reply;
419 +
420 +   // Callback to indicate that bulk auxiliary messages have arrived
421 +   VCHI_CONNECTION_BULK_AUX_RECEIVED           bulk_aux_received;
422 +
423 +   // Callback to indicate that a bulk auxiliary message has been transmitted
424 +   VCHI_CONNECTION_BULK_AUX_TRANSMITTED        bulk_aux_transmitted;
425 +
426 +   // Callback to provide information about the connection
427 +   VCHI_CONNECTION_INFO                        connection_info;
428 +
429 +   // Callback to notify that peer has requested disconnect
430 +   VCHI_CONNECTION_DISCONNECT                  disconnect;
431 +
432 +   // Callback to notify that peer has requested power change
433 +   VCHI_CONNECTION_POWER_CONTROL               power_control;
434 +
435 +   // allocate memory suitably aligned for this connection
436 +   VCHI_BUFFER_ALLOCATE                        buffer_allocate;
437 +
438 +   // free memory allocated by buffer_allocate
439 +   VCHI_BUFFER_FREE                            buffer_free;
440 +
441 +};
442 +
443 +struct vchi_connection_t {
444 +   const VCHI_CONNECTION_API_T *api;
445 +   VCHI_CONNECTION_STATE_T     *state;
446 +#ifdef VCHI_COARSE_LOCKING
447 +   struct semaphore             sem;
448 +#endif
449 +};
450 +
451 +
452 +#endif /* CONNECTION_H_ */
453 +
454 +/****************************** End of file **********************************/
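
The connection layer above is driven entirely through the opaque_vchi_connection_api_t function-pointer table. The sketch below is illustrative only and not part of the patch: it shows how a connection driver might populate a couple of the entries. my_init and my_crc_control are hypothetical stubs, and the include path assumes the -Idrivers/misc/vc04_services flag that the vc04_services Makefile adds.

#include "interface/vchi/connections/connection.h"

static VCHI_CONNECTION_STATE_T *my_init(struct vchi_connection_t *connection,
                                        const VCHI_MESSAGE_DRIVER_T *driver)
{
   /* A real driver would allocate and return its per-connection state. */
   return NULL;
}

static int32_t my_crc_control(VCHI_CONNECTION_STATE_T *state,
                              VCHI_CRC_CONTROL_T control)
{
   /* A real driver would forward the CRC setting to its message driver. */
   return 0;
}

/* Entries not listed here remain NULL in this sketch. */
static const struct opaque_vchi_connection_api_t my_connection_api = {
   .init        = my_init,
   .crc_control = my_crc_control,
};

In-tree, vchi.h (added later in this patch) declares single_get_func_table() as the accessor that returns one such table.
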
455 --- /dev/null
456 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
457 @@ -0,0 +1,204 @@
458 +/**
459 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
460 + *
461 + * Redistribution and use in source and binary forms, with or without
462 + * modification, are permitted provided that the following conditions
463 + * are met:
464 + * 1. Redistributions of source code must retain the above copyright
465 + *    notice, this list of conditions, and the following disclaimer,
466 + *    without modification.
467 + * 2. Redistributions in binary form must reproduce the above copyright
468 + *    notice, this list of conditions and the following disclaimer in the
469 + *    documentation and/or other materials provided with the distribution.
470 + * 3. The names of the above-listed copyright holders may not be used
471 + *    to endorse or promote products derived from this software without
472 + *    specific prior written permission.
473 + *
474 + * ALTERNATIVELY, this software may be distributed under the terms of the
475 + * GNU General Public License ("GPL") version 2, as published by the Free
476 + * Software Foundation.
477 + *
478 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
479 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
480 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
481 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
482 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
483 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
484 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
485 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
486 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
487 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
488 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
489 + */
490 +
491 +#ifndef _VCHI_MESSAGE_H_
492 +#define _VCHI_MESSAGE_H_
493 +
494 +#include <linux/kernel.h>
495 +#include <linux/types.h>
496 +#include <linux/semaphore.h>
497 +
498 +#include "interface/vchi/vchi_cfg_internal.h"
499 +#include "interface/vchi/vchi_common.h"
500 +
501 +
502 +typedef enum message_event_type {
503 +   MESSAGE_EVENT_NONE,
504 +   MESSAGE_EVENT_NOP,
505 +   MESSAGE_EVENT_MESSAGE,
506 +   MESSAGE_EVENT_SLOT_COMPLETE,
507 +   MESSAGE_EVENT_RX_BULK_PAUSED,
508 +   MESSAGE_EVENT_RX_BULK_COMPLETE,
509 +   MESSAGE_EVENT_TX_COMPLETE,
510 +   MESSAGE_EVENT_MSG_DISCARDED
511 +} MESSAGE_EVENT_TYPE_T;
512 +
513 +typedef enum vchi_msg_flags
514 +{
515 +   VCHI_MSG_FLAGS_NONE                  = 0x0,
516 +   VCHI_MSG_FLAGS_TERMINATE_DMA         = 0x1
517 +} VCHI_MSG_FLAGS_T;
518 +
519 +typedef enum message_tx_channel
520 +{
521 +   MESSAGE_TX_CHANNEL_MESSAGE           = 0,
522 +   MESSAGE_TX_CHANNEL_BULK              = 1 // drivers may provide multiple bulk channels, from 1 upwards
523 +} MESSAGE_TX_CHANNEL_T;
524 +
525 +// Macros used for cycling through bulk channels
526 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
527 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
528 +
529 +typedef enum message_rx_channel
530 +{
531 +   MESSAGE_RX_CHANNEL_MESSAGE           = 0,
532 +   MESSAGE_RX_CHANNEL_BULK              = 1 // drivers may provide multiple bulk channels, from 1 upwards
533 +} MESSAGE_RX_CHANNEL_T;
534 +
535 +// Message receive slot information
536 +typedef struct rx_msg_slot_info {
537 +
538 +   struct rx_msg_slot_info *next;
539 +   //struct slot_info *prev;
540 +#if !defined VCHI_COARSE_LOCKING
541 +   struct semaphore   sem;
542 +#endif
543 +
544 +   uint8_t           *addr;               // base address of slot
545 +   uint32_t           len;                // length of slot in bytes
546 +
547 +   uint32_t           write_ptr;          // hardware causes this to advance
548 +   uint32_t           read_ptr;           // this module does the reading
549 +   int                active;             // is this slot in the hardware dma fifo?
550 +   uint32_t           msgs_parsed;        // count how many messages are in this slot
551 +   uint32_t           msgs_released;      // how many messages have been released
552 +   void              *state;              // connection state information
553 +   uint8_t            ref_count[VCHI_MAX_SERVICES_PER_CONNECTION];          // reference count for slots held by services
554 +} RX_MSG_SLOTINFO_T;
555 +
556 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
557 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
558 +// driver will be tasked with sending the aligned core section.
559 +typedef struct rx_bulk_slotinfo_t {
560 +   struct rx_bulk_slotinfo_t *next;
561 +
562 +   struct semaphore *blocking;
563 +
564 +   // needed by DMA
565 +   void        *addr;
566 +   uint32_t     len;
567 +
568 +   // needed for the callback
569 +   void        *service;
570 +   void        *handle;
571 +   VCHI_FLAGS_T flags;
572 +} RX_BULK_SLOTINFO_T;
573 +
574 +
575 +/* ----------------------------------------------------------------------
576 + * each connection driver will have a pool of the following struct.
577 + *
578 + * the pool will be managed by vchi_qman_*
579 + * this means there will be multiple queues (single linked lists)
580 + * a given struct message_info will be on exactly one of these queues
581 + * at any one time
582 + * -------------------------------------------------------------------- */
583 +typedef struct rx_message_info {
584 +
585 +   struct message_info *next;
586 +   //struct message_info *prev;
587 +
588 +   uint8_t    *addr;
589 +   uint32_t   len;
590 +   RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
591 +   uint32_t   tx_timestamp;
592 +   uint32_t   rx_timestamp;
593 +
594 +} RX_MESSAGE_INFO_T;
595 +
596 +typedef struct {
597 +   MESSAGE_EVENT_TYPE_T type;
598 +
599 +   struct {
600 +      // for messages
601 +      void    *addr;           // address of message
602 +      uint16_t slot_delta;     // whether this message indicated slot delta
603 +      uint32_t len;            // length of message
604 +      RX_MSG_SLOTINFO_T *slot; // slot this message is in
605 +      int32_t  service;   // service id this message is destined for
606 +      uint32_t tx_timestamp;   // timestamp from the header
607 +      uint32_t rx_timestamp;   // timestamp when we parsed it
608 +   } message;
609 +
610 +   // FIXME: cleanup slot reporting...
611 +   RX_MSG_SLOTINFO_T *rx_msg;
612 +   RX_BULK_SLOTINFO_T *rx_bulk;
613 +   void *tx_handle;
614 +   MESSAGE_TX_CHANNEL_T tx_channel;
615 +
616 +} MESSAGE_EVENT_T;
617 +
618 +
619 +// callbacks
620 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
621 +
622 +typedef struct {
623 +   VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
624 +} VCHI_MESSAGE_DRIVER_OPEN_T;
625 +
626 +
627 +// handle to this instance of message driver (as returned by ->open)
628 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
629 +
630 +struct opaque_vchi_message_driver_t {
631 +   VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
632 +   int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
633 +   int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
634 +   int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
635 +   int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot );      // rx message
636 +   int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot );  // rx data (bulk)
637 +   int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle );      // tx (message & bulk)
638 +   void    (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event );     // get the next event from message_driver
639 +   int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
640 +   int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
641 +                            *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
642 +
643 +   int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
644 +   int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
645 +   void *  (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
646 +   void    (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
647 +   int     (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
648 +   int     (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
649 +
650 +   int32_t  (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
651 +   uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
652 +   int     (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
653 +   int     (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
654 +   void    (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
655 +   void    (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
656 +};
657 +
658 +
659 +#endif // _VCHI_MESSAGE_H_
660 +
661 +/****************************** End of file ***********************************/
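
The MESSAGE_TX_CHANNEL_BULK_PREV/NEXT macros above cycle through the bulk TX channels with modular arithmetic. Below is a minimal userspace demonstration, not part of the patch, with the macro definitions copied from this header and the default of 9 TX channels taken from vchi_cfg.h.

#include <stdio.h>

#define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9   /* default from vchi_cfg.h: 1 MPHI + 8 CCP2 */
#define MESSAGE_TX_CHANNEL_BULK    1

#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)

int main(void)
{
   int c = MESSAGE_TX_CHANNEL_BULK;
   int i;

   /* Ten NEXT steps visit bulk channels 1..9 and then wrap back to 1. */
   for (i = 0; i < 10; i++) {
      printf("%d ", c);
      c = MESSAGE_TX_CHANNEL_BULK_NEXT(c);
   }
   printf("\n");   /* prints: 1 2 3 4 5 6 7 8 9 1 */
   return 0;
}
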
662 --- /dev/null
663 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
664 @@ -0,0 +1,373 @@
665 +/**
666 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
667 + *
668 + * Redistribution and use in source and binary forms, with or without
669 + * modification, are permitted provided that the following conditions
670 + * are met:
671 + * 1. Redistributions of source code must retain the above copyright
672 + *    notice, this list of conditions, and the following disclaimer,
673 + *    without modification.
674 + * 2. Redistributions in binary form must reproduce the above copyright
675 + *    notice, this list of conditions and the following disclaimer in the
676 + *    documentation and/or other materials provided with the distribution.
677 + * 3. The names of the above-listed copyright holders may not be used
678 + *    to endorse or promote products derived from this software without
679 + *    specific prior written permission.
680 + *
681 + * ALTERNATIVELY, this software may be distributed under the terms of the
682 + * GNU General Public License ("GPL") version 2, as published by the Free
683 + * Software Foundation.
684 + *
685 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
686 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
687 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
688 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
689 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
690 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
691 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
692 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
693 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
694 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
695 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
696 + */
697 +
698 +#ifndef VCHI_H_
699 +#define VCHI_H_
700 +
701 +#include "interface/vchi/vchi_cfg.h"
702 +#include "interface/vchi/vchi_common.h"
703 +#include "interface/vchi/connections/connection.h"
704 +#include "vchi_mh.h"
705 +
706 +
707 +/******************************************************************************
708 + Global defs
709 + *****************************************************************************/
710 +
711 +#define VCHI_BULK_ROUND_UP(x)     ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
712 +#define VCHI_BULK_ROUND_DOWN(x)   (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
713 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
714 +
715 +#ifdef USE_VCHIQ_ARM
716 +#define VCHI_BULK_ALIGNED(x)      1
717 +#else
718 +#define VCHI_BULK_ALIGNED(x)      (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
719 +#endif
720 +
721 +struct vchi_version {
722 +       uint32_t version;
723 +       uint32_t version_min;
724 +};
725 +#define VCHI_VERSION(v_) { v_, v_ }
726 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
727 +
728 +typedef enum
729 +{
730 +   VCHI_VEC_POINTER,
731 +   VCHI_VEC_HANDLE,
732 +   VCHI_VEC_LIST
733 +} VCHI_MSG_VECTOR_TYPE_T;
734 +
735 +typedef struct vchi_msg_vector_ex {
736 +
737 +   VCHI_MSG_VECTOR_TYPE_T type;
738 +   union
739 +   {
740 +      // a memory handle
741 +      struct
742 +      {
743 +         VCHI_MEM_HANDLE_T handle;
744 +         uint32_t offset;
745 +         int32_t vec_len;
746 +      } handle;
747 +
748 +      // an ordinary data pointer
749 +      struct
750 +      {
751 +         const void *vec_base;
752 +         int32_t vec_len;
753 +      } ptr;
754 +
755 +      // a nested vector list
756 +      struct
757 +      {
758 +         struct vchi_msg_vector_ex *vec;
759 +         uint32_t vec_len;
760 +      } list;
761 +   } u;
762 +} VCHI_MSG_VECTOR_EX_T;
763 +
764 +
765 +// Construct an entry in a msg vector for a pointer (p) of length (l)
766 +#define VCHI_VEC_POINTER(p,l)  VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
767 +
768 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
769 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE,  { { (h), (o), (l) } }
770 +
771 +// Macros to manipulate 'FOURCC' values
772 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
773 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
774 +
775 +
776 +// Opaque service information
777 +struct opaque_vchi_service_t;
778 +
779 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
780 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
781 +typedef struct
782 +{
783 +   struct opaque_vchi_service_t *service;
784 +   void *message;
785 +} VCHI_HELD_MSG_T;
786 +
787 +
788 +
789 +// structure used to provide the information needed to open a server or a client
790 +typedef struct {
791 +       struct vchi_version version;
792 +       int32_t service_id;
793 +       VCHI_CONNECTION_T *connection;
794 +       uint32_t rx_fifo_size;
795 +       uint32_t tx_fifo_size;
796 +       VCHI_CALLBACK_T callback;
797 +       void *callback_param;
798 +       /* client intends to receive bulk transfers of
799 +               odd lengths or into unaligned buffers */
800 +       int32_t want_unaligned_bulk_rx;
801 +       /* client intends to transmit bulk transfers of
802 +               odd lengths or out of unaligned buffers */
803 +       int32_t want_unaligned_bulk_tx;
804 +       /* client wants to check CRCs on (bulk) xfers.
805 +               Only needs to be set at 1 end - will do both directions. */
806 +       int32_t want_crc;
807 +} SERVICE_CREATION_T;
808 +
809 +// Opaque handle for a VCHI instance
810 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
811 +
812 +// Opaque handle for a server or client
813 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
814 +
815 +// Service registration & startup
816 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
817 +
818 +typedef struct service_info_tag {
819 +   const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
820 +   VCHI_SERVICE_INIT init;          /* Service initialisation function */
821 +   void *vll_handle;                /* VLL handle; NULL when unloaded or a "static VLL" in build */
822 +} SERVICE_INFO_T;
823 +
824 +/******************************************************************************
825 + Global funcs - implementation is specific to which side you are on (local / remote)
826 + *****************************************************************************/
827 +
828 +#ifdef __cplusplus
829 +extern "C" {
830 +#endif
831 +
832 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
833 +                                                   const VCHI_MESSAGE_DRIVER_T * low_level);
834 +
835 +
836 +// Routine used to initialise the vchi on both local + remote connections
837 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
838 +
839 +extern int32_t vchi_exit( void );
840 +
841 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
842 +                             const uint32_t num_connections,
843 +                             VCHI_INSTANCE_T instance_handle );
844 +
845 +//When this is called, ensure that all services have no data pending.
846 +//Bulk transfers can remain 'queued'
847 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
848 +
849 +// Global control over bulk CRC checking
850 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
851 +                                 VCHI_CRC_CONTROL_T control );
852 +
853 +// helper functions
854 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
855 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
856 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
857 +
858 +
859 +/******************************************************************************
860 + Global service API
861 + *****************************************************************************/
862 +// Routine to create a named service
863 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
864 +                                    SERVICE_CREATION_T *setup,
865 +                                    VCHI_SERVICE_HANDLE_T *handle );
866 +
867 +// Routine to destroy a service
868 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
869 +
870 +// Routine to open a named service
871 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
872 +                                  SERVICE_CREATION_T *setup,
873 +                                  VCHI_SERVICE_HANDLE_T *handle);
874 +
875 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
876 +                                      short *peer_version );
877 +
878 +// Routine to close a named service
879 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
880 +
881 +// Routine to increment ref count on a named service
882 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
883 +
884 +// Routine to decrement ref count on a named service
885 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
886 +
887 +// Routine to send a message across a service
888 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
889 +                               const void *data,
890 +                               uint32_t data_size,
891 +                               VCHI_FLAGS_T flags,
892 +                               void *msg_handle );
893 +
894 +// scatter-gather (vector) and send message
895 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
896 +                            VCHI_MSG_VECTOR_EX_T *vector,
897 +                            uint32_t count,
898 +                            VCHI_FLAGS_T flags,
899 +                            void *msg_handle );
900 +
901 +// legacy scatter-gather (vector) and send message, only handles pointers
902 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
903 +                         VCHI_MSG_VECTOR_T *vector,
904 +                         uint32_t count,
905 +                         VCHI_FLAGS_T flags,
906 +                         void *msg_handle );
907 +
908 +// Routine to receive a msg from a service
909 +// Dequeue is equivalent to hold, copy into client buffer, release
910 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
911 +                                 void *data,
912 +                                 uint32_t max_data_size_to_read,
913 +                                 uint32_t *actual_msg_size,
914 +                                 VCHI_FLAGS_T flags );
915 +
916 +// Routine to look at a message in place.
917 +// The message is not dequeued, so a subsequent call to peek or dequeue
918 +// will return the same message.
919 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
920 +                              void **data,
921 +                              uint32_t *msg_size,
922 +                              VCHI_FLAGS_T flags );
923 +
924 +// Routine to remove a message after it has been read in place with peek
925 +// The first message on the queue is dequeued.
926 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
927 +
928 +// Routine to look at a message in place.
929 +// The message is dequeued, so the caller is left holding it; the descriptor is
930 +// filled in and must be released when the user has finished with the message.
931 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
932 +                              void **data,        // } may be NULL, as info can be
933 +                              uint32_t *msg_size, // } obtained from HELD_MSG_T
934 +                              VCHI_FLAGS_T flags,
935 +                              VCHI_HELD_MSG_T *message_descriptor );
936 +
937 +// Initialise an iterator to look through messages in place
938 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
939 +                                    VCHI_MSG_ITER_T *iter,
940 +                                    VCHI_FLAGS_T flags );
941 +
942 +/******************************************************************************
943 + Global service support API - operations on held messages and message iterators
944 + *****************************************************************************/
945 +
946 +// Routine to get the address of a held message
947 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
948 +
949 +// Routine to get the size of a held message
950 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
951 +
952 +// Routine to get the transmit timestamp as written into the header by the peer
953 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
954 +
955 +// Routine to get the reception timestamp, written as we parsed the header
956 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
957 +
958 +// Routine to release a held message after it has been processed
959 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
960 +
961 +// Indicates whether the iterator has a next message.
962 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
963 +
964 +// Return the pointer and length for the next message and advance the iterator.
965 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
966 +                                   void **data,
967 +                                   uint32_t *msg_size );
968 +
969 +// Remove the last message returned by vchi_msg_iter_next.
970 +// Can only be called once after each call to vchi_msg_iter_next.
971 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
972 +
973 +// Hold the last message returned by vchi_msg_iter_next.
974 +// Can only be called once after each call to vchi_msg_iter_next.
975 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
976 +                                   VCHI_HELD_MSG_T *message );
977 +
978 +// Return information for the next message, and hold it, advancing the iterator.
979 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
980 +                                        void **data,        // } may be NULL
981 +                                        uint32_t *msg_size, // }
982 +                                        VCHI_HELD_MSG_T *message );
983 +
984 +
985 +/******************************************************************************
986 + Global bulk API
987 + *****************************************************************************/
988 +
989 +// Routine to prepare interface for a transfer from the other side
990 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
991 +                                        void *data_dst,
992 +                                        uint32_t data_size,
993 +                                        VCHI_FLAGS_T flags,
994 +                                        void *transfer_handle );
995 +
996 +
997 +// Prepare interface for a transfer from the other side into relocatable memory.
998 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
999 +                                       VCHI_MEM_HANDLE_T h_dst,
1000 +                                       uint32_t offset,
1001 +                                       uint32_t data_size,
1002 +                                       const VCHI_FLAGS_T flags,
1003 +                                       void * const bulk_handle );
1004 +
1005 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
1006 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1007 +                                         const void *data_src,
1008 +                                         uint32_t data_size,
1009 +                                         VCHI_FLAGS_T flags,
1010 +                                         void *transfer_handle );
1011 +
1012 +
1013 +/******************************************************************************
1014 + Configuration plumbing
1015 + *****************************************************************************/
1016 +
1017 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1018 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1019 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1020 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1021 +
1022 +// declare all message drivers here
1023 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1024 +
1025 +#ifdef __cplusplus
1026 +}
1027 +#endif
1028 +
1029 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1030 +                                               VCHI_MEM_HANDLE_T h_src,
1031 +                                               uint32_t offset,
1032 +                                               uint32_t data_size,
1033 +                                               VCHI_FLAGS_T flags,
1034 +                                               void *transfer_handle );
1035 +#endif /* VCHI_H_ */
1036 +
1037 +/****************************** End of file **********************************/
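
The declarations above form the API that kernel-side services program against. The following is a minimal client sketch, illustrative only and not part of the patch: the "ECHO" fourcc is hypothetical, passing NULL/0 to vchi_connect follows the pattern used by ARM-side kernel clients (the connection list is managed internally), and VCHI_FLAGS_BLOCK_UNTIL_QUEUED is assumed to come from vchi_common.h, which is not shown in this hunk.

#include "interface/vchi/vchi.h"

static VCHI_INSTANCE_T vchi_instance;
static VCHI_SERVICE_HANDLE_T echo_handle;

static int echo_client_connect(void)
{
   SERVICE_CREATION_T setup = {
      .version        = VCHI_VERSION(1),
      .service_id     = MAKE_FOURCC("ECHO"),  /* hypothetical service id */
      .connection     = NULL,
      .rx_fifo_size   = 0,
      .tx_fifo_size   = 0,
      .callback       = NULL,                 /* no event callback in this sketch */
      .callback_param = NULL,
      .want_unaligned_bulk_rx = 0,
      .want_unaligned_bulk_tx = 0,
      .want_crc       = 0,
   };
   const char msg[] = "hello videocore";

   if (vchi_initialise(&vchi_instance) != 0)
      return -1;
   if (vchi_connect(NULL, 0, vchi_instance) != 0)
      return -1;
   if (vchi_service_open(vchi_instance, &setup, &echo_handle) != 0)
      return -1;

   /* Queue one message, blocking until it has been queued. */
   return vchi_msg_queue(echo_handle, msg, sizeof(msg),
                         VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
}

A real client would typically also supply a VCHI_CALLBACK_T so it is notified of replies and completions, and would use vchi_service_use/vchi_service_release around periods of activity; both are omitted here for brevity.
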
1038 --- /dev/null
1039 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1040 @@ -0,0 +1,224 @@
1041 +/**
1042 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1043 + *
1044 + * Redistribution and use in source and binary forms, with or without
1045 + * modification, are permitted provided that the following conditions
1046 + * are met:
1047 + * 1. Redistributions of source code must retain the above copyright
1048 + *    notice, this list of conditions, and the following disclaimer,
1049 + *    without modification.
1050 + * 2. Redistributions in binary form must reproduce the above copyright
1051 + *    notice, this list of conditions and the following disclaimer in the
1052 + *    documentation and/or other materials provided with the distribution.
1053 + * 3. The names of the above-listed copyright holders may not be used
1054 + *    to endorse or promote products derived from this software without
1055 + *    specific prior written permission.
1056 + *
1057 + * ALTERNATIVELY, this software may be distributed under the terms of the
1058 + * GNU General Public License ("GPL") version 2, as published by the Free
1059 + * Software Foundation.
1060 + *
1061 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1062 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1063 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1064 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1065 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1066 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1067 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1068 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1069 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1070 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1071 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1072 + */
1073 +
1074 +#ifndef VCHI_CFG_H_
1075 +#define VCHI_CFG_H_
1076 +
1077 +/****************************************************************************************
1078 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1079 + * services.
1080 + ***************************************************************************************/
1081 +
1082 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1083 +/* Really determined by the message driver, and should be available from a run-time call. */
1084 +#ifndef VCHI_BULK_ALIGN
1085 +#   if __VCCOREVER__ >= 0x04000000
1086 +#       define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1087 +#   else
1088 +#       define VCHI_BULK_ALIGN 16
1089 +#   endif
1090 +#endif
1091 +
1092 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1093 +/* May be less than or greater than VCHI_BULK_ALIGN */
1094 +/* Really determined by the message driver, and should be available from a run-time call. */
1095 +#ifndef VCHI_BULK_GRANULARITY
1096 +#   if __VCCOREVER__ >= 0x04000000
1097 +#       define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1098 +#   else
1099 +#       define VCHI_BULK_GRANULARITY 16
1100 +#   endif
1101 +#endif
1102 +
1103 +/* The largest possible message to be queued with vchi_msg_queue. */
1104 +#ifndef VCHI_MAX_MSG_SIZE
1105 +#   if defined VCHI_LOCAL_HOST_PORT
1106 +#       define VCHI_MAX_MSG_SIZE     16384         // makes file transfers fast, but should they be using bulk?
1107 +#   else
1108 +#       define VCHI_MAX_MSG_SIZE      4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1109 +#   endif
1110 +#endif
1111 +
1112 +/******************************************************************************************
1113 + * Defines below are system configuration options, and should not be used by VCHI services.
1114 + *****************************************************************************************/
1115 +
1116 +/* How many connections can we support? A localhost implementation uses 2 connections,
1117 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1118 + * driver. */
1119 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1120 +#   define VCHI_MAX_NUM_CONNECTIONS 3
1121 +#endif
1122 +
1123 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1124 + * amount of static memory. */
1125 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1126 +#  define VCHI_MAX_SERVICES_PER_CONNECTION 36
1127 +#endif
1128 +
1129 +/* Adjust if using a message driver that supports more logical TX channels */
1130 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1131 +#   define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1132 +#endif
1133 +
1134 +/* Adjust if using a message driver that supports more logical RX channels */
1135 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1136 +#   define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1137 +#endif
1138 +
1139 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1140 + * receive queue space, less message headers. */
1141 +#ifndef VCHI_NUM_READ_SLOTS
1142 +#  if defined(VCHI_LOCAL_HOST_PORT)
1143 +#     define VCHI_NUM_READ_SLOTS 4
1144 +#  else
1145 +#     define VCHI_NUM_READ_SLOTS 48
1146 +#  endif
1147 +#endif
1148 +
1149 +/* Do we utilise the overrun facility for receive message slots? This can aid peer transmit
1150 + * performance. Only define on VideoCore end, talking to host.
1151 + */
1152 +//#define VCHI_MSG_RX_OVERRUN
1153 +
1154 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1155 + * underneath VCHI will usually have its own buffering. */
1156 +#ifndef VCHI_NUM_WRITE_SLOTS
1157 +#  define VCHI_NUM_WRITE_SLOTS 4
1158 +#endif
1159 +
1160 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1161 + * then it's taking up too much buffer space, and the peer service will be told to stop
1162 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1163 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1164 + * is too high. */
1165 +#ifndef VCHI_XOFF_THRESHOLD
1166 +#  define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1167 +#endif
1168 +
1169 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1170 + * service has dequeued/released enough messages that it's now occupying
1171 + * VCHI_XON_THRESHOLD slots or fewer. */
1172 +#ifndef VCHI_XON_THRESHOLD
1173 +#  define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1174 +#endif
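+
+/* Worked example using the non-localhost defaults above: VCHI_NUM_READ_SLOTS
+ * is 48, so a service is XOFFed once it is holding messages in 48/2 = 24
+ * slots, and XONed again once it has drained back down to 48/4 = 12 slots
+ * or fewer.
+ */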
1175 +
1176 +/* A size below which a bulk transfer omits the handshake completely and always goes
1177 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1178 + * can guarantee this by enabling unaligned transmits).
1179 + * Not API. */
1180 +#ifndef VCHI_MIN_BULK_SIZE
1181 +#  define VCHI_MIN_BULK_SIZE    ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1182 +#endif
1183 +
1184 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1186 + * speed and latency; the smaller the chunk size the better chance of messages and other
1186 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1187 + * break transmissions into chunks.
1188 + */
1189 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1190 +#  define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1191 +#endif
1192 +
1193 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1194 + * with multiple-line frames. Only use if the receiver can cope. */
1195 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1196 +#  define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1197 +#endif
1198 +
1199 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1200 + * vchi_msg_queue will be blocked. */
1201 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1202 +#  define VCHI_TX_MSG_QUEUE_SIZE           256
1203 +#endif
1204 +
1205 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1206 + * will be suspended until older messages are dequeued/released. */
1207 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1208 +#  define VCHI_RX_MSG_QUEUE_SIZE           256
1209 +#endif
1210 +
1211 +/* Really should be able to cope if we run out of received message descriptors, by
1212 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1213 + * under the carpet. */
1214 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1215 +#  undef VCHI_RX_MSG_QUEUE_SIZE
1216 +#  define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
1217 +#endif
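+
+/* Worked example of the override above, with the non-localhost defaults: the
+ * worst case assumed here is VCHI_MAX_MSG_SIZE/16 + 1 = 4096/16 + 1 = 257
+ * messages per read slot, times VCHI_NUM_READ_SLOTS = 48, so
+ * VCHI_RX_MSG_QUEUE_SIZE is raised from 256 to 12336.
+ */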
1218 +
1219 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1220 + * will be blocked. */
1221 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1222 +#  define VCHI_TX_BULK_QUEUE_SIZE           64
1223 +#endif
1224 +
1225 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1226 + * will be blocked. */
1227 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1228 +#  define VCHI_RX_BULK_QUEUE_SIZE           64
1229 +#endif
1230 +
1231 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1232 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1233 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1234 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1235 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1236 +#  define VCHI_MAX_PEER_BULK_REQUESTS       32
1237 +#endif
1238 +
1239 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1240 + * transmitter on and off.
1241 + */
1242 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1243 +
1244 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1245 +
1246 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1247 + * negative for no IDLE.
1248 + */
1249 +#  ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1250 +#    define VCHI_CCP2TX_IDLE_TIMEOUT        5
1251 +#  endif
1252 +
1253 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1254 + * negative for no OFF.
1255 + */
1256 +#  ifndef VCHI_CCP2TX_OFF_TIMEOUT
1257 +#    define VCHI_CCP2TX_OFF_TIMEOUT         1000
1258 +#  endif
1259 +
1260 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1261 +
1262 +#endif /* VCHI_CFG_H_ */
1263 +
1264 +/****************************** End of file **********************************/
1265 --- /dev/null
1266 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1267 @@ -0,0 +1,71 @@
1268 +/**
1269 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1270 + *
1271 + * Redistribution and use in source and binary forms, with or without
1272 + * modification, are permitted provided that the following conditions
1273 + * are met:
1274 + * 1. Redistributions of source code must retain the above copyright
1275 + *    notice, this list of conditions, and the following disclaimer,
1276 + *    without modification.
1277 + * 2. Redistributions in binary form must reproduce the above copyright
1278 + *    notice, this list of conditions and the following disclaimer in the
1279 + *    documentation and/or other materials provided with the distribution.
1280 + * 3. The names of the above-listed copyright holders may not be used
1281 + *    to endorse or promote products derived from this software without
1282 + *    specific prior written permission.
1283 + *
1284 + * ALTERNATIVELY, this software may be distributed under the terms of the
1285 + * GNU General Public License ("GPL") version 2, as published by the Free
1286 + * Software Foundation.
1287 + *
1288 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1289 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1290 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1291 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1292 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1293 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1294 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1295 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1296 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1297 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1298 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1299 + */
1300 +
1301 +#ifndef VCHI_CFG_INTERNAL_H_
1302 +#define VCHI_CFG_INTERNAL_H_
1303 +
1304 +/****************************************************************************************
1305 + * Control optimisation attempts.
1306 + ***************************************************************************************/
1307 +
1308 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1309 +#define VCHI_COARSE_LOCKING
1310 +
1311 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1312 +// (only relevant if VCHI_COARSE_LOCKING)
1313 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1314 +
1315 +// Avoid lock on non-blocking peek
1316 +// (only relevant if VCHI_COARSE_LOCKING)
1317 +#define VCHI_AVOID_PEEK_LOCK
1318 +
1319 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1320 +#define VCHI_MULTIPLE_HANDLER_THREADS
1321 +
1322 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1323 +// our way through the pool of descriptors.
1324 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1325 +
1326 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1327 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1328 +
1329 +// Don't use message descriptors for TX messages that don't need them
1330 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1331 +
1332 +// Nano-locks for multiqueue
1333 +//#define VCHI_MQUEUE_NANOLOCKS
1334 +
1335 +// Lock-free(er) dequeuing
1336 +//#define VCHI_RX_NANOLOCKS
1337 +
1338 +#endif /*VCHI_CFG_INTERNAL_H_*/
1339 --- /dev/null
1340 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1341 @@ -0,0 +1,163 @@
1342 +/**
1343 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1344 + *
1345 + * Redistribution and use in source and binary forms, with or without
1346 + * modification, are permitted provided that the following conditions
1347 + * are met:
1348 + * 1. Redistributions of source code must retain the above copyright
1349 + *    notice, this list of conditions, and the following disclaimer,
1350 + *    without modification.
1351 + * 2. Redistributions in binary form must reproduce the above copyright
1352 + *    notice, this list of conditions and the following disclaimer in the
1353 + *    documentation and/or other materials provided with the distribution.
1354 + * 3. The names of the above-listed copyright holders may not be used
1355 + *    to endorse or promote products derived from this software without
1356 + *    specific prior written permission.
1357 + *
1358 + * ALTERNATIVELY, this software may be distributed under the terms of the
1359 + * GNU General Public License ("GPL") version 2, as published by the Free
1360 + * Software Foundation.
1361 + *
1362 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1363 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1364 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1365 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1366 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1367 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1368 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1369 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1370 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1371 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1372 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1373 + */
1374 +
1375 +#ifndef VCHI_COMMON_H_
1376 +#define VCHI_COMMON_H_
1377 +
1378 +
1379 +//flags used when sending messages (bit flags, so values can be ORed together)
1380 +typedef enum
1381 +{
1382 +   VCHI_FLAGS_NONE                      = 0x0,
1383 +   VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE   = 0x1,   // waits for message to be received, or sent (NB. not the same as being seen on other side)
1384 +   VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2,   // run a callback when message sent
1385 +   VCHI_FLAGS_BLOCK_UNTIL_QUEUED        = 0x4,   // return once the transfer is in a queue ready to go
1386 +   VCHI_FLAGS_ALLOW_PARTIAL             = 0x8,
1387 +   VCHI_FLAGS_BLOCK_UNTIL_DATA_READ     = 0x10,
1388 +   VCHI_FLAGS_CALLBACK_WHEN_DATA_READ   = 0x20,
1389 +
1390 +   VCHI_FLAGS_ALIGN_SLOT            = 0x000080,  // internal use only
1391 +   VCHI_FLAGS_BULK_AUX_QUEUED       = 0x010000,  // internal use only
1392 +   VCHI_FLAGS_BULK_AUX_COMPLETE     = 0x020000,  // internal use only
1393 +   VCHI_FLAGS_BULK_DATA_QUEUED      = 0x040000,  // internal use only
1394 +   VCHI_FLAGS_BULK_DATA_COMPLETE    = 0x080000,  // internal use only
1395 +   VCHI_FLAGS_INTERNAL              = 0xFF0000
1396 +} VCHI_FLAGS_T;
1397 +
1398 +// constants for vchi_crc_control()
1399 +typedef enum {
1400 +   VCHI_CRC_NOTHING = -1,
1401 +   VCHI_CRC_PER_SERVICE = 0,
1402 +   VCHI_CRC_EVERYTHING = 1,
1403 +} VCHI_CRC_CONTROL_T;
1404 +
1405 +//callback reasons when an event occurs on a service
1406 +typedef enum
1407 +{
1408 +   VCHI_CALLBACK_REASON_MIN,
1409 +
1410 +   //This indicates that there is data available
1411 +   //handle is the msg id that was transmitted with the data
1412 +   //    When a message is received and there was no FULL message available previously, send callback
1413 +   //    Tasks get kicked by the callback, reset their event, and try to read from the FIFO until it fails
1414 +   VCHI_CALLBACK_MSG_AVAILABLE,
1415 +   VCHI_CALLBACK_MSG_SENT,
1416 +   VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1417 +
1418 +   // This indicates that a transfer from the other side has completed
1419 +   VCHI_CALLBACK_BULK_RECEIVED,
1420 +   //This indicates that data queued up to be sent has now gone
1421 +   //handle is the msg id that was used when sending the data
1422 +   VCHI_CALLBACK_BULK_SENT,
1423 +   VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1424 +   VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1425 +
1426 +   VCHI_CALLBACK_SERVICE_CLOSED,
1427 +
1428 +   // this side has sent XOFF to peer due to lack of data consumption by service
1429 +   // (suggests the service may need to take some recovery action if it has
1430 +   // been deliberately holding off consuming data)
1431 +   VCHI_CALLBACK_SENT_XOFF,
1432 +   VCHI_CALLBACK_SENT_XON,
1433 +
1434 +   // indicates that a bulk transfer has finished reading the source buffer
1435 +   VCHI_CALLBACK_BULK_DATA_READ,
1436 +
1437 +   // power notification events (currently host side only)
1438 +   VCHI_CALLBACK_PEER_OFF,
1439 +   VCHI_CALLBACK_PEER_SUSPENDED,
1440 +   VCHI_CALLBACK_PEER_ON,
1441 +   VCHI_CALLBACK_PEER_RESUMED,
1442 +   VCHI_CALLBACK_FORCED_POWER_OFF,
1443 +
1444 +#ifdef USE_VCHIQ_ARM
1445 +   // some extra notifications provided by vchiq_arm
1446 +   VCHI_CALLBACK_SERVICE_OPENED,
1447 +   VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1448 +   VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1449 +#endif
1450 +
1451 +   VCHI_CALLBACK_REASON_MAX
1452 +} VCHI_CALLBACK_REASON_T;
1453 +
1454 +//Callback used by all services / bulk transfers
1455 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1456 +                                 VCHI_CALLBACK_REASON_T reason,
1457 +                                 void *handle ); //for transmitting msgs only
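+
+/* Minimal usage sketch (hypothetical client code, not part of this header):
+ * a callback typically just notes why it was called and defers real work to
+ * a task.
+ *
+ *   static void my_callback(void *callback_param,
+ *                           VCHI_CALLBACK_REASON_T reason, void *handle)
+ *   {
+ *      switch (reason) {
+ *      case VCHI_CALLBACK_MSG_AVAILABLE:
+ *         // kick a task to drain the receive FIFO until it is empty
+ *         break;
+ *      case VCHI_CALLBACK_BULK_SENT:
+ *         // 'handle' is the msg id supplied when the transmit was queued
+ *         break;
+ *      default:
+ *         break;
+ *      }
+ *   }
+ */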
1458 +
1459 +
1460 +
1461 +/*
1462 + * Define vector struct for scatter-gather (vector) operations
1463 + * Vectors can be nested - if a vector element has negative length, then
1464 + * the data pointer is treated as pointing to another vector array, with
1465 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1466 + * you can do this:
1467 + *
1468 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1469 + * {
1470 + *    VCHI_MSG_VECTOR_T nv[2];
1471 + *    nv[0].vec_base = my_header;
1472 + *    nv[0].vec_len = sizeof my_header;
1473 + *    nv[1].vec_base = v;
1474 + *    nv[1].vec_len = -n;
1475 + *    ...
1476 + *
1477 + */
1478 +typedef struct vchi_msg_vector {
1479 +   const void *vec_base;
1480 +   int32_t vec_len;
1481 +} VCHI_MSG_VECTOR_T;
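+
+/* Sketch only (the helper below is hypothetical, not part of the API): the
+ * nesting convention above means the total payload length of a vector array
+ * can be computed recursively.
+ *
+ *   static int32_t my_vec_total_len(const VCHI_MSG_VECTOR_T *vec, int count)
+ *   {
+ *      int32_t total = 0;
+ *      int i;
+ *      for (i = 0; i < count; i++) {
+ *         if (vec[i].vec_len < 0)   // nested array of -vec_len elements
+ *            total += my_vec_total_len(vec[i].vec_base, -vec[i].vec_len);
+ *         else
+ *            total += vec[i].vec_len;
+ *      }
+ *      return total;
+ *   }
+ */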
1482 +
1483 +// Opaque type for a connection API
1484 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1485 +
1486 +// Opaque type for a message driver
1487 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1488 +
1489 +
1490 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1491 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1492 +// Iterates over messages in the queue at the instant of the call to vchi_msg_look_ahead -
1493 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1494 +// is used again after messages for that service are removed/dequeued by any
1495 +// means other than vchi_msg_iter_... calls on the iterator itself.
1496 +typedef struct {
1497 +   struct opaque_vchi_service_t *service;
1498 +   void *last;
1499 +   void *next;
1500 +   void *remove;
1501 +} VCHI_MSG_ITER_T;
1502 +
1503 +
1504 +#endif // VCHI_COMMON_H_
1505 --- /dev/null
1506 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1507 @@ -0,0 +1,42 @@
1508 +/**
1509 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1510 + *
1511 + * Redistribution and use in source and binary forms, with or without
1512 + * modification, are permitted provided that the following conditions
1513 + * are met:
1514 + * 1. Redistributions of source code must retain the above copyright
1515 + *    notice, this list of conditions, and the following disclaimer,
1516 + *    without modification.
1517 + * 2. Redistributions in binary form must reproduce the above copyright
1518 + *    notice, this list of conditions and the following disclaimer in the
1519 + *    documentation and/or other materials provided with the distribution.
1520 + * 3. The names of the above-listed copyright holders may not be used
1521 + *    to endorse or promote products derived from this software without
1522 + *    specific prior written permission.
1523 + *
1524 + * ALTERNATIVELY, this software may be distributed under the terms of the
1525 + * GNU General Public License ("GPL") version 2, as published by the Free
1526 + * Software Foundation.
1527 + *
1528 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1529 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1530 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1531 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1532 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1533 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1534 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1535 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1536 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1537 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1538 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1539 + */
1540 +
1541 +#ifndef VCHI_MH_H_
1542 +#define VCHI_MH_H_
1543 +
1544 +#include <linux/types.h>
1545 +
1546 +typedef int32_t VCHI_MEM_HANDLE_T;
1547 +#define VCHI_MEM_HANDLE_INVALID 0
1548 +
1549 +#endif
1550 --- /dev/null
1551 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1552 @@ -0,0 +1,41 @@
1553 +/**
1554 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1555 + *
1556 + * Redistribution and use in source and binary forms, with or without
1557 + * modification, are permitted provided that the following conditions
1558 + * are met:
1559 + * 1. Redistributions of source code must retain the above copyright
1560 + *    notice, this list of conditions, and the following disclaimer,
1561 + *    without modification.
1562 + * 2. Redistributions in binary form must reproduce the above copyright
1563 + *    notice, this list of conditions and the following disclaimer in the
1564 + *    documentation and/or other materials provided with the distribution.
1565 + * 3. The names of the above-listed copyright holders may not be used
1566 + *    to endorse or promote products derived from this software without
1567 + *    specific prior written permission.
1568 + *
1569 + * ALTERNATIVELY, this software may be distributed under the terms of the
1570 + * GNU General Public License ("GPL") version 2, as published by the Free
1571 + * Software Foundation.
1572 + *
1573 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1574 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1575 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1576 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1577 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1578 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1579 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1580 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1581 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1582 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1583 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1584 + */
1585 +
1586 +#ifndef VCHIQ_VCHIQ_H
1587 +#define VCHIQ_VCHIQ_H
1588 +
1589 +#include "vchiq_if.h"
1590 +#include "vchiq_util.h"
1591 +
1592 +#endif
1593 +
1594 --- /dev/null
1595 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1596 @@ -0,0 +1,42 @@
1597 +/**
1598 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1599 + *
1600 + * Redistribution and use in source and binary forms, with or without
1601 + * modification, are permitted provided that the following conditions
1602 + * are met:
1603 + * 1. Redistributions of source code must retain the above copyright
1604 + *    notice, this list of conditions, and the following disclaimer,
1605 + *    without modification.
1606 + * 2. Redistributions in binary form must reproduce the above copyright
1607 + *    notice, this list of conditions and the following disclaimer in the
1608 + *    documentation and/or other materials provided with the distribution.
1609 + * 3. The names of the above-listed copyright holders may not be used
1610 + *    to endorse or promote products derived from this software without
1611 + *    specific prior written permission.
1612 + *
1613 + * ALTERNATIVELY, this software may be distributed under the terms of the
1614 + * GNU General Public License ("GPL") version 2, as published by the Free
1615 + * Software Foundation.
1616 + *
1617 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1618 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1619 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1620 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1621 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1622 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1623 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1624 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1625 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1626 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1627 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1628 + */
1629 +
1630 +#ifndef VCHIQ_2835_H
1631 +#define VCHIQ_2835_H
1632 +
1633 +#include "vchiq_pagelist.h"
1634 +
1635 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1636 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
1637 +
1638 +#endif /* VCHIQ_2835_H */
1639 --- /dev/null
1640 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1641 @@ -0,0 +1,538 @@
1642 +/**
1643 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1644 + *
1645 + * Redistribution and use in source and binary forms, with or without
1646 + * modification, are permitted provided that the following conditions
1647 + * are met:
1648 + * 1. Redistributions of source code must retain the above copyright
1649 + *    notice, this list of conditions, and the following disclaimer,
1650 + *    without modification.
1651 + * 2. Redistributions in binary form must reproduce the above copyright
1652 + *    notice, this list of conditions and the following disclaimer in the
1653 + *    documentation and/or other materials provided with the distribution.
1654 + * 3. The names of the above-listed copyright holders may not be used
1655 + *    to endorse or promote products derived from this software without
1656 + *    specific prior written permission.
1657 + *
1658 + * ALTERNATIVELY, this software may be distributed under the terms of the
1659 + * GNU General Public License ("GPL") version 2, as published by the Free
1660 + * Software Foundation.
1661 + *
1662 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1663 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1664 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1665 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1666 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1667 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1668 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1669 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1670 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1671 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1672 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1673 + */
1674 +
1675 +#include <linux/kernel.h>
1676 +#include <linux/types.h>
1677 +#include <linux/errno.h>
1678 +#include <linux/interrupt.h>
1679 +#include <linux/irq.h>
1680 +#include <linux/pagemap.h>
1681 +#include <linux/dma-mapping.h>
1682 +#include <linux/version.h>
1683 +#include <linux/io.h>
1684 +#include <linux/uaccess.h>
1685 +#include <asm/pgtable.h>
1686 +
1687 +#include <mach/irqs.h>
1688 +
1689 +#include <mach/platform.h>
1690 +#include <mach/vcio.h>
1691 +
1692 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1693 +
1694 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1695 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1696 +
1697 +#include "vchiq_arm.h"
1698 +#include "vchiq_2835.h"
1699 +#include "vchiq_connected.h"
1700 +
1701 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1702 +
1703 +typedef struct vchiq_2835_state_struct {
1704 +   int inited;
1705 +   VCHIQ_ARM_STATE_T arm_state;
1706 +} VCHIQ_2835_ARM_STATE_T;
1707 +
1708 +static char *g_slot_mem;
1709 +static int g_slot_mem_size;
1710 +dma_addr_t g_slot_phys;
1711 +static FRAGMENTS_T *g_fragments_base;
1712 +static FRAGMENTS_T *g_free_fragments;
1713 +struct semaphore g_free_fragments_sema;
1714 +
1715 +extern int vchiq_arm_log_level;
1716 +
1717 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1718 +
1719 +static irqreturn_t
1720 +vchiq_doorbell_irq(int irq, void *dev_id);
1721 +
1722 +static int
1723 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1724 +                struct task_struct *task, PAGELIST_T ** ppagelist);
1725 +
1726 +static void
1727 +free_pagelist(PAGELIST_T *pagelist, int actual);
1728 +
1729 +int __init
1730 +vchiq_platform_init(VCHIQ_STATE_T *state)
1731 +{
1732 +       VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1733 +       int frag_mem_size;
1734 +       int err;
1735 +       int i;
1736 +
1737 +       /* Allocate space for the channels in coherent memory */
1738 +       g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1739 +       frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1740 +
1741 +       g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1742 +               &g_slot_phys, GFP_ATOMIC);
1743 +
1744 +       if (!g_slot_mem) {
1745 +               vchiq_log_error(vchiq_arm_log_level,
1746 +                       "Unable to allocate channel memory");
1747 +               err = -ENOMEM;
1748 +               goto failed_alloc;
1749 +       }
1750 +
1751 +       WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1752 +
1753 +       vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1754 +       if (!vchiq_slot_zero) {
1755 +               err = -EINVAL;
1756 +               goto failed_init_slots;
1757 +       }
1758 +
1759 +       vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1760 +               (int)g_slot_phys + g_slot_mem_size;
1761 +       vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1762 +               MAX_FRAGMENTS;
1763 +
1764 +       g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1765 +       g_slot_mem_size += frag_mem_size;
1766 +
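+       /* Thread a singly-linked free list through the (as yet unused)
+        * fragment buffers themselves; the first word of each free fragment
+        * holds the pointer to the next one. */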
1767 +       g_free_fragments = g_fragments_base;
1768 +       for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1769 +               *(FRAGMENTS_T **)&g_fragments_base[i] =
1770 +                       &g_fragments_base[i + 1];
1771 +       }
1772 +       *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1773 +       sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1774 +
1775 +       if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1776 +               VCHIQ_SUCCESS) {
1777 +               err = -EINVAL;
1778 +               goto failed_vchiq_init;
1779 +       }
1780 +
1781 +       err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1782 +               IRQF_IRQPOLL, "VCHIQ doorbell",
1783 +               state);
1784 +       if (err < 0) {
1785 +               vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1786 +                       "irq=%d err=%d", __func__,
1787 +                       VCHIQ_DOORBELL_IRQ, err);
1788 +               goto failed_request_irq;
1789 +       }
1790 +
1791 +       /* Send the base address of the slots to VideoCore */
1792 +
1793 +       dsb(); /* Ensure all writes have completed */
1794 +
1795 +       bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1796 +
1797 +       vchiq_log_info(vchiq_arm_log_level,
1798 +               "vchiq_init - done (slots %x, phys %x)",
1799 +               (unsigned int)vchiq_slot_zero, g_slot_phys);
1800 +
1801 +   vchiq_call_connected_callbacks();
1802 +
1803 +   return 0;
1804 +
1805 +failed_request_irq:
1806 +failed_vchiq_init:
1807 +failed_init_slots:
1808 +   dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
1809 +
1810 +failed_alloc:
1811 +   return err;
1812 +}
1813 +
1814 +void __exit
1815 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1816 +{
1817 +   free_irq(VCHIQ_DOORBELL_IRQ, state);
1818 +   dma_free_coherent(NULL, g_slot_mem_size,
1819 +                     g_slot_mem, g_slot_phys);
1820 +}
1821 +
1822 +
1823 +VCHIQ_STATUS_T
1824 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1825 +{
1826 +   VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1827 +   state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
+   if (!state->platform_state)
+      return VCHIQ_ERROR;
1828 +   ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1829 +   status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1830 +   if(status != VCHIQ_SUCCESS)
1831 +   {
1832 +      ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1833 +   }
1834 +   return status;
1835 +}
1836 +
1837 +VCHIQ_ARM_STATE_T*
1838 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1839 +{
1840 +   if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1841 +   {
1842 +      BUG();
1843 +   }
1844 +   return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1845 +}
1846 +
1847 +void
1848 +remote_event_signal(REMOTE_EVENT_T *event)
1849 +{
1850 +       wmb();
1851 +
1852 +       event->fired = 1;
1853 +
1854 +       dsb();         /* data barrier operation */
1855 +
1856 +       if (event->armed) {
1857 +               /* trigger vc interrupt */
1858 +
1859 +               writel(0, __io_address(ARM_0_BELL2));
1860 +       }
1861 +}
1862 +
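+/* 'src' may be either a userspace pointer or a kernel pointer; addresses
+ * below TASK_SIZE are treated as userspace and copied with copy_from_user(),
+ * everything else with memcpy().
+ */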
1863 +int
1864 +vchiq_copy_from_user(void *dst, const void *src, int size)
1865 +{
1866 +       if ((uint32_t)src < TASK_SIZE) {
1867 +               return copy_from_user(dst, src, size);
1868 +       } else {
1869 +               memcpy(dst, src, size);
1870 +               return 0;
1871 +       }
1872 +}
1873 +
1874 +VCHIQ_STATUS_T
1875 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1876 +       void *offset, int size, int dir)
1877 +{
1878 +       PAGELIST_T *pagelist;
1879 +       int ret;
1880 +
1881 +       WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1882 +
1883 +       ret = create_pagelist((char __user *)offset, size,
1884 +                       (dir == VCHIQ_BULK_RECEIVE)
1885 +                       ? PAGELIST_READ
1886 +                       : PAGELIST_WRITE,
1887 +                       current,
1888 +                       &pagelist);
1889 +       if (ret != 0)
1890 +               return VCHIQ_ERROR;
1891 +
1892 +       bulk->handle = memhandle;
1893 +       bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
1894 +
1895 +       /* Store the pagelist address in remote_data, which isn't used by the
1896 +          slave. */
1897 +       bulk->remote_data = pagelist;
1898 +
1899 +       return VCHIQ_SUCCESS;
1900 +}
1901 +
1902 +void
1903 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
1904 +{
1905 +       if (bulk && bulk->remote_data && bulk->actual)
1906 +               free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
1907 +}
1908 +
1909 +void
1910 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
1911 +{
1912 +       /*
1913 +        * This should only be called on the master (VideoCore) side, but
1914 +        * provide an implementation to avoid the need for ifdefery.
1915 +        */
1916 +       BUG();
1917 +}
1918 +
1919 +void
1920 +vchiq_dump_platform_state(void *dump_context)
1921 +{
1922 +       char buf[80];
1923 +       int len;
1924 +       len = snprintf(buf, sizeof(buf),
1925 +               "  Platform: 2835 (VC master)");
1926 +       vchiq_dump(dump_context, buf, len + 1);
1927 +}
1928 +
1929 +VCHIQ_STATUS_T
1930 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
1931 +{
1932 +   return VCHIQ_ERROR;
1933 +}
1934 +
1935 +VCHIQ_STATUS_T
1936 +vchiq_platform_resume(VCHIQ_STATE_T *state)
1937 +{
1938 +   return VCHIQ_SUCCESS;
1939 +}
1940 +
1941 +void
1942 +vchiq_platform_paused(VCHIQ_STATE_T *state)
1943 +{
1944 +}
1945 +
1946 +void
1947 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
1948 +{
1949 +}
1950 +
1951 +int
1952 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
1953 +{
1954 +   return 1; // autosuspend not supported - videocore always wanted
1955 +}
1956 +
1957 +int
1958 +vchiq_platform_use_suspend_timer(void)
1959 +{
1960 +   return 0;
1961 +}
1962 +void
1963 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
1964 +{
1965 +       vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
1966 +}
1967 +void
1968 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
1969 +{
1970 +       (void)state;
1971 +}
1972 +/*
1973 + * Local functions
1974 + */
1975 +
1976 +static irqreturn_t
1977 +vchiq_doorbell_irq(int irq, void *dev_id)
1978 +{
1979 +       VCHIQ_STATE_T *state = dev_id;
1980 +       irqreturn_t ret = IRQ_NONE;
1981 +       unsigned int status;
1982 +
1983 +       /* Read (and clear) the doorbell */
1984 +       status = readl(__io_address(ARM_0_BELL0));
1985 +
1986 +       if (status & 0x4) {  /* Was the doorbell rung? */
1987 +               remote_event_pollall(state);
1988 +               ret = IRQ_HANDLED;
1989 +       }
1990 +
1991 +       return ret;
1992 +}
1993 +
1994 +/* There is a potential problem with partial cache lines (pages?)
1995 +** at the ends of the block when reading. If the CPU accessed anything in
1996 +** the same line (page?) then it may have pulled old data into the cache,
1997 +** obscuring the new data underneath. We can solve this by transferring the
1998 +** partial cache lines separately, and allowing the ARM to copy into the
1999 +** cached area.
2000 +
2001 +** N.B. This implementation plays slightly fast and loose with the Linux
2002 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2003 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2004 +** from increased speed as a result.
2005 +*/
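+/* For example, assuming CACHE_LINE_SIZE is 32: a bulk read whose buffer
+ * starts 10 bytes into a cache line has its first (32 - 10) = 22 bytes
+ * carried in fragments->headbuf and memcpy'd into place by free_pagelist(),
+ * while the cache-line-aligned middle of the buffer is written directly.
+ */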
2006 +
2007 +static int
2008 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2009 +       struct task_struct *task, PAGELIST_T ** ppagelist)
2010 +{
2011 +       PAGELIST_T *pagelist;
2012 +       struct page **pages;
2013 +       struct page *page;
2014 +       unsigned long *addrs;
2015 +       unsigned int num_pages, offset, i;
2016 +       char *addr, *base_addr, *next_addr;
2017 +       int run, addridx, actual_pages;
2018 +
2019 +       offset = (unsigned int)buf & (PAGE_SIZE - 1);
2020 +       num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2021 +
2022 +       *ppagelist = NULL;
2023 +
2024 +       /* Allocate enough storage to hold the page pointers and the page
2025 +       ** list
2026 +       */
2027 +       pagelist = kmalloc(sizeof(PAGELIST_T) +
2028 +               (num_pages * sizeof(unsigned long)) +
2029 +               (num_pages * sizeof(pages[0])),
2030 +               GFP_KERNEL);
2031 +
2032 +       vchiq_log_trace(vchiq_arm_log_level,
2033 +               "create_pagelist - %x", (unsigned int)pagelist);
2034 +       if (!pagelist)
2035 +               return -ENOMEM;
2036 +
2037 +       addrs = pagelist->addrs;
2038 +       pages = (struct page **)(addrs + num_pages);
2039 +
2040 +       down_read(&task->mm->mmap_sem);
2041 +       actual_pages = get_user_pages(task, task->mm,
2042 +               (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages,
2043 +               (type == PAGELIST_READ) /*Write */ , 0 /*Force */ ,
2044 +               pages, NULL /*vmas */);
2045 +       up_read(&task->mm->mmap_sem);
2046 +
2047 +   if (actual_pages != num_pages)
2048 +   {
2049 +      /* This is probably due to the process being killed */
2050 +      while (actual_pages > 0)
2051 +      {
2052 +         actual_pages--;
2053 +         page_cache_release(pages[actual_pages]);
2054 +      }
2055 +      kfree(pagelist);
2056 +      if (actual_pages == 0)
2057 +         actual_pages = -ENOMEM;
2058 +      return actual_pages;
2059 +   }
2060 +
2061 +       pagelist->length = count;
2062 +       pagelist->type = type;
2063 +       pagelist->offset = offset;
2064 +
2065 +       /* Group the pages into runs of contiguous pages */
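+       /* Each addrs[] entry is a page-aligned bus address with the number of
+        * additional contiguous pages packed into its low-order (page offset)
+        * bits; e.g. five contiguous pages starting at bus address 0xC0100000
+        * become the single entry 0xC0100004. The run count is capped below
+        * PAGE_SIZE - 1 so it always fits. */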
2066 +
2067 +       base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2068 +       next_addr = base_addr + PAGE_SIZE;
2069 +       addridx = 0;
2070 +       run = 0;
2071 +
2072 +       for (i = 1; i < num_pages; i++) {
2073 +               addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2074 +               if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2075 +                       next_addr += PAGE_SIZE;
2076 +                       run++;
2077 +               } else {
2078 +                       addrs[addridx] = (unsigned long)base_addr + run;
2079 +                       addridx++;
2080 +                       base_addr = addr;
2081 +                       next_addr = addr + PAGE_SIZE;
2082 +                       run = 0;
2083 +               }
2084 +       }
2085 +
2086 +       addrs[addridx] = (unsigned long)base_addr + run;
2087 +       addridx++;
2088 +
2089 +       /* Partial cache lines (fragments) require special measures */
2090 +       if ((type == PAGELIST_READ) &&
2091 +               ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2092 +               ((pagelist->offset + pagelist->length) &
2093 +               (CACHE_LINE_SIZE - 1)))) {
2094 +               FRAGMENTS_T *fragments;
2095 +
2096 +               if (down_interruptible(&g_free_fragments_sema) != 0) {
2097 +                       kfree(pagelist);
2098 +                       return -EINTR;
2099 +               }
2100 +
2101 +               WARN_ON(g_free_fragments == NULL);
2102 +
2103 +               down(&g_free_fragments_mutex);
2104 +               fragments = (FRAGMENTS_T *) g_free_fragments;
2105 +               WARN_ON(fragments == NULL);
2106 +               g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2107 +               up(&g_free_fragments_mutex);
2108 +               pagelist->type =
2109 +                        PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2110 +                                                        g_fragments_base);
2111 +       }
2112 +
2113 +       for (page = virt_to_page(pagelist);
2114 +               page <= virt_to_page(addrs + num_pages - 1); page++) {
2115 +               flush_dcache_page(page);
2116 +       }
2117 +
2118 +       *ppagelist = pagelist;
2119 +
2120 +       return 0;
2121 +}
2122 +
2123 +static void
2124 +free_pagelist(PAGELIST_T *pagelist, int actual)
2125 +{
2126 +       struct page **pages;
2127 +       unsigned int num_pages, i;
2128 +
2129 +       vchiq_log_trace(vchiq_arm_log_level,
2130 +               "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2131 +
2132 +       num_pages =
2133 +               (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2134 +               PAGE_SIZE;
2135 +
2136 +       pages = (struct page **)(pagelist->addrs + num_pages);
2137 +
2138 +       /* Deal with any partial cache lines (fragments) */
2139 +       if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2140 +               FRAGMENTS_T *fragments = g_fragments_base +
2141 +                       (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2142 +               int head_bytes, tail_bytes;
2143 +               head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2144 +                       (CACHE_LINE_SIZE - 1);
2145 +               tail_bytes = (pagelist->offset + actual) &
2146 +                       (CACHE_LINE_SIZE - 1);
2147 +
2148 +               if ((actual >= 0) && (head_bytes != 0)) {
2149 +                       if (head_bytes > actual)
2150 +                               head_bytes = actual;
2151 +
2152 +                       memcpy((char *)page_address(pages[0]) +
2153 +                               pagelist->offset,
2154 +                               fragments->headbuf,
2155 +                               head_bytes);
2156 +               }
2157 +               if ((actual >= 0) && (head_bytes < actual) &&
2158 +                       (tail_bytes != 0)) {
2159 +                       memcpy((char *)page_address(pages[num_pages - 1]) +
2160 +                               ((pagelist->offset + actual) &
2161 +                               (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2162 +                               fragments->tailbuf, tail_bytes);
2163 +               }
2164 +
2165 +               down(&g_free_fragments_mutex);
2166 +               *(FRAGMENTS_T **) fragments = g_free_fragments;
2167 +               g_free_fragments = fragments;
2168 +               up(&g_free_fragments_mutex);
2169 +               up(&g_free_fragments_sema);
2170 +       }
2171 +
2172 +       for (i = 0; i < num_pages; i++) {
2173 +               if (pagelist->type != PAGELIST_WRITE)
2174 +                       set_page_dirty(pages[i]);
2175 +               page_cache_release(pages[i]);
2176 +       }
2177 +
2178 +       kfree(pagelist);
2179 +}
2180 --- /dev/null
2181 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2182 @@ -0,0 +1,2813 @@
2183 +/**
2184 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2185 + *
2186 + * Redistribution and use in source and binary forms, with or without
2187 + * modification, are permitted provided that the following conditions
2188 + * are met:
2189 + * 1. Redistributions of source code must retain the above copyright
2190 + *    notice, this list of conditions, and the following disclaimer,
2191 + *    without modification.
2192 + * 2. Redistributions in binary form must reproduce the above copyright
2193 + *    notice, this list of conditions and the following disclaimer in the
2194 + *    documentation and/or other materials provided with the distribution.
2195 + * 3. The names of the above-listed copyright holders may not be used
2196 + *    to endorse or promote products derived from this software without
2197 + *    specific prior written permission.
2198 + *
2199 + * ALTERNATIVELY, this software may be distributed under the terms of the
2200 + * GNU General Public License ("GPL") version 2, as published by the Free
2201 + * Software Foundation.
2202 + *
2203 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2204 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2205 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2206 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2207 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2208 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2209 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2210 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2211 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2212 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2213 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2214 + */
2215 +
2216 +#include <linux/kernel.h>
2217 +#include <linux/module.h>
2218 +#include <linux/types.h>
2219 +#include <linux/errno.h>
2220 +#include <linux/cdev.h>
2221 +#include <linux/fs.h>
2222 +#include <linux/device.h>
2223 +#include <linux/mm.h>
2224 +#include <linux/highmem.h>
2225 +#include <linux/pagemap.h>
2226 +#include <linux/bug.h>
2227 +#include <linux/semaphore.h>
2228 +#include <linux/list.h>
2229 +#include <linux/proc_fs.h>
2230 +
2231 +#include "vchiq_core.h"
2232 +#include "vchiq_ioctl.h"
2233 +#include "vchiq_arm.h"
2234 +
2235 +#define DEVICE_NAME "vchiq"
2236 +
2237 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2238 +#undef MODULE_PARAM_PREFIX
2239 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2240 +
2241 +#define VCHIQ_MINOR 0
2242 +
2243 +/* Some per-instance constants */
2244 +#define MAX_COMPLETIONS 16
2245 +#define MAX_SERVICES 64
2246 +#define MAX_ELEMENTS 8
2247 +#define MSG_QUEUE_SIZE 64
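+
+/* MAX_COMPLETIONS and MSG_QUEUE_SIZE are used below as power-of-two ring
+ * sizes (insert/remove indices are masked with size - 1), so they must stay
+ * powers of two. */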
2248 +
2249 +#define KEEPALIVE_VER 1
2250 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2251 +
2252 +/* Run time control of log level, based on KERN_XXX level. */
2253 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2254 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2255 +
2256 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2257 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2258 +
2259 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2260 +static const char *const suspend_state_names[] = {
2261 +       "VC_SUSPEND_FORCE_CANCELED",
2262 +       "VC_SUSPEND_REJECTED",
2263 +       "VC_SUSPEND_FAILED",
2264 +       "VC_SUSPEND_IDLE",
2265 +       "VC_SUSPEND_REQUESTED",
2266 +       "VC_SUSPEND_IN_PROGRESS",
2267 +       "VC_SUSPEND_SUSPENDED"
2268 +};
2269 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2270 +static const char *const resume_state_names[] = {
2271 +       "VC_RESUME_FAILED",
2272 +       "VC_RESUME_IDLE",
2273 +       "VC_RESUME_REQUESTED",
2274 +       "VC_RESUME_IN_PROGRESS",
2275 +       "VC_RESUME_RESUMED"
2276 +};
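+/* The *_NUM_OFFSET values above are added to a (possibly negative) state
+ * value to index these name arrays, e.g. with VC_SUSPEND_NUM_OFFSET = 3 the
+ * three negative suspend states map to entries 0..2 and VC_SUSPEND_IDLE to
+ * entry 3. */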
2277 +/* The number of times we allow force suspend to timeout before actually
2278 +** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
2279 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2280 +*/
2281 +#define FORCE_SUSPEND_FAIL_MAX 8
2282 +
2283 +/* The time in ms allowed for videocore to go idle when force suspend has been
2284 + * requested */
2285 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2286 +
2287 +
2288 +static void suspend_timer_callback(unsigned long context);
2289 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
2290 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
2291 +
2292 +
2293 +typedef struct user_service_struct {
2294 +       VCHIQ_SERVICE_T *service;
2295 +       void *userdata;
2296 +       VCHIQ_INSTANCE_T instance;
2297 +       int is_vchi;
2298 +       int dequeue_pending;
2299 +       int message_available_pos;
2300 +       int msg_insert;
2301 +       int msg_remove;
2302 +       struct semaphore insert_event;
2303 +       struct semaphore remove_event;
2304 +       VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2305 +} USER_SERVICE_T;
2306 +
2307 +struct bulk_waiter_node {
2308 +       struct bulk_waiter bulk_waiter;
2309 +       int pid;
2310 +       struct list_head list;
2311 +};
2312 +
2313 +struct vchiq_instance_struct {
2314 +       VCHIQ_STATE_T *state;
2315 +       VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2316 +       int completion_insert;
2317 +       int completion_remove;
2318 +       struct semaphore insert_event;
2319 +       struct semaphore remove_event;
2320 +       struct mutex completion_mutex;
2321 +
2322 +       int connected;
2323 +       int closing;
2324 +       int pid;
2325 +       int mark;
2326 +
2327 +       struct list_head bulk_waiter_list;
2328 +       struct mutex bulk_waiter_list_mutex;
2329 +
2330 +       struct proc_dir_entry *proc_entry;
2331 +};
2332 +
2333 +typedef struct dump_context_struct {
2334 +       char __user *buf;
2335 +       size_t actual;
2336 +       size_t space;
2337 +       loff_t offset;
2338 +} DUMP_CONTEXT_T;
2339 +
2340 +static struct cdev    vchiq_cdev;
2341 +static dev_t          vchiq_devid;
2342 +static VCHIQ_STATE_T g_state;
2343 +static struct class  *vchiq_class;
2344 +static struct device *vchiq_dev;
2345 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2346 +
2347 +static const char *const ioctl_names[] = {
2348 +       "CONNECT",
2349 +       "SHUTDOWN",
2350 +       "CREATE_SERVICE",
2351 +       "REMOVE_SERVICE",
2352 +       "QUEUE_MESSAGE",
2353 +       "QUEUE_BULK_TRANSMIT",
2354 +       "QUEUE_BULK_RECEIVE",
2355 +       "AWAIT_COMPLETION",
2356 +       "DEQUEUE_MESSAGE",
2357 +       "GET_CLIENT_ID",
2358 +       "GET_CONFIG",
2359 +       "CLOSE_SERVICE",
2360 +       "USE_SERVICE",
2361 +       "RELEASE_SERVICE",
2362 +       "SET_SERVICE_OPTION",
2363 +       "DUMP_PHYS_MEM"
2364 +};
2365 +
2366 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2367 +       (VCHIQ_IOC_MAX + 1));
2368 +
2369 +static void
2370 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2371 +
2372 +/****************************************************************************
2373 +*
2374 +*   add_completion
2375 +*
2376 +***************************************************************************/
2377 +
2378 +static VCHIQ_STATUS_T
2379 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2380 +       VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2381 +       void *bulk_userdata)
2382 +{
2383 +       VCHIQ_COMPLETION_DATA_T *completion;
2384 +       DEBUG_INITIALISE(g_state.local)
2385 +
2386 +       while (instance->completion_insert ==
2387 +               (instance->completion_remove + MAX_COMPLETIONS)) {
2388 +               /* Out of space - wait for the client */
2389 +               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2390 +               vchiq_log_trace(vchiq_arm_log_level,
2391 +                       "add_completion - completion queue full");
2392 +               DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2393 +               if (down_interruptible(&instance->remove_event) != 0) {
2394 +                       vchiq_log_info(vchiq_arm_log_level,
2395 +                               "service_callback interrupted");
2396 +                       return VCHIQ_RETRY;
2397 +               } else if (instance->closing) {
2398 +                       vchiq_log_info(vchiq_arm_log_level,
2399 +                               "service_callback closing");
2400 +                       return VCHIQ_ERROR;
2401 +               }
2402 +               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2403 +       }
2404 +
2405 +       completion =
2406 +                &instance->completions[instance->completion_insert &
2407 +                (MAX_COMPLETIONS - 1)];
2408 +
2409 +       completion->header = header;
2410 +       completion->reason = reason;
2411 +       /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2412 +       completion->service_userdata = user_service->service;
2413 +       completion->bulk_userdata = bulk_userdata;
2414 +
2415 +       if (reason == VCHIQ_SERVICE_CLOSED)
2416 +               /* Take an extra reference, to be held until
2417 +                  this CLOSED notification is delivered. */
2418 +               lock_service(user_service->service);
2419 +
2420 +       /* A write barrier is needed here to ensure that the entire completion
2421 +               record is written out before the insert point. */
2422 +       wmb();
2423 +
2424 +       if (reason == VCHIQ_MESSAGE_AVAILABLE)
2425 +               user_service->message_available_pos =
2426 +                       instance->completion_insert;
2427 +       instance->completion_insert++;
2428 +
2429 +       up(&instance->insert_event);
2430 +
2431 +       return VCHIQ_SUCCESS;
2432 +}
2433 +
2434 +/****************************************************************************
2435 +*
2436 +*   service_callback
2437 +*
2438 +***************************************************************************/
2439 +
2440 +static VCHIQ_STATUS_T
2441 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2442 +       VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2443 +{
2444 +       /* How do we ensure the callback goes to the right client?
2445 +       ** The service_user data points to a USER_SERVICE_T record containing
2446 +       ** the original callback and the user state structure, which contains a
2447 +       ** circular buffer for completion records.
2448 +       */
2449 +       USER_SERVICE_T *user_service;
2450 +       VCHIQ_SERVICE_T *service;
2451 +       VCHIQ_INSTANCE_T instance;
2452 +       DEBUG_INITIALISE(g_state.local)
2453 +
2454 +       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2455 +
2456 +       service = handle_to_service(handle);
2457 +       BUG_ON(!service);
2458 +       user_service = (USER_SERVICE_T *)service->base.userdata;
2459 +       instance = user_service->instance;
2460 +
2461 +       if (!instance || instance->closing)
2462 +               return VCHIQ_SUCCESS;
2463 +
2464 +       vchiq_log_trace(vchiq_arm_log_level,
2465 +               "service_callback - service %lx(%d), reason %d, header %lx, "
2466 +               "instance %lx, bulk_userdata %lx",
2467 +               (unsigned long)user_service,
2468 +               service->localport,
2469 +               reason, (unsigned long)header,
2470 +               (unsigned long)instance, (unsigned long)bulk_userdata);
2471 +
2472 +       if (header && user_service->is_vchi) {
2473 +               spin_lock(&msg_queue_spinlock);
2474 +               while (user_service->msg_insert ==
2475 +                       (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2476 +                       spin_unlock(&msg_queue_spinlock);
2477 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2478 +                       DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2479 +                       vchiq_log_trace(vchiq_arm_log_level,
2480 +                               "service_callback - msg queue full");
2481 +                       /* If there is no MESSAGE_AVAILABLE in the completion
2482 +                       ** queue, add one
2483 +                       */
2484 +                       if ((user_service->message_available_pos -
2485 +                               instance->completion_remove) < 0) {
2486 +                               VCHIQ_STATUS_T status;
2487 +                               vchiq_log_info(vchiq_arm_log_level,
2488 +                                       "Inserting extra MESSAGE_AVAILABLE");
2489 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2490 +                               status = add_completion(instance, reason,
2491 +                                       NULL, user_service, bulk_userdata);
2492 +                               if (status != VCHIQ_SUCCESS) {
2493 +                                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2494 +                                       return status;
2495 +                               }
2496 +                       }
2497 +
2498 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2499 +                       if (down_interruptible(&user_service->remove_event)
2500 +                               != 0) {
2501 +                               vchiq_log_info(vchiq_arm_log_level,
2502 +                                       "service_callback interrupted");
2503 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2504 +                               return VCHIQ_RETRY;
2505 +                       } else if (instance->closing) {
2506 +                               vchiq_log_info(vchiq_arm_log_level,
2507 +                                       "service_callback closing");
2508 +                               DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2509 +                               return VCHIQ_ERROR;
2510 +                       }
2511 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2512 +                       spin_lock(&msg_queue_spinlock);
2513 +               }
2514 +
2515 +               user_service->msg_queue[user_service->msg_insert &
2516 +                       (MSG_QUEUE_SIZE - 1)] = header;
2517 +               user_service->msg_insert++;
2518 +               spin_unlock(&msg_queue_spinlock);
2519 +
2520 +               up(&user_service->insert_event);
2521 +
2522 +               /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2523 +               ** there is a MESSAGE_AVAILABLE in the completion queue then
2524 +               ** bypass the completion queue.
2525 +               */
2526 +               if (((user_service->message_available_pos -
2527 +                       instance->completion_remove) >= 0) ||
2528 +                       user_service->dequeue_pending) {
2529 +                       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2530 +                       user_service->dequeue_pending = 0;
2531 +                       return VCHIQ_SUCCESS;
2532 +               }
2533 +
2534 +               header = NULL;
2535 +       }
2536 +       DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2537 +
2538 +       return add_completion(instance, reason, header, user_service,
2539 +               bulk_userdata);
2540 +}
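     +
     +/* Roughly: for a VCHI service the header is parked in the per-service
     +** msg_queue, and a completion record is only queued when user space
     +** would not otherwise notice the message (no MESSAGE_AVAILABLE already
     +** pending and no DEQUEUE_MESSAGE waiting); for a non-VCHI service, and
     +** for all other callback reasons, everything goes via add_completion.
     +*/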
2541 +
2542 +/****************************************************************************
2543 +*
2544 +*   user_service_free
2545 +*
2546 +***************************************************************************/
2547 +static void
2548 +user_service_free(void *userdata)
2549 +{
2550 +       kfree(userdata);
2551 +}
2552 +
2553 +/****************************************************************************
2554 +*
2555 +*   vchiq_ioctl
2556 +*
2557 +***************************************************************************/
2558 +
2559 +static long
2560 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2561 +{
2562 +       VCHIQ_INSTANCE_T instance = file->private_data;
2563 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2564 +       VCHIQ_SERVICE_T *service = NULL;
2565 +       long ret = 0;
2566 +       int i, rc;
2567 +       DEBUG_INITIALISE(g_state.local)
2568 +
2569 +       vchiq_log_trace(vchiq_arm_log_level,
2570 +                "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2571 +               (unsigned int)instance,
2572 +               ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2573 +               (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2574 +               ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2575 +
2576 +       switch (cmd) {
2577 +       case VCHIQ_IOC_SHUTDOWN:
2578 +               if (!instance->connected)
2579 +                       break;
2580 +
2581 +               /* Remove all services */
2582 +               i = 0;
2583 +               while ((service = next_service_by_instance(instance->state,
2584 +                       instance, &i)) != NULL) {
2585 +                       status = vchiq_remove_service(service->handle);
2586 +                       unlock_service(service);
2587 +                       if (status != VCHIQ_SUCCESS)
2588 +                               break;
2589 +               }
2590 +               service = NULL;
2591 +
2592 +               if (status == VCHIQ_SUCCESS) {
2593 +                       /* Wake the completion thread and ask it to exit */
2594 +                       instance->closing = 1;
2595 +                       up(&instance->insert_event);
2596 +               }
2597 +
2598 +               break;
2599 +
2600 +       case VCHIQ_IOC_CONNECT:
2601 +               if (instance->connected) {
2602 +                       ret = -EINVAL;
2603 +                       break;
2604 +               }
2605 +               rc = mutex_lock_interruptible(&instance->state->mutex);
2606 +               if (rc != 0) {
2607 +                       vchiq_log_error(vchiq_arm_log_level,
2608 +                               "vchiq: connect: could not lock mutex for "
2609 +                               "state %d: %d",
2610 +                               instance->state->id, rc);
2611 +                       ret = -EINTR;
2612 +                       break;
2613 +               }
2614 +               status = vchiq_connect_internal(instance->state, instance);
2615 +               mutex_unlock(&instance->state->mutex);
2616 +
2617 +               if (status == VCHIQ_SUCCESS)
2618 +                       instance->connected = 1;
2619 +               else
2620 +                       vchiq_log_error(vchiq_arm_log_level,
2621 +                               "vchiq: could not connect: %d", status);
2622 +               break;
2623 +
2624 +       case VCHIQ_IOC_CREATE_SERVICE: {
2625 +               VCHIQ_CREATE_SERVICE_T args;
2626 +               USER_SERVICE_T *user_service = NULL;
2627 +               void *userdata;
2628 +               int srvstate;
2629 +
2630 +               if (copy_from_user
2631 +                        (&args, (const void __user *)arg,
2632 +                         sizeof(args)) != 0) {
2633 +                       ret = -EFAULT;
2634 +                       break;
2635 +               }
2636 +
2637 +               user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2638 +               if (!user_service) {
2639 +                       ret = -ENOMEM;
2640 +                       break;
2641 +               }
2642 +
2643 +               if (args.is_open) {
2644 +                       if (!instance->connected) {
2645 +                               ret = -ENOTCONN;
2646 +                               kfree(user_service);
2647 +                               break;
2648 +                       }
2649 +                       srvstate = VCHIQ_SRVSTATE_OPENING;
2650 +               } else {
2651 +                       srvstate =
2652 +                                instance->connected ?
2653 +                                VCHIQ_SRVSTATE_LISTENING :
2654 +                                VCHIQ_SRVSTATE_HIDDEN;
2655 +               }
2656 +
2657 +               userdata = args.params.userdata;
2658 +               args.params.callback = service_callback;
2659 +               args.params.userdata = user_service;
2660 +               service = vchiq_add_service_internal(
2661 +                               instance->state,
2662 +                               &args.params, srvstate,
2663 +                               instance, user_service_free);
2664 +
2665 +               if (service != NULL) {
2666 +                       user_service->service = service;
2667 +                       user_service->userdata = userdata;
2668 +                       user_service->instance = instance;
2669 +                       user_service->is_vchi = args.is_vchi;
2670 +                       user_service->dequeue_pending = 0;
2671 +                       user_service->message_available_pos =
2672 +                               instance->completion_remove - 1;
2673 +                       user_service->msg_insert = 0;
2674 +                       user_service->msg_remove = 0;
2675 +                       sema_init(&user_service->insert_event, 0);
2676 +                       sema_init(&user_service->remove_event, 0);
2677 +
2678 +                       if (args.is_open) {
2679 +                               status = vchiq_open_service_internal
2680 +                                       (service, instance->pid);
2681 +                               if (status != VCHIQ_SUCCESS) {
2682 +                                       vchiq_remove_service(service->handle);
2683 +                                       service = NULL;
2684 +                                       ret = (status == VCHIQ_RETRY) ?
2685 +                                               -EINTR : -EIO;
2686 +                                       break;
2687 +                               }
2688 +                       }
2689 +
2690 +                       if (copy_to_user((void __user *)
2691 +                               &(((VCHIQ_CREATE_SERVICE_T __user *)
2692 +                                       arg)->handle),
2693 +                               (const void *)&service->handle,
2694 +                               sizeof(service->handle)) != 0) {
2695 +                               ret = -EFAULT;
2696 +                               vchiq_remove_service(service->handle);
2697 +                       }
2698 +
2699 +                       service = NULL;
2700 +               } else {
2701 +                       ret = -EEXIST;
2702 +                       kfree(user_service);
2703 +               }
2704 +       } break;
2705 +
2706 +       case VCHIQ_IOC_CLOSE_SERVICE: {
2707 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2708 +
2709 +               service = find_service_for_instance(instance, handle);
2710 +               if (service != NULL)
2711 +                       status = vchiq_close_service(service->handle);
2712 +               else
2713 +                       ret = -EINVAL;
2714 +       } break;
2715 +
2716 +       case VCHIQ_IOC_REMOVE_SERVICE: {
2717 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2718 +
2719 +               service = find_service_for_instance(instance, handle);
2720 +               if (service != NULL)
2721 +                       status = vchiq_remove_service(service->handle);
2722 +               else
2723 +                       ret = -EINVAL;
2724 +       } break;
2725 +
2726 +       case VCHIQ_IOC_USE_SERVICE:
2727 +       case VCHIQ_IOC_RELEASE_SERVICE: {
2728 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2729 +
2730 +               service = find_service_for_instance(instance, handle);
2731 +               if (service != NULL) {
2732 +                       status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2733 +                               vchiq_use_service_internal(service) :
2734 +                               vchiq_release_service_internal(service);
2735 +                       if (status != VCHIQ_SUCCESS) {
2736 +                               vchiq_log_error(vchiq_susp_log_level,
2737 +                                       "%s: cmd %s returned error %d for "
2738 +                                       "service %c%c%c%c:%03d",
2739 +                                       __func__,
2740 +                                       (cmd == VCHIQ_IOC_USE_SERVICE) ?
2741 +                                               "VCHIQ_IOC_USE_SERVICE" :
2742 +                                               "VCHIQ_IOC_RELEASE_SERVICE",
2743 +                                       status,
2744 +                                       VCHIQ_FOURCC_AS_4CHARS(
2745 +                                               service->base.fourcc),
2746 +                                       service->client_id);
2747 +                               ret = -EINVAL;
2748 +                       }
2749 +               } else
2750 +                       ret = -EINVAL;
2751 +       } break;
2752 +
2753 +       case VCHIQ_IOC_QUEUE_MESSAGE: {
2754 +               VCHIQ_QUEUE_MESSAGE_T args;
2755 +               if (copy_from_user
2756 +                        (&args, (const void __user *)arg,
2757 +                         sizeof(args)) != 0) {
2758 +                       ret = -EFAULT;
2759 +                       break;
2760 +               }
2761 +
2762 +               service = find_service_for_instance(instance, args.handle);
2763 +
2764 +               if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2765 +                       /* Copy elements into kernel space */
2766 +                       VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2767 +                       if (copy_from_user(elements, args.elements,
2768 +                               args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2769 +                               status = vchiq_queue_message
2770 +                                       (args.handle,
2771 +                                       elements, args.count);
2772 +                       else
2773 +                               ret = -EFAULT;
2774 +               } else {
2775 +                       ret = -EINVAL;
2776 +               }
2777 +       } break;
2778 +
2779 +       case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2780 +       case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2781 +               VCHIQ_QUEUE_BULK_TRANSFER_T args;
2782 +               struct bulk_waiter_node *waiter = NULL;
2783 +               VCHIQ_BULK_DIR_T dir =
2784 +                       (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2785 +                       VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2786 +
2787 +               if (copy_from_user
2788 +                       (&args, (const void __user *)arg,
2789 +                       sizeof(args)) != 0) {
2790 +                       ret = -EFAULT;
2791 +                       break;
2792 +               }
2793 +
2794 +               service = find_service_for_instance(instance, args.handle);
2795 +               if (!service) {
2796 +                       ret = -EINVAL;
2797 +                       break;
2798 +               }
2799 +
2800 +               if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2801 +                       waiter = kzalloc(sizeof(struct bulk_waiter_node),
2802 +                               GFP_KERNEL);
2803 +                       if (!waiter) {
2804 +                               ret = -ENOMEM;
2805 +                               break;
2806 +                       }
2807 +                       args.userdata = &waiter->bulk_waiter;
2808 +               } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
2809 +                       struct list_head *pos;
2810 +                       mutex_lock(&instance->bulk_waiter_list_mutex);
2811 +                       list_for_each(pos, &instance->bulk_waiter_list) {
2812 +                               if (list_entry(pos, struct bulk_waiter_node,
2813 +                                       list)->pid == current->pid) {
2814 +                                       waiter = list_entry(pos,
2815 +                                               struct bulk_waiter_node,
2816 +                                               list);
2817 +                                       list_del(pos);
2818 +                                       break;
2819 +                               }
2820 +
2821 +                       }
2822 +                       mutex_unlock(&instance->bulk_waiter_list_mutex);
2823 +                       if (!waiter) {
2824 +                               vchiq_log_error(vchiq_arm_log_level,
2825 +                                       "no bulk_waiter found for pid %d",
2826 +                                       current->pid);
2827 +                               ret = -ESRCH;
2828 +                               break;
2829 +                       }
2830 +                       vchiq_log_info(vchiq_arm_log_level,
2831 +                               "found bulk_waiter %x for pid %d",
2832 +                               (unsigned int)waiter, current->pid);
2833 +                       args.userdata = &waiter->bulk_waiter;
2834 +               }
2835 +               status = vchiq_bulk_transfer
2836 +                       (args.handle,
2837 +                        VCHI_MEM_HANDLE_INVALID,
2838 +                        args.data, args.size,
2839 +                        args.userdata, args.mode,
2840 +                        dir);
2841 +               if (!waiter)
2842 +                       break;
2843 +               if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
2844 +                       !waiter->bulk_waiter.bulk) {
2845 +                       if (waiter->bulk_waiter.bulk) {
2846 +                               /* Cancel the signal when the transfer
2847 +                               ** completes. */
2848 +                               spin_lock(&bulk_waiter_spinlock);
2849 +                               waiter->bulk_waiter.bulk->userdata = NULL;
2850 +                               spin_unlock(&bulk_waiter_spinlock);
2851 +                       }
2852 +                       kfree(waiter);
2853 +               } else {
2854 +                       const VCHIQ_BULK_MODE_T mode_waiting =
2855 +                               VCHIQ_BULK_MODE_WAITING;
2856 +                       waiter->pid = current->pid;
2857 +                       mutex_lock(&instance->bulk_waiter_list_mutex);
2858 +                       list_add(&waiter->list, &instance->bulk_waiter_list);
2859 +                       mutex_unlock(&instance->bulk_waiter_list_mutex);
2860 +                       vchiq_log_info(vchiq_arm_log_level,
2861 +                               "saved bulk_waiter %x for pid %d",
2862 +                               (unsigned int)waiter, current->pid);
2863 +
2864 +                       if (copy_to_user((void __user *)
2865 +                               &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
2866 +                                       arg)->mode),
2867 +                               (const void *)&mode_waiting,
2868 +                               sizeof(mode_waiting)) != 0)
2869 +                               ret = -EFAULT;
2870 +               }
2871 +       } break;
2872 +
2873 +       case VCHIQ_IOC_AWAIT_COMPLETION: {
2874 +               VCHIQ_AWAIT_COMPLETION_T args;
2875 +
2876 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2877 +               if (!instance->connected) {
2878 +                       ret = -ENOTCONN;
2879 +                       break;
2880 +               }
2881 +
2882 +               if (copy_from_user(&args, (const void __user *)arg,
2883 +                       sizeof(args)) != 0) {
2884 +                       ret = -EFAULT;
2885 +                       break;
2886 +               }
2887 +
2888 +               mutex_lock(&instance->completion_mutex);
2889 +
2890 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2891 +               while ((instance->completion_remove ==
2892 +                       instance->completion_insert)
2893 +                       && !instance->closing) {
2894 +                       int rc;
2895 +                       DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2896 +                       mutex_unlock(&instance->completion_mutex);
2897 +                       rc = down_interruptible(&instance->insert_event);
2898 +                       mutex_lock(&instance->completion_mutex);
2899 +                       if (rc != 0) {
2900 +                               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2901 +                               vchiq_log_info(vchiq_arm_log_level,
2902 +                                       "AWAIT_COMPLETION interrupted");
2903 +                               ret = -EINTR;
2904 +                               break;
2905 +                       }
2906 +               }
2907 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2908 +
2909 +               /* A read memory barrier is needed to stop prefetch of a stale
2910 +               ** completion record
2911 +               */
2912 +               rmb();
2913 +
2914 +               if (ret == 0) {
2915 +                       int msgbufcount = args.msgbufcount;
2916 +                       for (ret = 0; ret < args.count; ret++) {
2917 +                               VCHIQ_COMPLETION_DATA_T *completion;
2918 +                               VCHIQ_SERVICE_T *service;
2919 +                               USER_SERVICE_T *user_service;
2920 +                               VCHIQ_HEADER_T *header;
2921 +                               if (instance->completion_remove ==
2922 +                                       instance->completion_insert)
2923 +                                       break;
2924 +                               completion = &instance->completions[
2925 +                                       instance->completion_remove &
2926 +                                       (MAX_COMPLETIONS - 1)];
2927 +
2928 +                               service = completion->service_userdata;
2929 +                               user_service = service->base.userdata;
2930 +                               completion->service_userdata =
2931 +                                       user_service->userdata;
2932 +
2933 +                               header = completion->header;
2934 +                               if (header) {
2935 +                                       void __user *msgbuf;
2936 +                                       int msglen;
2937 +
2938 +                                       msglen = header->size +
2939 +                                               sizeof(VCHIQ_HEADER_T);
2940 +                                       /* This must be a VCHIQ-style service */
2941 +                                       if (args.msgbufsize < msglen) {
2942 +                                               vchiq_log_error(
2943 +                                                       vchiq_arm_log_level,
2944 +                                                       "header %x: msgbufsize"
2945 +                                                       " %x < msglen %x",
2946 +                                                       (unsigned int)header,
2947 +                                                       args.msgbufsize,
2948 +                                                       msglen);
2949 +                                               WARN(1, "invalid message "
2950 +                                                       "size\n");
2951 +                                               if (ret == 0)
2952 +                                                       ret = -EMSGSIZE;
2953 +                                               break;
2954 +                                       }
2955 +                                       if (msgbufcount <= 0)
2956 +                                               /* Stall here for lack of a
2957 +                                               ** buffer for the message. */
2958 +                                               break;
2959 +                                       /* Get the pointer from user space */
2960 +                                       msgbufcount--;
2961 +                                       if (copy_from_user(&msgbuf,
2962 +                                               (const void __user *)
2963 +                                               &args.msgbufs[msgbufcount],
2964 +                                               sizeof(msgbuf)) != 0) {
2965 +                                               if (ret == 0)
2966 +                                                       ret = -EFAULT;
2967 +                                               break;
2968 +                                       }
2969 +
2970 +                                       /* Copy the message to user space */
2971 +                                       if (copy_to_user(msgbuf, header,
2972 +                                               msglen) != 0) {
2973 +                                               if (ret == 0)
2974 +                                                       ret = -EFAULT;
2975 +                                               break;
2976 +                                       }
2977 +
2978 +                                       /* Now it has been copied, the message
2979 +                                       ** can be released. */
2980 +                                       vchiq_release_message(service->handle,
2981 +                                               header);
2982 +
2983 +                                       /* The completion must point to the
2984 +                                       ** msgbuf. */
2985 +                                       completion->header = msgbuf;
2986 +                               }
2987 +
2988 +                               if (completion->reason ==
2989 +                                       VCHIQ_SERVICE_CLOSED)
2990 +                                       unlock_service(service);
2991 +
2992 +                               if (copy_to_user((void __user *)(
2993 +                                       (size_t)args.buf +
2994 +                                       ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
2995 +                                       completion,
2996 +                                       sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
2997 +                                               if (ret == 0)
2998 +                                                       ret = -EFAULT;
2999 +                                       break;
3000 +                               }
3001 +
3002 +                               instance->completion_remove++;
3003 +                       }
3004 +
3005 +                       if (msgbufcount != args.msgbufcount) {
3006 +                               if (copy_to_user((void __user *)
3007 +                                       &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3008 +                                               msgbufcount,
3009 +                                       &msgbufcount,
3010 +                                       sizeof(msgbufcount)) != 0) {
3011 +                                       ret = -EFAULT;
3012 +                               }
3013 +                       }
3014 +               }
3015 +
3016 +               if (ret != 0)
3017 +                       up(&instance->remove_event);
3018 +               mutex_unlock(&instance->completion_mutex);
3019 +               DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3020 +       } break;
3021 +
3022 +       case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3023 +               VCHIQ_DEQUEUE_MESSAGE_T args;
3024 +               USER_SERVICE_T *user_service;
3025 +               VCHIQ_HEADER_T *header;
3026 +
3027 +               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3028 +               if (copy_from_user
3029 +                        (&args, (const void __user *)arg,
3030 +                         sizeof(args)) != 0) {
3031 +                       ret = -EFAULT;
3032 +                       break;
3033 +               }
3034 +               service = find_service_for_instance(instance, args.handle);
3035 +               if (!service) {
3036 +                       ret = -EINVAL;
3037 +                       break;
3038 +               }
3039 +               user_service = (USER_SERVICE_T *)service->base.userdata;
3040 +               if (user_service->is_vchi == 0) {
3041 +                       ret = -EINVAL;
3042 +                       break;
3043 +               }
3044 +
3045 +               spin_lock(&msg_queue_spinlock);
3046 +               if (user_service->msg_remove == user_service->msg_insert) {
3047 +                       if (!args.blocking) {
3048 +                               spin_unlock(&msg_queue_spinlock);
3049 +                               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3050 +                               ret = -EWOULDBLOCK;
3051 +                               break;
3052 +                       }
3053 +                       user_service->dequeue_pending = 1;
3054 +                       do {
3055 +                               spin_unlock(&msg_queue_spinlock);
3056 +                               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3057 +                               if (down_interruptible(
3058 +                                       &user_service->insert_event) != 0) {
3059 +                                       vchiq_log_info(vchiq_arm_log_level,
3060 +                                               "DEQUEUE_MESSAGE interrupted");
3061 +                                       ret = -EINTR;
3062 +                                       break;
3063 +                               }
3064 +                               spin_lock(&msg_queue_spinlock);
3065 +                       } while (user_service->msg_remove ==
3066 +                               user_service->msg_insert);
3067 +
3068 +                       if (ret)
3069 +                               break;
3070 +               }
3071 +
3072 +               BUG_ON((int)(user_service->msg_insert -
3073 +                       user_service->msg_remove) < 0);
3074 +
3075 +               header = user_service->msg_queue[user_service->msg_remove &
3076 +                       (MSG_QUEUE_SIZE - 1)];
3077 +               user_service->msg_remove++;
3078 +               spin_unlock(&msg_queue_spinlock);
3079 +
3080 +               up(&user_service->remove_event);
3081 +               if (header == NULL)
3082 +                       ret = -ENOTCONN;
3083 +               else if (header->size <= args.bufsize) {
3084 +                       /* Copy to user space if msgbuf is not NULL */
3085 +                       if ((args.buf == NULL) ||
3086 +                               (copy_to_user((void __user *)args.buf,
3087 +                               header->data,
3088 +                               header->size) == 0)) {
3089 +                               ret = header->size;
3090 +                               vchiq_release_message(
3091 +                                       service->handle,
3092 +                                       header);
3093 +                       } else
3094 +                               ret = -EFAULT;
3095 +               } else {
3096 +                       vchiq_log_error(vchiq_arm_log_level,
3097 +                               "header %x: bufsize %x < size %x",
3098 +                               (unsigned int)header, args.bufsize,
3099 +                               header->size);
3100 +                       WARN(1, "invalid size\n");
3101 +                       ret = -EMSGSIZE;
3102 +               }
3103 +               DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3104 +       } break;
3105 +
3106 +       case VCHIQ_IOC_GET_CLIENT_ID: {
3107 +               VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3108 +
3109 +               ret = vchiq_get_client_id(handle);
3110 +       } break;
3111 +
3112 +       case VCHIQ_IOC_GET_CONFIG: {
3113 +               VCHIQ_GET_CONFIG_T args;
3114 +               VCHIQ_CONFIG_T config;
3115 +
3116 +               if (copy_from_user(&args, (const void __user *)arg,
3117 +                       sizeof(args)) != 0) {
3118 +                       ret = -EFAULT;
3119 +                       break;
3120 +               }
3121 +               if (args.config_size > sizeof(config)) {
3122 +                       ret = -EINVAL;
3123 +                       break;
3124 +               }
3125 +               status = vchiq_get_config(instance, args.config_size, &config);
3126 +               if (status == VCHIQ_SUCCESS) {
3127 +                       if (copy_to_user((void __user *)args.pconfig,
3128 +                                   &config, args.config_size) != 0) {
3129 +                               ret = -EFAULT;
3130 +                               break;
3131 +                       }
3132 +               }
3133 +       } break;
3134 +
3135 +       case VCHIQ_IOC_SET_SERVICE_OPTION: {
3136 +               VCHIQ_SET_SERVICE_OPTION_T args;
3137 +
3138 +               if (copy_from_user(
3139 +                       &args, (const void __user *)arg,
3140 +                       sizeof(args)) != 0) {
3141 +                       ret = -EFAULT;
3142 +                       break;
3143 +               }
3144 +
3145 +               service = find_service_for_instance(instance, args.handle);
3146 +               if (!service) {
3147 +                       ret = -EINVAL;
3148 +                       break;
3149 +               }
3150 +
3151 +               status = vchiq_set_service_option(
3152 +                               args.handle, args.option, args.value);
3153 +       } break;
3154 +
3155 +       case VCHIQ_IOC_DUMP_PHYS_MEM: {
3156 +               VCHIQ_DUMP_MEM_T  args;
3157 +
3158 +               if (copy_from_user
3159 +                        (&args, (const void __user *)arg,
3160 +                         sizeof(args)) != 0) {
3161 +                       ret = -EFAULT;
3162 +                       break;
3163 +               }
3164 +               dump_phys_mem(args.virt_addr, args.num_bytes);
3165 +       } break;
3166 +
3167 +       default:
3168 +               ret = -ENOTTY;
3169 +               break;
3170 +       }
3171 +
3172 +       if (service)
3173 +               unlock_service(service);
3174 +
3175 +       if (ret == 0) {
3176 +               if (status == VCHIQ_ERROR)
3177 +                       ret = -EIO;
3178 +               else if (status == VCHIQ_RETRY)
3179 +                       ret = -EINTR;
3180 +       }
3181 +
3182 +       if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3183 +               (ret != -EWOULDBLOCK))
3184 +               vchiq_log_info(vchiq_arm_log_level,
3185 +                       "  ioctl instance %lx, cmd %s -> status %d, %ld",
3186 +                       (unsigned long)instance,
3187 +                       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3188 +                               ioctl_names[_IOC_NR(cmd)] :
3189 +                               "<invalid>",
3190 +                       status, ret);
3191 +       else
3192 +               vchiq_log_trace(vchiq_arm_log_level,
3193 +                       "  ioctl instance %lx, cmd %s -> status %d, %ld",
3194 +                       (unsigned long)instance,
3195 +                       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3196 +                               ioctl_names[_IOC_NR(cmd)] :
3197 +                               "<invalid>",
3198 +                       status, ret);
3199 +
3200 +       return ret;
3201 +}
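     +
     +/* A minimal sketch of how a user-space client might drive this ioctl
     +** interface (the device node name is an assumption here; create_args,
     +** queue_args and await_args stand for the VCHIQ_CREATE_SERVICE_T,
     +** VCHIQ_QUEUE_MESSAGE_T and VCHIQ_AWAIT_COMPLETION_T structures
     +** handled above):
     +**
     +**   fd = open("/dev/vchiq", O_RDWR);
     +**   ioctl(fd, VCHIQ_IOC_CONNECT, 0);
     +**   ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &create_args);
     +**   ioctl(fd, VCHIQ_IOC_QUEUE_MESSAGE, &queue_args);
     +**   ioctl(fd, VCHIQ_IOC_AWAIT_COMPLETION, &await_args);
     +**
     +** CREATE_SERVICE writes the new service handle back into create_args;
     +** a dedicated thread typically loops on AWAIT_COMPLETION while other
     +** threads queue messages and bulk transfers.
     +*/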
3202 +
3203 +/****************************************************************************
3204 +*
3205 +*   vchiq_open
3206 +*
3207 +***************************************************************************/
3208 +
3209 +static int
3210 +vchiq_open(struct inode *inode, struct file *file)
3211 +{
3212 +       int dev = iminor(inode) & 0x0f;
3213 +       vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3214 +       switch (dev) {
3215 +       case VCHIQ_MINOR: {
3216 +               int ret;
3217 +               VCHIQ_STATE_T *state = vchiq_get_state();
3218 +               VCHIQ_INSTANCE_T instance;
3219 +
3220 +               if (!state) {
3221 +                       vchiq_log_error(vchiq_arm_log_level,
3222 +                               "vchiq has no connection to VideoCore");
3223 +                       return -ENOTCONN;
3224 +               }
3225 +
3226 +               instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3227 +               if (!instance)
3228 +                       return -ENOMEM;
3229 +
3230 +               instance->state = state;
3231 +               instance->pid = current->tgid;
3232 +
3233 +               ret = vchiq_proc_add_instance(instance);
3234 +               if (ret != 0) {
3235 +                       kfree(instance);
3236 +                       return ret;
3237 +               }
3238 +
3239 +               sema_init(&instance->insert_event, 0);
3240 +               sema_init(&instance->remove_event, 0);
3241 +               mutex_init(&instance->completion_mutex);
3242 +               mutex_init(&instance->bulk_waiter_list_mutex);
3243 +               INIT_LIST_HEAD(&instance->bulk_waiter_list);
3244 +
3245 +               file->private_data = instance;
3246 +       } break;
3247 +
3248 +       default:
3249 +               vchiq_log_error(vchiq_arm_log_level,
3250 +                       "Unknown minor device: %d", dev);
3251 +               return -ENXIO;
3252 +       }
3253 +
3254 +       return 0;
3255 +}
3256 +
3257 +/****************************************************************************
3258 +*
3259 +*   vchiq_release
3260 +*
3261 +***************************************************************************/
3262 +
3263 +static int
3264 +vchiq_release(struct inode *inode, struct file *file)
3265 +{
3266 +       int dev = iminor(inode) & 0x0f;
3267 +       int ret = 0;
3268 +       switch (dev) {
3269 +       case VCHIQ_MINOR: {
3270 +               VCHIQ_INSTANCE_T instance = file->private_data;
3271 +               VCHIQ_STATE_T *state = vchiq_get_state();
3272 +               VCHIQ_SERVICE_T *service;
3273 +               int i;
3274 +
3275 +               vchiq_log_info(vchiq_arm_log_level,
3276 +                       "vchiq_release: instance=%lx",
3277 +                       (unsigned long)instance);
3278 +
3279 +               if (!state) {
3280 +                       ret = -EPERM;
3281 +                       goto out;
3282 +               }
3283 +
3284 +               /* Ensure videocore is awake to allow termination. */
3285 +               vchiq_use_internal(instance->state, NULL,
3286 +                               USE_TYPE_VCHIQ);
3287 +
3288 +               mutex_lock(&instance->completion_mutex);
3289 +
3290 +               /* Wake the completion thread and ask it to exit */
3291 +               instance->closing = 1;
3292 +               up(&instance->insert_event);
3293 +
3294 +               mutex_unlock(&instance->completion_mutex);
3295 +
3296 +               /* Wake the slot handler if the completion queue is full. */
3297 +               up(&instance->remove_event);
3298 +
3299 +               /* Mark all services for termination... */
3300 +               i = 0;
3301 +               while ((service = next_service_by_instance(state, instance,
3302 +                       &i)) != NULL) {
3303 +                       USER_SERVICE_T *user_service = service->base.userdata;
3304 +
3305 +                       /* Wake the slot handler if the msg queue is full. */
3306 +                       up(&user_service->remove_event);
3307 +
3308 +                       vchiq_terminate_service_internal(service);
3309 +                       unlock_service(service);
3310 +               }
3311 +
3312 +               /* ...and wait for them to die */
3313 +               i = 0;
3314 +               while ((service = next_service_by_instance(state, instance, &i))
3315 +                       != NULL) {
3316 +                       USER_SERVICE_T *user_service = service->base.userdata;
3317 +
3318 +                       down(&service->remove_event);
3319 +
3320 +                       BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3321 +
3322 +                       spin_lock(&msg_queue_spinlock);
3323 +
3324 +                       while (user_service->msg_remove !=
3325 +                               user_service->msg_insert) {
3326 +                               VCHIQ_HEADER_T *header = user_service->
3327 +                                       msg_queue[user_service->msg_remove &
3328 +                                               (MSG_QUEUE_SIZE - 1)];
3329 +                               user_service->msg_remove++;
3330 +                               spin_unlock(&msg_queue_spinlock);
3331 +
3332 +                               if (header)
3333 +                                       vchiq_release_message(
3334 +                                               service->handle,
3335 +                                               header);
3336 +                               spin_lock(&msg_queue_spinlock);
3337 +                       }
3338 +
3339 +                       spin_unlock(&msg_queue_spinlock);
3340 +
3341 +                       unlock_service(service);
3342 +               }
3343 +
3344 +               /* Release any closed services */
3345 +               while (instance->completion_remove !=
3346 +                       instance->completion_insert) {
3347 +                       VCHIQ_COMPLETION_DATA_T *completion;
3348 +                       VCHIQ_SERVICE_T *service;
3349 +                       completion = &instance->completions[
3350 +                               instance->completion_remove &
3351 +                               (MAX_COMPLETIONS - 1)];
3352 +                       service = completion->service_userdata;
3353 +                       if (completion->reason == VCHIQ_SERVICE_CLOSED)
3354 +                               unlock_service(service);
3355 +                       instance->completion_remove++;
3356 +               }
3357 +
3358 +               /* Release the PEER service count. */
3359 +               vchiq_release_internal(instance->state, NULL);
3360 +
3361 +               {
3362 +                       struct list_head *pos, *next;
3363 +                       list_for_each_safe(pos, next,
3364 +                               &instance->bulk_waiter_list) {
3365 +                               struct bulk_waiter_node *waiter;
3366 +                               waiter = list_entry(pos,
3367 +                                       struct bulk_waiter_node,
3368 +                                       list);
3369 +                               list_del(pos);
3370 +                               vchiq_log_info(vchiq_arm_log_level,
3371 +                                       "bulk_waiter - cleaned up %x "
3372 +                                       "for pid %d",
3373 +                                       (unsigned int)waiter, waiter->pid);
3374 +                               kfree(waiter);
3375 +                       }
3376 +               }
3377 +
3378 +               vchiq_proc_remove_instance(instance);
3379 +
3380 +               kfree(instance);
3381 +               file->private_data = NULL;
3382 +       } break;
3383 +
3384 +       default:
3385 +               vchiq_log_error(vchiq_arm_log_level,
3386 +                       "Unknown minor device: %d", dev);
3387 +               ret = -ENXIO;
3388 +       }
3389 +
3390 +out:
3391 +       return ret;
3392 +}
3393 +
3394 +/****************************************************************************
3395 +*
3396 +*   vchiq_dump
3397 +*
3398 +***************************************************************************/
3399 +
3400 +void
3401 +vchiq_dump(void *dump_context, const char *str, int len)
3402 +{
3403 +       DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3404 +
3405 +       if (context->actual < context->space) {
3406 +               int copy_bytes;
3407 +               if (context->offset > 0) {
3408 +                       int skip_bytes = min(len, (int)context->offset);
3409 +                       str += skip_bytes;
3410 +                       len -= skip_bytes;
3411 +                       context->offset -= skip_bytes;
3412 +                       if (context->offset > 0)
3413 +                               return;
3414 +               }
3415 +               copy_bytes = min(len, (int)(context->space - context->actual));
3416 +               if (copy_bytes == 0)
3417 +                       return;
3418 +               if (copy_to_user(context->buf + context->actual, str,
3419 +                       copy_bytes)) {
3420 +                       context->actual = -EFAULT;
     +                       return;
     +               }
3421 +               context->actual += copy_bytes;
3422 +               len -= copy_bytes;
3423 +
3424 +               /* If the terminating NUL is included in the length, then it
3425 +               ** marks the end of a line and should be replaced with a
3426 +               ** newline character. */
3427 +               if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3428 +                       char cr = '\n';
3429 +                       if (copy_to_user(context->buf + context->actual - 1,
3430 +                               &cr, 1))
3431 +                               context->actual = -EFAULT;
3432 +               }
3433 +       }
3434 +}
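     +
     +/* In other words, each call appends to a window of the caller's read()
     +** buffer: 'offset' consumes bytes already returned by earlier reads,
     +** 'space' is the buffer size and 'actual' the bytes copied so far, so
     +** repeated reads page through the full dump.
     +*/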
3435 +
3436 +/****************************************************************************
3437 +*
3438 +*   vchiq_dump_platform_instances
3439 +*
3440 +***************************************************************************/
3441 +
3442 +void
3443 +vchiq_dump_platform_instances(void *dump_context)
3444 +{
3445 +       VCHIQ_STATE_T *state = vchiq_get_state();
3446 +       char buf[80];
3447 +       int len;
3448 +       int i;
3449 +
3450 +       /* There is no list of instances, so instead scan all services,
3451 +               marking those that have been dumped. */
3452 +
3453 +       for (i = 0; i < state->unused_service; i++) {
3454 +               VCHIQ_SERVICE_T *service = state->services[i];
3455 +               VCHIQ_INSTANCE_T instance;
3456 +
3457 +               if (service && (service->base.callback == service_callback)) {
3458 +                       instance = service->instance;
3459 +                       if (instance)
3460 +                               instance->mark = 0;
3461 +               }
3462 +       }
3463 +
3464 +       for (i = 0; i < state->unused_service; i++) {
3465 +               VCHIQ_SERVICE_T *service = state->services[i];
3466 +               VCHIQ_INSTANCE_T instance;
3467 +
3468 +               if (service && (service->base.callback == service_callback)) {
3469 +                       instance = service->instance;
3470 +                       if (instance && !instance->mark) {
3471 +                               len = snprintf(buf, sizeof(buf),
3472 +                                       "Instance %x: pid %d,%s completions "
3473 +                                               "%d/%d",
3474 +                                       (unsigned int)instance, instance->pid,
3475 +                                       instance->connected ? " connected, " :
3476 +                                               "",
3477 +                                       instance->completion_insert -
3478 +                                               instance->completion_remove,
3479 +                                       MAX_COMPLETIONS);
3480 +
3481 +                               vchiq_dump(dump_context, buf, len + 1);
3482 +
3483 +                               instance->mark = 1;
3484 +                       }
3485 +               }
3486 +       }
3487 +}
3488 +
3489 +/****************************************************************************
3490 +*
3491 +*   vchiq_dump_platform_service_state
3492 +*
3493 +***************************************************************************/
3494 +
3495 +void
3496 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3497 +{
3498 +       USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3499 +       char buf[80];
3500 +       int len;
3501 +
3502 +       len = snprintf(buf, sizeof(buf), "  instance %x",
3503 +               (unsigned int)service->instance);
3504 +
3505 +       if ((service->base.callback == service_callback) &&
3506 +               user_service->is_vchi) {
3507 +               len += snprintf(buf + len, sizeof(buf) - len,
3508 +                       ", %d/%d messages",
3509 +                       user_service->msg_insert - user_service->msg_remove,
3510 +                       MSG_QUEUE_SIZE);
3511 +
3512 +               if (user_service->dequeue_pending)
3513 +                       len += snprintf(buf + len, sizeof(buf) - len,
3514 +                               " (dequeue pending)");
3515 +       }
3516 +
3517 +       vchiq_dump(dump_context, buf, len + 1);
3518 +}
3519 +
3520 +/****************************************************************************
3521 +*
3522 +*   dump_phys_mem
3523 +*
3524 +***************************************************************************/
3525 +
3526 +static void
3527 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3528 +{
3529 +       int            rc;
3530 +       uint8_t       *end_virt_addr = virt_addr + num_bytes;
3531 +       int            num_pages;
3532 +       int            offset;
3533 +       int            end_offset;
3534 +       int            page_idx;
3535 +       int            prev_idx;
3536 +       struct page   *page;
3537 +       struct page  **pages;
3538 +       uint8_t       *kmapped_virt_ptr;
3539 +
3540 +       /* Align virt_addr and end_virt_addr to 16 byte boundaries. */
3541 +
3542 +       virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3543 +       end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3544 +               ~0x0fuL);
3545 +
3546 +       offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3547 +       end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3548 +
3549 +       num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3550 +
3551 +       pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3552 +       if (pages == NULL) {
3553 +               vchiq_log_error(vchiq_arm_log_level,
3554 +                       "Unable to allocate memory for %d pages\n",
3555 +                       num_pages);
3556 +               return;
3557 +       }
3558 +
3559 +       down_read(&current->mm->mmap_sem);
3560 +       rc = get_user_pages(current,      /* task */
3561 +               current->mm,              /* mm */
3562 +               (unsigned long)virt_addr, /* start */
3563 +               num_pages,                /* len */
3564 +               0,                        /* write */
3565 +               0,                        /* force */
3566 +               pages,                    /* pages (array of page pointers) */
3567 +               NULL);                    /* vmas */
3568 +       up_read(&current->mm->mmap_sem);
3569 +
3570 +       prev_idx = -1;
3571 +       page = NULL;
3572 +
3573 +       while (offset < end_offset) {
3574 +
3575 +               int page_offset = offset % PAGE_SIZE;
3576 +               page_idx = offset / PAGE_SIZE;
3577 +
3578 +               if (page_idx != prev_idx) {
3579 +
3580 +                       if (page != NULL)
3581 +                               kunmap(page);
3582 +                       page = pages[page_idx];
3583 +                       kmapped_virt_ptr = kmap(page);
3584 +
3585 +                       prev_idx = page_idx;
3586 +               }
3587 +
3588 +               if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3589 +                       vchiq_log_dump_mem("ph",
3590 +                               (uint32_t)(unsigned long)&kmapped_virt_ptr[
3591 +                                       page_offset],
3592 +                               &kmapped_virt_ptr[page_offset], 16);
3593 +
3594 +               offset += 16;
3595 +       }
3596 +       if (page != NULL)
3597 +               kunmap(page);
3598 +
3599 +       for (page_idx = 0; page_idx < num_pages; page_idx++)
3600 +               page_cache_release(pages[page_idx]);
3601 +
3602 +       kfree(pages);
3603 +}
3604 +
3605 +/****************************************************************************
3606 +*
3607 +*   vchiq_read
3608 +*
3609 +***************************************************************************/
3610 +
3611 +static ssize_t
3612 +vchiq_read(struct file *file, char __user *buf,
3613 +       size_t count, loff_t *ppos)
3614 +{
3615 +       DUMP_CONTEXT_T context;
3616 +       context.buf = buf;
3617 +       context.actual = 0;
3618 +       context.space = count;
3619 +       context.offset = *ppos;
3620 +
3621 +       vchiq_dump_state(&context, &g_state);
3622 +
3623 +       *ppos += context.actual;
3624 +
3625 +       return context.actual;
3626 +}
3627 +
3628 +VCHIQ_STATE_T *
3629 +vchiq_get_state(void)
3630 +{
3631 +
3632 +       if (g_state.remote == NULL)
3633 +               printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3634 +       else if (g_state.remote->initialised != 1)
3635 +               printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3636 +                       __func__, g_state.remote->initialised);
3637 +
3638 +       return ((g_state.remote != NULL) &&
3639 +               (g_state.remote->initialised == 1)) ? &g_state : NULL;
3640 +}
3641 +
3642 +static const struct file_operations
3643 +vchiq_fops = {
3644 +       .owner = THIS_MODULE,
3645 +       .unlocked_ioctl = vchiq_ioctl,
3646 +       .open = vchiq_open,
3647 +       .release = vchiq_release,
3648 +       .read = vchiq_read
3649 +};
3650 +
3651 +/*
3652 + * Autosuspend related functionality
3653 + */
3654 +
3655 +int
3656 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3657 +{
3658 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3659 +       if (!arm_state)
3660 +               /* autosuspend not supported - always return wanted */
3661 +               return 1;
3662 +       else if (arm_state->blocked_count)
3663 +               return 1;
3664 +       else if (!arm_state->videocore_use_count)
3665 +               /* usage count zero - check for override unless we're forcing */
3666 +               if (arm_state->resume_blocked)
3667 +                       return 0;
3668 +               else
3669 +                       return vchiq_platform_videocore_wanted(state);
3670 +       else
3671 +               /* non-zero usage count - videocore still required */
3672 +               return 1;
3673 +}
3674 +
3675 +static VCHIQ_STATUS_T
3676 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3677 +       VCHIQ_HEADER_T *header,
3678 +       VCHIQ_SERVICE_HANDLE_T service_user,
3679 +       void *bulk_user)
3680 +{
3681 +       vchiq_log_error(vchiq_susp_log_level,
3682 +               "%s callback reason %d", __func__, reason);
3683 +       return 0;
3684 +}
3685 +
3686 +static int
3687 +vchiq_keepalive_thread_func(void *v)
3688 +{
3689 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3690 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3691 +
3692 +       VCHIQ_STATUS_T status;
3693 +       VCHIQ_INSTANCE_T instance;
3694 +       VCHIQ_SERVICE_HANDLE_T ka_handle;
3695 +
3696 +       VCHIQ_SERVICE_PARAMS_T params = {
3697 +               .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3698 +               .callback    = vchiq_keepalive_vchiq_callback,
3699 +               .version     = KEEPALIVE_VER,
3700 +               .version_min = KEEPALIVE_VER_MIN
3701 +       };
3702 +
3703 +       status = vchiq_initialise(&instance);
3704 +       if (status != VCHIQ_SUCCESS) {
3705 +               vchiq_log_error(vchiq_susp_log_level,
3706 +                       "%s vchiq_initialise failed %d", __func__, status);
3707 +               goto exit;
3708 +       }
3709 +
3710 +       status = vchiq_connect(instance);
3711 +       if (status != VCHIQ_SUCCESS) {
3712 +               vchiq_log_error(vchiq_susp_log_level,
3713 +                       "%s vchiq_connect failed %d", __func__, status);
3714 +               goto shutdown;
3715 +       }
3716 +
3717 +       status = vchiq_add_service(instance, &params, &ka_handle);
3718 +       if (status != VCHIQ_SUCCESS) {
3719 +               vchiq_log_error(vchiq_susp_log_level,
3720 +                       "%s vchiq_add_service failed %d", __func__, status);
3721 +               goto shutdown;
3722 +       }
3723 +
3724 +       while (1) {
3725 +               long rc = 0, uc = 0;
3726 +               if (wait_for_completion_interruptible(&arm_state->ka_evt)
3727 +                               != 0) {
3728 +                       vchiq_log_error(vchiq_susp_log_level,
3729 +                               "%s interrupted", __func__);
3730 +                       flush_signals(current);
3731 +                       continue;
3732 +               }
3733 +
3734 +               /* read and clear counters.  Do release_count then use_count to
3735 +                * prevent getting more releases than uses */
3736 +               rc = atomic_xchg(&arm_state->ka_release_count, 0);
3737 +               uc = atomic_xchg(&arm_state->ka_use_count, 0);
3738 +
3739 +               /* Call use/release service the requisite number of times.
3740 +                * Process use before release so use counts don't go negative */
3741 +               while (uc--) {
3742 +                       atomic_inc(&arm_state->ka_use_ack_count);
3743 +                       status = vchiq_use_service(ka_handle);
3744 +                       if (status != VCHIQ_SUCCESS) {
3745 +                               vchiq_log_error(vchiq_susp_log_level,
3746 +                                       "%s vchiq_use_service error %d",
3747 +                                       __func__, status);
3748 +                       }
3749 +               }
3750 +               while (rc--) {
3751 +                       status = vchiq_release_service(ka_handle);
3752 +                       if (status != VCHIQ_SUCCESS) {
3753 +                               vchiq_log_error(vchiq_susp_log_level,
3754 +                                       "%s vchiq_release_service error %d",
3755 +                                       __func__, status);
3756 +                       }
3757 +               }
3758 +       }
3759 +
3760 +shutdown:
3761 +       vchiq_shutdown(instance);
3762 +exit:
3763 +       return 0;
3764 +}
3765 +
3766 +
3767 +
3768 +VCHIQ_STATUS_T
3769 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3770 +{
3771 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3772 +
3773 +       if (arm_state) {
3774 +               rwlock_init(&arm_state->susp_res_lock);
3775 +
3776 +               init_completion(&arm_state->ka_evt);
3777 +               atomic_set(&arm_state->ka_use_count, 0);
3778 +               atomic_set(&arm_state->ka_use_ack_count, 0);
3779 +               atomic_set(&arm_state->ka_release_count, 0);
3780 +
3781 +               init_completion(&arm_state->vc_suspend_complete);
3782 +
3783 +               init_completion(&arm_state->vc_resume_complete);
3784 +               /* Initialise to 'done' state.  We only want to block on resume
3785 +                * completion while videocore is suspended. */
3786 +               set_resume_state(arm_state, VC_RESUME_RESUMED);
3787 +
3788 +               init_completion(&arm_state->resume_blocker);
3789 +               /* Initialise to 'done' state.  We only want to block on this
3790 +                * completion while resume is blocked */
3791 +               complete_all(&arm_state->resume_blocker);
3792 +
3793 +               init_completion(&arm_state->blocked_blocker);
3794 +               /* Initialise to 'done' state.  We only want to block on this
3795 +                * completion while things are waiting on the resume blocker */
3796 +               complete_all(&arm_state->blocked_blocker);
3797 +
3798 +               arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
3799 +               arm_state->suspend_timer_running = 0;
3800 +               init_timer(&arm_state->suspend_timer);
3801 +               arm_state->suspend_timer.data = (unsigned long)(state);
3802 +               arm_state->suspend_timer.function = suspend_timer_callback;
3803 +
3804 +               arm_state->first_connect = 0;
3805 +
3806 +       }
3807 +       return status;
3808 +}
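
The completions initialised above are deliberately put into their 'done' state so that callers only block while the guarded condition actually holds. A minimal sketch of that pattern using the same 3.10-era completion API (the names here are illustrative, not taken from the patch):

	#include <linux/completion.h>

	static struct completion gate;

	static void gate_pattern_example(void)
	{
		init_completion(&gate);
		complete_all(&gate);	/* start 'open': waiters pass straight through */

		/* When the guarded condition becomes true, re-arm the gate so
		 * wait_for_completion(&gate) blocks... */
		INIT_COMPLETION(gate);

		/* ...and release every waiter at once when it clears again. */
		complete_all(&gate);
	}
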
3809 +
3810 +/*
3811 +** Functions to modify the state variables;
3812 +**     set_suspend_state
3813 +**     set_resume_state
3814 +**
3815 +** There are more state variables than we might like, so ensure they remain in
3816 +** step.  Suspend and resume state are maintained separately, since most of
3817 +** these state machines can operate independently.  However, there are a few
3818 +** states where state transitions in one state machine cause a reset to the
3819 +** other state machine.  In addition, there are some completion events which
3820 +** need to occur on state machine reset and end-state(s), so these are also
3821 +** dealt with in these functions.
3822 +**
3823 +** In all states we set the state variable according to the input, but in some
3824 +** cases we perform additional steps outlined below;
3825 +**
3826 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
3827 +**                     The suspend completion is completed after any suspend
3828 +**                     attempt.  When we reset the state machine we also reset
3829 +**                     the completion.  This reset occurs when videocore is
3830 +**                     resumed, and also if we initiate suspend after a suspend
3831 +**                     failure.
3832 +**
3833 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
3834 +**                     suspend - ie from this point on we must try to suspend
3835 +**                     before resuming can occur.  We therefore also reset the
3836 +**                     resume state machine to VC_RESUME_IDLE in this state.
3837 +**
3838 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
3839 +**                     complete_all on the suspend completion to notify
3840 +**                     anything waiting for suspend to happen.
3841 +**
3842 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
3843 +**                     initiate resume, so no need to alter resume state.
3844 +**                     We call complete_all on the suspend completion to notify
3845 +**                     of suspend rejection.
3846 +**
3847 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
3848 +**                     suspend completion and reset the resume state machine.
3849 +**
3850 +** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
3851 +**                     resume completion is in its 'done' state whenever
3852 +**                     videocore is running.  Therefore, the VC_RESUME_IDLE state
3853 +**                     implies that videocore is suspended.
3854 +**                     Hence, any thread which needs to wait until videocore is
3855 +**                     running can wait on this completion - it will only block
3856 +**                     if videocore is suspended.
3857 +**
3858 +** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
3859 +**                     Call complete_all on the resume completion to unblock
3860 +**                     any threads waiting for resume.  Also reset the suspend
3861 +**                     state machine to its idle state.
3862 +**
3863 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
3864 +*/
3865 +
3866 +inline void
3867 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
3868 +       enum vc_suspend_status new_state)
3869 +{
3870 +       /* set the state in all cases */
3871 +       arm_state->vc_suspend_state = new_state;
3872 +
3873 +       /* state specific additional actions */
3874 +       switch (new_state) {
3875 +       case VC_SUSPEND_FORCE_CANCELED:
3876 +               complete_all(&arm_state->vc_suspend_complete);
3877 +               break;
3878 +       case VC_SUSPEND_REJECTED:
3879 +               complete_all(&arm_state->vc_suspend_complete);
3880 +               break;
3881 +       case VC_SUSPEND_FAILED:
3882 +               complete_all(&arm_state->vc_suspend_complete);
3883 +               arm_state->vc_resume_state = VC_RESUME_RESUMED;
3884 +               complete_all(&arm_state->vc_resume_complete);
3885 +               break;
3886 +       case VC_SUSPEND_IDLE:
3887 +               INIT_COMPLETION(arm_state->vc_suspend_complete);
3888 +               break;
3889 +       case VC_SUSPEND_REQUESTED:
3890 +               break;
3891 +       case VC_SUSPEND_IN_PROGRESS:
3892 +               set_resume_state(arm_state, VC_RESUME_IDLE);
3893 +               break;
3894 +       case VC_SUSPEND_SUSPENDED:
3895 +               complete_all(&arm_state->vc_suspend_complete);
3896 +               break;
3897 +       default:
3898 +               BUG();
3899 +               break;
3900 +       }
3901 +}
3902 +
3903 +inline void
3904 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
3905 +       enum vc_resume_status new_state)
3906 +{
3907 +       /* set the state in all cases */
3908 +       arm_state->vc_resume_state = new_state;
3909 +
3910 +       /* state specific additional actions */
3911 +       switch (new_state) {
3912 +       case VC_RESUME_FAILED:
3913 +               break;
3914 +       case VC_RESUME_IDLE:
3915 +               INIT_COMPLETION(arm_state->vc_resume_complete);
3916 +               break;
3917 +       case VC_RESUME_REQUESTED:
3918 +               break;
3919 +       case VC_RESUME_IN_PROGRESS:
3920 +               break;
3921 +       case VC_RESUME_RESUMED:
3922 +               complete_all(&arm_state->vc_resume_complete);
3923 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
3924 +               break;
3925 +       default:
3926 +               BUG();
3927 +               break;
3928 +       }
3929 +}
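
As a rough illustration of how the two setters above combine with the susp_res_lock held by their callers, here is a sketch of a normal suspend sequence; the helper name is hypothetical and the platform work in the middle is elided:

	static void example_run_suspend(VCHIQ_STATE_T *state)
	{
		VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

		write_lock_bh(&arm_state->susp_res_lock);
		/* Point of no return: this also resets the resume machine to
		 * VC_RESUME_IDLE, re-arming vc_resume_complete. */
		set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
		write_unlock_bh(&arm_state->susp_res_lock);

		/* ...platform-specific suspend work would happen here... */

		write_lock_bh(&arm_state->susp_res_lock);
		/* Completes vc_suspend_complete, waking waiters such as
		 * vchiq_arm_force_suspend(). */
		set_suspend_state(arm_state, VC_SUSPEND_SUSPENDED);
		write_unlock_bh(&arm_state->susp_res_lock);
	}
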
3930 +
3931 +
3932 +/* should be called with the write lock held */
3933 +inline void
3934 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3935 +{
3936 +       del_timer(&arm_state->suspend_timer);
3937 +       arm_state->suspend_timer.expires = jiffies +
3938 +               msecs_to_jiffies(arm_state->
3939 +                       suspend_timer_timeout);
3940 +       add_timer(&arm_state->suspend_timer);
3941 +       arm_state->suspend_timer_running = 1;
3942 +}
3943 +
3944 +/* should be called with the write lock held */
3945 +static inline void
3946 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3947 +{
3948 +       if (arm_state->suspend_timer_running) {
3949 +               del_timer(&arm_state->suspend_timer);
3950 +               arm_state->suspend_timer_running = 0;
3951 +       }
3952 +}
3953 +
3954 +static inline int
3955 +need_resume(VCHIQ_STATE_T *state)
3956 +{
3957 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3958 +       return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
3959 +                       (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
3960 +                       vchiq_videocore_wanted(state);
3961 +}
3962 +
3963 +static int
3964 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
3965 +{
3966 +       int status = VCHIQ_SUCCESS;
3967 +       const unsigned long timeout_val =
3968 +                               msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
3969 +       int resume_count = 0;
3970 +
3971 +       /* Allow any threads which were blocked by the last force suspend to
3972 +        * complete if they haven't already.  Only give this one shot; if
3973 +        * blocked_count is incremented after blocked_blocker is completed
3974 +        * (which only happens when blocked_count hits 0) then those threads
3975 +        * will have to wait until next time around */
3976 +       if (arm_state->blocked_count) {
3977 +               INIT_COMPLETION(arm_state->blocked_blocker);
3978 +               write_unlock_bh(&arm_state->susp_res_lock);
3979 +               vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
3980 +                       "blocked clients", __func__);
3981 +               if (wait_for_completion_interruptible_timeout(
3982 +                               &arm_state->blocked_blocker, timeout_val)
3983 +                                       <= 0) {
3984 +                       vchiq_log_error(vchiq_susp_log_level, "%s wait for "
3985 +                               "previously blocked clients failed", __func__);
3986 +                       status = VCHIQ_ERROR;
3987 +                       write_lock_bh(&arm_state->susp_res_lock);
3988 +                       goto out;
3989 +               }
3990 +               vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
3991 +                       "clients resumed", __func__);
3992 +               write_lock_bh(&arm_state->susp_res_lock);
3993 +       }
3994 +
3995 +       /* We need to wait for resume to complete if it's in process */
3996 +       while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
3997 +                       arm_state->vc_resume_state > VC_RESUME_IDLE) {
3998 +               if (resume_count > 1) {
3999 +                       status = VCHIQ_ERROR;
4000 +                       vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4001 +                               "many times for resume", __func__);
4002 +                       goto out;
4003 +               }
4004 +               write_unlock_bh(&arm_state->susp_res_lock);
4005 +               vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4006 +                       __func__);
4007 +               if (wait_for_completion_interruptible_timeout(
4008 +                               &arm_state->vc_resume_complete, timeout_val)
4009 +                                       <= 0) {
4010 +                       vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4011 +                               "resume failed (%s)", __func__,
4012 +                               resume_state_names[arm_state->vc_resume_state +
4013 +                                                       VC_RESUME_NUM_OFFSET]);
4014 +                       status = VCHIQ_ERROR;
4015 +                       write_lock_bh(&arm_state->susp_res_lock);
4016 +                       goto out;
4017 +               }
4018 +               vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4019 +               write_lock_bh(&arm_state->susp_res_lock);
4020 +               resume_count++;
4021 +       }
4022 +       INIT_COMPLETION(arm_state->resume_blocker);
4023 +       arm_state->resume_blocked = 1;
4024 +
4025 +out:
4026 +       return status;
4027 +}
4028 +
4029 +static inline void
4030 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4031 +{
4032 +       complete_all(&arm_state->resume_blocker);
4033 +       arm_state->resume_blocked = 0;
4034 +}
4035 +
4036 +/* Initiate suspend via slot handler. Should be called with the write lock
4037 + * held */
4038 +VCHIQ_STATUS_T
4039 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4040 +{
4041 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
4042 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4043 +
4044 +       if (!arm_state)
4045 +               goto out;
4046 +
4047 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4048 +       status = VCHIQ_SUCCESS;
4049 +
4050 +
4051 +       switch (arm_state->vc_suspend_state) {
4052 +       case VC_SUSPEND_REQUESTED:
4053 +               vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4054 +                       "requested", __func__);
4055 +               break;
4056 +       case VC_SUSPEND_IN_PROGRESS:
4057 +               vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4058 +                       "progress", __func__);
4059 +               break;
4060 +
4061 +       default:
4062 +               /* We don't expect to be in other states, so log but continue
4063 +                * anyway */
4064 +               vchiq_log_error(vchiq_susp_log_level,
4065 +                       "%s unexpected suspend state %s", __func__,
4066 +                       suspend_state_names[arm_state->vc_suspend_state +
4067 +                                               VC_SUSPEND_NUM_OFFSET]);
4068 +               /* fall through */
4069 +       case VC_SUSPEND_REJECTED:
4070 +       case VC_SUSPEND_FAILED:
4071 +               /* Ensure any idle state actions have been run */
4072 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4073 +               /* fall through */
4074 +       case VC_SUSPEND_IDLE:
4075 +               vchiq_log_info(vchiq_susp_log_level,
4076 +                       "%s: suspending", __func__);
4077 +               set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4078 +               /* kick the slot handler thread to initiate suspend */
4079 +               request_poll(state, NULL, 0);
4080 +               break;
4081 +       }
4082 +
4083 +out:
4084 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4085 +       return status;
4086 +}
4087 +
4088 +void
4089 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4090 +{
4091 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4092 +       int susp = 0;
4093 +
4094 +       if (!arm_state)
4095 +               goto out;
4096 +
4097 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4098 +
4099 +       write_lock_bh(&arm_state->susp_res_lock);
4100 +       if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4101 +                       arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4102 +               set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4103 +               susp = 1;
4104 +       }
4105 +       write_unlock_bh(&arm_state->susp_res_lock);
4106 +
4107 +       if (susp)
4108 +               vchiq_platform_suspend(state);
4109 +
4110 +out:
4111 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4112 +       return;
4113 +}
4114 +
4115 +
4116 +static void
4117 +output_timeout_error(VCHIQ_STATE_T *state)
4118 +{
4119 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4120 +       char service_err[50] = "";
4121 +       int vc_use_count = arm_state->videocore_use_count;
4122 +       int active_services = state->unused_service;
4123 +       int i;
4124 +
4125 +       if (!arm_state->videocore_use_count) {
4126 +               snprintf(service_err, 50, " Videocore usecount is 0");
4127 +               goto output_msg;
4128 +       }
4129 +       for (i = 0; i < active_services; i++) {
4130 +               VCHIQ_SERVICE_T *service_ptr = state->services[i];
4131 +               if (service_ptr && service_ptr->service_use_count &&
4132 +                       (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4133 +                       snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4134 +                               "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4135 +                                       service_ptr->base.fourcc),
4136 +                                service_ptr->client_id,
4137 +                                service_ptr->service_use_count,
4138 +                                service_ptr->service_use_count ==
4139 +                                        vc_use_count ? "" : " (+ more)");
4140 +                       break;
4141 +               }
4142 +       }
4143 +
4144 +output_msg:
4145 +       vchiq_log_error(vchiq_susp_log_level,
4146 +               "timed out waiting for vc suspend (%d).%s",
4147 +                arm_state->autosuspend_override, service_err);
4148 +
4149 +}
4150 +
4151 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4152 +** We don't actually force suspend, since videocore may get into a bad state
4153 +** if we force suspend at a bad time.  Instead, we wait for autosuspend to
4154 +** determine a good point to suspend.  If this doesn't happen within 100ms we
4155 +** report failure.
4156 +**
4157 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4158 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4159 +*/
4160 +VCHIQ_STATUS_T
4161 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4162 +{
4163 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4164 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
4165 +       long rc = 0;
4166 +       int repeat = -1;
4167 +
4168 +       if (!arm_state)
4169 +               goto out;
4170 +
4171 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4172 +
4173 +       write_lock_bh(&arm_state->susp_res_lock);
4174 +
4175 +       status = block_resume(arm_state);
4176 +       if (status != VCHIQ_SUCCESS)
4177 +               goto unlock;
4178 +       if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4179 +               /* Already suspended - just block resume and exit */
4180 +               vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4181 +                       __func__);
4182 +               status = VCHIQ_SUCCESS;
4183 +               goto unlock;
4184 +       } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4185 +               /* initiate suspend immediately in the case that we're waiting
4186 +                * for the timeout */
4187 +               stop_suspend_timer(arm_state);
4188 +               if (!vchiq_videocore_wanted(state)) {
4189 +                       vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4190 +                               "idle, initiating suspend", __func__);
4191 +                       status = vchiq_arm_vcsuspend(state);
4192 +               } else if (arm_state->autosuspend_override <
4193 +                                               FORCE_SUSPEND_FAIL_MAX) {
4194 +                       vchiq_log_info(vchiq_susp_log_level, "%s letting "
4195 +                               "videocore go idle", __func__);
4196 +                       status = VCHIQ_SUCCESS;
4197 +               } else {
4198 +                       vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4199 +                               "many times - attempting suspend", __func__);
4200 +                       status = vchiq_arm_vcsuspend(state);
4201 +               }
4202 +       } else {
4203 +               vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4204 +                       "in progress - wait for completion", __func__);
4205 +               status = VCHIQ_SUCCESS;
4206 +       }
4207 +
4208 +       /* Wait for suspend to happen due to system idle (not forced..) */
4209 +       if (status != VCHIQ_SUCCESS)
4210 +               goto unblock_resume;
4211 +
4212 +       do {
4213 +               write_unlock_bh(&arm_state->susp_res_lock);
4214 +
4215 +               rc = wait_for_completion_interruptible_timeout(
4216 +                               &arm_state->vc_suspend_complete,
4217 +                               msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4218 +
4219 +               write_lock_bh(&arm_state->susp_res_lock);
4220 +               if (rc < 0) {
4221 +                       vchiq_log_warning(vchiq_susp_log_level, "%s "
4222 +                               "interrupted waiting for suspend", __func__);
4223 +                       status = VCHIQ_ERROR;
4224 +                       goto unblock_resume;
4225 +               } else if (rc == 0) {
4226 +                       if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4227 +                               /* Repeat timeout once if in progress */
4228 +                               if (repeat < 0) {
4229 +                                       repeat = 1;
4230 +                                       continue;
4231 +                               }
4232 +                       }
4233 +                       arm_state->autosuspend_override++;
4234 +                       output_timeout_error(state);
4235 +
4236 +                       status = VCHIQ_RETRY;
4237 +                       goto unblock_resume;
4238 +               }
4239 +       } while (0 < (repeat--));
4240 +
4241 +       /* Check and report state in case we need to abort ARM suspend */
4242 +       if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4243 +               status = VCHIQ_RETRY;
4244 +               vchiq_log_error(vchiq_susp_log_level,
4245 +                       "%s videocore suspend failed (state %s)", __func__,
4246 +                       suspend_state_names[arm_state->vc_suspend_state +
4247 +                                               VC_SUSPEND_NUM_OFFSET]);
4248 +               /* Reset the state only if it's still in an error state.
4249 +                * Something could have already initiated another suspend. */
4250 +               if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4251 +                       set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4252 +
4253 +               goto unblock_resume;
4254 +       }
4255 +
4256 +       /* successfully suspended - unlock and exit */
4257 +       goto unlock;
4258 +
4259 +unblock_resume:
4260 +       /* all error states need to unblock resume before exit */
4261 +       unblock_resume(arm_state);
4262 +
4263 +unlock:
4264 +       write_unlock_bh(&arm_state->susp_res_lock);
4265 +
4266 +out:
4267 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4268 +       return status;
4269 +}
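
A sketch of how a platform power-management hook might consume the three return values described in the comment above; the function name and the errno mapping are assumptions, not part of this patch:

	static int example_pm_suspend_hook(VCHIQ_STATE_T *state)
	{
		switch (vchiq_arm_force_suspend(state)) {
		case VCHIQ_SUCCESS:
			return 0;	/* videocore is now suspended */
		case VCHIQ_RETRY:
			return -EAGAIN;	/* didn't suspend in time - try again later */
		default:
			return -EINTR;	/* interrupted - abort the ARM suspend */
		}
	}
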
4270 +
4271 +void
4272 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4273 +{
4274 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4275 +
4276 +       if (!arm_state)
4277 +               goto out;
4278 +
4279 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4280 +
4281 +       write_lock_bh(&arm_state->susp_res_lock);
4282 +       if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4283 +                       arm_state->first_connect &&
4284 +                       !vchiq_videocore_wanted(state)) {
4285 +               vchiq_arm_vcsuspend(state);
4286 +       }
4287 +       write_unlock_bh(&arm_state->susp_res_lock);
4288 +
4289 +out:
4290 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4291 +       return;
4292 +}
4293 +
4294 +
4295 +int
4296 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4297 +{
4298 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4299 +       int resume = 0;
4300 +       int ret = -1;
4301 +
4302 +       if (!arm_state)
4303 +               goto out;
4304 +
4305 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4306 +
4307 +       write_lock_bh(&arm_state->susp_res_lock);
4308 +       unblock_resume(arm_state);
4309 +       resume = vchiq_check_resume(state);
4310 +       write_unlock_bh(&arm_state->susp_res_lock);
4311 +
4312 +       if (resume) {
4313 +               if (wait_for_completion_interruptible(
4314 +                       &arm_state->vc_resume_complete) < 0) {
4315 +                       vchiq_log_error(vchiq_susp_log_level,
4316 +                               "%s interrupted", __func__);
4317 +                       /* failed, cannot accurately derive suspend
4318 +                        * state, so exit early. */
4319 +                       goto out;
4320 +               }
4321 +       }
4322 +
4323 +       read_lock_bh(&arm_state->susp_res_lock);
4324 +       if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4325 +               vchiq_log_info(vchiq_susp_log_level,
4326 +                               "%s: Videocore remains suspended", __func__);
4327 +       } else {
4328 +               vchiq_log_info(vchiq_susp_log_level,
4329 +                               "%s: Videocore resumed", __func__);
4330 +               ret = 0;
4331 +       }
4332 +       read_unlock_bh(&arm_state->susp_res_lock);
4333 +out:
4334 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4335 +       return ret;
4336 +}
4337 +
4338 +/* This function should be called with the write lock held */
4339 +int
4340 +vchiq_check_resume(VCHIQ_STATE_T *state)
4341 +{
4342 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4343 +       int resume = 0;
4344 +
4345 +       if (!arm_state)
4346 +               goto out;
4347 +
4348 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4349 +
4350 +       if (need_resume(state)) {
4351 +               set_resume_state(arm_state, VC_RESUME_REQUESTED);
4352 +               request_poll(state, NULL, 0);
4353 +               resume = 1;
4354 +       }
4355 +
4356 +out:
4357 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4358 +       return resume;
4359 +}
4360 +
4361 +void
4362 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4363 +{
4364 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4365 +       int res = 0;
4366 +
4367 +       if (!arm_state)
4368 +               goto out;
4369 +
4370 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4371 +
4372 +       write_lock_bh(&arm_state->susp_res_lock);
4373 +       if (arm_state->wake_address == 0) {
4374 +               vchiq_log_info(vchiq_susp_log_level,
4375 +                                       "%s: already awake", __func__);
4376 +               goto unlock;
4377 +       }
4378 +       if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4379 +               vchiq_log_info(vchiq_susp_log_level,
4380 +                                       "%s: already resuming", __func__);
4381 +               goto unlock;
4382 +       }
4383 +
4384 +       if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4385 +               set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4386 +               res = 1;
4387 +       } else
4388 +               vchiq_log_trace(vchiq_susp_log_level,
4389 +                               "%s: not resuming (resume state %s)", __func__,
4390 +                               resume_state_names[arm_state->vc_resume_state +
4391 +                                                       VC_RESUME_NUM_OFFSET]);
4392 +
4393 +unlock:
4394 +       write_unlock_bh(&arm_state->susp_res_lock);
4395 +
4396 +       if (res)
4397 +               vchiq_platform_resume(state);
4398 +
4399 +out:
4400 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4401 +       return;
4402 +
4403 +}
4404 +
4405 +
4406 +
4407 +VCHIQ_STATUS_T
4408 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4409 +               enum USE_TYPE_E use_type)
4410 +{
4411 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4412 +       VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4413 +       char entity[16];
4414 +       int *entity_uc;
4415 +       int local_uc, local_entity_uc;
4416 +
4417 +       if (!arm_state)
4418 +               goto out;
4419 +
4420 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4421 +
4422 +       if (use_type == USE_TYPE_VCHIQ) {
4423 +               sprintf(entity, "VCHIQ:   ");
4424 +               entity_uc = &arm_state->peer_use_count;
4425 +       } else if (service) {
4426 +               sprintf(entity, "%c%c%c%c:%03d",
4427 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4428 +                       service->client_id);
4429 +               entity_uc = &service->service_use_count;
4430 +       } else {
4431 +               vchiq_log_error(vchiq_susp_log_level, "%s null service "
4432 +                               "ptr", __func__);
4433 +               ret = VCHIQ_ERROR;
4434 +               goto out;
4435 +       }
4436 +
4437 +       write_lock_bh(&arm_state->susp_res_lock);
4438 +       while (arm_state->resume_blocked) {
4439 +               /* If we call 'use' while force suspend is waiting for suspend,
4440 +                * then we're about to block the thread which the force is
4441 +                * waiting to complete, so we're bound to just time out. In this
4442 +                * case, set the suspend state such that the wait will be
4443 +                * canceled, so we can complete as quickly as possible. */
4444 +               if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4445 +                               VC_SUSPEND_IDLE) {
4446 +                       set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4447 +                       break;
4448 +               }
4449 +               /* If suspend is already in progress then we need to block */
4450 +               if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4451 +                       /* Indicate that there are threads waiting on the resume
4452 +                        * blocker.  These need to be allowed to complete before
4453 +                        * a _second_ call to force suspend can complete,
4454 +                        * otherwise low priority threads might never actually
4455 +                        * continue */
4456 +                       arm_state->blocked_count++;
4457 +                       write_unlock_bh(&arm_state->susp_res_lock);
4458 +                       vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4459 +                               "blocked - waiting...", __func__, entity);
4460 +                       if (wait_for_completion_killable(
4461 +                                       &arm_state->resume_blocker) != 0) {
4462 +                               vchiq_log_error(vchiq_susp_log_level, "%s %s "
4463 +                                       "wait for resume blocker interrupted",
4464 +                                       __func__, entity);
4465 +                               ret = VCHIQ_ERROR;
4466 +                               write_lock_bh(&arm_state->susp_res_lock);
4467 +                               arm_state->blocked_count--;
4468 +                               write_unlock_bh(&arm_state->susp_res_lock);
4469 +                               goto out;
4470 +                       }
4471 +                       vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4472 +                               "unblocked", __func__, entity);
4473 +                       write_lock_bh(&arm_state->susp_res_lock);
4474 +                       if (--arm_state->blocked_count == 0)
4475 +                               complete_all(&arm_state->blocked_blocker);
4476 +               }
4477 +       }
4478 +
4479 +       stop_suspend_timer(arm_state);
4480 +
4481 +       local_uc = ++arm_state->videocore_use_count;
4482 +       local_entity_uc = ++(*entity_uc);
4483 +
4484 +       /* If there's a pending request which hasn't yet been serviced then
4485 +        * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
4486 +        * vc_resume_complete will block until we either resume or fail to
4487 +        * suspend */
4488 +       if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4489 +               set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4490 +
4491 +       if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4492 +               set_resume_state(arm_state, VC_RESUME_REQUESTED);
4493 +               vchiq_log_info(vchiq_susp_log_level,
4494 +                       "%s %s count %d, state count %d",
4495 +                       __func__, entity, local_entity_uc, local_uc);
4496 +               request_poll(state, NULL, 0);
4497 +       } else
4498 +               vchiq_log_trace(vchiq_susp_log_level,
4499 +                       "%s %s count %d, state count %d",
4500 +                       __func__, entity, *entity_uc, local_uc);
4501 +
4502 +
4503 +       write_unlock_bh(&arm_state->susp_res_lock);
4504 +
4505 +       /* Completion is in a done state when we're not suspended, so this won't
4506 +        * block for the non-suspended case. */
4507 +       if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4508 +               vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4509 +                       __func__, entity);
4510 +               if (wait_for_completion_killable(
4511 +                               &arm_state->vc_resume_complete) != 0) {
4512 +                       vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4513 +                               "resume interrupted", __func__, entity);
4514 +                       ret = VCHIQ_ERROR;
4515 +                       goto out;
4516 +               }
4517 +               vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4518 +                       entity);
4519 +       }
4520 +
4521 +       if (ret == VCHIQ_SUCCESS) {
4522 +               VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4523 +               long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4524 +               while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4525 +                       /* Send the use notify to videocore */
4526 +                       status = vchiq_send_remote_use_active(state);
4527 +                       if (status == VCHIQ_SUCCESS)
4528 +                               ack_cnt--;
4529 +                       else
4530 +                               atomic_add(ack_cnt,
4531 +                                       &arm_state->ka_use_ack_count);
4532 +               }
4533 +       }
4534 +
4535 +out:
4536 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4537 +       return ret;
4538 +}
4539 +
4540 +VCHIQ_STATUS_T
4541 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4542 +{
4543 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4544 +       VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4545 +       char entity[16];
4546 +       int *entity_uc;
4547 +       int local_uc, local_entity_uc;
4548 +
4549 +       if (!arm_state)
4550 +               goto out;
4551 +
4552 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4553 +
4554 +       if (service) {
4555 +               sprintf(entity, "%c%c%c%c:%03d",
4556 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4557 +                       service->client_id);
4558 +               entity_uc = &service->service_use_count;
4559 +       } else {
4560 +               sprintf(entity, "PEER:   ");
4561 +               entity_uc = &arm_state->peer_use_count;
4562 +       }
4563 +
4564 +       write_lock_bh(&arm_state->susp_res_lock);
4565 +       if (!arm_state->videocore_use_count || !(*entity_uc)) {
4566 +               /* Don't use BUG_ON - don't allow user thread to crash kernel */
4567 +               WARN_ON(!arm_state->videocore_use_count);
4568 +               WARN_ON(!(*entity_uc));
4569 +               ret = VCHIQ_ERROR;
4570 +               goto unlock;
4571 +       }
4572 +       local_uc = --arm_state->videocore_use_count;
4573 +       local_entity_uc = --(*entity_uc);
4574 +
4575 +       if (!vchiq_videocore_wanted(state)) {
4576 +               if (vchiq_platform_use_suspend_timer() &&
4577 +                               !arm_state->resume_blocked) {
4578 +                       /* Only use the timer if we're not trying to force
4579 +                        * suspend (=> resume_blocked) */
4580 +                       start_suspend_timer(arm_state);
4581 +               } else {
4582 +                       vchiq_log_info(vchiq_susp_log_level,
4583 +                               "%s %s count %d, state count %d - suspending",
4584 +                               __func__, entity, *entity_uc,
4585 +                               arm_state->videocore_use_count);
4586 +                       vchiq_arm_vcsuspend(state);
4587 +               }
4588 +       } else
4589 +               vchiq_log_trace(vchiq_susp_log_level,
4590 +                       "%s %s count %d, state count %d",
4591 +                       __func__, entity, *entity_uc,
4592 +                       arm_state->videocore_use_count);
4593 +
4594 +unlock:
4595 +       write_unlock_bh(&arm_state->susp_res_lock);
4596 +
4597 +out:
4598 +       vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4599 +       return ret;
4600 +}
4601 +
4602 +void
4603 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4604 +{
4605 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4606 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4607 +       atomic_inc(&arm_state->ka_use_count);
4608 +       complete(&arm_state->ka_evt);
4609 +}
4610 +
4611 +void
4612 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4613 +{
4614 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4615 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4616 +       atomic_inc(&arm_state->ka_release_count);
4617 +       complete(&arm_state->ka_evt);
4618 +}
4619 +
4620 +VCHIQ_STATUS_T
4621 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4622 +{
4623 +       return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4624 +}
4625 +
4626 +VCHIQ_STATUS_T
4627 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4628 +{
4629 +       return vchiq_release_internal(service->state, service);
4630 +}
4631 +
4632 +static void suspend_timer_callback(unsigned long context)
4633 +{
4634 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4635 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4636 +       if (!arm_state)
4637 +               goto out;
4638 +       vchiq_log_info(vchiq_susp_log_level,
4639 +               "%s - suspend timer expired - check suspend", __func__);
4640 +       vchiq_check_suspend(state);
4641 +out:
4642 +       return;
4643 +}
4644 +
4645 +VCHIQ_STATUS_T
4646 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4647 +{
4648 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4649 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4650 +       if (service) {
4651 +               ret = vchiq_use_internal(service->state, service,
4652 +                               USE_TYPE_SERVICE_NO_RESUME);
4653 +               unlock_service(service);
4654 +       }
4655 +       return ret;
4656 +}
4657 +
4658 +VCHIQ_STATUS_T
4659 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4660 +{
4661 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4662 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4663 +       if (service) {
4664 +               ret = vchiq_use_internal(service->state, service,
4665 +                               USE_TYPE_SERVICE);
4666 +               unlock_service(service);
4667 +       }
4668 +       return ret;
4669 +}
4670 +
4671 +VCHIQ_STATUS_T
4672 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4673 +{
4674 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4675 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4676 +       if (service) {
4677 +               ret = vchiq_release_internal(service->state, service);
4678 +               unlock_service(service);
4679 +       }
4680 +       return ret;
4681 +}
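
The exported use/release pair above is what keeps videocore_use_count accurate, so a kernel-side client is expected to bracket bursts of activity on a service handle with them. A sketch of that pairing (the helper and the work in the middle are illustrative only):

	static VCHIQ_STATUS_T example_service_work(VCHIQ_SERVICE_HANDLE_T handle)
	{
		VCHIQ_STATUS_T status = vchiq_use_service(handle);

		if (status != VCHIQ_SUCCESS)
			return status;	/* resume failed or wait was interrupted */

		/* ...queue messages or bulk transfers on 'handle' here... */

		/* Dropping the last use either starts the suspend timer or
		 * initiates suspend straight away - see vchiq_release_internal. */
		return vchiq_release_service(handle);
	}
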
4682 +
4683 +void
4684 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4685 +{
4686 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4687 +       int i, j = 0;
4688 +       /* Only dump 64 services */
4689 +       static const int local_max_services = 64;
4690 +       /* If there are more than 64 services, only dump ones with
4691 +        * non-zero counts */
4692 +       int only_nonzero = 0;
4693 +       static const char *nz = "<-- preventing suspend";
4694 +
4695 +       enum vc_suspend_status vc_suspend_state;
4696 +       enum vc_resume_status  vc_resume_state;
4697 +       int peer_count;
4698 +       int vc_use_count;
4699 +       int active_services;
4700 +       struct service_data_struct {
4701 +               int fourcc;
4702 +               int clientid;
4703 +               int use_count;
4704 +       } service_data[local_max_services];
4705 +
4706 +       if (!arm_state)
4707 +               return;
4708 +
4709 +       read_lock_bh(&arm_state->susp_res_lock);
4710 +       vc_suspend_state = arm_state->vc_suspend_state;
4711 +       vc_resume_state  = arm_state->vc_resume_state;
4712 +       peer_count = arm_state->peer_use_count;
4713 +       vc_use_count = arm_state->videocore_use_count;
4714 +       active_services = state->unused_service;
4715 +       if (active_services > local_max_services)
4716 +               only_nonzero = 1;
4717 +
4718 +       for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4719 +               VCHIQ_SERVICE_T *service_ptr = state->services[i];
4720 +               if (!service_ptr)
4721 +                       continue;
4722 +
4723 +               if (only_nonzero && !service_ptr->service_use_count)
4724 +                       continue;
4725 +
4726 +               if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
4727 +                       service_data[j].fourcc = service_ptr->base.fourcc;
4728 +                       service_data[j].clientid = service_ptr->client_id;
4729 +                       service_data[j++].use_count = service_ptr->
4730 +                                                       service_use_count;
4731 +               }
4732 +       }
4733 +
4734 +       read_unlock_bh(&arm_state->susp_res_lock);
4735 +
4736 +       vchiq_log_warning(vchiq_susp_log_level,
4737 +               "-- Videocore suspend state: %s --",
4738 +               suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
4739 +       vchiq_log_warning(vchiq_susp_log_level,
4740 +               "-- Videocore resume state: %s --",
4741 +               resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
4742 +
4743 +       if (only_nonzero)
4744 +               vchiq_log_warning(vchiq_susp_log_level, "Too many active "
4745 +                       "services (%d).  Only dumping up to first %d services "
4746 +                       "with non-zero use-count", active_services,
4747 +                       local_max_services);
4748 +
4749 +       for (i = 0; i < j; i++) {
4750 +               vchiq_log_warning(vchiq_susp_log_level,
4751 +                       "----- %c%c%c%c:%d service count %d %s",
4752 +                       VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
4753 +                       service_data[i].clientid,
4754 +                       service_data[i].use_count,
4755 +                       service_data[i].use_count ? nz : "");
4756 +       }
4757 +       vchiq_log_warning(vchiq_susp_log_level,
4758 +               "----- VCHIQ use count %d", peer_count);
4759 +       vchiq_log_warning(vchiq_susp_log_level,
4760 +               "--- Overall vchiq instance use count %d", vc_use_count);
4761 +
4762 +       vchiq_dump_platform_use_state(state);
4763 +}
4764 +
4765 +VCHIQ_STATUS_T
4766 +vchiq_check_service(VCHIQ_SERVICE_T *service)
4767 +{
4768 +       VCHIQ_ARM_STATE_T *arm_state;
4769 +       VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4770 +
4771 +       if (!service || !service->state)
4772 +               goto out;
4773 +
4774 +       vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4775 +
4776 +       arm_state = vchiq_platform_get_arm_state(service->state);
4777 +
4778 +       read_lock_bh(&arm_state->susp_res_lock);
4779 +       if (service->service_use_count)
4780 +               ret = VCHIQ_SUCCESS;
4781 +       read_unlock_bh(&arm_state->susp_res_lock);
4782 +
4783 +       if (ret == VCHIQ_ERROR) {
4784 +               vchiq_log_error(vchiq_susp_log_level,
4785 +                       "%s ERROR - %c%c%c%c:%d service count %d, "
4786 +                       "state count %d, videocore suspend state %s", __func__,
4787 +                       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4788 +                       service->client_id, service->service_use_count,
4789 +                       arm_state->videocore_use_count,
4790 +                       suspend_state_names[arm_state->vc_suspend_state +
4791 +                                               VC_SUSPEND_NUM_OFFSET]);
4792 +               vchiq_dump_service_use_state(service->state);
4793 +       }
4794 +out:
4795 +       return ret;
4796 +}
4797 +
4798 +/* stub functions */
4799 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
4800 +{
4801 +       (void)state;
4802 +}
4803 +
4804 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
4805 +       VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
4806 +{
4807 +       VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4808 +       vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
4809 +               get_conn_state_name(oldstate), get_conn_state_name(newstate));
4810 +       if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
4811 +               write_lock_bh(&arm_state->susp_res_lock);
4812 +               if (!arm_state->first_connect) {
4813 +                       char threadname[10];
4814 +                       arm_state->first_connect = 1;
4815 +                       write_unlock_bh(&arm_state->susp_res_lock);
4816 +                       snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
4817 +                               state->id);
4818 +                       arm_state->ka_thread = kthread_create(
4819 +                               &vchiq_keepalive_thread_func,
4820 +                               (void *)state,
4821 +                               threadname);
4822 +                       if (IS_ERR(arm_state->ka_thread)) {
4823 +                               vchiq_log_error(vchiq_susp_log_level,
4824 +                                       "vchiq: FATAL: couldn't create thread %s",
4825 +                                       threadname);
4826 +                       } else {
4827 +                               wake_up_process(arm_state->ka_thread);
4828 +                       }
4829 +               } else
4830 +                       write_unlock_bh(&arm_state->susp_res_lock);
4831 +       }
4832 +}
4833 +
4834 +
4835 +/****************************************************************************
4836 +*
4837 +*   vchiq_init - called when the module is loaded.
4838 +*
4839 +***************************************************************************/
4840 +
4841 +static int __init
4842 +vchiq_init(void)
4843 +{
4844 +       int err;
4845 +       void *ptr_err;
4846 +
4847 +       /* create proc entries */
4848 +       err = vchiq_proc_init();
4849 +       if (err != 0)
4850 +               goto failed_proc_init;
4851 +
4852 +       err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
4853 +       if (err != 0) {
4854 +               vchiq_log_error(vchiq_arm_log_level,
4855 +                       "Unable to allocate device number");
4856 +               goto failed_alloc_chrdev;
4857 +       }
4858 +       cdev_init(&vchiq_cdev, &vchiq_fops);
4859 +       vchiq_cdev.owner = THIS_MODULE;
4860 +       err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
4861 +       if (err != 0) {
4862 +               vchiq_log_error(vchiq_arm_log_level,
4863 +                       "Unable to register device");
4864 +               goto failed_cdev_add;
4865 +       }
4866 +
4867 +       /* create sysfs entries */
4868 +       vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
4869 +       ptr_err = vchiq_class;
4870 +       if (IS_ERR(ptr_err))
4871 +               goto failed_class_create;
4872 +
4873 +       vchiq_dev = device_create(vchiq_class, NULL,
4874 +               vchiq_devid, NULL, "vchiq");
4875 +       ptr_err = vchiq_dev;
4876 +       if (IS_ERR(ptr_err))
4877 +               goto failed_device_create;
4878 +
4879 +       err = vchiq_platform_init(&g_state);
4880 +       if (err != 0)
4881 +               goto failed_platform_init;
4882 +
4883 +       vchiq_log_info(vchiq_arm_log_level,
4884 +               "vchiq: initialised - version %d (min %d), device %d.%d",
4885 +               VCHIQ_VERSION, VCHIQ_VERSION_MIN,
4886 +               MAJOR(vchiq_devid), MINOR(vchiq_devid));
4887 +
4888 +       return 0;
4889 +
4890 +failed_platform_init:
4891 +       device_destroy(vchiq_class, vchiq_devid);
4892 +failed_device_create:
4893 +       class_destroy(vchiq_class);
4894 +failed_class_create:
4895 +       cdev_del(&vchiq_cdev);
4896 +       err = PTR_ERR(ptr_err);
4897 +failed_cdev_add:
4898 +       unregister_chrdev_region(vchiq_devid, 1);
4899 +failed_alloc_chrdev:
4900 +       vchiq_proc_deinit();
4901 +failed_proc_init:
4902 +       vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
4903 +       return err;
4904 +}
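
For reference, user space reaches this driver through the character device registered above. A minimal user-space sketch; the /dev/vchiq node name follows from the device_create() call but depends on udev, so treat the path as an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Real clients speak the VCHIQ_IOC_* ioctl protocol defined in
		 * vchiq_ioctl.h; this sketch only checks the node can be opened. */
		int fd = open("/dev/vchiq", O_RDWR);

		if (fd < 0) {
			perror("open /dev/vchiq");
			return 1;
		}
		close(fd);
		return 0;
	}
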
4905 +
4906 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4907 +{
4908 +       VCHIQ_SERVICE_T *service;
4909 +       int use_count = 0, i;
4910 +       i = 0;
4911 +       while ((service = next_service_by_instance(instance->state,
4912 +               instance, &i)) != NULL) {
4913 +               use_count += service->service_use_count;
4914 +               unlock_service(service);
4915 +       }
4916 +       return use_count;
4917 +}
4918 +
4919 +/* read the per-process use-count */
4920 +static int proc_read_use_count(char *page, char **start,
4921 +                              off_t off, int count,
4922 +                              int *eof, void *data)
4923 +{
4924 +       VCHIQ_INSTANCE_T instance = data;
4925 +       int len, use_count;
4926 +
4927 +       use_count = vchiq_instance_get_use_count(instance);
4928 +       len = snprintf(page+off, count, "%d\n", use_count);
4929 +
4930 +       return len;
4931 +}
4932 +
4933 +/* add an instance (process) to the proc entries */
4934 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
4935 +{
4936 +#if 1
4937 +       return 0;
4938 +#else
4939 +       char pidstr[32];
4940 +       struct proc_dir_entry *top, *use_count;
4941 +       struct proc_dir_entry *clients = vchiq_clients_top();
4942 +       int pid = instance->pid;
4943 +
4944 +       snprintf(pidstr, sizeof(pidstr), "%d", pid);
4945 +       top = proc_mkdir(pidstr, clients);
4946 +       if (!top)
4947 +               goto fail_top;
4948 +
4949 +       use_count = create_proc_read_entry("use_count",
4950 +                                          0444, top,
4951 +                                          proc_read_use_count,
4952 +                                          instance);
4953 +       if (!use_count)
4954 +               goto fail_use_count;
4955 +
4956 +       instance->proc_entry = top;
4957 +
4958 +       return 0;
4959 +
4960 +fail_use_count:
4961 +       remove_proc_entry(top->name, clients);
4962 +fail_top:
4963 +       return -ENOMEM;
4964 +#endif
4965 +}
4966 +
4967 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
4968 +{
4969 +#if 0
4970 +       struct proc_dir_entry *clients = vchiq_clients_top();
4971 +       remove_proc_entry("use_count", instance->proc_entry);
4972 +       remove_proc_entry(instance->proc_entry->name, clients);
4973 +#endif
4974 +}
4975 +
4976 +/****************************************************************************
4977 +*
4978 +*   vchiq_exit - called when the module is unloaded.
4979 +*
4980 +***************************************************************************/
4981 +
4982 +static void __exit
4983 +vchiq_exit(void)
4984 +{
4985 +       vchiq_platform_exit(&g_state);
4986 +       device_destroy(vchiq_class, vchiq_devid);
4987 +       class_destroy(vchiq_class);
4988 +       cdev_del(&vchiq_cdev);
4989 +       unregister_chrdev_region(vchiq_devid, 1);
4990 +}
4991 +
4992 +module_init(vchiq_init);
4993 +module_exit(vchiq_exit);
4994 +MODULE_LICENSE("GPL");
4995 +MODULE_AUTHOR("Broadcom Corporation");
4996 --- /dev/null
4997 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
4998 @@ -0,0 +1,212 @@
4999 +/**
5000 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5001 + *
5002 + * Redistribution and use in source and binary forms, with or without
5003 + * modification, are permitted provided that the following conditions
5004 + * are met:
5005 + * 1. Redistributions of source code must retain the above copyright
5006 + *    notice, this list of conditions, and the following disclaimer,
5007 + *    without modification.
5008 + * 2. Redistributions in binary form must reproduce the above copyright
5009 + *    notice, this list of conditions and the following disclaimer in the
5010 + *    documentation and/or other materials provided with the distribution.
5011 + * 3. The names of the above-listed copyright holders may not be used
5012 + *    to endorse or promote products derived from this software without
5013 + *    specific prior written permission.
5014 + *
5015 + * ALTERNATIVELY, this software may be distributed under the terms of the
5016 + * GNU General Public License ("GPL") version 2, as published by the Free
5017 + * Software Foundation.
5018 + *
5019 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5020 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5021 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5022 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5023 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5024 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5025 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5026 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5027 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5028 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5029 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5030 + */
5031 +
5032 +#ifndef VCHIQ_ARM_H
5033 +#define VCHIQ_ARM_H
5034 +
5035 +#include <linux/mutex.h>
5036 +#include <linux/semaphore.h>
5037 +#include <linux/atomic.h>
5038 +#include "vchiq_core.h"
5039 +
5040 +
5041 +enum vc_suspend_status {
5042 +       VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5043 +       VC_SUSPEND_REJECTED = -2,  /* Videocore rejected suspend request */
5044 +       VC_SUSPEND_FAILED = -1,    /* Videocore suspend failed */
5045 +       VC_SUSPEND_IDLE = 0,       /* VC active, no suspend actions */
5046 +       VC_SUSPEND_REQUESTED,      /* User has requested suspend */
5047 +       VC_SUSPEND_IN_PROGRESS,    /* Slot handler has recvd suspend request */
5048 +       VC_SUSPEND_SUSPENDED       /* Videocore suspend succeeded */
5049 +};
5050 +
5051 +enum vc_resume_status {
5052 +       VC_RESUME_FAILED = -1, /* Videocore resume failed */
5053 +       VC_RESUME_IDLE = 0,    /* VC suspended, no resume actions */
5054 +       VC_RESUME_REQUESTED,   /* User has requested resume */
5055 +       VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5056 +       VC_RESUME_RESUMED      /* Videocore resumed successfully (active) */
5057 +};
5058 +
5059 +
5060 +enum USE_TYPE_E {
5061 +       USE_TYPE_SERVICE,
5062 +       USE_TYPE_SERVICE_NO_RESUME,
5063 +       USE_TYPE_VCHIQ
5064 +};
5065 +
5066 +
5067 +
5068 +typedef struct vchiq_arm_state_struct {
5069 +       /* Keepalive-related data */
5070 +       struct task_struct *ka_thread;
5071 +       struct completion ka_evt;
5072 +       atomic_t ka_use_count;
5073 +       atomic_t ka_use_ack_count;
5074 +       atomic_t ka_release_count;
5075 +
5076 +       struct completion vc_suspend_complete;
5077 +       struct completion vc_resume_complete;
5078 +
5079 +       rwlock_t susp_res_lock;
5080 +       enum vc_suspend_status vc_suspend_state;
5081 +       enum vc_resume_status vc_resume_state;
5082 +
5083 +       unsigned int wake_address;
5084 +
5085 +       struct timer_list suspend_timer;
5086 +       int suspend_timer_timeout;
5087 +       int suspend_timer_running;
5088 +
5089 +       /* Global use count for videocore.
5090 +       ** This is equal to the sum of the use counts for all services.  When
5091 +       ** this hits zero the videocore suspend procedure will be initiated.
5092 +       */
5093 +       int videocore_use_count;
5094 +
5095 +       /* Use count to track requests from videocore peer.
5096 +       ** This use count is not associated with a service, so needs to be
5097 +       ** tracked separately in the state.
5098 +       */
5099 +       int peer_use_count;
5100 +
5101 +       /* Flag to indicate whether resume is blocked.  This happens when the
5102 +       ** ARM is suspending.
5103 +       */
5104 +       struct completion resume_blocker;
5105 +       int resume_blocked;
5106 +       struct completion blocked_blocker;
5107 +       int blocked_count;
5108 +
5109 +       int autosuspend_override;
5110 +
5111 +       /* Flag to indicate that the first vchiq connect has made it through.
5112 +       ** This means that both sides should be fully ready, and we should
5113 +       ** be able to suspend after this point.
5114 +       */
5115 +       int first_connect;
5116 +
5117 +       unsigned long long suspend_start_time;
5118 +       unsigned long long sleep_start_time;
5119 +       unsigned long long resume_start_time;
5120 +       unsigned long long last_wake_time;
5121 +
5122 +} VCHIQ_ARM_STATE_T;
5123 +
5124 +extern int vchiq_arm_log_level;
5125 +extern int vchiq_susp_log_level;
5126 +
5127 +extern int __init
5128 +vchiq_platform_init(VCHIQ_STATE_T *state);
5129 +
5130 +extern void __exit
5131 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5132 +
5133 +extern VCHIQ_STATE_T *
5134 +vchiq_get_state(void);
5135 +
5136 +extern VCHIQ_STATUS_T
5137 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5138 +
5139 +extern VCHIQ_STATUS_T
5140 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5141 +
5142 +extern int
5143 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5144 +
5145 +extern VCHIQ_STATUS_T
5146 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5147 +
5148 +extern VCHIQ_STATUS_T
5149 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5150 +
5151 +extern int
5152 +vchiq_check_resume(VCHIQ_STATE_T *state);
5153 +
5154 +extern void
5155 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5156 +
5157 +extern VCHIQ_STATUS_T
5158 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5159 +
5160 +extern VCHIQ_STATUS_T
5161 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5162 +
5163 +extern VCHIQ_STATUS_T
5164 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5165 +
5166 +extern VCHIQ_STATUS_T
5167 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5168 +
5169 +extern int
5170 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5171 +
5172 +extern int
5173 +vchiq_platform_use_suspend_timer(void);
5174 +
5175 +extern void
5176 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5177 +
5178 +extern void
5179 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5180 +
5181 +extern VCHIQ_ARM_STATE_T *
5182 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5183 +
5184 +extern int
5185 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5186 +
5187 +extern VCHIQ_STATUS_T
5188 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5189 +               enum USE_TYPE_E use_type);
5190 +extern VCHIQ_STATUS_T
5191 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5192 +
5193 +void
5194 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5195 +       enum vc_suspend_status new_state);
5196 +
5197 +void
5198 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5199 +       enum vc_resume_status new_state);
5200 +
5201 +void
5202 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5203 +
5204 +extern int vchiq_proc_init(void);
5205 +extern void vchiq_proc_deinit(void);
5206 +extern struct proc_dir_entry *vchiq_proc_top(void);
5207 +extern struct proc_dir_entry *vchiq_clients_top(void);
5208 +
5209 +
5210 +#endif /* VCHIQ_ARM_H */
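
[Editorial sketch, C] The use-count fields above are what keep the videocore awake: vchiq_use_service() and vchiq_release_service() adjust a service's count, which feeds videocore_use_count, and the suspend machinery only engages once that sum drops back to zero. A minimal kernel-side usage sketch follows; the helper name is hypothetical, `handle` is assumed to refer to an already-opened service, and the elided message calls are placeholders.

    VCHIQ_STATUS_T my_do_work(VCHIQ_SERVICE_HANDLE_T handle)
    {
            VCHIQ_STATUS_T status;

            /* Take a use count so the videocore is not suspended
            ** while this work is outstanding. */
            status = vchiq_use_service(handle);
            if (status != VCHIQ_SUCCESS)
                    return status;

            /* ... queue messages / bulk transfers on the service ... */

            /* Drop the use count; once every service's count is zero
            ** the suspend handling above may run. */
            return vchiq_release_service(handle);
    }
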
5211 --- /dev/null
5212 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5213 @@ -0,0 +1,37 @@
5214 +/**
5215 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5216 + *
5217 + * Redistribution and use in source and binary forms, with or without
5218 + * modification, are permitted provided that the following conditions
5219 + * are met:
5220 + * 1. Redistributions of source code must retain the above copyright
5221 + *    notice, this list of conditions, and the following disclaimer,
5222 + *    without modification.
5223 + * 2. Redistributions in binary form must reproduce the above copyright
5224 + *    notice, this list of conditions and the following disclaimer in the
5225 + *    documentation and/or other materials provided with the distribution.
5226 + * 3. The names of the above-listed copyright holders may not be used
5227 + *    to endorse or promote products derived from this software without
5228 + *    specific prior written permission.
5229 + *
5230 + * ALTERNATIVELY, this software may be distributed under the terms of the
5231 + * GNU General Public License ("GPL") version 2, as published by the Free
5232 + * Software Foundation.
5233 + *
5234 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5235 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5236 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5237 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5238 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5239 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5240 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5241 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5242 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5243 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5244 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5245 + */
5246 +
5247 +const char *vchiq_get_build_hostname(void);
5248 +const char *vchiq_get_build_version(void);
5249 +const char *vchiq_get_build_time(void);
5250 +const char *vchiq_get_build_date(void);
5251 --- /dev/null
5252 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5253 @@ -0,0 +1,60 @@
5254 +/**
5255 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5256 + *
5257 + * Redistribution and use in source and binary forms, with or without
5258 + * modification, are permitted provided that the following conditions
5259 + * are met:
5260 + * 1. Redistributions of source code must retain the above copyright
5261 + *    notice, this list of conditions, and the following disclaimer,
5262 + *    without modification.
5263 + * 2. Redistributions in binary form must reproduce the above copyright
5264 + *    notice, this list of conditions and the following disclaimer in the
5265 + *    documentation and/or other materials provided with the distribution.
5266 + * 3. The names of the above-listed copyright holders may not be used
5267 + *    to endorse or promote products derived from this software without
5268 + *    specific prior written permission.
5269 + *
5270 + * ALTERNATIVELY, this software may be distributed under the terms of the
5271 + * GNU General Public License ("GPL") version 2, as published by the Free
5272 + * Software Foundation.
5273 + *
5274 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5275 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5276 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5277 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5278 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5279 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5280 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5281 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5282 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5283 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5284 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5285 + */
5286 +
5287 +#ifndef VCHIQ_CFG_H
5288 +#define VCHIQ_CFG_H
5289 +
5290 +#define VCHIQ_MAGIC              VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5291 +/* The version of VCHIQ - change with any non-trivial change */
5292 +#define VCHIQ_VERSION            6
5293 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5294 +** incompatible change */
5295 +#define VCHIQ_VERSION_MIN        3
5296 +
5297 +#define VCHIQ_MAX_STATES         1
5298 +#define VCHIQ_MAX_SERVICES       4096
5299 +#define VCHIQ_MAX_SLOTS          128
5300 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5301 +
5302 +#define VCHIQ_NUM_CURRENT_BULKS        32
5303 +#define VCHIQ_NUM_SERVICE_BULKS        4
5304 +
5305 +#ifndef VCHIQ_ENABLE_DEBUG
5306 +#define VCHIQ_ENABLE_DEBUG             1
5307 +#endif
5308 +
5309 +#ifndef VCHIQ_ENABLE_STATS
5310 +#define VCHIQ_ENABLE_STATS             1
5311 +#endif
5312 +
5313 +#endif /* VCHIQ_CFG_H */
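
[Editorial sketch, C] VCHIQ_VERSION and VCHIQ_VERSION_MIN express the compatibility rule stated above: two ends are expected to interoperate as long as each side's version is no older than the other's minimum. A hedged, illustrative check (the helper below is not part of the driver; it assumes the peer's version pair was taken from its open payload):

    static int vchiq_version_compatible(short peer_version,
            short peer_version_min)
    {
            return (peer_version >= VCHIQ_VERSION_MIN) &&
                    (VCHIQ_VERSION >= peer_version_min);
    }
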
5314 --- /dev/null
5315 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5316 @@ -0,0 +1,119 @@
5317 +/**
5318 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5319 + *
5320 + * Redistribution and use in source and binary forms, with or without
5321 + * modification, are permitted provided that the following conditions
5322 + * are met:
5323 + * 1. Redistributions of source code must retain the above copyright
5324 + *    notice, this list of conditions, and the following disclaimer,
5325 + *    without modification.
5326 + * 2. Redistributions in binary form must reproduce the above copyright
5327 + *    notice, this list of conditions and the following disclaimer in the
5328 + *    documentation and/or other materials provided with the distribution.
5329 + * 3. The names of the above-listed copyright holders may not be used
5330 + *    to endorse or promote products derived from this software without
5331 + *    specific prior written permission.
5332 + *
5333 + * ALTERNATIVELY, this software may be distributed under the terms of the
5334 + * GNU General Public License ("GPL") version 2, as published by the Free
5335 + * Software Foundation.
5336 + *
5337 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5338 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5339 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5340 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5341 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5342 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5343 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5344 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5345 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5346 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5347 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5348 + */
5349 +
5350 +#include "vchiq_connected.h"
5351 +#include "vchiq_core.h"
5352 +#include <linux/module.h>
5353 +#include <linux/mutex.h>
5354 +
5355 +#define  MAX_CALLBACKS  10
5356 +
5357 +static   int                        g_connected;
5358 +static   int                        g_num_deferred_callbacks;
5359 +static   VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5360 +static   int                        g_once_init;
5361 +static   struct mutex               g_connected_mutex;
5362 +
5363 +/****************************************************************************
5364 +*
5365 +* Function to initialize our lock.
5366 +*
5367 +***************************************************************************/
5368 +
5369 +static void connected_init(void)
5370 +{
5371 +       if (!g_once_init) {
5372 +               mutex_init(&g_connected_mutex);
5373 +               g_once_init = 1;
5374 +       }
5375 +}
5376 +
5377 +/****************************************************************************
5378 +*
5379 +* This function is used to defer initialization until the vchiq stack is
5380 +* initialized. If the stack is already initialized, then the callback will
5381 +* be made immediately, otherwise it will be deferred until
5382 +* vchiq_call_connected_callbacks is called.
5383 +*
5384 +***************************************************************************/
5385 +
5386 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5387 +{
5388 +       connected_init();
5389 +
5390 +       if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5391 +               return;
5392 +
5393 +       if (g_connected)
5394 +               /* We're already connected. Call the callback immediately. */
5395 +
5396 +               callback();
5397 +       else {
5398 +               if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5399 +                       vchiq_log_error(vchiq_core_log_level,
5400 +                               "There are already %d callbacks registered - "
5401 +                               "please increase MAX_CALLBACKS",
5402 +                               g_num_deferred_callbacks);
5403 +               else {
5404 +                       g_deferred_callback[g_num_deferred_callbacks] =
5405 +                               callback;
5406 +                       g_num_deferred_callbacks++;
5407 +               }
5408 +       }
5409 +       mutex_unlock(&g_connected_mutex);
5410 +}
5411 +
5412 +/****************************************************************************
5413 +*
5414 +* This function is called by the vchiq stack once it has been connected to
5415 +* the videocore and clients can start to use the stack.
5416 +*
5417 +***************************************************************************/
5418 +
5419 +void vchiq_call_connected_callbacks(void)
5420 +{
5421 +       int i;
5422 +
5423 +       connected_init();
5424 +
5425 +       if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5426 +               return;
5427 +
5428 +       for (i = 0; i < g_num_deferred_callbacks; i++)
5429 +               g_deferred_callback[i]();
5430 +
5431 +       g_num_deferred_callbacks = 0;
5432 +       g_connected = 1;
5433 +       mutex_unlock(&g_connected_mutex);
5434 +}
5435 +EXPORT_SYMBOL(vchiq_add_connected_callback);
5436 --- /dev/null
5437 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5438 @@ -0,0 +1,51 @@
5439 +/**
5440 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5441 + *
5442 + * Redistribution and use in source and binary forms, with or without
5443 + * modification, are permitted provided that the following conditions
5444 + * are met:
5445 + * 1. Redistributions of source code must retain the above copyright
5446 + *    notice, this list of conditions, and the following disclaimer,
5447 + *    without modification.
5448 + * 2. Redistributions in binary form must reproduce the above copyright
5449 + *    notice, this list of conditions and the following disclaimer in the
5450 + *    documentation and/or other materials provided with the distribution.
5451 + * 3. The names of the above-listed copyright holders may not be used
5452 + *    to endorse or promote products derived from this software without
5453 + *    specific prior written permission.
5454 + *
5455 + * ALTERNATIVELY, this software may be distributed under the terms of the
5456 + * GNU General Public License ("GPL") version 2, as published by the Free
5457 + * Software Foundation.
5458 + *
5459 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5460 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5461 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5462 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5463 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5464 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5465 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5466 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5467 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5468 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5469 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5470 + */
5471 +
5472 +#ifndef VCHIQ_CONNECTED_H
5473 +#define VCHIQ_CONNECTED_H
5474 +
5475 +/* ---- Include Files ----------------------------------------------------- */
5476 +
5477 +/* ---- Constants and Types ---------------------------------------------- */
5478 +
5479 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5480 +
5481 +/* ---- Variable Externs ------------------------------------------------- */
5482 +
5483 +/* ---- Function Prototypes ---------------------------------------------- */
5484 +
5485 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5486 +void vchiq_call_connected_callbacks(void);
5487 +
5488 +#endif /* VCHIQ_CONNECTED_H */
5489 +
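
[Editorial sketch, C] These two calls let a dependent driver postpone its own bring-up until VCHIQ has connected to the videocore: if the connection is already up the callback runs immediately, otherwise it is queued (up to MAX_CALLBACKS) and replayed by vchiq_call_connected_callbacks(). A minimal client sketch with hypothetical names:

    #include "vchiq_connected.h"

    /* Runs once VCHIQ is connected; per VCHIQ_CONNECTED_CALLBACK_T the
    ** callback takes no arguments and returns void. */
    static void my_client_connected(void)
    {
            /* safe to start using VCHIQ services from here */
    }

    static int __init my_client_init(void)
    {
            vchiq_add_connected_callback(my_client_connected);
            return 0;
    }
    module_init(my_client_init);
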
5490 --- /dev/null
5491 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5492 @@ -0,0 +1,3824 @@
5493 +/**
5494 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5495 + *
5496 + * Redistribution and use in source and binary forms, with or without
5497 + * modification, are permitted provided that the following conditions
5498 + * are met:
5499 + * 1. Redistributions of source code must retain the above copyright
5500 + *    notice, this list of conditions, and the following disclaimer,
5501 + *    without modification.
5502 + * 2. Redistributions in binary form must reproduce the above copyright
5503 + *    notice, this list of conditions and the following disclaimer in the
5504 + *    documentation and/or other materials provided with the distribution.
5505 + * 3. The names of the above-listed copyright holders may not be used
5506 + *    to endorse or promote products derived from this software without
5507 + *    specific prior written permission.
5508 + *
5509 + * ALTERNATIVELY, this software may be distributed under the terms of the
5510 + * GNU General Public License ("GPL") version 2, as published by the Free
5511 + * Software Foundation.
5512 + *
5513 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5514 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5515 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5516 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5517 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5518 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5519 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5520 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5521 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5522 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5523 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5524 + */
5525 +
5526 +#include "vchiq_core.h"
5527 +
5528 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5529 +
5530 +#define HANDLE_STATE_SHIFT 12
5531 +
5532 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5533 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5534 +#define SLOT_INDEX_FROM_DATA(state, data) \
5535 +       (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5536 +       VCHIQ_SLOT_SIZE)
5537 +#define SLOT_INDEX_FROM_INFO(state, info) \
5538 +       ((unsigned int)(info - state->slot_info))
5539 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5540 +       ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5541 +
5542 +
5543 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5544 +
5545 +
5546 +struct vchiq_open_payload {
5547 +       int fourcc;
5548 +       int client_id;
5549 +       short version;
5550 +       short version_min;
5551 +};
5552 +
5553 +struct vchiq_openack_payload {
5554 +       short version;
5555 +};
5556 +
5557 +/* we require this for consistency between endpoints */
5558 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5559 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5560 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5561 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5562 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5563 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5564 +
5565 +/* Run time control of log level, based on KERN_XXX level. */
5566 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5567 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5568 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5569 +
5570 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5571 +
5572 +static DEFINE_SPINLOCK(service_spinlock);
5573 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5574 +DEFINE_SPINLOCK(quota_spinlock);
5575 +
5576 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5577 +static unsigned int handle_seq;
5578 +
5579 +static const char *const srvstate_names[] = {
5580 +       "FREE",
5581 +       "HIDDEN",
5582 +       "LISTENING",
5583 +       "OPENING",
5584 +       "OPEN",
5585 +       "OPENSYNC",
5586 +       "CLOSESENT",
5587 +       "CLOSERECVD",
5588 +       "CLOSEWAIT",
5589 +       "CLOSED"
5590 +};
5591 +
5592 +static const char *const reason_names[] = {
5593 +       "SERVICE_OPENED",
5594 +       "SERVICE_CLOSED",
5595 +       "MESSAGE_AVAILABLE",
5596 +       "BULK_TRANSMIT_DONE",
5597 +       "BULK_RECEIVE_DONE",
5598 +       "BULK_TRANSMIT_ABORTED",
5599 +       "BULK_RECEIVE_ABORTED"
5600 +};
5601 +
5602 +static const char *const conn_state_names[] = {
5603 +       "DISCONNECTED",
5604 +       "CONNECTING",
5605 +       "CONNECTED",
5606 +       "PAUSING",
5607 +       "PAUSE_SENT",
5608 +       "PAUSED",
5609 +       "RESUMING",
5610 +       "PAUSE_TIMEOUT",
5611 +       "RESUME_TIMEOUT"
5612 +};
5613 +
5614 +
5615 +static void
5616 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5617 +
5618 +static const char *msg_type_str(unsigned int msg_type)
5619 +{
5620 +       switch (msg_type) {
5621 +       case VCHIQ_MSG_PADDING:       return "PADDING";
5622 +       case VCHIQ_MSG_CONNECT:       return "CONNECT";
5623 +       case VCHIQ_MSG_OPEN:          return "OPEN";
5624 +       case VCHIQ_MSG_OPENACK:       return "OPENACK";
5625 +       case VCHIQ_MSG_CLOSE:         return "CLOSE";
5626 +       case VCHIQ_MSG_DATA:          return "DATA";
5627 +       case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
5628 +       case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
5629 +       case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
5630 +       case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
5631 +       case VCHIQ_MSG_PAUSE:         return "PAUSE";
5632 +       case VCHIQ_MSG_RESUME:        return "RESUME";
5633 +       case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
5634 +       case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
5635 +       case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
5636 +       }
5637 +       return "???";
5638 +}
5639 +
5640 +static inline void
5641 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5642 +{
5643 +       vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5644 +               service->state->id, service->localport,
5645 +               srvstate_names[service->srvstate],
5646 +               srvstate_names[newstate]);
5647 +       service->srvstate = newstate;
5648 +}
5649 +
5650 +VCHIQ_SERVICE_T *
5651 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5652 +{
5653 +       VCHIQ_SERVICE_T *service;
5654 +
5655 +       spin_lock(&service_spinlock);
5656 +       service = handle_to_service(handle);
5657 +       if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5658 +               (service->handle == handle)) {
5659 +               BUG_ON(service->ref_count == 0);
5660 +               service->ref_count++;
5661 +       } else
5662 +               service = NULL;
5663 +       spin_unlock(&service_spinlock);
5664 +
5665 +       if (!service)
5666 +               vchiq_log_info(vchiq_core_log_level,
5667 +                       "Invalid service handle 0x%x", handle);
5668 +
5669 +       return service;
5670 +}
5671 +
5672 +VCHIQ_SERVICE_T *
5673 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5674 +{
5675 +       VCHIQ_SERVICE_T *service = NULL;
5676 +       if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5677 +               spin_lock(&service_spinlock);
5678 +               service = state->services[localport];
5679 +               if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5680 +                       BUG_ON(service->ref_count == 0);
5681 +                       service->ref_count++;
5682 +               } else
5683 +                       service = NULL;
5684 +               spin_unlock(&service_spinlock);
5685 +       }
5686 +
5687 +       if (!service)
5688 +               vchiq_log_info(vchiq_core_log_level,
5689 +                       "Invalid port %d", localport);
5690 +
5691 +       return service;
5692 +}
5693 +
5694 +VCHIQ_SERVICE_T *
5695 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5696 +       VCHIQ_SERVICE_HANDLE_T handle) {
5697 +       VCHIQ_SERVICE_T *service;
5698 +
5699 +       spin_lock(&service_spinlock);
5700 +       service = handle_to_service(handle);
5701 +       if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5702 +               (service->handle == handle) &&
5703 +               (service->instance == instance)) {
5704 +               BUG_ON(service->ref_count == 0);
5705 +               service->ref_count++;
5706 +       } else
5707 +               service = NULL;
5708 +       spin_unlock(&service_spinlock);
5709 +
5710 +       if (!service)
5711 +               vchiq_log_info(vchiq_core_log_level,
5712 +                       "Invalid service handle 0x%x", handle);
5713 +
5714 +       return service;
5715 +}
5716 +
5717 +VCHIQ_SERVICE_T *
5718 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5719 +       int *pidx)
5720 +{
5721 +       VCHIQ_SERVICE_T *service = NULL;
5722 +       int idx = *pidx;
5723 +
5724 +       spin_lock(&service_spinlock);
5725 +       while (idx < state->unused_service) {
5726 +               VCHIQ_SERVICE_T *srv = state->services[idx++];
5727 +               if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5728 +                       (srv->instance == instance)) {
5729 +                       service = srv;
5730 +                       BUG_ON(service->ref_count == 0);
5731 +                       service->ref_count++;
5732 +                       break;
5733 +               }
5734 +       }
5735 +       spin_unlock(&service_spinlock);
5736 +
5737 +       *pidx = idx;
5738 +
5739 +       return service;
5740 +}
5741 +
5742 +void
5743 +lock_service(VCHIQ_SERVICE_T *service)
5744 +{
5745 +       spin_lock(&service_spinlock);
5746 +       BUG_ON(!service || (service->ref_count == 0));
5747 +       if (service)
5748 +               service->ref_count++;
5749 +       spin_unlock(&service_spinlock);
5750 +}
5751 +
5752 +void
5753 +unlock_service(VCHIQ_SERVICE_T *service)
5754 +{
5755 +       VCHIQ_STATE_T *state = service->state;
5756 +       spin_lock(&service_spinlock);
5757 +       BUG_ON(!service || (service->ref_count == 0));
5758 +       if (service && service->ref_count) {
5759 +               service->ref_count--;
5760 +               if (!service->ref_count) {
5761 +                       BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
5762 +                       state->services[service->localport] = NULL;
5763 +               } else
5764 +                       service = NULL;
5765 +       }
5766 +       spin_unlock(&service_spinlock);
5767 +
5768 +       if (service && service->userdata_term)
5769 +               service->userdata_term(service->base.userdata);
5770 +
5771 +       kfree(service);
5772 +}
5773 +
5774 +int
5775 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
5776 +{
5777 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5778 +       int id;
5779 +
5780 +       id = service ? service->client_id : 0;
5781 +       if (service)
5782 +               unlock_service(service);
5783 +
5784 +       return id;
5785 +}
5786 +
5787 +void *
5788 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
5789 +{
5790 +       VCHIQ_SERVICE_T *service = handle_to_service(handle);
5791 +
5792 +       return service ? service->base.userdata : NULL;
5793 +}
5794 +
5795 +int
5796 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
5797 +{
5798 +       VCHIQ_SERVICE_T *service = handle_to_service(handle);
5799 +
5800 +       return service ? service->base.fourcc : 0;
5801 +}
5802 +
5803 +static void
5804 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
5805 +{
5806 +       VCHIQ_STATE_T *state = service->state;
5807 +       VCHIQ_SERVICE_QUOTA_T *service_quota;
5808 +
5809 +       service->closing = 1;
5810 +
5811 +       /* Synchronise with other threads. */
5812 +       mutex_lock(&state->recycle_mutex);
5813 +       mutex_unlock(&state->recycle_mutex);
5814 +       if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
5815 +               /* If we're pausing then the slot_mutex is held until resume
5816 +                * by the slot handler.  Therefore don't try to acquire this
5817 +                * mutex if we're the slot handler and in the pause sent state.
5818 +                * We don't need to in this case anyway. */
5819 +               mutex_lock(&state->slot_mutex);
5820 +               mutex_unlock(&state->slot_mutex);
5821 +       }
5822 +
5823 +       /* Unblock any sending thread. */
5824 +       service_quota = &state->service_quotas[service->localport];
5825 +       up(&service_quota->quota_event);
5826 +}
5827 +
5828 +static void
5829 +mark_service_closing(VCHIQ_SERVICE_T *service)
5830 +{
5831 +       mark_service_closing_internal(service, 0);
5832 +}
5833 +
5834 +static inline VCHIQ_STATUS_T
5835 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
5836 +       VCHIQ_HEADER_T *header, void *bulk_userdata)
5837 +{
5838 +       VCHIQ_STATUS_T status;
5839 +       vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
5840 +               service->state->id, service->localport, reason_names[reason],
5841 +               (unsigned int)header, (unsigned int)bulk_userdata);
5842 +       status = service->base.callback(reason, header, service->handle,
5843 +               bulk_userdata);
5844 +       if (status == VCHIQ_ERROR) {
5845 +               vchiq_log_warning(vchiq_core_log_level,
5846 +                       "%d: ignoring ERROR from callback to service %x",
5847 +                       service->state->id, service->handle);
5848 +               status = VCHIQ_SUCCESS;
5849 +       }
5850 +       return status;
5851 +}
5852 +
5853 +inline void
5854 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
5855 +{
5856 +       VCHIQ_CONNSTATE_T oldstate = state->conn_state;
5857 +       vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
5858 +               conn_state_names[oldstate],
5859 +               conn_state_names[newstate]);
5860 +       state->conn_state = newstate;
5861 +       vchiq_platform_conn_state_changed(state, oldstate, newstate);
5862 +}
5863 +
5864 +static inline void
5865 +remote_event_create(REMOTE_EVENT_T *event)
5866 +{
5867 +       event->armed = 0;
5868 +       /* Don't clear the 'fired' flag because it may already have been set
5869 +       ** by the other side. */
5870 +       sema_init(event->event, 0);
5871 +}
5872 +
5873 +static inline void
5874 +remote_event_destroy(REMOTE_EVENT_T *event)
5875 +{
5876 +       (void)event;
5877 +}
5878 +
5879 +static inline int
5880 +remote_event_wait(REMOTE_EVENT_T *event)
5881 +{
5882 +       if (!event->fired) {
5883 +               event->armed = 1;
5884 +               dsb();
5885 +               if (!event->fired) {
5886 +                       if (down_interruptible(event->event) != 0) {
5887 +                               event->armed = 0;
5888 +                               return 0;
5889 +                       }
5890 +               }
5891 +               event->armed = 0;
5892 +               wmb();
5893 +       }
5894 +
5895 +       event->fired = 0;
5896 +       return 1;
5897 +}
5898 +
5899 +static inline void
5900 +remote_event_signal_local(REMOTE_EVENT_T *event)
5901 +{
5902 +       event->armed = 0;
5903 +       up(event->event);
5904 +}
5905 +
5906 +static inline void
5907 +remote_event_poll(REMOTE_EVENT_T *event)
5908 +{
5909 +       if (event->fired && event->armed)
5910 +               remote_event_signal_local(event);
5911 +}
5912 +
5913 +void
5914 +remote_event_pollall(VCHIQ_STATE_T *state)
5915 +{
5916 +       remote_event_poll(&state->local->sync_trigger);
5917 +       remote_event_poll(&state->local->sync_release);
5918 +       remote_event_poll(&state->local->trigger);
5919 +       remote_event_poll(&state->local->recycle);
5920 +}
5921 +
5922 +/* Round up message sizes so that any space at the end of a slot is always big
5923 +** enough for a header. This relies on header size being a power of two, which
5924 +** has been verified earlier by a static assertion. */
5925 +
5926 +static inline unsigned int
5927 +calc_stride(unsigned int size)
5928 +{
5929 +       /* Allow room for the header */
5930 +       size += sizeof(VCHIQ_HEADER_T);
5931 +
5932 +       /* Round up */
5933 +       return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
5934 +               - 1);
5935 +}
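/* Worked example, using the sizeof(VCHIQ_HEADER_T) == 8 verified by the
** static assertion above:
**   calc_stride(0)  -> 8    (a bare header)
**   calc_stride(1)  -> 16   (any payload rounds up to the next multiple
**                            of the header size)
**   calc_stride(8)  -> 16
**   calc_stride(24) -> 32
** so message boundaries stay header-aligned and the tail of a slot can
** always hold a padding header. */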
5936 +
5937 +/* Called by the slot handler thread */
5938 +static VCHIQ_SERVICE_T *
5939 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
5940 +{
5941 +       int i;
5942 +
5943 +       WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
5944 +
5945 +       for (i = 0; i < state->unused_service; i++) {
5946 +               VCHIQ_SERVICE_T *service = state->services[i];
5947 +               if (service &&
5948 +                       (service->public_fourcc == fourcc) &&
5949 +                       ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
5950 +                       ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
5951 +                       (service->remoteport == VCHIQ_PORT_FREE)))) {
5952 +                       lock_service(service);
5953 +                       return service;
5954 +               }
5955 +       }
5956 +
5957 +       return NULL;
5958 +}
5959 +
5960 +/* Called by the slot handler thread */
5961 +static VCHIQ_SERVICE_T *
5962 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
5963 +{
5964 +       int i;
5965 +       for (i = 0; i < state->unused_service; i++) {
5966 +               VCHIQ_SERVICE_T *service = state->services[i];
5967 +               if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
5968 +                       && (service->remoteport == port)) {
5969 +                       lock_service(service);
5970 +                       return service;
5971 +               }
5972 +       }
5973 +       return NULL;
5974 +}
5975 +
5976 +inline void
5977 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
5978 +{
5979 +       uint32_t value;
5980 +
5981 +       if (service) {
5982 +               do {
5983 +                       value = atomic_read(&service->poll_flags);
5984 +               } while (atomic_cmpxchg(&service->poll_flags, value,
5985 +                       value | (1 << poll_type)) != value);
5986 +
5987 +               do {
5988 +                       value = atomic_read(&state->poll_services[
5989 +                               service->localport>>5]);
5990 +               } while (atomic_cmpxchg(
5991 +                       &state->poll_services[service->localport>>5],
5992 +                       value, value | (1 << (service->localport & 0x1f)))
5993 +                       != value);
5994 +       }
5995 +
5996 +       state->poll_needed = 1;
5997 +       wmb();
5998 +
5999 +       /* ... and ensure the slot handler runs. */
6000 +       remote_event_signal_local(&state->local->trigger);
6001 +}
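/* The poll bookkeeping above packs one bit per local port into 32-bit
** words: localport >> 5 selects the word and localport & 0x1f the bit,
** so (for example) localport 37 maps to word 1, bit 5.  The cmpxchg
** loops make the bit-sets atomic against concurrent callers. */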
6002 +
6003 +/* Called from queue_message, by the slot handler and application threads,
6004 +** with slot_mutex held */
6005 +static VCHIQ_HEADER_T *
6006 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6007 +{
6008 +       VCHIQ_SHARED_STATE_T *local = state->local;
6009 +       int tx_pos = state->local_tx_pos;
6010 +       int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6011 +
6012 +       if (space > slot_space) {
6013 +               VCHIQ_HEADER_T *header;
6014 +               /* Fill the remaining space with padding */
6015 +               WARN_ON(state->tx_data == NULL);
6016 +               header = (VCHIQ_HEADER_T *)
6017 +                       (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6018 +               header->msgid = VCHIQ_MSGID_PADDING;
6019 +               header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6020 +
6021 +               tx_pos += slot_space;
6022 +       }
6023 +
6024 +       /* If necessary, get the next slot. */
6025 +       if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6026 +               int slot_index;
6027 +
6028 +               /* If there is no free slot... */
6029 +
6030 +               if (down_trylock(&state->slot_available_event) != 0) {
6031 +                       /* ...wait for one. */
6032 +
6033 +                       VCHIQ_STATS_INC(state, slot_stalls);
6034 +
6035 +                       /* But first, flush through the last slot. */
6036 +                       state->local_tx_pos = tx_pos;
6037 +                       local->tx_pos = tx_pos;
6038 +                       remote_event_signal(&state->remote->trigger);
6039 +
6040 +                       if (!is_blocking ||
6041 +                               (down_interruptible(
6042 +                               &state->slot_available_event) != 0))
6043 +                               return NULL; /* No space available */
6044 +               }
6045 +
6046 +               BUG_ON(tx_pos ==
6047 +                       (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6048 +
6049 +               slot_index = local->slot_queue[
6050 +                       SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6051 +                       VCHIQ_SLOT_QUEUE_MASK];
6052 +               state->tx_data =
6053 +                       (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6054 +       }
6055 +
6056 +       state->local_tx_pos = tx_pos + space;
6057 +
6058 +       return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6059 +}
6060 +
6061 +/* Called by the recycle thread. */
6062 +static void
6063 +process_free_queue(VCHIQ_STATE_T *state)
6064 +{
6065 +       VCHIQ_SHARED_STATE_T *local = state->local;
6066 +       BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6067 +       int slot_queue_available;
6068 +
6069 +       /* Use a read memory barrier to ensure that any state that may have
6070 +       ** been modified by another thread is not masked by stale prefetched
6071 +       ** values. */
6072 +       rmb();
6073 +
6074 +       /* Find slots which have been freed by the other side, and return them
6075 +       ** to the available queue. */
6076 +       slot_queue_available = state->slot_queue_available;
6077 +
6078 +       while (slot_queue_available != local->slot_queue_recycle) {
6079 +               unsigned int pos;
6080 +               int slot_index = local->slot_queue[slot_queue_available++ &
6081 +                       VCHIQ_SLOT_QUEUE_MASK];
6082 +               char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6083 +               int data_found = 0;
6084 +
6085 +               vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6086 +                       state->id, slot_index, (unsigned int)data,
6087 +                       local->slot_queue_recycle, slot_queue_available);
6088 +
6089 +               /* Initialise the bitmask for services which have used this
6090 +               ** slot */
6091 +               BITSET_ZERO(service_found);
6092 +
6093 +               pos = 0;
6094 +
6095 +               while (pos < VCHIQ_SLOT_SIZE) {
6096 +                       VCHIQ_HEADER_T *header =
6097 +                               (VCHIQ_HEADER_T *)(data + pos);
6098 +                       int msgid = header->msgid;
6099 +                       if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6100 +                               int port = VCHIQ_MSG_SRCPORT(msgid);
6101 +                               VCHIQ_SERVICE_QUOTA_T *service_quota =
6102 +                                       &state->service_quotas[port];
6103 +                               int count;
6104 +                               spin_lock(&quota_spinlock);
6105 +                               count = service_quota->message_use_count;
6106 +                               if (count > 0)
6107 +                                       service_quota->message_use_count =
6108 +                                               count - 1;
6109 +                               spin_unlock(&quota_spinlock);
6110 +
6111 +                               if (count == service_quota->message_quota)
6112 +                                       /* Signal the service that it
6113 +                                       ** has dropped below its quota
6114 +                                       */
6115 +                                       up(&service_quota->quota_event);
6116 +                               else if (count == 0) {
6117 +                                       vchiq_log_error(vchiq_core_log_level,
6118 +                                               "service %d "
6119 +                                               "message_use_count=%d "
6120 +                                               "(header %x, msgid %x, "
6121 +                                               "header->msgid %x, "
6122 +                                               "header->size %x)",
6123 +                                               port,
6124 +                                               service_quota->
6125 +                                                       message_use_count,
6126 +                                               (unsigned int)header, msgid,
6127 +                                               header->msgid,
6128 +                                               header->size);
6129 +                                       WARN(1, "invalid message use count\n");
6130 +                               }
6131 +                               if (!BITSET_IS_SET(service_found, port)) {
6132 +                                       /* Set the found bit for this service */
6133 +                                       BITSET_SET(service_found, port);
6134 +
6135 +                                       spin_lock(&quota_spinlock);
6136 +                                       count = service_quota->slot_use_count;
6137 +                                       if (count > 0)
6138 +                                               service_quota->slot_use_count =
6139 +                                                       count - 1;
6140 +                                       spin_unlock(&quota_spinlock);
6141 +
6142 +                                       if (count > 0) {
6143 +                                               /* Signal the service in case
6144 +                                               ** it has dropped below its
6145 +                                               ** quota */
6146 +                                               up(&service_quota->quota_event);
6147 +                                               vchiq_log_trace(
6148 +                                                       vchiq_core_log_level,
6149 +                                                       "%d: pfq:%d %x@%x - "
6150 +                                                       "slot_use->%d",
6151 +                                                       state->id, port,
6152 +                                                       header->size,
6153 +                                                       (unsigned int)header,
6154 +                                                       count - 1);
6155 +                                       } else {
6156 +                                               vchiq_log_error(
6157 +                                                       vchiq_core_log_level,
6158 +                                                               "service %d "
6159 +                                                               "slot_use_count"
6160 +                                                               "=%d (header %x"
6161 +                                                               ", msgid %x, "
6162 +                                                               "header->msgid"
6163 +                                                               " %x, header->"
6164 +                                                               "size %x)",
6165 +                                                       port, count,
6166 +                                                       (unsigned int)header,
6167 +                                                       msgid,
6168 +                                                       header->msgid,
6169 +                                                       header->size);
6170 +                                               WARN(1, "bad slot use count\n");
6171 +                                       }
6172 +                               }
6173 +
6174 +                               data_found = 1;
6175 +                       }
6176 +
6177 +                       pos += calc_stride(header->size);
6178 +                       if (pos > VCHIQ_SLOT_SIZE) {
6179 +                               vchiq_log_error(vchiq_core_log_level,
6180 +                                       "pfq - pos %x: header %x, msgid %x, "
6181 +                                       "header->msgid %x, header->size %x",
6182 +                                       pos, (unsigned int)header, msgid,
6183 +                                       header->msgid, header->size);
6184 +                               WARN(1, "invalid slot position\n");
6185 +                       }
6186 +               }
6187 +
6188 +               if (data_found) {
6189 +                       int count;
6190 +                       spin_lock(&quota_spinlock);
6191 +                       count = state->data_use_count;
6192 +                       if (count > 0)
6193 +                               state->data_use_count =
6194 +                                       count - 1;
6195 +                       spin_unlock(&quota_spinlock);
6196 +                       if (count == state->data_quota)
6197 +                               up(&state->data_quota_event);
6198 +               }
6199 +
6200 +               state->slot_queue_available = slot_queue_available;
6201 +               up(&state->slot_available_event);
6202 +       }
6203 +}
6204 +
6205 +/* Called by the slot handler and application threads */
6206 +static VCHIQ_STATUS_T
6207 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6208 +       int msgid, const VCHIQ_ELEMENT_T *elements,
6209 +       int count, int size, int is_blocking)
6210 +{
6211 +       VCHIQ_SHARED_STATE_T *local;
6212 +       VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6213 +       VCHIQ_HEADER_T *header;
6214 +       int type = VCHIQ_MSG_TYPE(msgid);
6215 +
6216 +       unsigned int stride;
6217 +
6218 +       local = state->local;
6219 +
6220 +       stride = calc_stride(size);
6221 +
6222 +       WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6223 +
6224 +       if ((type != VCHIQ_MSG_RESUME) &&
6225 +               (mutex_lock_interruptible(&state->slot_mutex) != 0))
6226 +               return VCHIQ_RETRY;
6227 +
6228 +       if (type == VCHIQ_MSG_DATA) {
6229 +               int tx_end_index;
6230 +
6231 +               BUG_ON(!service);
6232 +
6233 +               if (service->closing) {
6234 +                       /* The service has been closed */
6235 +                       mutex_unlock(&state->slot_mutex);
6236 +                       return VCHIQ_ERROR;
6237 +               }
6238 +
6239 +               service_quota = &state->service_quotas[service->localport];
6240 +
6241 +               spin_lock(&quota_spinlock);
6242 +
6243 +               /* Ensure this service doesn't use more than its quota of
6244 +               ** messages or slots */
6245 +               tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6246 +                       state->local_tx_pos + stride - 1);
6247 +
6248 +               /* Ensure data messages don't use more than their quota of
6249 +               ** slots */
6250 +               while ((tx_end_index != state->previous_data_index) &&
6251 +                       (state->data_use_count == state->data_quota)) {
6252 +                       VCHIQ_STATS_INC(state, data_stalls);
6253 +                       spin_unlock(&quota_spinlock);
6254 +                       mutex_unlock(&state->slot_mutex);
6255 +
6256 +                       if (down_interruptible(&state->data_quota_event)
6257 +                               != 0)
6258 +                               return VCHIQ_RETRY;
6259 +
6260 +                       mutex_lock(&state->slot_mutex);
6261 +                       spin_lock(&quota_spinlock);
6262 +                       tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6263 +                               state->local_tx_pos + stride - 1);
6264 +                       if ((tx_end_index == state->previous_data_index) ||
6265 +                               (state->data_use_count < state->data_quota)) {
6266 +                               /* Pass the signal on to other waiters */
6267 +                               up(&state->data_quota_event);
6268 +                               break;
6269 +                       }
6270 +               }
6271 +
6272 +               while ((service_quota->message_use_count ==
6273 +                               service_quota->message_quota) ||
6274 +                       ((tx_end_index != service_quota->previous_tx_index) &&
6275 +                       (service_quota->slot_use_count ==
6276 +                               service_quota->slot_quota))) {
6277 +                       spin_unlock(&quota_spinlock);
6278 +                       vchiq_log_trace(vchiq_core_log_level,
6279 +                               "%d: qm:%d %s,%x - quota stall "
6280 +                               "(msg %d, slot %d)",
6281 +                               state->id, service->localport,
6282 +                               msg_type_str(type), size,
6283 +                               service_quota->message_use_count,
6284 +                               service_quota->slot_use_count);
6285 +                       VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6286 +                       mutex_unlock(&state->slot_mutex);
6287 +                       if (down_interruptible(&service_quota->quota_event)
6288 +                               != 0)
6289 +                               return VCHIQ_RETRY;
6290 +                       if (service->closing)
6291 +                               return VCHIQ_ERROR;
6292 +                       if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6293 +                               return VCHIQ_RETRY;
6294 +                       if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6295 +                               /* The service has been closed */
6296 +                               mutex_unlock(&state->slot_mutex);
6297 +                               return VCHIQ_ERROR;
6298 +                       }
6299 +                       spin_lock(&quota_spinlock);
6300 +                       tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6301 +                               state->local_tx_pos + stride - 1);
6302 +               }
6303 +
6304 +               spin_unlock(&quota_spinlock);
6305 +       }
6306 +
6307 +       header = reserve_space(state, stride, is_blocking);
6308 +
6309 +       if (!header) {
6310 +               if (service)
6311 +                       VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6312 +               mutex_unlock(&state->slot_mutex);
6313 +               return VCHIQ_RETRY;
6314 +       }
6315 +
6316 +       if (type == VCHIQ_MSG_DATA) {
6317 +               int i, pos;
6318 +               int tx_end_index;
6319 +               int slot_use_count;
6320 +
6321 +               vchiq_log_info(vchiq_core_log_level,
6322 +                       "%d: qm %s@%x,%x (%d->%d)",
6323 +                       state->id,
6324 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6325 +                       (unsigned int)header, size,
6326 +                       VCHIQ_MSG_SRCPORT(msgid),
6327 +                       VCHIQ_MSG_DSTPORT(msgid));
6328 +
6329 +               BUG_ON(!service);
6330 +
6331 +               for (i = 0, pos = 0; i < (unsigned int)count;
6332 +                       pos += elements[i++].size)
6333 +                       if (elements[i].size) {
6334 +                               if (vchiq_copy_from_user
6335 +                                       (header->data + pos, elements[i].data,
6336 +                                       (size_t) elements[i].size) !=
6337 +                                       VCHIQ_SUCCESS) {
6338 +                                       mutex_unlock(&state->slot_mutex);
6339 +                                       VCHIQ_SERVICE_STATS_INC(service,
6340 +                                               error_count);
6341 +                                       return VCHIQ_ERROR;
6342 +                               }
6343 +                               if (i == 0) {
6344 +                                       if (vchiq_core_msg_log_level >=
6345 +                                               VCHIQ_LOG_INFO)
6346 +                                               vchiq_log_dump_mem("Sent", 0,
6347 +                                                       header->data + pos,
6348 +                                                       min(64u,
6349 +                                                       elements[0].size));
6350 +                               }
6351 +                       }
6352 +
6353 +               spin_lock(&quota_spinlock);
6354 +               service_quota->message_use_count++;
6355 +
6356 +               tx_end_index =
6357 +                       SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6358 +
6359 +               /* If this transmission can't fit in the last slot used by any
6360 +               ** service, the data_use_count must be increased. */
6361 +               if (tx_end_index != state->previous_data_index) {
6362 +                       state->previous_data_index = tx_end_index;
6363 +                       state->data_use_count++;
6364 +               }
6365 +
6366 +               /* If this isn't the same slot last used by this service,
6367 +               ** the service's slot_use_count must be increased. */
6368 +               if (tx_end_index != service_quota->previous_tx_index) {
6369 +                       service_quota->previous_tx_index = tx_end_index;
6370 +                       slot_use_count = ++service_quota->slot_use_count;
6371 +               } else {
6372 +                       slot_use_count = 0;
6373 +               }
6374 +
6375 +               spin_unlock(&quota_spinlock);
6376 +
6377 +               if (slot_use_count)
6378 +                       vchiq_log_trace(vchiq_core_log_level,
6379 +                               "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6380 +                               state->id, service->localport,
6381 +                               msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6382 +                               slot_use_count, header);
6383 +
6384 +               VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6385 +               VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6386 +       } else {
6387 +               vchiq_log_info(vchiq_core_log_level,
6388 +                       "%d: qm %s@%x,%x (%d->%d)", state->id,
6389 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6390 +                       (unsigned int)header, size,
6391 +                       VCHIQ_MSG_SRCPORT(msgid),
6392 +                       VCHIQ_MSG_DSTPORT(msgid));
6393 +               if (size != 0) {
6394 +                       WARN_ON(!((count == 1) && (size == elements[0].size)));
6395 +                       memcpy(header->data, elements[0].data,
6396 +                               elements[0].size);
6397 +               }
6398 +               VCHIQ_STATS_INC(state, ctrl_tx_count);
6399 +       }
6400 +
6401 +       header->msgid = msgid;
6402 +       header->size = size;
6403 +
6404 +       {
6405 +               int svc_fourcc;
6406 +
6407 +               svc_fourcc = service
6408 +                       ? service->base.fourcc
6409 +                       : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6410 +
6411 +               vchiq_log_info(vchiq_core_msg_log_level,
6412 +                       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6413 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6414 +                       VCHIQ_MSG_TYPE(msgid),
6415 +                       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6416 +                       VCHIQ_MSG_SRCPORT(msgid),
6417 +                       VCHIQ_MSG_DSTPORT(msgid),
6418 +                       size);
6419 +       }
6420 +
6421 +       /* Make sure the new header is visible to the peer. */
6422 +       wmb();
6423 +
6424 +       /* Make the new tx_pos visible to the peer. */
6425 +       local->tx_pos = state->local_tx_pos;
6426 +       wmb();
6427 +
6428 +       if (service && (type == VCHIQ_MSG_CLOSE))
6429 +               vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6430 +
6431 +       if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6432 +               mutex_unlock(&state->slot_mutex);
6433 +
6434 +       remote_event_signal(&state->remote->trigger);
6435 +
6436 +       return VCHIQ_SUCCESS;
6437 +}
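
For illustration: the DATA branch above only charges the sending service for another slot when the new message ends in a different slot from that service's previous message. The standalone userspace sketch below models that bookkeeping; it is not driver code, and the 4 KiB slot size and variable names are assumptions chosen for the example.

#include <stdio.h>

#define SLOT_SIZE 4096                                  /* assumed slot size */
#define SLOT_QUEUE_INDEX_FROM_POS(pos) ((int)((unsigned)(pos) / SLOT_SIZE))

int main(void)
{
        unsigned tx_pos = 4100;         /* local_tx_pos after writing the message */
        int previous_tx_index = 0;      /* last slot this service wrote into */
        int slot_use_count = 1;
        int tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(tx_pos - 1);

        if (tx_end_index != previous_tx_index) {
                previous_tx_index = tx_end_index;
                slot_use_count++;       /* the service now spans one more slot */
        }
        printf("end index %d (prev %d), slot_use_count %d\n",
               tx_end_index, previous_tx_index, slot_use_count);
        return 0;
}
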
6438 +
6439 +/* Called by the slot handler and application threads */
6440 +static VCHIQ_STATUS_T
6441 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6442 +       int msgid, const VCHIQ_ELEMENT_T *elements,
6443 +       int count, int size, int is_blocking)
6444 +{
6445 +       VCHIQ_SHARED_STATE_T *local;
6446 +       VCHIQ_HEADER_T *header;
6447 +
6448 +       local = state->local;
6449 +
6450 +       if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6451 +               (mutex_lock_interruptible(&state->sync_mutex) != 0))
6452 +               return VCHIQ_RETRY;
6453 +
6454 +       remote_event_wait(&local->sync_release);
6455 +
6456 +       rmb();
6457 +
6458 +       header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6459 +               local->slot_sync);
6460 +
6461 +       {
6462 +               int oldmsgid = header->msgid;
6463 +               if (oldmsgid != VCHIQ_MSGID_PADDING)
6464 +                       vchiq_log_error(vchiq_core_log_level,
6465 +                               "%d: qms - msgid %x, not PADDING",
6466 +                               state->id, oldmsgid);
6467 +       }
6468 +
6469 +       if (service) {
6470 +               int i, pos;
6471 +
6472 +               vchiq_log_info(vchiq_sync_log_level,
6473 +                       "%d: qms %s@%x,%x (%d->%d)", state->id,
6474 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6475 +                       (unsigned int)header, size,
6476 +                       VCHIQ_MSG_SRCPORT(msgid),
6477 +                       VCHIQ_MSG_DSTPORT(msgid));
6478 +
6479 +               for (i = 0, pos = 0; i < (unsigned int)count;
6480 +                       pos += elements[i++].size)
6481 +                       if (elements[i].size) {
6482 +                               if (vchiq_copy_from_user
6483 +                                       (header->data + pos, elements[i].data,
6484 +                                       (size_t) elements[i].size) !=
6485 +                                       VCHIQ_SUCCESS) {
6486 +                                       mutex_unlock(&state->sync_mutex);
6487 +                                       VCHIQ_SERVICE_STATS_INC(service,
6488 +                                               error_count);
6489 +                                       return VCHIQ_ERROR;
6490 +                               }
6491 +                               if (i == 0) {
6492 +                                       if (vchiq_sync_log_level >=
6493 +                                               VCHIQ_LOG_TRACE)
6494 +                                               vchiq_log_dump_mem("Sent Sync",
6495 +                                                       0, header->data + pos,
6496 +                                                       min(64u,
6497 +                                                       elements[0].size));
6498 +                               }
6499 +                       }
6500 +
6501 +               VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6502 +               VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6503 +       } else {
6504 +               vchiq_log_info(vchiq_sync_log_level,
6505 +                       "%d: qms %s@%x,%x (%d->%d)", state->id,
6506 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6507 +                       (unsigned int)header, size,
6508 +                       VCHIQ_MSG_SRCPORT(msgid),
6509 +                       VCHIQ_MSG_DSTPORT(msgid));
6510 +               if (size != 0) {
6511 +                       WARN_ON(!((count == 1) && (size == elements[0].size)));
6512 +                       memcpy(header->data, elements[0].data,
6513 +                               elements[0].size);
6514 +               }
6515 +               VCHIQ_STATS_INC(state, ctrl_tx_count);
6516 +       }
6517 +
6518 +       header->size = size;
6519 +       header->msgid = msgid;
6520 +
6521 +       if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6522 +               int svc_fourcc;
6523 +
6524 +               svc_fourcc = service
6525 +                       ? service->base.fourcc
6526 +                       : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6527 +
6528 +               vchiq_log_trace(vchiq_sync_log_level,
6529 +                       "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6530 +                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6531 +                       VCHIQ_MSG_TYPE(msgid),
6532 +                       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6533 +                       VCHIQ_MSG_SRCPORT(msgid),
6534 +                       VCHIQ_MSG_DSTPORT(msgid),
6535 +                       size);
6536 +       }
6537 +
6538 +       /* Make sure the new header is visible to the peer. */
6539 +       wmb();
6540 +
6541 +       remote_event_signal(&state->remote->sync_trigger);
6542 +
6543 +       if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6544 +               mutex_unlock(&state->sync_mutex);
6545 +
6546 +       return VCHIQ_SUCCESS;
6547 +}
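
For illustration: queue_message_sync() reuses a single shared slot, waiting for the peer to release it before writing and signalling the peer afterwards; the error log above fires if the previous message was never consumed (msgid still not PADDING). The sequential sketch below models only the write ordering (payload and size first, msgid last); it is not driver code and the types and constants are illustrative.

#include <stdio.h>
#include <string.h>

#define MSGID_PADDING 0                 /* stand-in for VCHIQ_MSGID_PADDING */
#define MSGID_DATA    1                 /* stand-in for a real message id */

struct sync_slot { int msgid; int size; char data[64]; };

static void send_sync(struct sync_slot *slot, const char *payload, int len)
{
        /* the driver blocks on remote_event_wait(&sync_release) here */
        if (slot->msgid != MSGID_PADDING)
                printf("error: previous sync message not yet consumed\n");

        memcpy(slot->data, payload, (size_t)len);
        slot->size = len;
        slot->msgid = MSGID_DATA;       /* published last (after wmb() in the driver) */
        /* the driver wakes the peer with remote_event_signal(&sync_trigger) here */
}

int main(void)
{
        struct sync_slot slot = { MSGID_PADDING, 0, { 0 } };

        send_sync(&slot, "ping", 4);
        printf("sent %d bytes, msgid=%d\n", slot.size, slot.msgid);
        return 0;
}
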
6548 +
6549 +static inline void
6550 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6551 +{
6552 +       slot->use_count++;
6553 +}
6554 +
6555 +static void
6556 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6557 +       VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6558 +{
6559 +       int release_count;
6560 +
6561 +       mutex_lock(&state->recycle_mutex);
6562 +
6563 +       if (header) {
6564 +               int msgid = header->msgid;
6565 +               if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6566 +                       (service && service->closing)) {
6567 +                       mutex_unlock(&state->recycle_mutex);
6568 +                       return;
6569 +               }
6570 +
6571 +               /* Rewrite the message header to prevent a double
6572 +               ** release */
6573 +               header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6574 +       }
6575 +
6576 +       release_count = slot_info->release_count;
6577 +       slot_info->release_count = ++release_count;
6578 +
6579 +       if (release_count == slot_info->use_count) {
6580 +               int slot_queue_recycle;
6581 +               /* Add to the freed queue */
6582 +
6583 +               /* A read barrier is necessary here to prevent speculative
6584 +               ** fetches of remote->slot_queue_recycle from overtaking the
6585 +               ** mutex. */
6586 +               rmb();
6587 +
6588 +               slot_queue_recycle = state->remote->slot_queue_recycle;
6589 +               state->remote->slot_queue[slot_queue_recycle &
6590 +                       VCHIQ_SLOT_QUEUE_MASK] =
6591 +                       SLOT_INDEX_FROM_INFO(state, slot_info);
6592 +               state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6593 +               vchiq_log_info(vchiq_core_log_level,
6594 +                       "%d: release_slot %d - recycle->%x",
6595 +                       state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6596 +                       state->remote->slot_queue_recycle);
6597 +
6598 +               /* A write barrier is necessary, but remote_event_signal
6599 +               ** contains one. */
6600 +               remote_event_signal(&state->remote->recycle);
6601 +       }
6602 +
6603 +       mutex_unlock(&state->recycle_mutex);
6604 +}
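
For illustration: once a slot's release_count catches up with its use_count, release_slot() hands the slot index back to the peer through a power-of-two recycle queue indexed with a free-running counter. A standalone model of that append (the queue length and names are assumptions, not the driver's):

#include <stdio.h>

#define SLOT_QUEUE_LEN  16                      /* assumed; must be a power of two */
#define SLOT_QUEUE_MASK (SLOT_QUEUE_LEN - 1)

int main(void)
{
        int slot_queue[SLOT_QUEUE_LEN] = { 0 };
        unsigned recycle = 30;                  /* free-running recycle index */
        int freed_slot = 5;                     /* slot being returned */

        /* models: remote->slot_queue[recycle & MASK] = slot; recycle++; */
        slot_queue[recycle & SLOT_QUEUE_MASK] = freed_slot;
        recycle++;

        printf("slot %d queued at entry %u, recycle now %u\n",
               slot_queue[(recycle - 1) & SLOT_QUEUE_MASK],
               (recycle - 1) & SLOT_QUEUE_MASK, recycle);
        return 0;
}
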
6605 +
6606 +/* Called by the slot handler - don't hold the bulk mutex */
6607 +static VCHIQ_STATUS_T
6608 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6609 +       int retry_poll)
6610 +{
6611 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6612 +
6613 +       vchiq_log_trace(vchiq_core_log_level,
6614 +               "%d: nb:%d %cx - p=%x rn=%x r=%x",
6615 +               service->state->id, service->localport,
6616 +               (queue == &service->bulk_tx) ? 't' : 'r',
6617 +               queue->process, queue->remote_notify, queue->remove);
6618 +
6619 +       if (service->state->is_master) {
6620 +               while (queue->remote_notify != queue->process) {
6621 +                       VCHIQ_BULK_T *bulk =
6622 +                               &queue->bulks[BULK_INDEX(queue->remote_notify)];
6623 +                       int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6624 +                               VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6625 +                       int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6626 +                               service->remoteport);
6627 +                       VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6628 +                       /* Only reply to non-dummy bulk requests */
6629 +                       if (bulk->remote_data) {
6630 +                               status = queue_message(service->state, NULL,
6631 +                                       msgid, &element, 1, 4, 0);
6632 +                               if (status != VCHIQ_SUCCESS)
6633 +                                       break;
6634 +                       }
6635 +                       queue->remote_notify++;
6636 +               }
6637 +       } else {
6638 +               queue->remote_notify = queue->process;
6639 +       }
6640 +
6641 +       if (status == VCHIQ_SUCCESS) {
6642 +               while (queue->remove != queue->remote_notify) {
6643 +                       VCHIQ_BULK_T *bulk =
6644 +                               &queue->bulks[BULK_INDEX(queue->remove)];
6645 +
6646 +                       /* Only generate callbacks for non-dummy bulk
6647 +                       ** requests, and non-terminated services */
6648 +                       if (bulk->data && service->instance) {
6649 +                               if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6650 +                                       if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6651 +                                               VCHIQ_SERVICE_STATS_INC(service,
6652 +                                                       bulk_tx_count);
6653 +                                               VCHIQ_SERVICE_STATS_ADD(service,
6654 +                                                       bulk_tx_bytes,
6655 +                                                       bulk->actual);
6656 +                                       } else {
6657 +                                               VCHIQ_SERVICE_STATS_INC(service,
6658 +                                                       bulk_rx_count);
6659 +                                               VCHIQ_SERVICE_STATS_ADD(service,
6660 +                                                       bulk_rx_bytes,
6661 +                                                       bulk->actual);
6662 +                                       }
6663 +                               } else {
6664 +                                       VCHIQ_SERVICE_STATS_INC(service,
6665 +                                               bulk_aborted_count);
6666 +                               }
6667 +                               if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6668 +                                       struct bulk_waiter *waiter;
6669 +                                       spin_lock(&bulk_waiter_spinlock);
6670 +                                       waiter = bulk->userdata;
6671 +                                       if (waiter) {
6672 +                                               waiter->actual = bulk->actual;
6673 +                                               up(&waiter->event);
6674 +                                       }
6675 +                                       spin_unlock(&bulk_waiter_spinlock);
6676 +                               } else if (bulk->mode ==
6677 +                                       VCHIQ_BULK_MODE_CALLBACK) {
6678 +                                       VCHIQ_REASON_T reason = (bulk->dir ==
6679 +                                               VCHIQ_BULK_TRANSMIT) ?
6680 +                                               ((bulk->actual ==
6681 +                                               VCHIQ_BULK_ACTUAL_ABORTED) ?
6682 +                                               VCHIQ_BULK_TRANSMIT_ABORTED :
6683 +                                               VCHIQ_BULK_TRANSMIT_DONE) :
6684 +                                               ((bulk->actual ==
6685 +                                               VCHIQ_BULK_ACTUAL_ABORTED) ?
6686 +                                               VCHIQ_BULK_RECEIVE_ABORTED :
6687 +                                               VCHIQ_BULK_RECEIVE_DONE);
6688 +                                       status = make_service_callback(service,
6689 +                                               reason, NULL, bulk->userdata);
6690 +                                       if (status == VCHIQ_RETRY)
6691 +                                               break;
6692 +                               }
6693 +                       }
6694 +
6695 +                       queue->remove++;
6696 +                       up(&service->bulk_remove_event);
6697 +               }
6698 +               if (!retry_poll)
6699 +                       status = VCHIQ_SUCCESS;
6700 +       }
6701 +
6702 +       if (status == VCHIQ_RETRY)
6703 +               request_poll(service->state, service,
6704 +                       (queue == &service->bulk_tx) ?
6705 +                       VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6706 +
6707 +       return status;
6708 +}
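
For illustration: the nested ternary in the callback path above maps (direction, aborted?) to one of four completion reasons. The same mapping written out as a small standalone function (the enum values here are stand-ins; only the selection logic mirrors the code):

#include <stdio.h>

enum bulk_dir { DIR_TRANSMIT, DIR_RECEIVE };    /* stand-ins for VCHIQ_BULK_* */
#define ACTUAL_ABORTED (-1)                     /* stand-in sentinel value */

static const char *bulk_reason(enum bulk_dir dir, int actual)
{
        if (dir == DIR_TRANSMIT)
                return (actual == ACTUAL_ABORTED) ? "BULK_TRANSMIT_ABORTED"
                                                  : "BULK_TRANSMIT_DONE";
        return (actual == ACTUAL_ABORTED) ? "BULK_RECEIVE_ABORTED"
                                          : "BULK_RECEIVE_DONE";
}

int main(void)
{
        printf("%s\n", bulk_reason(DIR_TRANSMIT, 128));            /* ..._DONE */
        printf("%s\n", bulk_reason(DIR_RECEIVE, ACTUAL_ABORTED));  /* ..._ABORTED */
        return 0;
}
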
6709 +
6710 +/* Called by the slot handler thread */
6711 +static void
6712 +poll_services(VCHIQ_STATE_T *state)
6713 +{
6714 +       int group, i;
6715 +
6716 +       for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6717 +               uint32_t flags;
6718 +               flags = atomic_xchg(&state->poll_services[group], 0);
6719 +               for (i = 0; flags; i++) {
6720 +                       if (flags & (1 << i)) {
6721 +                               VCHIQ_SERVICE_T *service =
6722 +                                       find_service_by_port(state,
6723 +                                               (group<<5) + i);
6724 +                               uint32_t service_flags;
6725 +                               flags &= ~(1 << i);
6726 +                               if (!service)
6727 +                                       continue;
6728 +                               service_flags =
6729 +                                       atomic_xchg(&service->poll_flags, 0);
6730 +                               if (service_flags &
6731 +                                       (1 << VCHIQ_POLL_REMOVE)) {
6732 +                                       vchiq_log_info(vchiq_core_log_level,
6733 +                                               "%d: ps - remove %d<->%d",
6734 +                                               state->id, service->localport,
6735 +                                               service->remoteport);
6736 +
6737 +                                       /* Make it look like a client, because
6738 +                                          it must be removed and not left in
6739 +                                          the LISTENING state. */
6740 +                                       service->public_fourcc =
6741 +                                               VCHIQ_FOURCC_INVALID;
6742 +
6743 +                                       if (vchiq_close_service_internal(
6744 +                                               service, 0/*!close_recvd*/) !=
6745 +                                               VCHIQ_SUCCESS)
6746 +                                               request_poll(state, service,
6747 +                                                       VCHIQ_POLL_REMOVE);
6748 +                               } else if (service_flags &
6749 +                                       (1 << VCHIQ_POLL_TERMINATE)) {
6750 +                                       vchiq_log_info(vchiq_core_log_level,
6751 +                                               "%d: ps - terminate %d<->%d",
6752 +                                               state->id, service->localport,
6753 +                                               service->remoteport);
6754 +                                       if (vchiq_close_service_internal(
6755 +                                               service, 0/*!close_recvd*/) !=
6756 +                                               VCHIQ_SUCCESS)
6757 +                                               request_poll(state, service,
6758 +                                                       VCHIQ_POLL_TERMINATE);
6759 +                               }
6760 +                               if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
6761 +                                       notify_bulks(service,
6762 +                                               &service->bulk_tx,
6763 +                                               1/*retry_poll*/);
6764 +                               if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
6765 +                                       notify_bulks(service,
6766 +                                               &service->bulk_rx,
6767 +                                               1/*retry_poll*/);
6768 +                               unlock_service(service);
6769 +                       }
6770 +               }
6771 +       }
6772 +}
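
For illustration: poll_services() walks the poll flags in 32-bit groups, atomically snapshotting and clearing each group before scanning its bits; a service's local port is (group << 5) + bit. A simplified userspace walk over one group (plain variables stand in for the atomic_xchg):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int group = 0;                                  /* first block of 32 ports */
        uint32_t flags = (1u << 3) | (1u << 17);        /* ports 3 and 17 flagged;
                                                           the driver obtains this via
                                                           atomic_xchg(..., 0) */

        for (int i = 0; flags; i++) {
                if (flags & (1u << i)) {
                        flags &= ~(1u << i);
                        printf("polling local port %d\n", (group << 5) + i);
                }
        }
        return 0;
}
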
6773 +
6774 +/* Called by the slot handler or application threads, holding the bulk mutex. */
6775 +static int
6776 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6777 +{
6778 +       VCHIQ_STATE_T *state = service->state;
6779 +       int resolved = 0;
6780 +       int rc;
6781 +
6782 +       while ((queue->process != queue->local_insert) &&
6783 +               (queue->process != queue->remote_insert)) {
6784 +               VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6785 +
6786 +               vchiq_log_trace(vchiq_core_log_level,
6787 +                       "%d: rb:%d %cx - li=%x ri=%x p=%x",
6788 +                       state->id, service->localport,
6789 +                       (queue == &service->bulk_tx) ? 't' : 'r',
6790 +                       queue->local_insert, queue->remote_insert,
6791 +                       queue->process);
6792 +
6793 +               WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
6794 +               WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
6795 +
6796 +               rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
6797 +               if (rc != 0)
6798 +                       break;
6799 +
6800 +               vchiq_transfer_bulk(bulk);
6801 +               mutex_unlock(&state->bulk_transfer_mutex);
6802 +
6803 +               if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
6804 +                       const char *header = (queue == &service->bulk_tx) ?
6805 +                               "Send Bulk to" : "Recv Bulk from";
6806 +                       if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
6807 +                               vchiq_log_info(vchiq_core_msg_log_level,
6808 +                                       "%s %c%c%c%c d:%d len:%d %x<->%x",
6809 +                                       header,
6810 +                                       VCHIQ_FOURCC_AS_4CHARS(
6811 +                                               service->base.fourcc),
6812 +                                       service->remoteport,
6813 +                                       bulk->size,
6814 +                                       (unsigned int)bulk->data,
6815 +                                       (unsigned int)bulk->remote_data);
6816 +                       else
6817 +                               vchiq_log_info(vchiq_core_msg_log_level,
6818 +                                       "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
6819 +                                       " rx len:%d %x<->%x",
6820 +                                       header,
6821 +                                       VCHIQ_FOURCC_AS_4CHARS(
6822 +                                               service->base.fourcc),
6823 +                                       service->remoteport,
6824 +                                       bulk->size,
6825 +                                       bulk->remote_size,
6826 +                                       (unsigned int)bulk->data,
6827 +                                       (unsigned int)bulk->remote_data);
6828 +               }
6829 +
6830 +               vchiq_complete_bulk(bulk);
6831 +               queue->process++;
6832 +               resolved++;
6833 +       }
6834 +       return resolved;
6835 +}
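
For illustration: resolve_bulks() may only transfer a bulk once both sides have filled in their half of the descriptor, i.e. while the process cursor is behind both local_insert and remote_insert. A toy model of that progress rule:

#include <stdio.h>

int main(void)
{
        unsigned process = 4, local_insert = 7, remote_insert = 5;

        while (process != local_insert && process != remote_insert) {
                printf("transferring bulk %u\n", process);
                process++;
        }
        printf("stopped at %u (remote side has only inserted up to %u)\n",
               process, remote_insert);
        return 0;
}
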
6836 +
6837 +/* Called with the bulk_mutex held */
6838 +static void
6839 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6840 +{
6841 +       int is_tx = (queue == &service->bulk_tx);
6842 +       vchiq_log_trace(vchiq_core_log_level,
6843 +               "%d: aob:%d %cx - li=%x ri=%x p=%x",
6844 +               service->state->id, service->localport, is_tx ? 't' : 'r',
6845 +               queue->local_insert, queue->remote_insert, queue->process);
6846 +
6847 +       WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
6848 +       WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
6849 +
6850 +       while ((queue->process != queue->local_insert) ||
6851 +               (queue->process != queue->remote_insert)) {
6852 +               VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6853 +
6854 +               if (queue->process == queue->remote_insert) {
6855 +                       /* fabricate a matching dummy bulk */
6856 +                       bulk->remote_data = NULL;
6857 +                       bulk->remote_size = 0;
6858 +                       queue->remote_insert++;
6859 +               }
6860 +
6861 +               if (queue->process != queue->local_insert) {
6862 +                       vchiq_complete_bulk(bulk);
6863 +
6864 +                       vchiq_log_info(vchiq_core_msg_log_level,
6865 +                               "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
6866 +                               "rx len:%d",
6867 +                               is_tx ? "Send Bulk to" : "Recv Bulk from",
6868 +                               VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
6869 +                               service->remoteport,
6870 +                               bulk->size,
6871 +                               bulk->remote_size);
6872 +               } else {
6873 +                       /* fabricate a matching dummy bulk */
6874 +                       bulk->data = NULL;
6875 +                       bulk->size = 0;
6876 +                       bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
6877 +                       bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
6878 +                               VCHIQ_BULK_RECEIVE;
6879 +                       queue->local_insert++;
6880 +               }
6881 +
6882 +               queue->process++;
6883 +       }
6884 +}
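
For illustration: when a service closes, abort_outstanding_bulks() fabricates whichever half of each pending descriptor is missing so the process cursor can catch up with both insert counters, completing real local entries as ABORTED. A toy walk-through of that catch-up:

#include <stdio.h>

int main(void)
{
        unsigned process = 2, local_insert = 4, remote_insert = 3;

        while (process != local_insert || process != remote_insert) {
                if (process == remote_insert) {
                        remote_insert++;        /* fabricate the missing remote half */
                        printf("bulk %u: dummy remote half added\n", process);
                }
                if (process != local_insert) {
                        printf("bulk %u: completed as ABORTED\n", process);
                } else {
                        local_insert++;         /* fabricate the missing local half */
                        printf("bulk %u: dummy local half added\n", process);
                }
                process++;
        }
        return 0;
}
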
6885 +
6886 +/* Called from the slot handler thread */
6887 +static void
6888 +pause_bulks(VCHIQ_STATE_T *state)
6889 +{
6890 +       if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
6891 +               WARN_ON_ONCE(1);
6892 +               atomic_set(&pause_bulks_count, 1);
6893 +               return;
6894 +       }
6895 +
6896 +       /* Block bulk transfers from all services */
6897 +       mutex_lock(&state->bulk_transfer_mutex);
6898 +}
6899 +
6900 +/* Called from the slot handler thread */
6901 +static void
6902 +resume_bulks(VCHIQ_STATE_T *state)
6903 +{
6904 +       int i;
6905 +       if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
6906 +               WARN_ON_ONCE(1);
6907 +               atomic_set(&pause_bulks_count, 0);
6908 +               return;
6909 +       }
6910 +
6911 +       /* Allow bulk transfers from all services */
6912 +       mutex_unlock(&state->bulk_transfer_mutex);
6913 +
6914 +       if (state->deferred_bulks == 0)
6915 +               return;
6916 +
6917 +       /* Deal with any bulks which had to be deferred while in the
6918 +        * paused state.  Don't try to match the number of deferred bulks,
6919 +        * in case something has come along and closed the service in the
6920 +        * interim - just process all bulk queues for all services. */
6921 +       vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
6922 +               __func__, state->deferred_bulks);
6923 +
6924 +       for (i = 0; i < state->unused_service; i++) {
6925 +               VCHIQ_SERVICE_T *service = state->services[i];
6926 +               int resolved_rx = 0;
6927 +               int resolved_tx = 0;
6928 +               if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
6929 +                       continue;
6930 +
6931 +               mutex_lock(&service->bulk_mutex);
6932 +               resolved_rx = resolve_bulks(service, &service->bulk_rx);
6933 +               resolved_tx = resolve_bulks(service, &service->bulk_tx);
6934 +               mutex_unlock(&service->bulk_mutex);
6935 +               if (resolved_rx)
6936 +                       notify_bulks(service, &service->bulk_rx, 1);
6937 +               if (resolved_tx)
6938 +                       notify_bulks(service, &service->bulk_tx, 1);
6939 +       }
6940 +       state->deferred_bulks = 0;
6941 +}
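
For illustration: pause_bulks() and resume_bulks() must alternate strictly; the atomic counter is only there to detect and repair an unbalanced pairing. A userspace model of that guard, with plain integers standing in for the atomics:

#include <stdio.h>

static int pause_count;                 /* stand-in for the atomic pause_bulks_count */

static void pause_bulks_model(void)
{
        if (++pause_count != 1) {       /* a pause while already paused */
                pause_count = 1;
                printf("warning: unbalanced pause\n");
                return;
        }
        printf("bulk transfers blocked\n");
}

static void resume_bulks_model(void)
{
        if (--pause_count != 0) {       /* a resume without a matching pause */
                pause_count = 0;
                printf("warning: unbalanced resume\n");
                return;
        }
        printf("bulk transfers allowed\n");
}

int main(void)
{
        pause_bulks_model();
        resume_bulks_model();
        resume_bulks_model();           /* exercises the warning path */
        return 0;
}
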
6942 +
6943 +static int
6944 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
6945 +{
6946 +       VCHIQ_SERVICE_T *service = NULL;
6947 +       int msgid, size;
6948 +       int type;
6949 +       unsigned int localport, remoteport;
6950 +
6951 +       msgid = header->msgid;
6952 +       size = header->size;
6953 +       type = VCHIQ_MSG_TYPE(msgid);
6954 +       localport = VCHIQ_MSG_DSTPORT(msgid);
6955 +       remoteport = VCHIQ_MSG_SRCPORT(msgid);
6956 +       if (size >= sizeof(struct vchiq_open_payload)) {
6957 +               const struct vchiq_open_payload *payload =
6958 +                       (struct vchiq_open_payload *)header->data;
6959 +               unsigned int fourcc;
6960 +
6961 +               fourcc = payload->fourcc;
6962 +               vchiq_log_info(vchiq_core_log_level,
6963 +                       "%d: prs OPEN@%x (%d->'%c%c%c%c')",
6964 +                       state->id, (unsigned int)header,
6965 +                       localport,
6966 +                       VCHIQ_FOURCC_AS_4CHARS(fourcc));
6967 +
6968 +               service = get_listening_service(state, fourcc);
6969 +
6970 +               if (service) {
6971 +                       /* A matching service exists */
6972 +                       short version = payload->version;
6973 +                       short version_min = payload->version_min;
6974 +                       if ((service->version < version_min) ||
6975 +                               (version < service->version_min)) {
6976 +                               /* Version mismatch */
6977 +                               vchiq_loud_error_header();
6978 +                               vchiq_loud_error("%d: service %d (%c%c%c%c) "
6979 +                                       "version mismatch - local (%d, min %d)"
6980 +                                       " vs. remote (%d, min %d)",
6981 +                                       state->id, service->localport,
6982 +                                       VCHIQ_FOURCC_AS_4CHARS(fourcc),
6983 +                                       service->version, service->version_min,
6984 +                                       version, version_min);
6985 +                               vchiq_loud_error_footer();
6986 +                               unlock_service(service);
6987 +                               service = NULL;
6988 +                               goto fail_open;
6989 +                       }
6990 +                       service->peer_version = version;
6991 +
6992 +                       if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
6993 +                               struct vchiq_openack_payload ack_payload = {
6994 +                                       service->version
6995 +                               };
6996 +                               VCHIQ_ELEMENT_T body = {
6997 +                                       &ack_payload,
6998 +                                       sizeof(ack_payload)
6999 +                               };
7000 +
7001 +                               /* Acknowledge the OPEN */
7002 +                               if (service->sync) {
7003 +                                       if (queue_message_sync(state, NULL,
7004 +                                               VCHIQ_MAKE_MSG(
7005 +                                                       VCHIQ_MSG_OPENACK,
7006 +                                                       service->localport,
7007 +                                                       remoteport),
7008 +                                               &body, 1, sizeof(ack_payload),
7009 +                                               0) == VCHIQ_RETRY)
7010 +                                               goto bail_not_ready;
7011 +                               } else {
7012 +                                       if (queue_message(state, NULL,
7013 +                                               VCHIQ_MAKE_MSG(
7014 +                                                       VCHIQ_MSG_OPENACK,
7015 +                                                       service->localport,
7016 +                                                       remoteport),
7017 +                                               &body, 1, sizeof(ack_payload),
7018 +                                               0) == VCHIQ_RETRY)
7019 +                                               goto bail_not_ready;
7020 +                               }
7021 +
7022 +                               /* The service is now open */
7023 +                               vchiq_set_service_state(service,
7024 +                                       service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7025 +                                       : VCHIQ_SRVSTATE_OPEN);
7026 +                       }
7027 +
7028 +                       service->remoteport = remoteport;
7029 +                       service->client_id = ((int *)header->data)[1];
7030 +                       if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7031 +                               NULL, NULL) == VCHIQ_RETRY) {
7032 +                               /* Bail out if not ready */
7033 +                               service->remoteport = VCHIQ_PORT_FREE;
7034 +                               goto bail_not_ready;
7035 +                       }
7036 +
7037 +                       /* Success - the message has been dealt with */
7038 +                       unlock_service(service);
7039 +                       return 1;
7040 +               }
7041 +       }
7042 +
7043 +fail_open:
7044 +       /* No available service, or an invalid request - send a CLOSE */
7045 +       if (queue_message(state, NULL,
7046 +               VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7047 +               NULL, 0, 0, 0) == VCHIQ_RETRY)
7048 +               goto bail_not_ready;
7049 +
7050 +       return 1;
7051 +
7052 +bail_not_ready:
7053 +       if (service)
7054 +               unlock_service(service);
7055 +
7056 +       return 0;
7057 +}
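
For illustration: the OPEN handshake above rejects a client when the two advertised version ranges do not overlap, i.e. when either side's current version falls below the other side's minimum. The same test as a standalone helper:

#include <stdio.h>

static int versions_compatible(short local_ver, short local_min,
                               short remote_ver, short remote_min)
{
        /* mirrors: reject if (local_ver < remote_min) || (remote_ver < local_min) */
        return !(local_ver < remote_min || remote_ver < local_min);
}

int main(void)
{
        printf("%d\n", versions_compatible(3, 1, 2, 2));    /* 1: ranges overlap */
        printf("%d\n", versions_compatible(1, 1, 4, 3));    /* 0: local side too old */
        return 0;
}
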
7058 +
7059 +/* Called by the slot handler thread */
7060 +static void
7061 +parse_rx_slots(VCHIQ_STATE_T *state)
7062 +{
7063 +       VCHIQ_SHARED_STATE_T *remote = state->remote;
7064 +       VCHIQ_SERVICE_T *service = NULL;
7065 +       int tx_pos;
7066 +       DEBUG_INITIALISE(state->local)
7067 +
7068 +       tx_pos = remote->tx_pos;
7069 +
7070 +       while (state->rx_pos != tx_pos) {
7071 +               VCHIQ_HEADER_T *header;
7072 +               int msgid, size;
7073 +               int type;
7074 +               unsigned int localport, remoteport;
7075 +
7076 +               DEBUG_TRACE(PARSE_LINE);
7077 +               if (!state->rx_data) {
7078 +                       int rx_index;
7079 +                       WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7080 +                       rx_index = remote->slot_queue[
7081 +                               SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7082 +                               VCHIQ_SLOT_QUEUE_MASK];
7083 +                       state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7084 +                               rx_index);
7085 +                       state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7086 +
7087 +                       /* Initialise use_count to one, and increment
7088 +                       ** release_count at the end of the slot to avoid
7089 +                       ** releasing the slot prematurely. */
7090 +                       state->rx_info->use_count = 1;
7091 +                       state->rx_info->release_count = 0;
7092 +               }
7093 +
7094 +               header = (VCHIQ_HEADER_T *)(state->rx_data +
7095 +                       (state->rx_pos & VCHIQ_SLOT_MASK));
7096 +               DEBUG_VALUE(PARSE_HEADER, (int)header);
7097 +               msgid = header->msgid;
7098 +               DEBUG_VALUE(PARSE_MSGID, msgid);
7099 +               size = header->size;
7100 +               type = VCHIQ_MSG_TYPE(msgid);
7101 +               localport = VCHIQ_MSG_DSTPORT(msgid);
7102 +               remoteport = VCHIQ_MSG_SRCPORT(msgid);
7103 +
7104 +               if (type != VCHIQ_MSG_DATA)
7105 +                       VCHIQ_STATS_INC(state, ctrl_rx_count);
7106 +
7107 +               switch (type) {
7108 +               case VCHIQ_MSG_OPENACK:
7109 +               case VCHIQ_MSG_CLOSE:
7110 +               case VCHIQ_MSG_DATA:
7111 +               case VCHIQ_MSG_BULK_RX:
7112 +               case VCHIQ_MSG_BULK_TX:
7113 +               case VCHIQ_MSG_BULK_RX_DONE:
7114 +               case VCHIQ_MSG_BULK_TX_DONE:
7115 +                       service = find_service_by_port(state, localport);
7116 +                       if ((!service || service->remoteport != remoteport) &&
7117 +                               (localport == 0) &&
7118 +                               (type == VCHIQ_MSG_CLOSE)) {
7119 +                               /* This could be a CLOSE from a client which
7120 +                                  hadn't yet received the OPENACK - look for
7121 +                                  the connected service */
7122 +                               if (service)
7123 +                                       unlock_service(service);
7124 +                               service = get_connected_service(state,
7125 +                                       remoteport);
7126 +                               if (service)
7127 +                                       vchiq_log_warning(vchiq_core_log_level,
7128 +                                               "%d: prs %s@%x (%d->%d) - "
7129 +                                               "found connected service %d",
7130 +                                               state->id, msg_type_str(type),
7131 +                                               (unsigned int)header,
7132 +                                               remoteport, localport,
7133 +                                               service->localport);
7134 +                       }
7135 +
7136 +                       if (!service) {
7137 +                               vchiq_log_error(vchiq_core_log_level,
7138 +                                       "%d: prs %s@%x (%d->%d) - "
7139 +                                       "invalid/closed service %d",
7140 +                                       state->id, msg_type_str(type),
7141 +                                       (unsigned int)header,
7142 +                                       remoteport, localport, localport);
7143 +                               goto skip_message;
7144 +                       }
7145 +                       break;
7146 +               default:
7147 +                       break;
7148 +               }
7149 +
7150 +               if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7151 +                       int svc_fourcc;
7152 +
7153 +                       svc_fourcc = service
7154 +                               ? service->base.fourcc
7155 +                               : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7156 +                       vchiq_log_info(vchiq_core_msg_log_level,
7157 +                               "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7158 +                               "len:%d",
7159 +                               msg_type_str(type), type,
7160 +                               VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7161 +                               remoteport, localport, size);
7162 +                       if (size > 0)
7163 +                               vchiq_log_dump_mem("Rcvd", 0, header->data,
7164 +                                       min(64, size));
7165 +               }
7166 +
7167 +               if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7168 +                       > VCHIQ_SLOT_SIZE) {
7169 +                       vchiq_log_error(vchiq_core_log_level,
7170 +                               "header %x (msgid %x) - size %x too big for "
7171 +                               "slot",
7172 +                               (unsigned int)header, (unsigned int)msgid,
7173 +                               (unsigned int)size);
7174 +                       WARN(1, "oversized for slot\n");
7175 +               }
7176 +
7177 +               switch (type) {
7178 +               case VCHIQ_MSG_OPEN:
7179 +                       WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7180 +                       if (!parse_open(state, header))
7181 +                               goto bail_not_ready;
7182 +                       break;
7183 +               case VCHIQ_MSG_OPENACK:
7184 +                       if (size >= sizeof(struct vchiq_openack_payload)) {
7185 +                               const struct vchiq_openack_payload *payload =
7186 +                                       (struct vchiq_openack_payload *)
7187 +                                       header->data;
7188 +                               service->peer_version = payload->version;
7189 +                       }
7190 +                       vchiq_log_info(vchiq_core_log_level,
7191 +                               "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7192 +                               state->id, (unsigned int)header, size,
7193 +                               remoteport, localport, service->peer_version);
7194 +                       if (service->srvstate ==
7195 +                               VCHIQ_SRVSTATE_OPENING) {
7196 +                               service->remoteport = remoteport;
7197 +                               vchiq_set_service_state(service,
7198 +                                       VCHIQ_SRVSTATE_OPEN);
7199 +                               up(&service->remove_event);
7200 +                       } else
7201 +                               vchiq_log_error(vchiq_core_log_level,
7202 +                                       "OPENACK received in state %s",
7203 +                                       srvstate_names[service->srvstate]);
7204 +                       break;
7205 +               case VCHIQ_MSG_CLOSE:
7206 +                       WARN_ON(size != 0); /* There should be no data */
7207 +
7208 +                       vchiq_log_info(vchiq_core_log_level,
7209 +                               "%d: prs CLOSE@%x (%d->%d)",
7210 +                               state->id, (unsigned int)header,
7211 +                               remoteport, localport);
7212 +
7213 +                       mark_service_closing_internal(service, 1);
7214 +
7215 +                       if (vchiq_close_service_internal(service,
7216 +                               1/*close_recvd*/) == VCHIQ_RETRY)
7217 +                               goto bail_not_ready;
7218 +
7219 +                       vchiq_log_info(vchiq_core_log_level,
7220 +                               "Close Service %c%c%c%c s:%u d:%d",
7221 +                               VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7222 +                               service->localport,
7223 +                               service->remoteport);
7224 +                       break;
7225 +               case VCHIQ_MSG_DATA:
7226 +                       vchiq_log_trace(vchiq_core_log_level,
7227 +                               "%d: prs DATA@%x,%x (%d->%d)",
7228 +                               state->id, (unsigned int)header, size,
7229 +                               remoteport, localport);
7230 +
7231 +                       if ((service->remoteport == remoteport)
7232 +                               && (service->srvstate ==
7233 +                               VCHIQ_SRVSTATE_OPEN)) {
7234 +                               header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7235 +                               claim_slot(state->rx_info);
7236 +                               DEBUG_TRACE(PARSE_LINE);
7237 +                               if (make_service_callback(service,
7238 +                                       VCHIQ_MESSAGE_AVAILABLE, header,
7239 +                                       NULL) == VCHIQ_RETRY) {
7240 +                                       DEBUG_TRACE(PARSE_LINE);
7241 +                                       goto bail_not_ready;
7242 +                               }
7243 +                               VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7244 +                               VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7245 +                                       size);
7246 +                       } else {
7247 +                               VCHIQ_STATS_INC(state, error_count);
7248 +                       }
7249 +                       break;
7250 +               case VCHIQ_MSG_CONNECT:
7251 +                       vchiq_log_info(vchiq_core_log_level,
7252 +                               "%d: prs CONNECT@%x",
7253 +                               state->id, (unsigned int)header);
7254 +                       up(&state->connect);
7255 +                       break;
7256 +               case VCHIQ_MSG_BULK_RX:
7257 +               case VCHIQ_MSG_BULK_TX: {
7258 +                       VCHIQ_BULK_QUEUE_T *queue;
7259 +                       WARN_ON(!state->is_master);
7260 +                       queue = (type == VCHIQ_MSG_BULK_RX) ?
7261 +                               &service->bulk_tx : &service->bulk_rx;
7262 +                       if ((service->remoteport == remoteport)
7263 +                               && (service->srvstate ==
7264 +                               VCHIQ_SRVSTATE_OPEN)) {
7265 +                               VCHIQ_BULK_T *bulk;
7266 +                               int resolved = 0;
7267 +
7268 +                               DEBUG_TRACE(PARSE_LINE);
7269 +                               if (mutex_lock_interruptible(
7270 +                                       &service->bulk_mutex) != 0) {
7271 +                                       DEBUG_TRACE(PARSE_LINE);
7272 +                                       goto bail_not_ready;
7273 +                               }
7274 +
7275 +                               WARN_ON(!(queue->remote_insert < queue->remove +
7276 +                                       VCHIQ_NUM_SERVICE_BULKS));
7277 +                               bulk = &queue->bulks[
7278 +                                       BULK_INDEX(queue->remote_insert)];
7279 +                               bulk->remote_data =
7280 +                                       (void *)((int *)header->data)[0];
7281 +                               bulk->remote_size = ((int *)header->data)[1];
7282 +                               wmb();
7283 +
7284 +                               vchiq_log_info(vchiq_core_log_level,
7285 +                                       "%d: prs %s@%x (%d->%d) %x@%x",
7286 +                                       state->id, msg_type_str(type),
7287 +                                       (unsigned int)header,
7288 +                                       remoteport, localport,
7289 +                                       bulk->remote_size,
7290 +                                       (unsigned int)bulk->remote_data);
7291 +
7292 +                               queue->remote_insert++;
7293 +
7294 +                               if (atomic_read(&pause_bulks_count)) {
7295 +                                       state->deferred_bulks++;
7296 +                                       vchiq_log_info(vchiq_core_log_level,
7297 +                                               "%s: deferring bulk (%d)",
7298 +                                               __func__,
7299 +                                               state->deferred_bulks);
7300 +                                       if (state->conn_state !=
7301 +                                               VCHIQ_CONNSTATE_PAUSE_SENT)
7302 +                                               vchiq_log_error(
7303 +                                                       vchiq_core_log_level,
7304 +                                                       "%s: bulks paused in "
7305 +                                                       "unexpected state %s",
7306 +                                                       __func__,
7307 +                                                       conn_state_names[
7308 +                                                       state->conn_state]);
7309 +                               } else if (state->conn_state ==
7310 +                                       VCHIQ_CONNSTATE_CONNECTED) {
7311 +                                       DEBUG_TRACE(PARSE_LINE);
7312 +                                       resolved = resolve_bulks(service,
7313 +                                               queue);
7314 +                               }
7315 +
7316 +                               mutex_unlock(&service->bulk_mutex);
7317 +                               if (resolved)
7318 +                                       notify_bulks(service, queue,
7319 +                                               1/*retry_poll*/);
7320 +                       }
7321 +               } break;
7322 +               case VCHIQ_MSG_BULK_RX_DONE:
7323 +               case VCHIQ_MSG_BULK_TX_DONE:
7324 +                       WARN_ON(state->is_master);
7325 +                       if ((service->remoteport == remoteport)
7326 +                               && (service->srvstate !=
7327 +                               VCHIQ_SRVSTATE_FREE)) {
7328 +                               VCHIQ_BULK_QUEUE_T *queue;
7329 +                               VCHIQ_BULK_T *bulk;
7330 +
7331 +                               queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7332 +                                       &service->bulk_rx : &service->bulk_tx;
7333 +
7334 +                               DEBUG_TRACE(PARSE_LINE);
7335 +                               if (mutex_lock_interruptible(
7336 +                                       &service->bulk_mutex) != 0) {
7337 +                                       DEBUG_TRACE(PARSE_LINE);
7338 +                                       goto bail_not_ready;
7339 +                               }
7340 +                               if ((int)(queue->remote_insert -
7341 +                                       queue->local_insert) >= 0) {
7342 +                                       vchiq_log_error(vchiq_core_log_level,
7343 +                                               "%d: prs %s@%x (%d->%d) "
7344 +                                               "unexpected (ri=%d,li=%d)",
7345 +                                               state->id, msg_type_str(type),
7346 +                                               (unsigned int)header,
7347 +                                               remoteport, localport,
7348 +                                               queue->remote_insert,
7349 +                                               queue->local_insert);
7350 +                                       mutex_unlock(&service->bulk_mutex);
7351 +                                       break;
7352 +                               }
7353 +
7354 +                               BUG_ON(queue->process == queue->local_insert);
7355 +                               BUG_ON(queue->process != queue->remote_insert);
7356 +
7357 +                               bulk = &queue->bulks[
7358 +                                       BULK_INDEX(queue->remote_insert)];
7359 +                               bulk->actual = *(int *)header->data;
7360 +                               queue->remote_insert++;
7361 +
7362 +                               vchiq_log_info(vchiq_core_log_level,
7363 +                                       "%d: prs %s@%x (%d->%d) %x@%x",
7364 +                                       state->id, msg_type_str(type),
7365 +                                       (unsigned int)header,
7366 +                                       remoteport, localport,
7367 +                                       bulk->actual, (unsigned int)bulk->data);
7368 +
7369 +                               vchiq_log_trace(vchiq_core_log_level,
7370 +                                       "%d: prs:%d %cx li=%x ri=%x p=%x",
7371 +                                       state->id, localport,
7372 +                                       (type == VCHIQ_MSG_BULK_RX_DONE) ?
7373 +                                               'r' : 't',
7374 +                                       queue->local_insert,
7375 +                                       queue->remote_insert, queue->process);
7376 +
7377 +                               DEBUG_TRACE(PARSE_LINE);
7378 +                               WARN_ON(queue->process == queue->local_insert);
7379 +                               vchiq_complete_bulk(bulk);
7380 +                               queue->process++;
7381 +                               mutex_unlock(&service->bulk_mutex);
7382 +                               DEBUG_TRACE(PARSE_LINE);
7383 +                               notify_bulks(service, queue, 1/*retry_poll*/);
7384 +                               DEBUG_TRACE(PARSE_LINE);
7385 +                       }
7386 +                       break;
7387 +               case VCHIQ_MSG_PADDING:
7388 +                       vchiq_log_trace(vchiq_core_log_level,
7389 +                               "%d: prs PADDING@%x,%x",
7390 +                               state->id, (unsigned int)header, size);
7391 +                       break;
7392 +               case VCHIQ_MSG_PAUSE:
7393 +                       /* If initiated, signal the application thread */
7394 +                       vchiq_log_trace(vchiq_core_log_level,
7395 +                               "%d: prs PAUSE@%x,%x",
7396 +                               state->id, (unsigned int)header, size);
7397 +                       if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7398 +                               vchiq_log_error(vchiq_core_log_level,
7399 +                                       "%d: PAUSE received in state PAUSED",
7400 +                                       state->id);
7401 +                               break;
7402 +                       }
7403 +                       if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7404 +                               /* Send a PAUSE in response */
7405 +                               if (queue_message(state, NULL,
7406 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7407 +                                       NULL, 0, 0, 0) == VCHIQ_RETRY)
7408 +                                       goto bail_not_ready;
7409 +                               if (state->is_master)
7410 +                                       pause_bulks(state);
7411 +                       }
7412 +                       /* At this point slot_mutex is held */
7413 +                       vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7414 +                       vchiq_platform_paused(state);
7415 +                       break;
7416 +               case VCHIQ_MSG_RESUME:
7417 +                       vchiq_log_trace(vchiq_core_log_level,
7418 +                               "%d: prs RESUME@%x,%x",
7419 +                               state->id, (unsigned int)header, size);
7420 +                       /* Release the slot mutex */
7421 +                       mutex_unlock(&state->slot_mutex);
7422 +                       if (state->is_master)
7423 +                               resume_bulks(state);
7424 +                       vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7425 +                       vchiq_platform_resumed(state);
7426 +                       break;
7427 +
7428 +               case VCHIQ_MSG_REMOTE_USE:
7429 +                       vchiq_on_remote_use(state);
7430 +                       break;
7431 +               case VCHIQ_MSG_REMOTE_RELEASE:
7432 +                       vchiq_on_remote_release(state);
7433 +                       break;
7434 +               case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7435 +                       vchiq_on_remote_use_active(state);
7436 +                       break;
7437 +
7438 +               default:
7439 +                       vchiq_log_error(vchiq_core_log_level,
7440 +                               "%d: prs invalid msgid %x@%x,%x",
7441 +                               state->id, msgid, (unsigned int)header, size);
7442 +                       WARN(1, "invalid message\n");
7443 +                       break;
7444 +               }
7445 +
7446 +skip_message:
7447 +               if (service) {
7448 +                       unlock_service(service);
7449 +                       service = NULL;
7450 +               }
7451 +
7452 +               state->rx_pos += calc_stride(size);
7453 +
7454 +               DEBUG_TRACE(PARSE_LINE);
7455 +               /* Perform some housekeeping when the end of the slot is
7456 +               ** reached. */
7457 +               if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7458 +                       /* Remove the extra reference count. */
7459 +                       release_slot(state, state->rx_info, NULL, NULL);
7460 +                       state->rx_data = NULL;
7461 +               }
7462 +       }
7463 +
7464 +bail_not_ready:
7465 +       if (service)
7466 +               unlock_service(service);
7467 +}
7468 +
7469 +/* Called by the slot handler thread */
7470 +static int
7471 +slot_handler_func(void *v)
7472 +{
7473 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7474 +       VCHIQ_SHARED_STATE_T *local = state->local;
7475 +       DEBUG_INITIALISE(local)
7476 +
7477 +       while (1) {
7478 +               DEBUG_COUNT(SLOT_HANDLER_COUNT);
7479 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
7480 +               remote_event_wait(&local->trigger);
7481 +
7482 +               rmb();
7483 +
7484 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
7485 +               if (state->poll_needed) {
7486 +                       /* Check if we need to suspend - may change our
7487 +                        * conn_state */
7488 +                       vchiq_platform_check_suspend(state);
7489 +
7490 +                       state->poll_needed = 0;
7491 +
7492 +                       /* Handle service polling and other rare conditions here
7493 +                       ** out of the mainline code */
7494 +                       switch (state->conn_state) {
7495 +                       case VCHIQ_CONNSTATE_CONNECTED:
7496 +                               /* Poll the services as requested */
7497 +                               poll_services(state);
7498 +                               break;
7499 +
7500 +                       case VCHIQ_CONNSTATE_PAUSING:
7501 +                               if (state->is_master)
7502 +                                       pause_bulks(state);
7503 +                               if (queue_message(state, NULL,
7504 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7505 +                                       NULL, 0, 0, 0) != VCHIQ_RETRY) {
7506 +                                       vchiq_set_conn_state(state,
7507 +                                               VCHIQ_CONNSTATE_PAUSE_SENT);
7508 +                               } else {
7509 +                                       if (state->is_master)
7510 +                                               resume_bulks(state);
7511 +                                       /* Retry later */
7512 +                                       state->poll_needed = 1;
7513 +                               }
7514 +                               break;
7515 +
7516 +                       case VCHIQ_CONNSTATE_PAUSED:
7517 +                               vchiq_platform_resume(state);
7518 +                               break;
7519 +
7520 +                       case VCHIQ_CONNSTATE_RESUMING:
7521 +                               if (queue_message(state, NULL,
7522 +                                       VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7523 +                                       NULL, 0, 0, 0) != VCHIQ_RETRY) {
7524 +                                       if (state->is_master)
7525 +                                               resume_bulks(state);
7526 +                                       vchiq_set_conn_state(state,
7527 +                                               VCHIQ_CONNSTATE_CONNECTED);
7528 +                                       vchiq_platform_resumed(state);
7529 +                               } else {
7530 +                                       /* This should really be impossible,
7531 +                                       ** since the PAUSE should have flushed
7532 +                                       ** through outstanding messages. */
7533 +                                       vchiq_log_error(vchiq_core_log_level,
7534 +                                               "Failed to send RESUME "
7535 +                                               "message");
7536 +                                       BUG();
7537 +                               }
7538 +                               break;
7539 +
7540 +                       case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7541 +                       case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7542 +                               vchiq_platform_handle_timeout(state);
7543 +                               break;
7544 +                       default:
7545 +                               break;
7546 +                       }
7547 +
7548 +
7549 +               }
7550 +
7551 +               DEBUG_TRACE(SLOT_HANDLER_LINE);
7552 +               parse_rx_slots(state);
7553 +       }
7554 +       return 0;
7555 +}
7556 +
7557 +
7558 +/* Called by the recycle thread */
7559 +static int
7560 +recycle_func(void *v)
7561 +{
7562 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7563 +       VCHIQ_SHARED_STATE_T *local = state->local;
7564 +
7565 +       while (1) {
7566 +               remote_event_wait(&local->recycle);
7567 +
7568 +               process_free_queue(state);
7569 +       }
7570 +       return 0;
7571 +}
7572 +
7573 +
7574 +/* Called by the sync thread */
7575 +static int
7576 +sync_func(void *v)
7577 +{
7578 +       VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7579 +       VCHIQ_SHARED_STATE_T *local = state->local;
7580 +       VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7581 +               state->remote->slot_sync);
7582 +
7583 +       while (1) {
7584 +               VCHIQ_SERVICE_T *service;
7585 +               int msgid, size;
7586 +               int type;
7587 +               unsigned int localport, remoteport;
7588 +
7589 +               remote_event_wait(&local->sync_trigger);
7590 +
7591 +               rmb();
7592 +
7593 +               msgid = header->msgid;
7594 +               size = header->size;
7595 +               type = VCHIQ_MSG_TYPE(msgid);
7596 +               localport = VCHIQ_MSG_DSTPORT(msgid);
7597 +               remoteport = VCHIQ_MSG_SRCPORT(msgid);
7598 +
7599 +               service = find_service_by_port(state, localport);
7600 +
7601 +               if (!service) {
7602 +                       vchiq_log_error(vchiq_sync_log_level,
7603 +                               "%d: sf %s@%x (%d->%d) - "
7604 +                               "invalid/closed service %d",
7605 +                               state->id, msg_type_str(type),
7606 +                               (unsigned int)header,
7607 +                               remoteport, localport, localport);
7608 +                       release_message_sync(state, header);
7609 +                       continue;
7610 +               }
7611 +
7612 +               if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7613 +                       int svc_fourcc;
7614 +
7615 +                       svc_fourcc = service
7616 +                               ? service->base.fourcc
7617 +                               : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7618 +                       vchiq_log_trace(vchiq_sync_log_level,
7619 +                               "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7620 +                               msg_type_str(type),
7621 +                               VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7622 +                               remoteport, localport, size);
7623 +                       if (size > 0)
7624 +                               vchiq_log_dump_mem("Rcvd", 0, header->data,
7625 +                                       min(64, size));
7626 +               }
7627 +
7628 +               switch (type) {
7629 +               case VCHIQ_MSG_OPENACK:
7630 +                       if (size >= sizeof(struct vchiq_openack_payload)) {
7631 +                               const struct vchiq_openack_payload *payload =
7632 +                                       (struct vchiq_openack_payload *)
7633 +                                       header->data;
7634 +                               service->peer_version = payload->version;
7635 +                       }
7636 +                       vchiq_log_info(vchiq_sync_log_level,
7637 +                               "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7638 +                               state->id, (unsigned int)header, size,
7639 +                               remoteport, localport, service->peer_version);
7640 +                       if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7641 +                               service->remoteport = remoteport;
7642 +                               vchiq_set_service_state(service,
7643 +                                       VCHIQ_SRVSTATE_OPENSYNC);
7644 +                               up(&service->remove_event);
7645 +                       }
7646 +                       release_message_sync(state, header);
7647 +                       break;
7648 +
7649 +               case VCHIQ_MSG_DATA:
7650 +                       vchiq_log_trace(vchiq_sync_log_level,
7651 +                               "%d: sf DATA@%x,%x (%d->%d)",
7652 +                               state->id, (unsigned int)header, size,
7653 +                               remoteport, localport);
7654 +
7655 +                       if ((service->remoteport == remoteport) &&
7656 +                               (service->srvstate ==
7657 +                               VCHIQ_SRVSTATE_OPENSYNC)) {
7658 +                               if (make_service_callback(service,
7659 +                                       VCHIQ_MESSAGE_AVAILABLE, header,
7660 +                                       NULL) == VCHIQ_RETRY)
7661 +                                       vchiq_log_error(vchiq_sync_log_level,
7662 +                                               "synchronous callback to "
7663 +                                               "service %d returns "
7664 +                                               "VCHIQ_RETRY",
7665 +                                               localport);
7666 +                       }
7667 +                       break;
7668 +
7669 +               default:
7670 +                       vchiq_log_error(vchiq_sync_log_level,
7671 +                               "%d: sf unexpected msgid %x@%x,%x",
7672 +                               state->id, msgid, (unsigned int)header, size);
7673 +                       release_message_sync(state, header);
7674 +                       break;
7675 +               }
7676 +
7677 +               unlock_service(service);
7678 +       }
7679 +
7680 +       return 0;
7681 +}
7682 +
7683 +
7684 +static void
7685 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7686 +{
7687 +       queue->local_insert = 0;
7688 +       queue->remote_insert = 0;
7689 +       queue->process = 0;
7690 +       queue->remote_notify = 0;
7691 +       queue->remove = 0;
7692 +}
7693 +
7694 +
7695 +inline const char *
7696 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7697 +{
7698 +       return conn_state_names[conn_state];
7699 +}
7700 +
7701 +
7702 +VCHIQ_SLOT_ZERO_T *
7703 +vchiq_init_slots(void *mem_base, int mem_size)
7704 +{
7705 +       int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7706 +       VCHIQ_SLOT_ZERO_T *slot_zero =
7707 +               (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7708 +       int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7709 +       int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7710 +
7711 +       /* Ensure there is enough memory to run an absolutely minimum system */
7712 +       num_slots -= first_data_slot;
7713 +
7714 +       if (num_slots < 4) {
7715 +               vchiq_log_error(vchiq_core_log_level,
7716 +                       "vchiq_init_slots - insufficient memory %x bytes",
7717 +                       mem_size);
7718 +               return NULL;
7719 +       }
7720 +
7721 +       memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7722 +
7723 +       slot_zero->magic = VCHIQ_MAGIC;
7724 +       slot_zero->version = VCHIQ_VERSION;
7725 +       slot_zero->version_min = VCHIQ_VERSION_MIN;
7726 +       slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7727 +       slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7728 +       slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7729 +       slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7730 +
7731 +       slot_zero->master.slot_sync = first_data_slot;
7732 +       slot_zero->master.slot_first = first_data_slot + 1;
7733 +       slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
7734 +       slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
7735 +       slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
7736 +       slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
7737 +
7738 +       return slot_zero;
7739 +}
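+/*
+ * Worked example (illustrative only; a 4096-byte VCHIQ_SLOT_SIZE and a
+ * VCHIQ_SLOT_ZERO_SLOTS of 2 are assumptions, not values taken from this
+ * patch): with an aligned mem_base and a mem_size of 64 KiB there are 16
+ * slots in total, slots 0-1 hold slot_zero, and num_slots becomes 14.  The
+ * master then gets slot 2 as its sync slot and slots 3..8 for data, while
+ * the slave gets slot 9 for sync and slots 10..15 for data.
+ */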
7740 +
7741 +VCHIQ_STATUS_T
7742 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
7743 +                int is_master)
7744 +{
7745 +       VCHIQ_SHARED_STATE_T *local;
7746 +       VCHIQ_SHARED_STATE_T *remote;
7747 +       VCHIQ_STATUS_T status;
7748 +       char threadname[10];
7749 +       static int id;
7750 +       int i;
7751 +
7752 +       vchiq_log_warning(vchiq_core_log_level,
7753 +               "%s: slot_zero = 0x%08lx, is_master = %d",
7754 +               __func__, (unsigned long)slot_zero, is_master);
7755 +
7756 +       /* Check the input configuration */
7757 +
7758 +       if (slot_zero->magic != VCHIQ_MAGIC) {
7759 +               vchiq_loud_error_header();
7760 +               vchiq_loud_error("Invalid VCHIQ magic value found.");
7761 +               vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
7762 +                       (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
7763 +               vchiq_loud_error_footer();
7764 +               return VCHIQ_ERROR;
7765 +       }
7766 +
7767 +       if (slot_zero->version < VCHIQ_VERSION_MIN) {
7768 +               vchiq_loud_error_header();
7769 +               vchiq_loud_error("Incompatible VCHIQ versions found.");
7770 +               vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
7771 +                       "(minimum %d)",
7772 +                       (unsigned int)slot_zero, slot_zero->version,
7773 +                       VCHIQ_VERSION_MIN);
7774 +               vchiq_loud_error("Restart with a newer VideoCore image.");
7775 +               vchiq_loud_error_footer();
7776 +               return VCHIQ_ERROR;
7777 +       }
7778 +
7779 +       if (VCHIQ_VERSION < slot_zero->version_min) {
7780 +               vchiq_loud_error_header();
7781 +               vchiq_loud_error("Incompatible VCHIQ versions found.");
7782 +               vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
7783 +                       "minimum %d)",
7784 +                       (unsigned int)slot_zero, VCHIQ_VERSION,
7785 +                       slot_zero->version_min);
7786 +               vchiq_loud_error("Restart with a newer kernel.");
7787 +               vchiq_loud_error_footer();
7788 +               return VCHIQ_ERROR;
7789 +       }
7790 +
7791 +       if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
7792 +                (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
7793 +                (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
7794 +                (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
7795 +               vchiq_loud_error_header();
7796 +               if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
7797 +                       vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
7798 +                               "(expected %x)",
7799 +                               (unsigned int)slot_zero,
7800 +                               slot_zero->slot_zero_size,
7801 +                               sizeof(VCHIQ_SLOT_ZERO_T));
7802 +               if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
7803 +                       vchiq_loud_error("slot_zero=%x: slot_size=%d "
7804 +                               "(expected %d)",
7805 +                               (unsigned int)slot_zero, slot_zero->slot_size,
7806 +                               VCHIQ_SLOT_SIZE);
7807 +               if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
7808 +                       vchiq_loud_error("slot_zero=%x: max_slots=%d "
7809 +                               "(expected %d)",
7810 +                               (unsigned int)slot_zero, slot_zero->max_slots,
7811 +                               VCHIQ_MAX_SLOTS);
7812 +               if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
7813 +                       vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
7814 +                               "(expected %d)",
7815 +                               (unsigned int)slot_zero,
7816 +                               slot_zero->max_slots_per_side,
7817 +                               VCHIQ_MAX_SLOTS_PER_SIDE);
7818 +               vchiq_loud_error_footer();
7819 +               return VCHIQ_ERROR;
7820 +       }
7821 +
7822 +       if (is_master) {
7823 +               local = &slot_zero->master;
7824 +               remote = &slot_zero->slave;
7825 +       } else {
7826 +               local = &slot_zero->slave;
7827 +               remote = &slot_zero->master;
7828 +       }
7829 +
7830 +       if (local->initialised) {
7831 +               vchiq_loud_error_header();
7832 +               if (remote->initialised)
7833 +                       vchiq_loud_error("local state has already been "
7834 +                               "initialised");
7835 +               else
7836 +                       vchiq_loud_error("master/slave mismatch - two %ss",
7837 +                               is_master ? "master" : "slave");
7838 +               vchiq_loud_error_footer();
7839 +               return VCHIQ_ERROR;
7840 +       }
7841 +
7842 +       memset(state, 0, sizeof(VCHIQ_STATE_T));
7843 +
7844 +       state->id = id++;
7845 +       state->is_master = is_master;
7846 +
7847 +       /*
7848 +               initialize shared state pointers
7849 +        */
7850 +
7851 +       state->local = local;
7852 +       state->remote = remote;
7853 +       state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
7854 +
7855 +       /*
7856 +               initialize events and mutexes
7857 +        */
7858 +
7859 +       sema_init(&state->connect, 0);
7860 +       mutex_init(&state->mutex);
7861 +       sema_init(&state->trigger_event, 0);
7862 +       sema_init(&state->recycle_event, 0);
7863 +       sema_init(&state->sync_trigger_event, 0);
7864 +       sema_init(&state->sync_release_event, 0);
7865 +
7866 +       mutex_init(&state->slot_mutex);
7867 +       mutex_init(&state->recycle_mutex);
7868 +       mutex_init(&state->sync_mutex);
7869 +       mutex_init(&state->bulk_transfer_mutex);
7870 +
7871 +       sema_init(&state->slot_available_event, 0);
7872 +       sema_init(&state->slot_remove_event, 0);
7873 +       sema_init(&state->data_quota_event, 0);
7874 +
7875 +       state->slot_queue_available = 0;
7876 +
7877 +       for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
7878 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
7879 +                       &state->service_quotas[i];
7880 +               sema_init(&service_quota->quota_event, 0);
7881 +       }
7882 +
7883 +       for (i = local->slot_first; i <= local->slot_last; i++) {
7884 +               local->slot_queue[state->slot_queue_available++] = i;
7885 +               up(&state->slot_available_event);
7886 +       }
7887 +
7888 +       state->default_slot_quota = state->slot_queue_available/2;
7889 +       state->default_message_quota =
7890 +               min((unsigned short)(state->default_slot_quota * 256),
7891 +               (unsigned short)~0);
7892 +
7893 +       state->previous_data_index = -1;
7894 +       state->data_use_count = 0;
7895 +       state->data_quota = state->slot_queue_available - 1;
7896 +
7897 +       local->trigger.event = &state->trigger_event;
7898 +       remote_event_create(&local->trigger);
7899 +       local->tx_pos = 0;
7900 +
7901 +       local->recycle.event = &state->recycle_event;
7902 +       remote_event_create(&local->recycle);
7903 +       local->slot_queue_recycle = state->slot_queue_available;
7904 +
7905 +       local->sync_trigger.event = &state->sync_trigger_event;
7906 +       remote_event_create(&local->sync_trigger);
7907 +
7908 +       local->sync_release.event = &state->sync_release_event;
7909 +       remote_event_create(&local->sync_release);
7910 +
7911 +       /* At start-of-day, the slot is empty and available */
7912 +       ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
7913 +               = VCHIQ_MSGID_PADDING;
7914 +       remote_event_signal_local(&local->sync_release);
7915 +
7916 +       local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
7917 +
7918 +       status = vchiq_platform_init_state(state);
7919 +
7920 +       /*
7921 +               bring up slot handler thread
7922 +        */
7923 +       snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
7924 +       state->slot_handler_thread = kthread_create(&slot_handler_func,
7925 +               (void *)state,
7926 +               threadname);
7927 +
7928 +       if (state->slot_handler_thread == NULL) {
7929 +               vchiq_loud_error_header();
7930 +               vchiq_loud_error("couldn't create thread %s", threadname);
7931 +               vchiq_loud_error_footer();
7932 +               return VCHIQ_ERROR;
7933 +       }
7934 +       set_user_nice(state->slot_handler_thread, -19);
7935 +       wake_up_process(state->slot_handler_thread);
7936 +
7937 +       snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
7938 +       state->recycle_thread = kthread_create(&recycle_func,
7939 +               (void *)state,
7940 +               threadname);
7941 +       if (state->recycle_thread == NULL) {
7942 +               vchiq_loud_error_header();
7943 +               vchiq_loud_error("couldn't create thread %s", threadname);
7944 +               vchiq_loud_error_footer();
7945 +               return VCHIQ_ERROR;
7946 +       }
7947 +       set_user_nice(state->recycle_thread, -19);
7948 +       wake_up_process(state->recycle_thread);
7949 +
7950 +       snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
7951 +       state->sync_thread = kthread_create(&sync_func,
7952 +               (void *)state,
7953 +               threadname);
7954 +       if (state->sync_thread == NULL) {
7955 +               vchiq_loud_error_header();
7956 +               vchiq_loud_error("couldn't create thread %s", threadname);
7957 +               vchiq_loud_error_footer();
7958 +               return VCHIQ_ERROR;
7959 +       }
7960 +       set_user_nice(state->sync_thread, -20);
7961 +       wake_up_process(state->sync_thread);
7962 +
7963 +       BUG_ON(state->id >= VCHIQ_MAX_STATES);
7964 +       vchiq_states[state->id] = state;
7965 +
7966 +       /* Indicate readiness to the other side */
7967 +       local->initialised = 1;
7968 +
7969 +       return status;
7970 +}
7971 +
7972 +/* Called from application thread when a client or server service is created. */
7973 +VCHIQ_SERVICE_T *
7974 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
7975 +       const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
7976 +       VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
7977 +{
7978 +       VCHIQ_SERVICE_T *service;
7979 +
7980 +       service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
7981 +       if (service) {
7982 +               service->base.fourcc   = params->fourcc;
7983 +               service->base.callback = params->callback;
7984 +               service->base.userdata = params->userdata;
7985 +               service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
7986 +               service->ref_count     = 1;
7987 +               service->srvstate      = VCHIQ_SRVSTATE_FREE;
7988 +               service->userdata_term = userdata_term;
7989 +               service->localport     = VCHIQ_PORT_FREE;
7990 +               service->remoteport    = VCHIQ_PORT_FREE;
7991 +
7992 +               service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
7993 +                       VCHIQ_FOURCC_INVALID : params->fourcc;
7994 +               service->client_id     = 0;
7995 +               service->auto_close    = 1;
7996 +               service->sync          = 0;
7997 +               service->closing       = 0;
7998 +               atomic_set(&service->poll_flags, 0);
7999 +               service->version       = params->version;
8000 +               service->version_min   = params->version_min;
8001 +               service->state         = state;
8002 +               service->instance      = instance;
8003 +               service->service_use_count = 0;
8004 +               init_bulk_queue(&service->bulk_tx);
8005 +               init_bulk_queue(&service->bulk_rx);
8006 +               sema_init(&service->remove_event, 0);
8007 +               sema_init(&service->bulk_remove_event, 0);
8008 +               mutex_init(&service->bulk_mutex);
8009 +               memset(&service->stats, 0, sizeof(service->stats));
8010 +       } else {
8011 +               vchiq_log_error(vchiq_core_log_level,
8012 +                       "Out of memory");
8013 +       }
8014 +
8015 +       if (service) {
8016 +               VCHIQ_SERVICE_T **pservice = NULL;
8017 +               int i;
8018 +
8019 +               /* Although it is perfectly possible to use service_spinlock
8020 +               ** to protect the creation of services, it is overkill as it
8021 +               ** disables interrupts while the array is searched.
8022 +               ** The only danger is of another thread trying to create a
8023 +               ** service - service deletion is safe.
8024 +               ** Therefore it is preferable to use state->mutex which,
8025 +               ** although slower to claim, doesn't block interrupts while
8026 +               ** it is held.
8027 +               */
8028 +
8029 +               mutex_lock(&state->mutex);
8030 +
8031 +               /* Prepare to use a previously unused service */
8032 +               if (state->unused_service < VCHIQ_MAX_SERVICES)
8033 +                       pservice = &state->services[state->unused_service];
8034 +
8035 +               if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8036 +                       for (i = 0; i < state->unused_service; i++) {
8037 +                               VCHIQ_SERVICE_T *srv = state->services[i];
8038 +                               if (!srv) {
8039 +                                       pservice = &state->services[i];
8040 +                                       break;
8041 +                               }
8042 +                       }
8043 +               } else {
8044 +                       for (i = (state->unused_service - 1); i >= 0; i--) {
8045 +                               VCHIQ_SERVICE_T *srv = state->services[i];
8046 +                               if (!srv)
8047 +                                       pservice = &state->services[i];
8048 +                               else if ((srv->public_fourcc == params->fourcc)
8049 +                                       && ((srv->instance != instance) ||
8050 +                                       (srv->base.callback !=
8051 +                                       params->callback))) {
8052 +                                       /* There is another server using this
8053 +                                       ** fourcc which doesn't match. */
8054 +                                       pservice = NULL;
8055 +                                       break;
8056 +                               }
8057 +                       }
8058 +               }
8059 +
8060 +               if (pservice) {
8061 +                       service->localport = (pservice - state->services);
8062 +                       if (!handle_seq)
8063 +                               handle_seq = VCHIQ_MAX_STATES *
8064 +                                        VCHIQ_MAX_SERVICES;
8065 +                       service->handle = handle_seq |
8066 +                               (state->id * VCHIQ_MAX_SERVICES) |
8067 +                               service->localport;
8068 +                       handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8069 +                       *pservice = service;
8070 +                       if (pservice == &state->services[state->unused_service])
8071 +                               state->unused_service++;
8072 +               }
8073 +
8074 +               mutex_unlock(&state->mutex);
8075 +
8076 +               if (!pservice) {
8077 +                       kfree(service);
8078 +                       service = NULL;
8079 +               }
8080 +       }
8081 +
8082 +       if (service) {
8083 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
8084 +                       &state->service_quotas[service->localport];
8085 +               service_quota->slot_quota = state->default_slot_quota;
8086 +               service_quota->message_quota = state->default_message_quota;
8087 +               if (service_quota->slot_use_count == 0)
8088 +                       service_quota->previous_tx_index =
8089 +                               SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8090 +                               - 1;
8091 +
8092 +               /* Bring this service online */
8093 +               vchiq_set_service_state(service, srvstate);
8094 +
8095 +               vchiq_log_info(vchiq_core_msg_log_level,
8096 +                       "%s Service %c%c%c%c SrcPort:%d",
8097 +                       (srvstate == VCHIQ_SRVSTATE_OPENING)
8098 +                       ? "Open" : "Add",
8099 +                       VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8100 +                       service->localport);
8101 +       }
8102 +
8103 +       /* Don't unlock the service - leave it with a ref_count of 1. */
8104 +
8105 +       return service;
8106 +}
8107 +
8108 +VCHIQ_STATUS_T
8109 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8110 +{
8111 +       struct vchiq_open_payload payload = {
8112 +               service->base.fourcc,
8113 +               client_id,
8114 +               service->version,
8115 +               service->version_min
8116 +       };
8117 +       VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8118 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8119 +
8120 +       service->client_id = client_id;
8121 +       vchiq_use_service_internal(service);
8122 +       status = queue_message(service->state, NULL,
8123 +               VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8124 +               &body, 1, sizeof(payload), 1);
8125 +       if (status == VCHIQ_SUCCESS) {
8126 +               if (down_interruptible(&service->remove_event) != 0) {
8127 +                       status = VCHIQ_RETRY;
8128 +                       vchiq_release_service_internal(service);
8129 +               } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8130 +                       (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8131 +                       if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8132 +                               vchiq_log_error(vchiq_core_log_level,
8133 +                                       "%d: osi - srvstate = %s (ref %d)",
8134 +                                       service->state->id,
8135 +                                       srvstate_names[service->srvstate],
8136 +                                       service->ref_count);
8137 +                       status = VCHIQ_ERROR;
8138 +                       VCHIQ_SERVICE_STATS_INC(service, error_count);
8139 +                       vchiq_release_service_internal(service);
8140 +               }
8141 +       }
8142 +       return status;
8143 +}
8144 +
8145 +static void
8146 +release_service_messages(VCHIQ_SERVICE_T *service)
8147 +{
8148 +       VCHIQ_STATE_T *state = service->state;
8149 +       int slot_last = state->remote->slot_last;
8150 +       int i;
8151 +
8152 +       /* Release any claimed messages */
8153 +       for (i = state->remote->slot_first; i <= slot_last; i++) {
8154 +               VCHIQ_SLOT_INFO_T *slot_info =
8155 +                       SLOT_INFO_FROM_INDEX(state, i);
8156 +               if (slot_info->release_count != slot_info->use_count) {
8157 +                       char *data =
8158 +                               (char *)SLOT_DATA_FROM_INDEX(state, i);
8159 +                       unsigned int pos, end;
8160 +
8161 +                       end = VCHIQ_SLOT_SIZE;
8162 +                       if (data == state->rx_data)
8163 +                               /* This buffer is still being read from - stop
8164 +                               ** at the current read position */
8165 +                               end = state->rx_pos & VCHIQ_SLOT_MASK;
8166 +
8167 +                       pos = 0;
8168 +
8169 +                       while (pos < end) {
8170 +                               VCHIQ_HEADER_T *header =
8171 +                                       (VCHIQ_HEADER_T *)(data + pos);
8172 +                               int msgid = header->msgid;
8173 +                               int port = VCHIQ_MSG_DSTPORT(msgid);
8174 +                               if ((port == service->localport) &&
8175 +                                       (msgid & VCHIQ_MSGID_CLAIMED)) {
8176 +                                       vchiq_log_info(vchiq_core_log_level,
8177 +                                               "  fsi - hdr %x",
8178 +                                               (unsigned int)header);
8179 +                                       release_slot(state, slot_info, header,
8180 +                                               NULL);
8181 +                               }
8182 +                               pos += calc_stride(header->size);
8183 +                               if (pos > VCHIQ_SLOT_SIZE) {
8184 +                                       vchiq_log_error(vchiq_core_log_level,
8185 +                                               "fsi - pos %x: header %x, "
8186 +                                               "msgid %x, header->msgid %x, "
8187 +                                               "header->size %x",
8188 +                                               pos, (unsigned int)header,
8189 +                                               msgid, header->msgid,
8190 +                                               header->size);
8191 +                                       WARN(1, "invalid slot position\n");
8192 +                               }
8193 +                       }
8194 +               }
8195 +       }
8196 +}
8197 +
8198 +static int
8199 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8200 +{
8201 +       VCHIQ_STATUS_T status;
8202 +
8203 +       /* Abort any outstanding bulk transfers */
8204 +       if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8205 +               return 0;
8206 +       abort_outstanding_bulks(service, &service->bulk_tx);
8207 +       abort_outstanding_bulks(service, &service->bulk_rx);
8208 +       mutex_unlock(&service->bulk_mutex);
8209 +
8210 +       status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8211 +       if (status == VCHIQ_SUCCESS)
8212 +               status = notify_bulks(service, &service->bulk_rx,
8213 +                       0/*!retry_poll*/);
8214 +       return (status == VCHIQ_SUCCESS);
8215 +}
8216 +
8217 +static VCHIQ_STATUS_T
8218 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8219 +{
8220 +       VCHIQ_STATUS_T status;
8221 +       int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8222 +       int newstate;
8223 +
8224 +       switch (service->srvstate) {
8225 +       case VCHIQ_SRVSTATE_OPEN:
8226 +       case VCHIQ_SRVSTATE_CLOSESENT:
8227 +       case VCHIQ_SRVSTATE_CLOSERECVD:
8228 +               if (is_server) {
8229 +                       if (service->auto_close) {
8230 +                               service->client_id = 0;
8231 +                               service->remoteport = VCHIQ_PORT_FREE;
8232 +                               newstate = VCHIQ_SRVSTATE_LISTENING;
8233 +                       } else
8234 +                               newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8235 +               } else
8236 +                       newstate = VCHIQ_SRVSTATE_CLOSED;
8237 +               vchiq_set_service_state(service, newstate);
8238 +               break;
8239 +       case VCHIQ_SRVSTATE_LISTENING:
8240 +               break;
8241 +       default:
8242 +               vchiq_log_error(vchiq_core_log_level,
8243 +                       "close_service_complete(%x) called in state %s",
8244 +                       service->handle, srvstate_names[service->srvstate]);
8245 +               WARN(1, "close_service_complete in unexpected state\n");
8246 +               return VCHIQ_ERROR;
8247 +       }
8248 +
8249 +       status = make_service_callback(service,
8250 +               VCHIQ_SERVICE_CLOSED, NULL, NULL);
8251 +
8252 +       if (status != VCHIQ_RETRY) {
8253 +               int uc = service->service_use_count;
8254 +               int i;
8255 +               /* Complete the close process */
8256 +               for (i = 0; i < uc; i++)
8257 +                       /* cater for cases where close is forced and the
8258 +                       ** client may not close all its handles */
8259 +                       vchiq_release_service_internal(service);
8260 +
8261 +               service->client_id = 0;
8262 +               service->remoteport = VCHIQ_PORT_FREE;
8263 +
8264 +               if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8265 +                       vchiq_free_service_internal(service);
8266 +               else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8267 +                       if (is_server)
8268 +                               service->closing = 0;
8269 +
8270 +                       up(&service->remove_event);
8271 +               }
8272 +       } else
8273 +               vchiq_set_service_state(service, failstate);
8274 +
8275 +       return status;
8276 +}
8277 +
8278 +/* Called by the slot handler */
8279 +VCHIQ_STATUS_T
8280 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8281 +{
8282 +       VCHIQ_STATE_T *state = service->state;
8283 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8284 +       int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8285 +
8286 +       vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8287 +               service->state->id, service->localport, close_recvd,
8288 +               srvstate_names[service->srvstate]);
8289 +
8290 +       switch (service->srvstate) {
8291 +       case VCHIQ_SRVSTATE_CLOSED:
8292 +       case VCHIQ_SRVSTATE_HIDDEN:
8293 +       case VCHIQ_SRVSTATE_LISTENING:
8294 +       case VCHIQ_SRVSTATE_CLOSEWAIT:
8295 +               if (close_recvd)
8296 +                       vchiq_log_error(vchiq_core_log_level,
8297 +                               "vchiq_close_service_internal(1) called "
8298 +                               "in state %s",
8299 +                               srvstate_names[service->srvstate]);
8300 +               else if (is_server) {
8301 +                       if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8302 +                               status = VCHIQ_ERROR;
8303 +                       } else {
8304 +                               service->client_id = 0;
8305 +                               service->remoteport = VCHIQ_PORT_FREE;
8306 +                               if (service->srvstate ==
8307 +                                       VCHIQ_SRVSTATE_CLOSEWAIT)
8308 +                                       vchiq_set_service_state(service,
8309 +                                               VCHIQ_SRVSTATE_LISTENING);
8310 +                       }
8311 +                       up(&service->remove_event);
8312 +               } else
8313 +                       vchiq_free_service_internal(service);
8314 +               break;
8315 +       case VCHIQ_SRVSTATE_OPENING:
8316 +               if (close_recvd) {
8317 +                       /* The open was rejected - tell the user */
8318 +                       vchiq_set_service_state(service,
8319 +                               VCHIQ_SRVSTATE_CLOSEWAIT);
8320 +                       up(&service->remove_event);
8321 +               } else {
8322 +                       /* Shutdown mid-open - let the other side know */
8323 +                       status = queue_message(state, service,
8324 +                               VCHIQ_MAKE_MSG
8325 +                               (VCHIQ_MSG_CLOSE,
8326 +                               service->localport,
8327 +                               VCHIQ_MSG_DSTPORT(service->remoteport)),
8328 +                               NULL, 0, 0, 0);
8329 +               }
8330 +               break;
8331 +
8332 +       case VCHIQ_SRVSTATE_OPENSYNC:
8333 +               mutex_lock(&state->sync_mutex);
8334 +               /* Drop through */
8335 +
8336 +       case VCHIQ_SRVSTATE_OPEN:
8337 +               if (state->is_master || close_recvd) {
8338 +                       if (!do_abort_bulks(service))
8339 +                               status = VCHIQ_RETRY;
8340 +               }
8341 +
8342 +               release_service_messages(service);
8343 +
8344 +               if (status == VCHIQ_SUCCESS)
8345 +                       status = queue_message(state, service,
8346 +                               VCHIQ_MAKE_MSG
8347 +                               (VCHIQ_MSG_CLOSE,
8348 +                               service->localport,
8349 +                               VCHIQ_MSG_DSTPORT(service->remoteport)),
8350 +                               NULL, 0, 0, 0);
8351 +
8352 +               if (status == VCHIQ_SUCCESS) {
8353 +                       if (!close_recvd)
8354 +                               break;
8355 +               } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8356 +                       mutex_unlock(&state->sync_mutex);
8357 +                       break;
8358 +               } else
8359 +                       break;
8360 +
8361 +               status = close_service_complete(service,
8362 +                               VCHIQ_SRVSTATE_CLOSERECVD);
8363 +               break;
8364 +
8365 +       case VCHIQ_SRVSTATE_CLOSESENT:
8366 +               if (!close_recvd)
8367 +                       /* This happens when a process is killed mid-close */
8368 +                       break;
8369 +
8370 +               if (!state->is_master) {
8371 +                       if (!do_abort_bulks(service)) {
8372 +                               status = VCHIQ_RETRY;
8373 +                               break;
8374 +                       }
8375 +               }
8376 +
8377 +               if (status == VCHIQ_SUCCESS)
8378 +                       status = close_service_complete(service,
8379 +                               VCHIQ_SRVSTATE_CLOSERECVD);
8380 +               break;
8381 +
8382 +       case VCHIQ_SRVSTATE_CLOSERECVD:
8383 +               if (!close_recvd && is_server)
8384 +                       /* Force into LISTENING mode */
8385 +                       vchiq_set_service_state(service,
8386 +                               VCHIQ_SRVSTATE_LISTENING);
8387 +               status = close_service_complete(service,
8388 +                       VCHIQ_SRVSTATE_CLOSERECVD);
8389 +               break;
8390 +
8391 +       default:
8392 +               vchiq_log_error(vchiq_core_log_level,
8393 +                       "vchiq_close_service_internal(%d) called in state %s",
8394 +                       close_recvd, srvstate_names[service->srvstate]);
8395 +               break;
8396 +       }
8397 +
8398 +       return status;
8399 +}
8400 +
8401 +/* Called from the application process upon process death */
8402 +void
8403 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8404 +{
8405 +       VCHIQ_STATE_T *state = service->state;
8406 +
8407 +       vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8408 +               state->id, service->localport, service->remoteport);
8409 +
8410 +       mark_service_closing(service);
8411 +
8412 +       /* Mark the service for removal by the slot handler */
8413 +       request_poll(state, service, VCHIQ_POLL_REMOVE);
8414 +}
8415 +
8416 +/* Called from the slot handler */
8417 +void
8418 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8419 +{
8420 +       VCHIQ_STATE_T *state = service->state;
8421 +
8422 +       vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8423 +               state->id, service->localport);
8424 +
8425 +       switch (service->srvstate) {
8426 +       case VCHIQ_SRVSTATE_OPENING:
8427 +       case VCHIQ_SRVSTATE_CLOSED:
8428 +       case VCHIQ_SRVSTATE_HIDDEN:
8429 +       case VCHIQ_SRVSTATE_LISTENING:
8430 +       case VCHIQ_SRVSTATE_CLOSEWAIT:
8431 +               break;
8432 +       default:
8433 +               vchiq_log_error(vchiq_core_log_level,
8434 +                       "%d: fsi - (%d) in state %s",
8435 +                       state->id, service->localport,
8436 +                       srvstate_names[service->srvstate]);
8437 +               return;
8438 +       }
8439 +
8440 +       vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8441 +
8442 +       up(&service->remove_event);
8443 +
8444 +       /* Release the initial lock */
8445 +       unlock_service(service);
8446 +}
8447 +
8448 +VCHIQ_STATUS_T
8449 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8450 +{
8451 +       VCHIQ_SERVICE_T *service;
8452 +       int i;
8453 +
8454 +       /* Find all services registered to this client and enable them. */
8455 +       i = 0;
8456 +       while ((service = next_service_by_instance(state, instance,
8457 +               &i)) != NULL) {
8458 +               if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8459 +                       vchiq_set_service_state(service,
8460 +                               VCHIQ_SRVSTATE_LISTENING);
8461 +               unlock_service(service);
8462 +       }
8463 +
8464 +       if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8465 +               if (queue_message(state, NULL,
8466 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8467 +                       0, 1) == VCHIQ_RETRY)
8468 +                       return VCHIQ_RETRY;
8469 +
8470 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8471 +       }
8472 +
8473 +       if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8474 +               if (down_interruptible(&state->connect) != 0)
8475 +                       return VCHIQ_RETRY;
8476 +
8477 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8478 +               up(&state->connect);
8479 +       }
8480 +
8481 +       return VCHIQ_SUCCESS;
8482 +}
8483 +
8484 +VCHIQ_STATUS_T
8485 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8486 +{
8487 +       VCHIQ_SERVICE_T *service;
8488 +       int i;
8489 +
8490 +       /* Find all services registered to this instance and remove them. */
8491 +       i = 0;
8492 +       while ((service = next_service_by_instance(state, instance,
8493 +               &i)) != NULL) {
8494 +               (void)vchiq_remove_service(service->handle);
8495 +               unlock_service(service);
8496 +       }
8497 +
8498 +       return VCHIQ_SUCCESS;
8499 +}
8500 +
8501 +VCHIQ_STATUS_T
8502 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8503 +{
8504 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8505 +
8506 +       switch (state->conn_state) {
8507 +       case VCHIQ_CONNSTATE_CONNECTED:
8508 +               /* Request a pause */
8509 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8510 +               request_poll(state, NULL, 0);
8511 +               break;
8512 +       default:
8513 +               vchiq_log_error(vchiq_core_log_level,
8514 +                       "vchiq_pause_internal in state %s\n",
8515 +                       conn_state_names[state->conn_state]);
8516 +               status = VCHIQ_ERROR;
8517 +               VCHIQ_STATS_INC(state, error_count);
8518 +               break;
8519 +       }
8520 +
8521 +       return status;
8522 +}
8523 +
8524 +VCHIQ_STATUS_T
8525 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8526 +{
8527 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8528 +
8529 +       if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8530 +               vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8531 +               request_poll(state, NULL, 0);
8532 +       } else {
8533 +               status = VCHIQ_ERROR;
8534 +               VCHIQ_STATS_INC(state, error_count);
8535 +       }
8536 +
8537 +       return status;
8538 +}
8539 +
8540 +VCHIQ_STATUS_T
8541 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8542 +{
8543 +       /* Unregister the service */
8544 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8545 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8546 +
8547 +       if (!service)
8548 +               return VCHIQ_ERROR;
8549 +
8550 +       vchiq_log_info(vchiq_core_log_level,
8551 +               "%d: close_service:%d",
8552 +               service->state->id, service->localport);
8553 +
8554 +       if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8555 +               (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8556 +               (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8557 +               unlock_service(service);
8558 +               return VCHIQ_ERROR;
8559 +       }
8560 +
8561 +       mark_service_closing(service);
8562 +
8563 +       if (current == service->state->slot_handler_thread) {
8564 +               status = vchiq_close_service_internal(service,
8565 +                       0/*!close_recvd*/);
8566 +               BUG_ON(status == VCHIQ_RETRY);
8567 +       } else {
8568 +               /* Mark the service for termination by the slot handler */
8569 +               request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8570 +       }
8571 +
8572 +       while (1) {
8573 +               if (down_interruptible(&service->remove_event) != 0) {
8574 +                       status = VCHIQ_RETRY;
8575 +                       break;
8576 +               }
8577 +
8578 +               if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8579 +                       (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8580 +                       (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8581 +                       break;
8582 +
8583 +               vchiq_log_warning(vchiq_core_log_level,
8584 +                       "%d: close_service:%d - waiting in state %s",
8585 +                       service->state->id, service->localport,
8586 +                       srvstate_names[service->srvstate]);
8587 +       }
8588 +
8589 +       if ((status == VCHIQ_SUCCESS) &&
8590 +               (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8591 +               (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8592 +               status = VCHIQ_ERROR;
8593 +
8594 +       unlock_service(service);
8595 +
8596 +       return status;
8597 +}
8598 +
8599 +VCHIQ_STATUS_T
8600 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8601 +{
8602 +       /* Unregister the service */
8603 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8604 +       VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8605 +
8606 +       if (!service)
8607 +               return VCHIQ_ERROR;
8608 +
8609 +       vchiq_log_info(vchiq_core_log_level,
8610 +               "%d: remove_service:%d",
8611 +               service->state->id, service->localport);
8612 +
8613 +       if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8614 +               unlock_service(service);
8615 +               return VCHIQ_ERROR;
8616 +       }
8617 +
8618 +       mark_service_closing(service);
8619 +
8620 +       if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8621 +               (current == service->state->slot_handler_thread)) {
8622 +               /* Make it look like a client, because it must be removed and
8623 +                  not left in the LISTENING state. */
8624 +               service->public_fourcc = VCHIQ_FOURCC_INVALID;
8625 +
8626 +               status = vchiq_close_service_internal(service,
8627 +                       0/*!close_recvd*/);
8628 +               BUG_ON(status == VCHIQ_RETRY);
8629 +       } else {
8630 +               /* Mark the service for removal by the slot handler */
8631 +               request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8632 +       }
8633 +       while (1) {
8634 +               if (down_interruptible(&service->remove_event) != 0) {
8635 +                       status = VCHIQ_RETRY;
8636 +                       break;
8637 +               }
8638 +
8639 +               if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8640 +                       (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8641 +                       break;
8642 +
8643 +               vchiq_log_warning(vchiq_core_log_level,
8644 +                       "%d: remove_service:%d - waiting in state %s",
8645 +                       service->state->id, service->localport,
8646 +                       srvstate_names[service->srvstate]);
8647 +       }
8648 +
8649 +       if ((status == VCHIQ_SUCCESS) &&
8650 +               (service->srvstate != VCHIQ_SRVSTATE_FREE))
8651 +               status = VCHIQ_ERROR;
8652 +
8653 +       unlock_service(service);
8654 +
8655 +       return status;
8656 +}
8657 +
8658 +
8659 +/* This function may be called by kernel threads or user threads.
8660 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8661 + * received and the call should be retried after being returned to user
8662 + * context.
8663 + * When called in blocking mode, the userdata field points to a bulk_waiter
8664 + * structure.
8665 + */
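+/*
+ * Minimal usage sketch, assuming a kernel-mode caller that already holds a
+ * valid service handle plus a buffer "buf" of "len" bytes (the names are
+ * illustrative, not part of this interface):
+ *
+ *     struct bulk_waiter waiter;
+ *     VCHIQ_STATUS_T status;
+ *
+ *     status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
+ *             buf, len, &waiter, VCHIQ_BULK_MODE_BLOCKING,
+ *             VCHIQ_BULK_TRANSMIT);
+ *
+ * In blocking mode this function initialises the waiter and blocks on its
+ * event until the transfer completes; VCHIQ_RETRY means a signal interrupted
+ * the call and it should be retried, as described above.
+ */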
8666 +VCHIQ_STATUS_T
8667 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8668 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8669 +       VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8670 +{
8671 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8672 +       VCHIQ_BULK_QUEUE_T *queue;
8673 +       VCHIQ_BULK_T *bulk;
8674 +       VCHIQ_STATE_T *state;
8675 +       struct bulk_waiter *bulk_waiter = NULL;
8676 +       const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8677 +       const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8678 +               VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8679 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8680 +
8681 +       if (!service ||
8682 +                (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8683 +                ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8684 +                (vchiq_check_service(service) != VCHIQ_SUCCESS))
8685 +               goto error_exit;
8686 +
8687 +       switch (mode) {
8688 +       case VCHIQ_BULK_MODE_NOCALLBACK:
8689 +       case VCHIQ_BULK_MODE_CALLBACK:
8690 +               break;
8691 +       case VCHIQ_BULK_MODE_BLOCKING:
8692 +               bulk_waiter = (struct bulk_waiter *)userdata;
8693 +               sema_init(&bulk_waiter->event, 0);
8694 +               bulk_waiter->actual = 0;
8695 +               bulk_waiter->bulk = NULL;
8696 +               break;
8697 +       case VCHIQ_BULK_MODE_WAITING:
8698 +               bulk_waiter = (struct bulk_waiter *)userdata;
8699 +               bulk = bulk_waiter->bulk;
8700 +               goto waiting;
8701 +       default:
8702 +               goto error_exit;
8703 +       }
8704 +
8705 +       state = service->state;
8706 +
8707 +       queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8708 +               &service->bulk_tx : &service->bulk_rx;
8709 +
8710 +       if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8711 +               status = VCHIQ_RETRY;
8712 +               goto error_exit;
8713 +       }
8714 +
8715 +       if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8716 +               VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8717 +               do {
8718 +                       mutex_unlock(&service->bulk_mutex);
8719 +                       if (down_interruptible(&service->bulk_remove_event)
8720 +                               != 0) {
8721 +                               status = VCHIQ_RETRY;
8722 +                               goto error_exit;
8723 +                       }
8724 +                       if (mutex_lock_interruptible(&service->bulk_mutex)
8725 +                               != 0) {
8726 +                               status = VCHIQ_RETRY;
8727 +                               goto error_exit;
8728 +                       }
8729 +               } while (queue->local_insert == queue->remove +
8730 +                               VCHIQ_NUM_SERVICE_BULKS);
8731 +       }
8732 +
8733 +       bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
8734 +
8735 +       bulk->mode = mode;
8736 +       bulk->dir = dir;
8737 +       bulk->userdata = userdata;
8738 +       bulk->size = size;
8739 +       bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
8740 +
8741 +       if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
8742 +               VCHIQ_SUCCESS)
8743 +               goto unlock_error_exit;
8744 +
8745 +       wmb();
8746 +
8747 +       vchiq_log_info(vchiq_core_log_level,
8748 +               "%d: bt (%d->%d) %cx %x@%x %x",
8749 +               state->id,
8750 +               service->localport, service->remoteport, dir_char,
8751 +               size, (unsigned int)bulk->data, (unsigned int)userdata);
8752 +
8753 +       if (state->is_master) {
8754 +               queue->local_insert++;
8755 +               if (resolve_bulks(service, queue))
8756 +                       request_poll(state, service,
8757 +                               (dir == VCHIQ_BULK_TRANSMIT) ?
8758 +                               VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
8759 +       } else {
8760 +               int payload[2] = { (int)bulk->data, bulk->size };
8761 +               VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
8762 +
8763 +               status = queue_message(state, NULL,
8764 +                       VCHIQ_MAKE_MSG(dir_msgtype,
8765 +                               service->localport, service->remoteport),
8766 +                       &element, 1, sizeof(payload), 1);
8767 +               if (status != VCHIQ_SUCCESS) {
8768 +                       vchiq_complete_bulk(bulk);
8769 +                       goto unlock_error_exit;
8770 +               }
8771 +               queue->local_insert++;
8772 +       }
8773 +
8774 +       mutex_unlock(&service->bulk_mutex);
8775 +
8776 +       vchiq_log_trace(vchiq_core_log_level,
8777 +               "%d: bt:%d %cx li=%x ri=%x p=%x",
8778 +               state->id,
8779 +               service->localport, dir_char,
8780 +               queue->local_insert, queue->remote_insert, queue->process);
8781 +
8782 +waiting:
8783 +       unlock_service(service);
8784 +
8785 +       status = VCHIQ_SUCCESS;
8786 +
8787 +       if (bulk_waiter) {
8788 +               bulk_waiter->bulk = bulk;
8789 +               if (down_interruptible(&bulk_waiter->event) != 0)
8790 +                       status = VCHIQ_RETRY;
8791 +               else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
8792 +                       status = VCHIQ_ERROR;
8793 +       }
8794 +
8795 +       return status;
8796 +
8797 +unlock_error_exit:
8798 +       mutex_unlock(&service->bulk_mutex);
8799 +
8800 +error_exit:
8801 +       if (service)
8802 +               unlock_service(service);
8803 +       return status;
8804 +}
8805 +
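
The block comment above vchiq_bulk_transfer() describes the blocking and waiting modes in prose; the calling pattern is easier to see in code. Below is a minimal sketch, not part of the patch: a hypothetical kernel caller doing a blocking bulk transmit on an already-open service. Only vchiq_bulk_transfer(), struct bulk_waiter and the mode/direction enums are taken from the driver; the handle, buffer and retry handling are assumptions.

/* Illustrative sketch only; handle and buf are assumed to be valid. */
static VCHIQ_STATUS_T example_blocking_transmit(VCHIQ_SERVICE_HANDLE_T handle,
	void *buf, int len)
{
	struct bulk_waiter waiter;	/* may live on the caller's stack */
	VCHIQ_STATUS_T status;

	/* VCHIQ_BULK_MODE_BLOCKING: the call initialises waiter.event and
	 * sleeps on it until the transfer completes or a signal arrives. */
	status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
		buf, len, &waiter, VCHIQ_BULK_MODE_BLOCKING,
		VCHIQ_BULK_TRANSMIT);

	/* On VCHIQ_RETRY the bulk is still queued (waiter.bulk is set), so a
	 * caller that has dealt with the interrupting signal can resume the
	 * wait with VCHIQ_BULK_MODE_WAITING instead of re-submitting. */
	while (status == VCHIQ_RETRY && waiter.bulk != NULL)
		status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
			buf, len, &waiter, VCHIQ_BULK_MODE_WAITING,
			VCHIQ_BULK_TRANSMIT);

	return status;
}
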
8806 +VCHIQ_STATUS_T
8807 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
8808 +       const VCHIQ_ELEMENT_T *elements, unsigned int count)
8809 +{
8810 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8811 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8812 +
8813 +       unsigned int size = 0;
8814 +       unsigned int i;
8815 +
8816 +       if (!service ||
8817 +               (vchiq_check_service(service) != VCHIQ_SUCCESS))
8818 +               goto error_exit;
8819 +
8820 +       for (i = 0; i < (unsigned int)count; i++) {
8821 +               if (elements[i].size) {
8822 +                       if (elements[i].data == NULL) {
8823 +                               VCHIQ_SERVICE_STATS_INC(service, error_count);
8824 +                               goto error_exit;
8825 +                       }
8826 +                       size += elements[i].size;
8827 +               }
8828 +       }
8829 +
8830 +       if (size > VCHIQ_MAX_MSG_SIZE) {
8831 +               VCHIQ_SERVICE_STATS_INC(service, error_count);
8832 +               goto error_exit;
8833 +       }
8834 +
8835 +       switch (service->srvstate) {
8836 +       case VCHIQ_SRVSTATE_OPEN:
8837 +               status = queue_message(service->state, service,
8838 +                               VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8839 +                                       service->localport,
8840 +                                       service->remoteport),
8841 +                               elements, count, size, 1);
8842 +               break;
8843 +       case VCHIQ_SRVSTATE_OPENSYNC:
8844 +               status = queue_message_sync(service->state, service,
8845 +                               VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8846 +                                       service->localport,
8847 +                                       service->remoteport),
8848 +                               elements, count, size, 1);
8849 +               break;
8850 +       default:
8851 +               status = VCHIQ_ERROR;
8852 +               break;
8853 +       }
8854 +
8855 +error_exit:
8856 +       if (service)
8857 +               unlock_service(service);
8858 +
8859 +       return status;
8860 +}
8861 +
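
vchiq_queue_message() takes a scatter/gather array of VCHIQ_ELEMENT_T entries and enforces the combined VCHIQ_MAX_MSG_SIZE limit before queueing anything. A hedged sketch of a typical caller follows; the header struct and opcode are hypothetical, while the element fields (.data, .size) and the size check mirror the function above.

/* Illustrative sketch: send a small fixed header plus a payload buffer as
 * one VCHIQ data message using the element array interface. */
struct example_msg_header {
	unsigned int opcode;
	unsigned int length;
};

static VCHIQ_STATUS_T example_send(VCHIQ_SERVICE_HANDLE_T handle,
	const void *payload, unsigned int payload_len)
{
	struct example_msg_header hdr = {
		.opcode = 1,
		.length = payload_len,
	};
	VCHIQ_ELEMENT_T elements[] = {
		{ .data = &hdr,    .size = sizeof(hdr) },   /* fixed header  */
		{ .data = payload, .size = payload_len },   /* variable body */
	};

	/* The summed element sizes must stay within VCHIQ_MAX_MSG_SIZE or
	 * the call fails with VCHIQ_ERROR before anything is queued. */
	return vchiq_queue_message(handle, elements, 2);
}
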
8862 +void
8863 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
8864 +{
8865 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8866 +       VCHIQ_SHARED_STATE_T *remote;
8867 +       VCHIQ_STATE_T *state;
8868 +       int slot_index;
8869 +
8870 +       if (!service)
8871 +               return;
8872 +
8873 +       state = service->state;
8874 +       remote = state->remote;
8875 +
8876 +       slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
8877 +
8878 +       if ((slot_index >= remote->slot_first) &&
8879 +               (slot_index <= remote->slot_last)) {
8880 +               int msgid = header->msgid;
8881 +               if (msgid & VCHIQ_MSGID_CLAIMED) {
8882 +                       VCHIQ_SLOT_INFO_T *slot_info =
8883 +                               SLOT_INFO_FROM_INDEX(state, slot_index);
8884 +
8885 +                       release_slot(state, slot_info, header, service);
8886 +               }
8887 +       } else if (slot_index == remote->slot_sync)
8888 +               release_message_sync(state, header);
8889 +
8890 +       unlock_service(service);
8891 +}
8892 +
8893 +static void
8894 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8895 +{
8896 +       header->msgid = VCHIQ_MSGID_PADDING;
8897 +       wmb();
8898 +       remote_event_signal(&state->remote->sync_release);
8899 +}
8900 +
8901 +VCHIQ_STATUS_T
8902 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
8903 +{
8904 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8905 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8906 +
8907 +       if (!service ||
8908 +               (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
8909 +               !peer_version)
8910 +               goto exit;
8911 +       *peer_version = service->peer_version;
8912 +       status = VCHIQ_SUCCESS;
8913 +
8914 +exit:
8915 +       if (service)
8916 +               unlock_service(service);
8917 +       return status;
8918 +}
8919 +
8920 +VCHIQ_STATUS_T
8921 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
8922 +       int config_size, VCHIQ_CONFIG_T *pconfig)
8923 +{
8924 +       VCHIQ_CONFIG_T config;
8925 +
8926 +       (void)instance;
8927 +
8928 +       config.max_msg_size           = VCHIQ_MAX_MSG_SIZE;
8929 +       config.bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
8930 +       config.max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
8931 +       config.max_services           = VCHIQ_MAX_SERVICES;
8932 +       config.version                = VCHIQ_VERSION;
8933 +       config.version_min            = VCHIQ_VERSION_MIN;
8934 +
8935 +       if (config_size > sizeof(VCHIQ_CONFIG_T))
8936 +               return VCHIQ_ERROR;
8937 +
8938 +       memcpy(pconfig, &config,
8939 +               min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
8940 +
8941 +       return VCHIQ_SUCCESS;
8942 +}
8943 +
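
The config_size argument bounds the copy, so a caller built against an older, smaller VCHIQ_CONFIG_T still receives the leading fields it knows about. A brief, purely illustrative kernel-context caller (the instance is assumed to have been obtained elsewhere):

/* Illustrative sketch of querying the transport configuration. */
static void example_query_config(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_CONFIG_T config;

	if (vchiq_get_config(instance, sizeof(config), &config) ==
			VCHIQ_SUCCESS)
		vchiq_log_info(vchiq_core_log_level,
			"max msg %d, %d outstanding bulks, version %d (min %d)",
			config.max_msg_size, config.max_outstanding_bulks,
			config.version, config.version_min);
}
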
8944 +VCHIQ_STATUS_T
8945 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
8946 +       VCHIQ_SERVICE_OPTION_T option, int value)
8947 +{
8948 +       VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8949 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
8950 +
8951 +       if (service) {
8952 +               switch (option) {
8953 +               case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
8954 +                       service->auto_close = value;
8955 +                       status = VCHIQ_SUCCESS;
8956 +                       break;
8957 +
8958 +               case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
8959 +                       VCHIQ_SERVICE_QUOTA_T *service_quota =
8960 +                               &service->state->service_quotas[
8961 +                                       service->localport];
8962 +                       if (value == 0)
8963 +                               value = service->state->default_slot_quota;
8964 +                       if ((value >= service_quota->slot_use_count) &&
8965 +                                (value < (unsigned short)~0)) {
8966 +                               service_quota->slot_quota = value;
8967 +                               if ((value >= service_quota->slot_use_count) &&
8968 +                                       (service_quota->message_quota >=
8969 +                                        service_quota->message_use_count)) {
8970 +                                       /* Signal the service that it may have
8971 +                                       ** dropped below its quota */
8972 +                                       up(&service_quota->quota_event);
8973 +                               }
8974 +                               status = VCHIQ_SUCCESS;
8975 +                       }
8976 +               } break;
8977 +
8978 +               case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
8979 +                       VCHIQ_SERVICE_QUOTA_T *service_quota =
8980 +                               &service->state->service_quotas[
8981 +                                       service->localport];
8982 +                       if (value == 0)
8983 +                               value = service->state->default_message_quota;
8984 +                       if ((value >= service_quota->message_use_count) &&
8985 +                                (value < (unsigned short)~0)) {
8986 +                               service_quota->message_quota = value;
8987 +                               if ((value >=
8988 +                                       service_quota->message_use_count) &&
8989 +                                       (service_quota->slot_quota >=
8990 +                                       service_quota->slot_use_count))
8991 +                                       /* Signal the service that it may have
8992 +                                       ** dropped below its quota */
8993 +                                       up(&service_quota->quota_event);
8994 +                               status = VCHIQ_SUCCESS;
8995 +                       }
8996 +               } break;
8997 +
8998 +               case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
8999 +                       if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9000 +                               (service->srvstate ==
9001 +                               VCHIQ_SRVSTATE_LISTENING)) {
9002 +                               service->sync = value;
9003 +                               status = VCHIQ_SUCCESS;
9004 +                       }
9005 +                       break;
9006 +
9007 +               default:
9008 +                       break;
9009 +               }
9010 +               unlock_service(service);
9011 +       }
9012 +
9013 +       return status;
9014 +}
9015 +
9016 +void
9017 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9018 +       VCHIQ_SHARED_STATE_T *shared, const char *label)
9019 +{
9020 +       static const char *const debug_names[] = {
9021 +               "<entries>",
9022 +               "SLOT_HANDLER_COUNT",
9023 +               "SLOT_HANDLER_LINE",
9024 +               "PARSE_LINE",
9025 +               "PARSE_HEADER",
9026 +               "PARSE_MSGID",
9027 +               "AWAIT_COMPLETION_LINE",
9028 +               "DEQUEUE_MESSAGE_LINE",
9029 +               "SERVICE_CALLBACK_LINE",
9030 +               "MSG_QUEUE_FULL_COUNT",
9031 +               "COMPLETION_QUEUE_FULL_COUNT"
9032 +       };
9033 +       int i;
9034 +
9035 +       char buf[80];
9036 +       int len;
9037 +       len = snprintf(buf, sizeof(buf),
9038 +               "  %s: slots %d-%d tx_pos=%x recycle=%x",
9039 +               label, shared->slot_first, shared->slot_last,
9040 +               shared->tx_pos, shared->slot_queue_recycle);
9041 +       vchiq_dump(dump_context, buf, len + 1);
9042 +
9043 +       len = snprintf(buf, sizeof(buf),
9044 +               "    Slots claimed:");
9045 +       vchiq_dump(dump_context, buf, len + 1);
9046 +
9047 +       for (i = shared->slot_first; i <= shared->slot_last; i++) {
9048 +               VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9049 +               if (slot_info.use_count != slot_info.release_count) {
9050 +                       len = snprintf(buf, sizeof(buf),
9051 +                               "      %d: %d/%d", i, slot_info.use_count,
9052 +                               slot_info.release_count);
9053 +                       vchiq_dump(dump_context, buf, len + 1);
9054 +               }
9055 +       }
9056 +
9057 +       for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9058 +               len = snprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
9059 +                       debug_names[i], shared->debug[i], shared->debug[i]);
9060 +               vchiq_dump(dump_context, buf, len + 1);
9061 +       }
9062 +}
9063 +
9064 +void
9065 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9066 +{
9067 +       char buf[80];
9068 +       int len;
9069 +       int i;
9070 +
9071 +       len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9072 +               conn_state_names[state->conn_state]);
9073 +       vchiq_dump(dump_context, buf, len + 1);
9074 +
9075 +       len = snprintf(buf, sizeof(buf),
9076 +               "  tx_pos=%x(@%x), rx_pos=%x(@%x)",
9077 +               state->local->tx_pos,
9078 +               (uint32_t)state->tx_data +
9079 +                       (state->local_tx_pos & VCHIQ_SLOT_MASK),
9080 +               state->rx_pos,
9081 +               (uint32_t)state->rx_data +
9082 +                       (state->rx_pos & VCHIQ_SLOT_MASK));
9083 +       vchiq_dump(dump_context, buf, len + 1);
9084 +
9085 +       len = snprintf(buf, sizeof(buf),
9086 +               "  Version: %d (min %d)",
9087 +               VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9088 +       vchiq_dump(dump_context, buf, len + 1);
9089 +
9090 +       if (VCHIQ_ENABLE_STATS) {
9091 +               len = snprintf(buf, sizeof(buf),
9092 +                       "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9093 +                       "error_count=%d",
9094 +                       state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9095 +                       state->stats.error_count);
9096 +               vchiq_dump(dump_context, buf, len + 1);
9097 +       }
9098 +
9099 +       len = snprintf(buf, sizeof(buf),
9100 +               "  Slots: %d available (%d data), %d recyclable, %d stalls "
9101 +               "(%d data)",
9102 +               ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9103 +                       state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9104 +               state->data_quota - state->data_use_count,
9105 +               state->local->slot_queue_recycle - state->slot_queue_available,
9106 +               state->stats.slot_stalls, state->stats.data_stalls);
9107 +       vchiq_dump(dump_context, buf, len + 1);
9108 +
9109 +       vchiq_dump_platform_state(dump_context);
9110 +
9111 +       vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9112 +       vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9113 +
9114 +       vchiq_dump_platform_instances(dump_context);
9115 +
9116 +       for (i = 0; i < state->unused_service; i++) {
9117 +               VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9118 +
9119 +               if (service) {
9120 +                       vchiq_dump_service_state(dump_context, service);
9121 +                       unlock_service(service);
9122 +               }
9123 +       }
9124 +}
9125 +
9126 +void
9127 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9128 +{
9129 +       char buf[80];
9130 +       int len;
9131 +
9132 +       len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9133 +               service->localport, srvstate_names[service->srvstate],
9134 +               service->ref_count - 1); /*Don't include the lock just taken*/
9135 +
9136 +       if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9137 +               char remoteport[30];
9138 +               VCHIQ_SERVICE_QUOTA_T *service_quota =
9139 +                       &service->state->service_quotas[service->localport];
9140 +               int fourcc = service->base.fourcc;
9141 +               int tx_pending, rx_pending;
9142 +               if (service->remoteport != VCHIQ_PORT_FREE) {
9143 +                       int len2 = snprintf(remoteport, sizeof(remoteport),
9144 +                               "%d", service->remoteport);
9145 +                       if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9146 +                               snprintf(remoteport + len2,
9147 +                                       sizeof(remoteport) - len2,
9148 +                                       " (client %x)", service->client_id);
9149 +               } else
9150 +                       strcpy(remoteport, "n/a");
9151 +
9152 +               len += snprintf(buf + len, sizeof(buf) - len,
9153 +                       " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9154 +                       VCHIQ_FOURCC_AS_4CHARS(fourcc),
9155 +                       remoteport,
9156 +                       service_quota->message_use_count,
9157 +                       service_quota->message_quota,
9158 +                       service_quota->slot_use_count,
9159 +                       service_quota->slot_quota);
9160 +
9161 +               vchiq_dump(dump_context, buf, len + 1);
9162 +
9163 +               tx_pending = service->bulk_tx.local_insert -
9164 +                       service->bulk_tx.remote_insert;
9165 +
9166 +               rx_pending = service->bulk_rx.local_insert -
9167 +                       service->bulk_rx.remote_insert;
9168 +
9169 +               len = snprintf(buf, sizeof(buf),
9170 +                       "  Bulk: tx_pending=%d (size %d),"
9171 +                       " rx_pending=%d (size %d)",
9172 +                       tx_pending,
9173 +                       tx_pending ? service->bulk_tx.bulks[
9174 +                       BULK_INDEX(service->bulk_tx.remove)].size : 0,
9175 +                       rx_pending,
9176 +                       rx_pending ? service->bulk_rx.bulks[
9177 +                       BULK_INDEX(service->bulk_rx.remove)].size : 0);
9178 +
9179 +               if (VCHIQ_ENABLE_STATS) {
9180 +                       vchiq_dump(dump_context, buf, len + 1);
9181 +
9182 +                       len = snprintf(buf, sizeof(buf),
9183 +                               "  Ctrl: tx_count=%d, tx_bytes=%llu, "
9184 +                               "rx_count=%d, rx_bytes=%llu",
9185 +                               service->stats.ctrl_tx_count,
9186 +                               service->stats.ctrl_tx_bytes,
9187 +                               service->stats.ctrl_rx_count,
9188 +                               service->stats.ctrl_rx_bytes);
9189 +                       vchiq_dump(dump_context, buf, len + 1);
9190 +
9191 +                       len = snprintf(buf, sizeof(buf),
9192 +                               "  Bulk: tx_count=%d, tx_bytes=%llu, "
9193 +                               "rx_count=%d, rx_bytes=%llu",
9194 +                               service->stats.bulk_tx_count,
9195 +                               service->stats.bulk_tx_bytes,
9196 +                               service->stats.bulk_rx_count,
9197 +                               service->stats.bulk_rx_bytes);
9198 +                       vchiq_dump(dump_context, buf, len + 1);
9199 +
9200 +                       len = snprintf(buf, sizeof(buf),
9201 +                               "  %d quota stalls, %d slot stalls, "
9202 +                               "%d bulk stalls, %d aborted, %d errors",
9203 +                               service->stats.quota_stalls,
9204 +                               service->stats.slot_stalls,
9205 +                               service->stats.bulk_stalls,
9206 +                               service->stats.bulk_aborted_count,
9207 +                               service->stats.error_count);
9208 +                }
9209 +       }
9210 +
9211 +       vchiq_dump(dump_context, buf, len + 1);
9212 +
9213 +       if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9214 +               vchiq_dump_platform_service_state(dump_context, service);
9215 +}
9216 +
9217 +
9218 +void
9219 +vchiq_loud_error_header(void)
9220 +{
9221 +       vchiq_log_error(vchiq_core_log_level,
9222 +               "============================================================"
9223 +               "================");
9224 +       vchiq_log_error(vchiq_core_log_level,
9225 +               "============================================================"
9226 +               "================");
9227 +       vchiq_log_error(vchiq_core_log_level, "=====");
9228 +}
9229 +
9230 +void
9231 +vchiq_loud_error_footer(void)
9232 +{
9233 +       vchiq_log_error(vchiq_core_log_level, "=====");
9234 +       vchiq_log_error(vchiq_core_log_level,
9235 +               "============================================================"
9236 +               "================");
9237 +       vchiq_log_error(vchiq_core_log_level,
9238 +               "============================================================"
9239 +               "================");
9240 +}
9241 +
9242 +
9243 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9244 +{
9245 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
9246 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9247 +               status = queue_message(state, NULL,
9248 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9249 +                       NULL, 0, 0, 0);
9250 +       return status;
9251 +}
9252 +
9253 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9254 +{
9255 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
9256 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9257 +               status = queue_message(state, NULL,
9258 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9259 +                       NULL, 0, 0, 0);
9260 +       return status;
9261 +}
9262 +
9263 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9264 +{
9265 +       VCHIQ_STATUS_T status = VCHIQ_RETRY;
9266 +       if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9267 +               status = queue_message(state, NULL,
9268 +                       VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9269 +                       NULL, 0, 0, 0);
9270 +       return status;
9271 +}
9272 +
9273 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9274 +       size_t numBytes)
9275 +{
9276 +       const uint8_t  *mem = (const uint8_t *)voidMem;
9277 +       size_t          offset;
9278 +       char            lineBuf[100];
9279 +       char           *s;
9280 +
9281 +       while (numBytes > 0) {
9282 +               s = lineBuf;
9283 +
9284 +               for (offset = 0; offset < 16; offset++) {
9285 +                       if (offset < numBytes)
9286 +                               s += snprintf(s, 4, "%02x ", mem[offset]);
9287 +                       else
9288 +                               s += snprintf(s, 4, "   ");
9289 +               }
9290 +
9291 +               for (offset = 0; offset < 16; offset++) {
9292 +                       if (offset < numBytes) {
9293 +                               uint8_t ch = mem[offset];
9294 +
9295 +                               if ((ch < ' ') || (ch > '~'))
9296 +                                       ch = '.';
9297 +                               *s++ = (char)ch;
9298 +                       }
9299 +               }
9300 +               *s++ = '\0';
9301 +
9302 +               if ((label != NULL) && (*label != '\0'))
9303 +                       vchiq_log_trace(VCHIQ_LOG_TRACE,
9304 +                               "%s: %08x: %s", label, addr, lineBuf);
9305 +               else
9306 +                       vchiq_log_trace(VCHIQ_LOG_TRACE,
9307 +                               "%08x: %s", addr, lineBuf);
9308 +
9309 +               addr += 16;
9310 +               mem += 16;
9311 +               if (numBytes > 16)
9312 +                       numBytes -= 16;
9313 +               else
9314 +                       numBytes = 0;
9315 +       }
9316 +}
9317 --- /dev/null
9318 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9319 @@ -0,0 +1,706 @@
9320 +/**
9321 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9322 + *
9323 + * Redistribution and use in source and binary forms, with or without
9324 + * modification, are permitted provided that the following conditions
9325 + * are met:
9326 + * 1. Redistributions of source code must retain the above copyright
9327 + *    notice, this list of conditions, and the following disclaimer,
9328 + *    without modification.
9329 + * 2. Redistributions in binary form must reproduce the above copyright
9330 + *    notice, this list of conditions and the following disclaimer in the
9331 + *    documentation and/or other materials provided with the distribution.
9332 + * 3. The names of the above-listed copyright holders may not be used
9333 + *    to endorse or promote products derived from this software without
9334 + *    specific prior written permission.
9335 + *
9336 + * ALTERNATIVELY, this software may be distributed under the terms of the
9337 + * GNU General Public License ("GPL") version 2, as published by the Free
9338 + * Software Foundation.
9339 + *
9340 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9341 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9342 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9343 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9344 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9345 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9346 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9347 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9348 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9349 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9350 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9351 + */
9352 +
9353 +#ifndef VCHIQ_CORE_H
9354 +#define VCHIQ_CORE_H
9355 +
9356 +#include <linux/mutex.h>
9357 +#include <linux/semaphore.h>
9358 +#include <linux/kthread.h>
9359 +
9360 +#include "vchiq_cfg.h"
9361 +
9362 +#include "vchiq.h"
9363 +
9364 +/* Run time control of log level, based on KERN_XXX level. */
9365 +#define VCHIQ_LOG_DEFAULT  4
9366 +#define VCHIQ_LOG_ERROR    3
9367 +#define VCHIQ_LOG_WARNING  4
9368 +#define VCHIQ_LOG_INFO     6
9369 +#define VCHIQ_LOG_TRACE    7
9370 +
9371 +#define VCHIQ_LOG_PREFIX   KERN_INFO "vchiq: "
9372 +
9373 +#ifndef vchiq_log_error
9374 +#define vchiq_log_error(cat, fmt, ...) \
9375 +       do { if (cat >= VCHIQ_LOG_ERROR) \
9376 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9377 +#endif
9378 +#ifndef vchiq_log_warning
9379 +#define vchiq_log_warning(cat, fmt, ...) \
9380 +       do { if (cat >= VCHIQ_LOG_WARNING) \
9381 +                printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9382 +#endif
9383 +#ifndef vchiq_log_info
9384 +#define vchiq_log_info(cat, fmt, ...) \
9385 +       do { if (cat >= VCHIQ_LOG_INFO) \
9386 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9387 +#endif
9388 +#ifndef vchiq_log_trace
9389 +#define vchiq_log_trace(cat, fmt, ...) \
9390 +       do { if (cat >= VCHIQ_LOG_TRACE) \
9391 +               printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9392 +#endif
9393 +
9394 +#define vchiq_loud_error(...) \
9395 +       vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9396 +
9397 +#ifndef vchiq_static_assert
9398 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9399 +       extern int vchiq_static_assert[(cond) ? 1 : -1]
9400 +#endif
9401 +
9402 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9403 +
9404 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9405 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9406 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9407 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9408 +
9409 +#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
9410 +#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9411 +#define VCHIQ_SLOT_ZERO_SLOTS  ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9412 +       VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9413 +
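
Because VCHIQ_SLOT_SIZE is a power of two (enforced by the static asserts above), any stream position splits into a slot index and a byte offset with a divide and a mask. A stand-alone worked example, assuming the 4096-byte slot size defined in vchiq_if.h later in this patch:

#include <stdio.h>
#include <stdint.h>

/* Local mirrors of the macros above, for illustration only. */
#define EX_SLOT_SIZE 4096
#define EX_SLOT_MASK (EX_SLOT_SIZE - 1)

int main(void)
{
	uint32_t tx_pos = 0x2345;	/* arbitrary stream position */

	/* Low bits: byte offset within the slot; high bits: slot index. */
	printf("slot index = %u, byte offset = 0x%x\n",
	       tx_pos / EX_SLOT_SIZE, tx_pos & EX_SLOT_MASK);
	/* prints: slot index = 2, byte offset = 0x345 */
	return 0;
}
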
9414 +#define VCHIQ_MSG_PADDING            0  /* -                                 */
9415 +#define VCHIQ_MSG_CONNECT            1  /* -                                 */
9416 +#define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
9417 +#define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
9418 +#define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
9419 +#define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
9420 +#define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
9421 +#define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
9422 +#define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
9423 +#define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
9424 +#define VCHIQ_MSG_PAUSE             10  /* -                                 */
9425 +#define VCHIQ_MSG_RESUME            11  /* -                                 */
9426 +#define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
9427 +#define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
9428 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
9429 +
9430 +#define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
9431 +#define VCHIQ_PORT_FREE                0x1000
9432 +#define VCHIQ_PORT_IS_VALID(port)      (port < VCHIQ_PORT_FREE)
9433 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9434 +       ((type<<24) | (srcport<<12) | (dstport<<0))
9435 +#define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)msgid >> 24)
9436 +#define VCHIQ_MSG_SRCPORT(msgid) \
9437 +       (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9438 +#define VCHIQ_MSG_DSTPORT(msgid) \
9439 +       ((unsigned short)msgid & 0xfff)
9440 +
9441 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9442 +       ((fourcc) >> 24) & 0xff, \
9443 +       ((fourcc) >> 16) & 0xff, \
9444 +       ((fourcc) >>  8) & 0xff, \
9445 +       (fourcc) & 0xff
9446 +
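
A quick stand-alone check of how these macros pack and unpack a message ID: the type sits in the top 8 bits, with 12 bits each for the source and destination ports. The constants are copied locally purely for illustration.

#include <stdio.h>

/* Local copies of the packing macros above. */
#define EX_MAKE_MSG(type, src, dst) (((type) << 24) | ((src) << 12) | (dst))
#define EX_MSG_TYPE(msgid)          ((unsigned int)(msgid) >> 24)
#define EX_MSG_SRCPORT(msgid)       (((unsigned int)(msgid) >> 12) & 0xfff)
#define EX_MSG_DSTPORT(msgid)       ((unsigned int)(msgid) & 0xfff)

int main(void)
{
	/* VCHIQ_MSG_DATA (5) from local port 3 to remote port 7. */
	unsigned int msgid = EX_MAKE_MSG(5, 3, 7);

	printf("msgid=0x%08x type=%u src=%u dst=%u\n", msgid,
	       EX_MSG_TYPE(msgid), EX_MSG_SRCPORT(msgid),
	       EX_MSG_DSTPORT(msgid));
	/* prints: msgid=0x05003007 type=5 src=3 dst=7 */
	return 0;
}
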
9447 +/* Ensure the fields are wide enough */
9448 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9449 +       == 0);
9450 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9451 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9452 +       (unsigned int)VCHIQ_PORT_FREE);
9453 +
9454 +#define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9455 +#define VCHIQ_MSGID_CLAIMED            0x40000000
9456 +
9457 +#define VCHIQ_FOURCC_INVALID           0x00000000
9458 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc)  (fourcc != VCHIQ_FOURCC_INVALID)
9459 +
9460 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9461 +
9462 +typedef uint32_t BITSET_T;
9463 +
9464 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9465 +
9466 +#define BITSET_SIZE(b)        ((b + 31) >> 5)
9467 +#define BITSET_WORD(b)        (b >> 5)
9468 +#define BITSET_BIT(b)         (1 << (b & 31))
9469 +#define BITSET_ZERO(bs)       memset(bs, 0, sizeof(bs))
9470 +#define BITSET_IS_SET(bs, b)  (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9471 +#define BITSET_SET(bs, b)     (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9472 +#define BITSET_CLR(bs, b)     (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
9473 +
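
The BITSET_* helpers operate on arrays of 32-bit words, as used for the poll_services sets below. A small stand-alone exercise of the same arithmetic, with local copies of the macros for illustration only:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint32_t BITSET_T;
#define EX_BITSET_SIZE(b)       (((b) + 31) >> 5)
#define EX_BITSET_WORD(b)       ((b) >> 5)
#define EX_BITSET_BIT(b)        (1u << ((b) & 31))
#define EX_BITSET_SET(bs, b)    ((bs)[EX_BITSET_WORD(b)] |= EX_BITSET_BIT(b))
#define EX_BITSET_IS_SET(bs, b) ((bs)[EX_BITSET_WORD(b)] & EX_BITSET_BIT(b))

int main(void)
{
	BITSET_T set[EX_BITSET_SIZE(64)];	/* two 32-bit words */

	memset(set, 0, sizeof(set));
	EX_BITSET_SET(set, 37);		/* bit 5 of word 1 */
	printf("bit 37 %s, bit 36 %s\n",
	       EX_BITSET_IS_SET(set, 37) ? "set" : "clear",
	       EX_BITSET_IS_SET(set, 36) ? "set" : "clear");
	/* prints: bit 37 set, bit 36 clear */
	return 0;
}
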
9474 +#if VCHIQ_ENABLE_STATS
9475 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9476 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9477 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9478 +       (service->stats. stat += addend)
9479 +#else
9480 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9481 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9482 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9483 +#endif
9484 +
9485 +enum {
9486 +       DEBUG_ENTRIES,
9487 +#if VCHIQ_ENABLE_DEBUG
9488 +       DEBUG_SLOT_HANDLER_COUNT,
9489 +       DEBUG_SLOT_HANDLER_LINE,
9490 +       DEBUG_PARSE_LINE,
9491 +       DEBUG_PARSE_HEADER,
9492 +       DEBUG_PARSE_MSGID,
9493 +       DEBUG_AWAIT_COMPLETION_LINE,
9494 +       DEBUG_DEQUEUE_MESSAGE_LINE,
9495 +       DEBUG_SERVICE_CALLBACK_LINE,
9496 +       DEBUG_MSG_QUEUE_FULL_COUNT,
9497 +       DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9498 +#endif
9499 +       DEBUG_MAX
9500 +};
9501 +
9502 +#if VCHIQ_ENABLE_DEBUG
9503 +
9504 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9505 +#define DEBUG_TRACE(d) \
9506 +       do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9507 +#define DEBUG_VALUE(d, v) \
9508 +       do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9509 +#define DEBUG_COUNT(d) \
9510 +       do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9511 +
9512 +#else /* VCHIQ_ENABLE_DEBUG */
9513 +
9514 +#define DEBUG_INITIALISE(local)
9515 +#define DEBUG_TRACE(d)
9516 +#define DEBUG_VALUE(d, v)
9517 +#define DEBUG_COUNT(d)
9518 +
9519 +#endif /* VCHIQ_ENABLE_DEBUG */
9520 +
9521 +typedef enum {
9522 +       VCHIQ_CONNSTATE_DISCONNECTED,
9523 +       VCHIQ_CONNSTATE_CONNECTING,
9524 +       VCHIQ_CONNSTATE_CONNECTED,
9525 +       VCHIQ_CONNSTATE_PAUSING,
9526 +       VCHIQ_CONNSTATE_PAUSE_SENT,
9527 +       VCHIQ_CONNSTATE_PAUSED,
9528 +       VCHIQ_CONNSTATE_RESUMING,
9529 +       VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9530 +       VCHIQ_CONNSTATE_RESUME_TIMEOUT
9531 +} VCHIQ_CONNSTATE_T;
9532 +
9533 +enum {
9534 +       VCHIQ_SRVSTATE_FREE,
9535 +       VCHIQ_SRVSTATE_HIDDEN,
9536 +       VCHIQ_SRVSTATE_LISTENING,
9537 +       VCHIQ_SRVSTATE_OPENING,
9538 +       VCHIQ_SRVSTATE_OPEN,
9539 +       VCHIQ_SRVSTATE_OPENSYNC,
9540 +       VCHIQ_SRVSTATE_CLOSESENT,
9541 +       VCHIQ_SRVSTATE_CLOSERECVD,
9542 +       VCHIQ_SRVSTATE_CLOSEWAIT,
9543 +       VCHIQ_SRVSTATE_CLOSED
9544 +};
9545 +
9546 +enum {
9547 +       VCHIQ_POLL_TERMINATE,
9548 +       VCHIQ_POLL_REMOVE,
9549 +       VCHIQ_POLL_TXNOTIFY,
9550 +       VCHIQ_POLL_RXNOTIFY,
9551 +       VCHIQ_POLL_COUNT
9552 +};
9553 +
9554 +typedef enum {
9555 +       VCHIQ_BULK_TRANSMIT,
9556 +       VCHIQ_BULK_RECEIVE
9557 +} VCHIQ_BULK_DIR_T;
9558 +
9559 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9560 +
9561 +typedef struct vchiq_bulk_struct {
9562 +       short mode;
9563 +       short dir;
9564 +       void *userdata;
9565 +       VCHI_MEM_HANDLE_T handle;
9566 +       void *data;
9567 +       int size;
9568 +       void *remote_data;
9569 +       int remote_size;
9570 +       int actual;
9571 +} VCHIQ_BULK_T;
9572 +
9573 +typedef struct vchiq_bulk_queue_struct {
9574 +       int local_insert;  /* Where to insert the next local bulk */
9575 +       int remote_insert; /* Where to insert the next remote bulk (master) */
9576 +       int process;       /* Bulk to transfer next */
9577 +       int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9578 +       int remove;        /* Bulk to notify the local client of, and remove,
9579 +                          ** next */
9580 +       VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9581 +} VCHIQ_BULK_QUEUE_T;
9582 +
9583 +typedef struct remote_event_struct {
9584 +       int armed;
9585 +       int fired;
9586 +       struct semaphore *event;
9587 +} REMOTE_EVENT_T;
9588 +
9589 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9590 +
9591 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9592 +
9593 +typedef struct vchiq_slot_struct {
9594 +       char data[VCHIQ_SLOT_SIZE];
9595 +} VCHIQ_SLOT_T;
9596 +
9597 +typedef struct vchiq_slot_info_struct {
9598 +       /* Use two counters rather than one to avoid the need for a mutex. */
9599 +       short use_count;
9600 +       short release_count;
9601 +} VCHIQ_SLOT_INFO_T;
9602 +
9603 +typedef struct vchiq_service_struct {
9604 +       VCHIQ_SERVICE_BASE_T base;
9605 +       VCHIQ_SERVICE_HANDLE_T handle;
9606 +       unsigned int ref_count;
9607 +       int srvstate;
9608 +       VCHIQ_USERDATA_TERM_T userdata_term;
9609 +       unsigned int localport;
9610 +       unsigned int remoteport;
9611 +       int public_fourcc;
9612 +       int client_id;
9613 +       char auto_close;
9614 +       char sync;
9615 +       char closing;
9616 +       atomic_t poll_flags;
9617 +       short version;
9618 +       short version_min;
9619 +       short peer_version;
9620 +
9621 +       VCHIQ_STATE_T *state;
9622 +       VCHIQ_INSTANCE_T instance;
9623 +
9624 +       int service_use_count;
9625 +
9626 +       VCHIQ_BULK_QUEUE_T bulk_tx;
9627 +       VCHIQ_BULK_QUEUE_T bulk_rx;
9628 +
9629 +       struct semaphore remove_event;
9630 +       struct semaphore bulk_remove_event;
9631 +       struct mutex bulk_mutex;
9632 +
9633 +       struct service_stats_struct {
9634 +               int quota_stalls;
9635 +               int slot_stalls;
9636 +               int bulk_stalls;
9637 +               int error_count;
9638 +               int ctrl_tx_count;
9639 +               int ctrl_rx_count;
9640 +               int bulk_tx_count;
9641 +               int bulk_rx_count;
9642 +               int bulk_aborted_count;
9643 +               uint64_t ctrl_tx_bytes;
9644 +               uint64_t ctrl_rx_bytes;
9645 +               uint64_t bulk_tx_bytes;
9646 +               uint64_t bulk_rx_bytes;
9647 +       } stats;
9648 +} VCHIQ_SERVICE_T;
9649 +
9650 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9651 +       statically allocated, since for accounting reasons a service's slot
9652 +       usage is carried over between users of the same port number.
9653 + */
9654 +typedef struct vchiq_service_quota_struct {
9655 +       unsigned short slot_quota;
9656 +       unsigned short slot_use_count;
9657 +       unsigned short message_quota;
9658 +       unsigned short message_use_count;
9659 +       struct semaphore quota_event;
9660 +       int previous_tx_index;
9661 +} VCHIQ_SERVICE_QUOTA_T;
9662 +
9663 +typedef struct vchiq_shared_state_struct {
9664 +
9665 +       /* A non-zero value here indicates that the content is valid. */
9666 +       int initialised;
9667 +
9668 +       /* The first and last (inclusive) slots allocated to the owner. */
9669 +       int slot_first;
9670 +       int slot_last;
9671 +
9672 +       /* The slot allocated to synchronous messages from the owner. */
9673 +       int slot_sync;
9674 +
9675 +       /* Signalling this event indicates that the owner's slot handler thread
9676 +       ** should run. */
9677 +       REMOTE_EVENT_T trigger;
9678 +
9679 +       /* Indicates the byte position within the stream where the next message
9680 +       ** will be written. The least significant bits are an index into the
9681 +       ** slot. The next bits are the index of the slot in slot_queue. */
9682 +       int tx_pos;
9683 +
9684 +       /* This event should be signalled when a slot is recycled. */
9685 +       REMOTE_EVENT_T recycle;
9686 +
9687 +       /* The slot_queue index where the next recycled slot will be written. */
9688 +       int slot_queue_recycle;
9689 +
9690 +       /* This event should be signalled when a synchronous message is sent. */
9691 +       REMOTE_EVENT_T sync_trigger;
9692 +
9693 +       /* This event should be signalled when a synchronous message has been
9694 +       ** released. */
9695 +       REMOTE_EVENT_T sync_release;
9696 +
9697 +       /* A circular buffer of slot indexes. */
9698 +       int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9699 +
9700 +       /* Debugging state */
9701 +       int debug[DEBUG_MAX];
9702 +} VCHIQ_SHARED_STATE_T;
9703 +
9704 +typedef struct vchiq_slot_zero_struct {
9705 +       int magic;
9706 +       short version;
9707 +       short version_min;
9708 +       int slot_zero_size;
9709 +       int slot_size;
9710 +       int max_slots;
9711 +       int max_slots_per_side;
9712 +       int platform_data[2];
9713 +       VCHIQ_SHARED_STATE_T master;
9714 +       VCHIQ_SHARED_STATE_T slave;
9715 +       VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9716 +} VCHIQ_SLOT_ZERO_T;
9717 +
9718 +struct vchiq_state_struct {
9719 +       int id;
9720 +       int initialised;
9721 +       VCHIQ_CONNSTATE_T conn_state;
9722 +       int is_master;
9723 +
9724 +       VCHIQ_SHARED_STATE_T *local;
9725 +       VCHIQ_SHARED_STATE_T *remote;
9726 +       VCHIQ_SLOT_T *slot_data;
9727 +
9728 +       unsigned short default_slot_quota;
9729 +       unsigned short default_message_quota;
9730 +
9731 +       /* Event indicating connect message received */
9732 +       struct semaphore connect;
9733 +
9734 +       /* Mutex protecting services */
9735 +       struct mutex mutex;
9736 +       VCHIQ_INSTANCE_T *instance;
9737 +
9738 +       /* Processes incoming messages */
9739 +       struct task_struct *slot_handler_thread;
9740 +
9741 +       /* Processes recycled slots */
9742 +       struct task_struct *recycle_thread;
9743 +
9744 +       /* Processes synchronous messages */
9745 +       struct task_struct *sync_thread;
9746 +
9747 +       /* Local implementation of the trigger remote event */
9748 +       struct semaphore trigger_event;
9749 +
9750 +       /* Local implementation of the recycle remote event */
9751 +       struct semaphore recycle_event;
9752 +
9753 +       /* Local implementation of the sync trigger remote event */
9754 +       struct semaphore sync_trigger_event;
9755 +
9756 +       /* Local implementation of the sync release remote event */
9757 +       struct semaphore sync_release_event;
9758 +
9759 +       char *tx_data;
9760 +       char *rx_data;
9761 +       VCHIQ_SLOT_INFO_T *rx_info;
9762 +
9763 +       struct mutex slot_mutex;
9764 +
9765 +       struct mutex recycle_mutex;
9766 +
9767 +       struct mutex sync_mutex;
9768 +
9769 +       struct mutex bulk_transfer_mutex;
9770 +
9771 +       /* Indicates the byte position within the stream from where the next
9772 +       ** message will be read. The least significant bits are an index into
9773 +       ** the slot. The next bits are the index of the slot in
9774 +       ** remote->slot_queue. */
9775 +       int rx_pos;
9776 +
9777 +       /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
9778 +               from remote->tx_pos. */
9779 +       int local_tx_pos;
9780 +
9781 +       /* The slot_queue index of the slot to become available next. */
9782 +       int slot_queue_available;
9783 +
9784 +       /* A flag to indicate if any poll has been requested */
9785 +       int poll_needed;
9786 +
9787 +       /* The index of the previous slot used for data messages. */
9788 +       int previous_data_index;
9789 +
9790 +       /* The number of slots occupied by data messages. */
9791 +       unsigned short data_use_count;
9792 +
9793 +       /* The maximum number of slots to be occupied by data messages. */
9794 +       unsigned short data_quota;
9795 +
9796 +       /* An array of bit sets indicating which services must be polled. */
9797 +       atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
9798 +
9799 +       /* The number of the first unused service */
9800 +       int unused_service;
9801 +
9802 +       /* Signalled when a free slot becomes available. */
9803 +       struct semaphore slot_available_event;
9804 +
9805 +       struct semaphore slot_remove_event;
9806 +
9807 +       /* Signalled when a free data slot becomes available. */
9808 +       struct semaphore data_quota_event;
9809 +
9810 +       /* Incremented when there are bulk transfers which cannot be processed
9811 +        * whilst paused and must be processed on resume */
9812 +       int deferred_bulks;
9813 +
9814 +       struct state_stats_struct {
9815 +               int slot_stalls;
9816 +               int data_stalls;
9817 +               int ctrl_tx_count;
9818 +               int ctrl_rx_count;
9819 +               int error_count;
9820 +       } stats;
9821 +
9822 +       VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
9823 +       VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
9824 +       VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
9825 +
9826 +       VCHIQ_PLATFORM_STATE_T platform_state;
9827 +};
9828 +
9829 +struct bulk_waiter {
9830 +       VCHIQ_BULK_T *bulk;
9831 +       struct semaphore event;
9832 +       int actual;
9833 +};
9834 +
9835 +extern spinlock_t bulk_waiter_spinlock;
9836 +
9837 +extern int vchiq_core_log_level;
9838 +extern int vchiq_core_msg_log_level;
9839 +extern int vchiq_sync_log_level;
9840 +
9841 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
9842 +
9843 +extern const char *
9844 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
9845 +
9846 +extern VCHIQ_SLOT_ZERO_T *
9847 +vchiq_init_slots(void *mem_base, int mem_size);
9848 +
9849 +extern VCHIQ_STATUS_T
9850 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
9851 +       int is_master);
9852 +
9853 +extern VCHIQ_STATUS_T
9854 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9855 +
9856 +extern VCHIQ_SERVICE_T *
9857 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9858 +       const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9859 +       VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
9860 +
9861 +extern VCHIQ_STATUS_T
9862 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
9863 +
9864 +extern VCHIQ_STATUS_T
9865 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
9866 +
9867 +extern void
9868 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
9869 +
9870 +extern void
9871 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
9872 +
9873 +extern VCHIQ_STATUS_T
9874 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9875 +
9876 +extern VCHIQ_STATUS_T
9877 +vchiq_pause_internal(VCHIQ_STATE_T *state);
9878 +
9879 +extern VCHIQ_STATUS_T
9880 +vchiq_resume_internal(VCHIQ_STATE_T *state);
9881 +
9882 +extern void
9883 +remote_event_pollall(VCHIQ_STATE_T *state);
9884 +
9885 +extern VCHIQ_STATUS_T
9886 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9887 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9888 +       VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
9889 +
9890 +extern void
9891 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
9892 +
9893 +extern void
9894 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
9895 +
9896 +extern void
9897 +vchiq_loud_error_header(void);
9898 +
9899 +extern void
9900 +vchiq_loud_error_footer(void);
9901 +
9902 +extern void
9903 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
9904 +
9905 +static inline VCHIQ_SERVICE_T *
9906 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
9907 +{
9908 +       VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
9909 +               (VCHIQ_MAX_STATES - 1)];
9910 +       if (!state)
9911 +               return NULL;
9912 +
9913 +       return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
9914 +}
9915 +
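
handle_to_service() treats a service handle as a state index in the upper bits and a service index in the lower bits. A stand-alone decoding example follows, using illustrative values for the configuration constants (the real values come from vchiq_cfg.h, which is not shown in this hunk):

#include <stdio.h>

/* Assumed values, for illustration of the decode only. */
#define EX_MAX_SERVICES 4096
#define EX_MAX_STATES   1

int main(void)
{
	unsigned int handle = 0x1003;	/* hypothetical service handle */

	printf("state index = %u, service index = %u\n",
	       (handle / EX_MAX_SERVICES) & (EX_MAX_STATES - 1),
	       handle & (EX_MAX_SERVICES - 1));
	/* prints: state index = 0, service index = 3 */
	return 0;
}
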
9916 +extern VCHIQ_SERVICE_T *
9917 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
9918 +
9919 +extern VCHIQ_SERVICE_T *
9920 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
9921 +
9922 +extern VCHIQ_SERVICE_T *
9923 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
9924 +       VCHIQ_SERVICE_HANDLE_T handle);
9925 +
9926 +extern VCHIQ_SERVICE_T *
9927 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
9928 +       int *pidx);
9929 +
9930 +extern void
9931 +lock_service(VCHIQ_SERVICE_T *service);
9932 +
9933 +extern void
9934 +unlock_service(VCHIQ_SERVICE_T *service);
9935 +
9936 +/* The following functions are called from vchiq_core, and external
9937 +** implementations must be provided. */
9938 +
9939 +extern VCHIQ_STATUS_T
9940 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
9941 +       VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
9942 +
9943 +extern void
9944 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
9945 +
9946 +extern void
9947 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
9948 +
9949 +extern VCHIQ_STATUS_T
9950 +vchiq_copy_from_user(void *dst, const void *src, int size);
9951 +
9952 +extern void
9953 +remote_event_signal(REMOTE_EVENT_T *event);
9954 +
9955 +void
9956 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
9957 +
9958 +extern void
9959 +vchiq_platform_paused(VCHIQ_STATE_T *state);
9960 +
9961 +extern VCHIQ_STATUS_T
9962 +vchiq_platform_resume(VCHIQ_STATE_T *state);
9963 +
9964 +extern void
9965 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
9966 +
9967 +extern void
9968 +vchiq_dump(void *dump_context, const char *str, int len);
9969 +
9970 +extern void
9971 +vchiq_dump_platform_state(void *dump_context);
9972 +
9973 +extern void
9974 +vchiq_dump_platform_instances(void *dump_context);
9975 +
9976 +extern void
9977 +vchiq_dump_platform_service_state(void *dump_context,
9978 +       VCHIQ_SERVICE_T *service);
9979 +
9980 +extern VCHIQ_STATUS_T
9981 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
9982 +
9983 +extern VCHIQ_STATUS_T
9984 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
9985 +
9986 +extern void
9987 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
9988 +
9989 +extern void
9990 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
9991 +
9992 +extern VCHIQ_STATUS_T
9993 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
9994 +
9995 +extern VCHIQ_STATUS_T
9996 +vchiq_check_service(VCHIQ_SERVICE_T *service);
9997 +
9998 +extern void
9999 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10000 +
10001 +extern VCHIQ_STATUS_T
10002 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10003 +
10004 +extern VCHIQ_STATUS_T
10005 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10006 +
10007 +extern VCHIQ_STATUS_T
10008 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10009 +
10010 +extern void
10011 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10012 +       VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10013 +
10014 +extern void
10015 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10016 +
10017 +extern void
10018 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10019 +
10020 +
10021 +extern void
10022 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10023 +       size_t numBytes);
10024 +
10025 +#endif
10026 --- /dev/null
10027 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10028 @@ -0,0 +1,89 @@
10029 +#!/usr/bin/perl -w
10030 +
10031 +use strict;
10032 +
10033 +#
10034 +# Generate a version from available information
10035 +#
10036 +
10037 +my $prefix = shift @ARGV;
10038 +my $root = shift @ARGV;
10039 +
10040 +
10041 +if ( not defined $root ) {
10042 +       die "usage: $0 prefix root-dir\n";
10043 +}
10044 +
10045 +if ( ! -d $root ) {
10046 +       die "root directory $root not found\n";
10047 +}
10048 +
10049 +my $version = "unknown";
10050 +my $tainted = "";
10051 +
10052 +if ( -d "$root/.git" ) {
10053 +       # attempt to work out git version. only do so
10054 +       # on a linux build host, as cygwin builds are
10055 +       # already slow enough
10056 +
10057 +       if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10058 +               if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10059 +                       $version = "no git version";
10060 +               }
10061 +               else {
10062 +                       $version = <F>;
10063 +                       $version =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
10064 +                       $version =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
10065 +               }
10066 +
10067 +               if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10068 +                       $tainted = <G>;
10069 +                       $tainted =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
10070 +                       $tainted =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
10071 +                       if (length $tainted) {
10072 +                               $version = join ' ', $version, "(tainted)";
10073 +                       }
10074 +                       else {
10075 +                               $version = join ' ', $version, "(clean)";
10076 +                       }
10077 +               }
10078 +       }
10079 +}
10080 +
10081 +my $hostname = `hostname`;
10082 +$hostname =~ s/[ \r\n]*$//;     # chomp may not be enough (cygwin).
10083 +$hostname =~ s/^[ \r\n]*//;     # chomp may not be enough (cygwin).
10084 +
10085 +
10086 +print STDERR "Version $version\n";
10087 +print <<EOF;
10088 +#include "${prefix}_build_info.h"
10089 +#include <linux/broadcom/vc_debug_sym.h>
10090 +
10091 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10092 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10093 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time,    __TIME__ );
10094 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date,    __DATE__ );
10095 +
10096 +const char *vchiq_get_build_hostname( void )
10097 +{
10098 +   return vchiq_build_hostname;
10099 +}
10100 +
10101 +const char *vchiq_get_build_version( void )
10102 +{
10103 +   return vchiq_build_version;
10104 +}
10105 +
10106 +const char *vchiq_get_build_date( void )
10107 +{
10108 +   return vchiq_build_date;
10109 +}
10110 +
10111 +const char *vchiq_get_build_time( void )
10112 +{
10113 +   return vchiq_build_time;
10114 +}
10115 +EOF
10116 +
10117 +
10118 --- /dev/null
10119 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10120 @@ -0,0 +1,188 @@
10121 +/**
10122 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10123 + *
10124 + * Redistribution and use in source and binary forms, with or without
10125 + * modification, are permitted provided that the following conditions
10126 + * are met:
10127 + * 1. Redistributions of source code must retain the above copyright
10128 + *    notice, this list of conditions, and the following disclaimer,
10129 + *    without modification.
10130 + * 2. Redistributions in binary form must reproduce the above copyright
10131 + *    notice, this list of conditions and the following disclaimer in the
10132 + *    documentation and/or other materials provided with the distribution.
10133 + * 3. The names of the above-listed copyright holders may not be used
10134 + *    to endorse or promote products derived from this software without
10135 + *    specific prior written permission.
10136 + *
10137 + * ALTERNATIVELY, this software may be distributed under the terms of the
10138 + * GNU General Public License ("GPL") version 2, as published by the Free
10139 + * Software Foundation.
10140 + *
10141 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10142 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10143 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10144 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10145 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10146 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10147 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10148 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10149 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10150 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10151 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10152 + */
10153 +
10154 +#ifndef VCHIQ_IF_H
10155 +#define VCHIQ_IF_H
10156 +
10157 +#include "interface/vchi/vchi_mh.h"
10158 +
10159 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10160 +
10161 +#define VCHIQ_SLOT_SIZE     4096
10162 +#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10163 +#define VCHIQ_CHANNEL_SIZE  VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10164 +
10165 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10166 +                       (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
10167 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10168 +#define VCHIQ_GET_SERVICE_FOURCC(service)   vchiq_get_service_fourcc(service)
10169 +
10170 +typedef enum {
10171 +       VCHIQ_SERVICE_OPENED,         /* service, -, -             */
10172 +       VCHIQ_SERVICE_CLOSED,         /* service, -, -             */
10173 +       VCHIQ_MESSAGE_AVAILABLE,      /* service, header, -        */
10174 +       VCHIQ_BULK_TRANSMIT_DONE,     /* service, -, bulk_userdata */
10175 +       VCHIQ_BULK_RECEIVE_DONE,      /* service, -, bulk_userdata */
10176 +       VCHIQ_BULK_TRANSMIT_ABORTED,  /* service, -, bulk_userdata */
10177 +       VCHIQ_BULK_RECEIVE_ABORTED    /* service, -, bulk_userdata */
10178 +} VCHIQ_REASON_T;
10179 +
10180 +typedef enum {
10181 +       VCHIQ_ERROR   = -1,
10182 +       VCHIQ_SUCCESS = 0,
10183 +       VCHIQ_RETRY   = 1
10184 +} VCHIQ_STATUS_T;
10185 +
10186 +typedef enum {
10187 +       VCHIQ_BULK_MODE_CALLBACK,
10188 +       VCHIQ_BULK_MODE_BLOCKING,
10189 +       VCHIQ_BULK_MODE_NOCALLBACK,
10190 +       VCHIQ_BULK_MODE_WAITING         /* Reserved for internal use */
10191 +} VCHIQ_BULK_MODE_T;
10192 +
10193 +typedef enum {
10194 +       VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10195 +       VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10196 +       VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10197 +       VCHIQ_SERVICE_OPTION_SYNCHRONOUS
10198 +} VCHIQ_SERVICE_OPTION_T;
10199 +
10200 +typedef struct vchiq_header_struct {
10201 +       /* The message identifier - opaque to applications. */
10202 +       int msgid;
10203 +
10204 +       /* Size of message data. */
10205 +       unsigned int size;
10206 +
10207 +       char data[0];           /* message */
10208 +} VCHIQ_HEADER_T;
10209 +
10210 +typedef struct {
10211 +       const void *data;
10212 +       unsigned int size;
10213 +} VCHIQ_ELEMENT_T;
10214 +
10215 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10216 +
10217 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10218 +       VCHIQ_SERVICE_HANDLE_T, void *);
10219 +
10220 +typedef struct vchiq_service_base_struct {
10221 +       int fourcc;
10222 +       VCHIQ_CALLBACK_T callback;
10223 +       void *userdata;
10224 +} VCHIQ_SERVICE_BASE_T;
10225 +
10226 +typedef struct vchiq_service_params_struct {
10227 +       int fourcc;
10228 +       VCHIQ_CALLBACK_T callback;
10229 +       void *userdata;
10230 +       short version;       /* Increment for non-trivial changes */
10231 +       short version_min;   /* Update for incompatible changes */
10232 +} VCHIQ_SERVICE_PARAMS_T;
10233 +
10234 +typedef struct vchiq_config_struct {
10235 +       unsigned int max_msg_size;
10236 +       unsigned int bulk_threshold; /* The message size above which it
10237 +                                       is better to use a bulk transfer
10238 +                                       (<= max_msg_size) */
10239 +       unsigned int max_outstanding_bulks;
10240 +       unsigned int max_services;
10241 +       short version;      /* The version of VCHIQ */
10242 +       short version_min;  /* The minimum compatible version of VCHIQ */
10243 +} VCHIQ_CONFIG_T;
10244 +
10245 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10246 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10247 +
10248 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10249 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10250 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10251 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10252 +       const VCHIQ_SERVICE_PARAMS_T *params,
10253 +       VCHIQ_SERVICE_HANDLE_T *pservice);
10254 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10255 +       const VCHIQ_SERVICE_PARAMS_T *params,
10256 +       VCHIQ_SERVICE_HANDLE_T *pservice);
10257 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10258 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10259 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10260 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10261 +       VCHIQ_SERVICE_HANDLE_T service);
10262 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10263 +
10264 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
10265 +       const VCHIQ_ELEMENT_T *elements, unsigned int count);
10266 +extern void           vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
10267 +       VCHIQ_HEADER_T *header);
10268 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10269 +       const void *data, unsigned int size, void *userdata);
10270 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10271 +       void *data, unsigned int size, void *userdata);
10272 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
10273 +       VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10274 +       const void *offset, unsigned int size, void *userdata);
10275 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
10276 +       VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10277 +       void *offset, unsigned int size, void *userdata);
10278 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10279 +       const void *data, unsigned int size, void *userdata,
10280 +       VCHIQ_BULK_MODE_T mode);
10281 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10282 +       void *data, unsigned int size, void *userdata,
10283 +       VCHIQ_BULK_MODE_T mode);
10284 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
10285 +       VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
10286 +       void *userdata, VCHIQ_BULK_MODE_T mode);
10287 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
10288 +       VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
10289 +       void *userdata, VCHIQ_BULK_MODE_T mode);
10290 +extern int   vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
10291 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
10292 +extern int   vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
10293 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
10294 +       int config_size, VCHIQ_CONFIG_T *pconfig);
10295 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
10296 +       VCHIQ_SERVICE_OPTION_T option, int value);
10297 +
10298 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
10299 +       VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
10300 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
10301 +
10302 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
10303 +       void *ptr, size_t num_bytes);
10304 +
10305 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
10306 +      short *peer_version);
10307 +
10308 +#endif /* VCHIQ_IF_H */
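The header above is the entire kernel-side VCHIQ API surface. As a quick orientation, here is a minimal sketch of a hypothetical in-kernel client written only against the declarations in vchiq_if.h; the "EXMP" fourcc, the version numbers and the error handling are illustrative assumptions, not part of the driver.

#include "vchiq_if.h"

/* Callback invoked for every service event; for VCHIQ_MESSAGE_AVAILABLE the
 * header must be handed back with vchiq_release_message() once consumed. */
static VCHIQ_STATUS_T example_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T service,
	void *bulk_userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE) {
		/* ... inspect header->data / header->size here ... */
		vchiq_release_message(service, header);
	}
	return VCHIQ_SUCCESS;
}

/* Initialise, connect, and open a client service on a hypothetical
 * "EXMP" fourcc. */
static VCHIQ_STATUS_T example_open(VCHIQ_SERVICE_HANDLE_T *pservice)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('E', 'X', 'M', 'P'),
		.callback    = example_callback,
		.userdata    = NULL,
		.version     = 1,
		.version_min = 1,
	};
	VCHIQ_STATUS_T status;

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS)
		return status;

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS)
		return status;

	return vchiq_open_service(instance, &params, pservice);
}

/* Queue a single-element message on an open service. */
static VCHIQ_STATUS_T example_send(VCHIQ_SERVICE_HANDLE_T service,
	const void *msg, unsigned int msg_len)
{
	VCHIQ_ELEMENT_T element = { msg, msg_len };

	return vchiq_queue_message(service, &element, 1);
}

A server-style user would call vchiq_add_service() instead, which (as vchiq_kern_lib.c below shows) may be done before vchiq_connect() and leaves the service listening or hidden until the connection is made.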
10309 --- /dev/null
10310 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10311 @@ -0,0 +1,129 @@
10312 +/**
10313 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10314 + *
10315 + * Redistribution and use in source and binary forms, with or without
10316 + * modification, are permitted provided that the following conditions
10317 + * are met:
10318 + * 1. Redistributions of source code must retain the above copyright
10319 + *    notice, this list of conditions, and the following disclaimer,
10320 + *    without modification.
10321 + * 2. Redistributions in binary form must reproduce the above copyright
10322 + *    notice, this list of conditions and the following disclaimer in the
10323 + *    documentation and/or other materials provided with the distribution.
10324 + * 3. The names of the above-listed copyright holders may not be used
10325 + *    to endorse or promote products derived from this software without
10326 + *    specific prior written permission.
10327 + *
10328 + * ALTERNATIVELY, this software may be distributed under the terms of the
10329 + * GNU General Public License ("GPL") version 2, as published by the Free
10330 + * Software Foundation.
10331 + *
10332 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10333 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10334 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10335 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10336 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10337 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10338 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10339 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10340 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10341 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10342 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10343 + */
10344 +
10345 +#ifndef VCHIQ_IOCTLS_H
10346 +#define VCHIQ_IOCTLS_H
10347 +
10348 +#include <linux/ioctl.h>
10349 +#include "vchiq_if.h"
10350 +
10351 +#define VCHIQ_IOC_MAGIC 0xc4
10352 +#define VCHIQ_INVALID_HANDLE (~0)
10353 +
10354 +typedef struct {
10355 +       VCHIQ_SERVICE_PARAMS_T params;
10356 +       int is_open;
10357 +       int is_vchi;
10358 +       unsigned int handle;       /* OUT */
10359 +} VCHIQ_CREATE_SERVICE_T;
10360 +
10361 +typedef struct {
10362 +       unsigned int handle;
10363 +       unsigned int count;
10364 +       const VCHIQ_ELEMENT_T *elements;
10365 +} VCHIQ_QUEUE_MESSAGE_T;
10366 +
10367 +typedef struct {
10368 +       unsigned int handle;
10369 +       void *data;
10370 +       unsigned int size;
10371 +       void *userdata;
10372 +       VCHIQ_BULK_MODE_T mode;
10373 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
10374 +
10375 +typedef struct {
10376 +       VCHIQ_REASON_T reason;
10377 +       VCHIQ_HEADER_T *header;
10378 +       void *service_userdata;
10379 +       void *bulk_userdata;
10380 +} VCHIQ_COMPLETION_DATA_T;
10381 +
10382 +typedef struct {
10383 +       unsigned int count;
10384 +       VCHIQ_COMPLETION_DATA_T *buf;
10385 +       unsigned int msgbufsize;
10386 +       unsigned int msgbufcount; /* IN/OUT */
10387 +       void **msgbufs;
10388 +} VCHIQ_AWAIT_COMPLETION_T;
10389 +
10390 +typedef struct {
10391 +       unsigned int handle;
10392 +       int blocking;
10393 +       unsigned int bufsize;
10394 +       void *buf;
10395 +} VCHIQ_DEQUEUE_MESSAGE_T;
10396 +
10397 +typedef struct {
10398 +       unsigned int config_size;
10399 +       VCHIQ_CONFIG_T *pconfig;
10400 +} VCHIQ_GET_CONFIG_T;
10401 +
10402 +typedef struct {
10403 +       unsigned int handle;
10404 +       VCHIQ_SERVICE_OPTION_T option;
10405 +       int value;
10406 +} VCHIQ_SET_SERVICE_OPTION_T;
10407 +
10408 +typedef struct {
10409 +       void     *virt_addr;
10410 +       size_t    num_bytes;
10411 +} VCHIQ_DUMP_MEM_T;
10412 +
10413 +#define VCHIQ_IOC_CONNECT              _IO(VCHIQ_IOC_MAGIC,   0)
10414 +#define VCHIQ_IOC_SHUTDOWN             _IO(VCHIQ_IOC_MAGIC,   1)
10415 +#define VCHIQ_IOC_CREATE_SERVICE \
10416 +       _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
10417 +#define VCHIQ_IOC_REMOVE_SERVICE       _IO(VCHIQ_IOC_MAGIC,   3)
10418 +#define VCHIQ_IOC_QUEUE_MESSAGE \
10419 +       _IOW(VCHIQ_IOC_MAGIC,  4, VCHIQ_QUEUE_MESSAGE_T)
10420 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
10421 +       _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
10422 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
10423 +       _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
10424 +#define VCHIQ_IOC_AWAIT_COMPLETION \
10425 +       _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
10426 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
10427 +       _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
10428 +#define VCHIQ_IOC_GET_CLIENT_ID        _IO(VCHIQ_IOC_MAGIC,   9)
10429 +#define VCHIQ_IOC_GET_CONFIG \
10430 +       _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
10431 +#define VCHIQ_IOC_CLOSE_SERVICE        _IO(VCHIQ_IOC_MAGIC,   11)
10432 +#define VCHIQ_IOC_USE_SERVICE          _IO(VCHIQ_IOC_MAGIC,   12)
10433 +#define VCHIQ_IOC_RELEASE_SERVICE      _IO(VCHIQ_IOC_MAGIC,   13)
10434 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
10435 +       _IOW(VCHIQ_IOC_MAGIC,  14, VCHIQ_SET_SERVICE_OPTION_T)
10436 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
10437 +       _IOW(VCHIQ_IOC_MAGIC,  15, VCHIQ_DUMP_MEM_T)
10438 +#define VCHIQ_IOC_MAX                  15
10439 +
10440 +#endif
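These ioctls are the user-space face of the same API. A minimal, hypothetical user-space sketch follows; the device node name (shown here as /dev/vchiq) is defined elsewhere in the patch and is an assumption at this point, as is the "EXMP" fourcc.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"

/* Connect to VCHIQ and create (open) one service from user space. */
static int example_open_service(unsigned int *phandle)
{
	VCHIQ_CREATE_SERVICE_T args;
	int fd = open("/dev/vchiq", O_RDWR);   /* assumed device node */

	if (fd < 0)
		return -1;

	if (ioctl(fd, VCHIQ_IOC_CONNECT) != 0)
		goto fail;

	/* Callback left NULL: user-space completions are collected with
	 * VCHIQ_IOC_AWAIT_COMPLETION rather than through this pointer. */
	memset(&args, 0, sizeof(args));
	args.params.fourcc      = VCHIQ_MAKE_FOURCC('E', 'X', 'M', 'P');
	args.params.version     = 1;
	args.params.version_min = 1;
	args.is_open = 1;                      /* open a client service, not add */
	args.is_vchi = 0;
	if (ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &args) != 0)
		goto fail;

	*phandle = args.handle;                /* OUT: use with the other ioctls */
	return fd;

fail:
	close(fd);
	return -1;
}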
10441 --- /dev/null
10442 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10443 @@ -0,0 +1,456 @@
10444 +/**
10445 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10446 + *
10447 + * Redistribution and use in source and binary forms, with or without
10448 + * modification, are permitted provided that the following conditions
10449 + * are met:
10450 + * 1. Redistributions of source code must retain the above copyright
10451 + *    notice, this list of conditions, and the following disclaimer,
10452 + *    without modification.
10453 + * 2. Redistributions in binary form must reproduce the above copyright
10454 + *    notice, this list of conditions and the following disclaimer in the
10455 + *    documentation and/or other materials provided with the distribution.
10456 + * 3. The names of the above-listed copyright holders may not be used
10457 + *    to endorse or promote products derived from this software without
10458 + *    specific prior written permission.
10459 + *
10460 + * ALTERNATIVELY, this software may be distributed under the terms of the
10461 + * GNU General Public License ("GPL") version 2, as published by the Free
10462 + * Software Foundation.
10463 + *
10464 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10465 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10466 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10467 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10468 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10469 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10470 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10471 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10472 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10473 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10474 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10475 + */
10476 +
10477 +/* ---- Include Files ---------------------------------------------------- */
10478 +
10479 +#include <linux/kernel.h>
10480 +#include <linux/module.h>
10481 +#include <linux/mutex.h>
10482 +
10483 +#include "vchiq_core.h"
10484 +#include "vchiq_arm.h"
10485 +
10486 +/* ---- Public Variables ------------------------------------------------- */
10487 +
10488 +/* ---- Private Constants and Types -------------------------------------- */
10489 +
10490 +struct bulk_waiter_node {
10491 +       struct bulk_waiter bulk_waiter;
10492 +       int pid;
10493 +       struct list_head list;
10494 +};
10495 +
10496 +struct vchiq_instance_struct {
10497 +       VCHIQ_STATE_T *state;
10498 +
10499 +       int connected;
10500 +
10501 +       struct list_head bulk_waiter_list;
10502 +       struct mutex bulk_waiter_list_mutex;
10503 +};
10504 +
10505 +static VCHIQ_STATUS_T
10506 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10507 +       unsigned int size, VCHIQ_BULK_DIR_T dir);
10508 +
10509 +/****************************************************************************
10510 +*
10511 +*   vchiq_initialise
10512 +*
10513 +***************************************************************************/
10514 +#define VCHIQ_INIT_RETRIES 10
10515 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
10516 +{
10517 +       VCHIQ_STATUS_T status = VCHIQ_ERROR;
10518 +       VCHIQ_STATE_T *state;
10519 +       VCHIQ_INSTANCE_T instance = NULL;
10520 +       int i;
10521 +
10522 +       vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
10523 +
10524 +       /* VideoCore may not be ready due to boot-up timing.  It may never be
10525 +          ready if the kernel and firmware are mismatched, so don't block forever. */
10526 +       for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
10527 +               state = vchiq_get_state();
10528 +               if (state)
10529 +                       break;
10530 +               udelay(500);
10531 +       }
10532 +       if (i == VCHIQ_INIT_RETRIES) {
10533 +               vchiq_log_error(vchiq_core_log_level,
10534 +                       "%s: videocore not initialized\n", __func__);
10535 +               goto failed;
10536 +       } else if (i > 0) {
10537 +               vchiq_log_warning(vchiq_core_log_level,
10538 +                       "%s: videocore initialized after %d retries\n", __func__, i);
10539 +       }
10540 +
10541 +       instance = kzalloc(sizeof(*instance), GFP_KERNEL);
10542 +       if (!instance) {
10543 +               vchiq_log_error(vchiq_core_log_level,
10544 +                       "%s: error allocating vchiq instance\n", __func__);
10545 +               goto failed;
10546 +       }
10547 +
10548 +       instance->connected = 0;
10549 +       instance->state = state;
10550 +       mutex_init(&instance->bulk_waiter_list_mutex);
10551 +       INIT_LIST_HEAD(&instance->bulk_waiter_list);
10552 +
10553 +       *instanceOut = instance;
10554 +
10555 +       status = VCHIQ_SUCCESS;
10556 +
10557 +failed:
10558 +       vchiq_log_trace(vchiq_core_log_level,
10559 +               "%s(%p): returning %d", __func__, instance, status);
10560 +
10561 +       return status;
10562 +}
10563 +EXPORT_SYMBOL(vchiq_initialise);
10564 +
10565 +/****************************************************************************
10566 +*
10567 +*   vchiq_shutdown
10568 +*
10569 +***************************************************************************/
10570 +
10571 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
10572 +{
10573 +       VCHIQ_STATUS_T status;
10574 +       VCHIQ_STATE_T *state = instance->state;
10575 +
10576 +       vchiq_log_trace(vchiq_core_log_level,
10577 +               "%s(%p) called", __func__, instance);
10578 +
10579 +       if (mutex_lock_interruptible(&state->mutex) != 0)
10580 +               return VCHIQ_RETRY;
10581 +
10582 +       /* Remove all services */
10583 +       status = vchiq_shutdown_internal(state, instance);
10584 +
10585 +       mutex_unlock(&state->mutex);
10586 +
10587 +       vchiq_log_trace(vchiq_core_log_level,
10588 +               "%s(%p): returning %d", __func__, instance, status);
10589 +
10590 +       if (status == VCHIQ_SUCCESS) {
10591 +               struct list_head *pos, *next;
10592 +               list_for_each_safe(pos, next,
10593 +                               &instance->bulk_waiter_list) {
10594 +                       struct bulk_waiter_node *waiter;
10595 +                       waiter = list_entry(pos,
10596 +                                       struct bulk_waiter_node,
10597 +                                       list);
10598 +                       list_del(pos);
10599 +                       vchiq_log_info(vchiq_arm_log_level,
10600 +                                       "bulk_waiter - cleaned up %p "
10601 +                                       "for pid %d",
10602 +                                       waiter, waiter->pid);
10603 +                       kfree(waiter);
10604 +               }
10605 +               kfree(instance);
10606 +       }
10607 +
10608 +       return status;
10609 +}
10610 +EXPORT_SYMBOL(vchiq_shutdown);
10611 +
10612 +/****************************************************************************
10613 +*
10614 +*   vchiq_is_connected
10615 +*
10616 +***************************************************************************/
10617 +
10618 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
10619 +{
10620 +       return instance->connected;
10621 +}
10622 +
10623 +/****************************************************************************
10624 +*
10625 +*   vchiq_connect
10626 +*
10627 +***************************************************************************/
10628 +
10629 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
10630 +{
10631 +       VCHIQ_STATUS_T status;
10632 +       VCHIQ_STATE_T *state = instance->state;
10633 +
10634 +       vchiq_log_trace(vchiq_core_log_level,
10635 +               "%s(%p) called", __func__, instance);
10636 +
10637 +       if (mutex_lock_interruptible(&state->mutex) != 0) {
10638 +               vchiq_log_trace(vchiq_core_log_level,
10639 +                       "%s: call to mutex_lock failed", __func__);
10640 +               status = VCHIQ_RETRY;
10641 +               goto failed;
10642 +       }
10643 +       status = vchiq_connect_internal(state, instance);
10644 +
10645 +       if (status == VCHIQ_SUCCESS)
10646 +               instance->connected = 1;
10647 +
10648 +       mutex_unlock(&state->mutex);
10649 +
10650 +failed:
10651 +       vchiq_log_trace(vchiq_core_log_level,
10652 +               "%s(%p): returning %d", __func__, instance, status);
10653 +
10654 +       return status;
10655 +}
10656 +EXPORT_SYMBOL(vchiq_connect);
10657 +
10658 +/****************************************************************************
10659 +*
10660 +*   vchiq_add_service
10661 +*
10662 +***************************************************************************/
10663 +
10664 +VCHIQ_STATUS_T vchiq_add_service(
10665 +       VCHIQ_INSTANCE_T              instance,
10666 +       const VCHIQ_SERVICE_PARAMS_T *params,
10667 +       VCHIQ_SERVICE_HANDLE_T       *phandle)
10668 +{
10669 +       VCHIQ_STATUS_T status;
10670 +       VCHIQ_STATE_T *state = instance->state;
10671 +       VCHIQ_SERVICE_T *service = NULL;
10672 +       int srvstate;
10673 +
10674 +       vchiq_log_trace(vchiq_core_log_level,
10675 +               "%s(%p) called", __func__, instance);
10676 +
10677 +       *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10678 +
10679 +       srvstate = vchiq_is_connected(instance)
10680 +               ? VCHIQ_SRVSTATE_LISTENING
10681 +               : VCHIQ_SRVSTATE_HIDDEN;
10682 +
10683 +       service = vchiq_add_service_internal(
10684 +               state,
10685 +               params,
10686 +               srvstate,
10687 +               instance,
10688 +               NULL);
10689 +
10690 +       if (service) {
10691 +               *phandle = service->handle;
10692 +               status = VCHIQ_SUCCESS;
10693 +       } else
10694 +               status = VCHIQ_ERROR;
10695 +
10696 +       vchiq_log_trace(vchiq_core_log_level,
10697 +               "%s(%p): returning %d", __func__, instance, status);
10698 +
10699 +       return status;
10700 +}
10701 +EXPORT_SYMBOL(vchiq_add_service);
10702 +
10703 +/****************************************************************************
10704 +*
10705 +*   vchiq_open_service
10706 +*
10707 +***************************************************************************/
10708 +
10709 +VCHIQ_STATUS_T vchiq_open_service(
10710 +       VCHIQ_INSTANCE_T              instance,
10711 +       const VCHIQ_SERVICE_PARAMS_T *params,
10712 +       VCHIQ_SERVICE_HANDLE_T       *phandle)
10713 +{
10714 +       VCHIQ_STATUS_T   status = VCHIQ_ERROR;
10715 +       VCHIQ_STATE_T   *state = instance->state;
10716 +       VCHIQ_SERVICE_T *service = NULL;
10717 +
10718 +       vchiq_log_trace(vchiq_core_log_level,
10719 +               "%s(%p) called", __func__, instance);
10720 +
10721 +       *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10722 +
10723 +       if (!vchiq_is_connected(instance))
10724 +               goto failed;
10725 +
10726 +       service = vchiq_add_service_internal(state,
10727 +               params,
10728 +               VCHIQ_SRVSTATE_OPENING,
10729 +               instance,
10730 +               NULL);
10731 +
10732 +       if (service) {
10733 +               status = vchiq_open_service_internal(service, current->pid);
10734 +               if (status == VCHIQ_SUCCESS)
10735 +                       *phandle = service->handle;
10736 +               else
10737 +                       vchiq_remove_service(service->handle);
10738 +       }
10739 +
10740 +failed:
10741 +       vchiq_log_trace(vchiq_core_log_level,
10742 +               "%s(%p): returning %d", __func__, instance, status);
10743 +
10744 +       return status;
10745 +}
10746 +EXPORT_SYMBOL(vchiq_open_service);
10747 +
10748 +VCHIQ_STATUS_T
10749 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
10750 +       const void *data, unsigned int size, void *userdata)
10751 +{
10752 +       return vchiq_bulk_transfer(handle,
10753 +               VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10754 +               VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
10755 +}
10756 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
10757 +
10758 +VCHIQ_STATUS_T
10759 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10760 +       unsigned int size, void *userdata)
10761 +{
10762 +       return vchiq_bulk_transfer(handle,
10763 +               VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10764 +               VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
10765 +}
10766 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
10767 +
10768 +VCHIQ_STATUS_T
10769 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
10770 +       unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10771 +{
10772 +       VCHIQ_STATUS_T status;
10773 +
10774 +       switch (mode) {
10775 +       case VCHIQ_BULK_MODE_NOCALLBACK:
10776 +       case VCHIQ_BULK_MODE_CALLBACK:
10777 +               status = vchiq_bulk_transfer(handle,
10778 +                       VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10779 +                       mode, VCHIQ_BULK_TRANSMIT);
10780 +               break;
10781 +       case VCHIQ_BULK_MODE_BLOCKING:
10782 +               status = vchiq_blocking_bulk_transfer(handle,
10783 +                       (void *)data, size, VCHIQ_BULK_TRANSMIT);
10784 +               break;
10785 +       default:
10786 +               return VCHIQ_ERROR;
10787 +       }
10788 +
10789 +       return status;
10790 +}
10791 +EXPORT_SYMBOL(vchiq_bulk_transmit);
10792 +
10793 +VCHIQ_STATUS_T
10794 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10795 +       unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10796 +{
10797 +       VCHIQ_STATUS_T status;
10798 +
10799 +       switch (mode) {
10800 +       case VCHIQ_BULK_MODE_NOCALLBACK:
10801 +       case VCHIQ_BULK_MODE_CALLBACK:
10802 +               status = vchiq_bulk_transfer(handle,
10803 +                       VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10804 +                       mode, VCHIQ_BULK_RECEIVE);
10805 +               break;
10806 +       case VCHIQ_BULK_MODE_BLOCKING:
10807 +               status = vchiq_blocking_bulk_transfer(handle,
10808 +                       (void *)data, size, VCHIQ_BULK_RECEIVE);
10809 +               break;
10810 +       default:
10811 +               return VCHIQ_ERROR;
10812 +       }
10813 +
10814 +       return status;
10815 +}
10816 +EXPORT_SYMBOL(vchiq_bulk_receive);
10817 +
10818 +static VCHIQ_STATUS_T
10819 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10820 +       unsigned int size, VCHIQ_BULK_DIR_T dir)
10821 +{
10822 +       VCHIQ_INSTANCE_T instance;
10823 +       VCHIQ_SERVICE_T *service;
10824 +       VCHIQ_STATUS_T status;
10825 +       struct bulk_waiter_node *waiter = NULL;
10826 +       struct list_head *pos;
10827 +
10828 +       service = find_service_by_handle(handle);
10829 +       if (!service)
10830 +               return VCHIQ_ERROR;
10831 +
10832 +       instance = service->instance;
10833 +
10834 +       unlock_service(service);
10835 +
10836 +       mutex_lock(&instance->bulk_waiter_list_mutex);
10837 +       list_for_each(pos, &instance->bulk_waiter_list) {
10838 +               if (list_entry(pos, struct bulk_waiter_node,
10839 +                               list)->pid == current->pid) {
10840 +                       waiter = list_entry(pos,
10841 +                               struct bulk_waiter_node,
10842 +                               list);
10843 +                       list_del(pos);
10844 +                       break;
10845 +               }
10846 +       }
10847 +       mutex_unlock(&instance->bulk_waiter_list_mutex);
10848 +
10849 +       if (waiter) {
10850 +               VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10851 +               if (bulk) {
10852 +                       /* This thread has an outstanding bulk transfer. */
10853 +                       if ((bulk->data != data) ||
10854 +                               (bulk->size != size)) {
10855 +                               /* This is not a retry of the previous one.
10856 +                               ** Cancel the signal when the transfer
10857 +                               ** completes. */
10858 +                               spin_lock(&bulk_waiter_spinlock);
10859 +                               bulk->userdata = NULL;
10860 +                               spin_unlock(&bulk_waiter_spinlock);
10861 +                       }
10862 +               }
10863 +       }
10864 +
10865 +       if (!waiter) {
10866 +               waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
10867 +               if (!waiter) {
10868 +                       vchiq_log_error(vchiq_core_log_level,
10869 +                               "%s - out of memory", __func__);
10870 +                       return VCHIQ_ERROR;
10871 +               }
10872 +       }
10873 +
10874 +       status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
10875 +               data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
10876 +               dir);
10877 +       if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
10878 +               !waiter->bulk_waiter.bulk) {
10879 +               VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10880 +               if (bulk) {
10881 +                       /* Cancel the signal when the transfer
10882 +                        ** completes. */
10883 +                       spin_lock(&bulk_waiter_spinlock);
10884 +                       bulk->userdata = NULL;
10885 +                       spin_unlock(&bulk_waiter_spinlock);
10886 +               }
10887 +               kfree(waiter);
10888 +       } else {
10889 +               waiter->pid = current->pid;
10890 +               mutex_lock(&instance->bulk_waiter_list_mutex);
10891 +               list_add(&waiter->list, &instance->bulk_waiter_list);
10892 +               mutex_unlock(&instance->bulk_waiter_list_mutex);
10893 +               vchiq_log_info(vchiq_arm_log_level,
10894 +                               "saved bulk_waiter %p for pid %d",
10895 +                               waiter, current->pid);
10896 +       }
10897 +
10898 +       return status;
10899 +}
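vchiq_blocking_bulk_transfer() above keeps a per-pid bulk_waiter node on the instance so that a transfer interrupted by a signal (which surfaces as VCHIQ_RETRY) can be resumed by the same process without losing its completion. From a caller's point of view that machinery stays behind VCHIQ_BULK_MODE_BLOCKING; a hedged sketch of the intended retry pattern, with the buffer and service handle assumed to come from the caller:

#include <linux/sched.h>
#include "vchiq_if.h"

/* Illustrative only: blocking bulk transmit that retries an interrupted
 * wait, giving up if a fatal signal is pending. */
static VCHIQ_STATUS_T example_send_blocking(VCHIQ_SERVICE_HANDLE_T service,
	const void *buf, unsigned int len)
{
	VCHIQ_STATUS_T status;

	do {
		status = vchiq_bulk_transmit(service, buf, len, NULL,
			VCHIQ_BULK_MODE_BLOCKING);
	} while ((status == VCHIQ_RETRY) && !fatal_signal_pending(current));

	return status;
}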
10900 --- /dev/null
10901 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
10902 @@ -0,0 +1,71 @@
10903 +/**
10904 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10905 + *
10906 + * Redistribution and use in source and binary forms, with or without
10907 + * modification, are permitted provided that the following conditions
10908 + * are met:
10909 + * 1. Redistributions of source code must retain the above copyright
10910 + *    notice, this list of conditions, and the following disclaimer,
10911 + *    without modification.
10912 + * 2. Redistributions in binary form must reproduce the above copyright
10913 + *    notice, this list of conditions and the following disclaimer in the
10914 + *    documentation and/or other materials provided with the distribution.
10915 + * 3. The names of the above-listed copyright holders may not be used
10916 + *    to endorse or promote products derived from this software without
10917 + *    specific prior written permission.
10918 + *
10919 + * ALTERNATIVELY, this software may be distributed under the terms of the
10920 + * GNU General Public License ("GPL") version 2, as published by the Free
10921 + * Software Foundation.
10922 + *
10923 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10924 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10925 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10926 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10927 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10928 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10929 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10930 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10931 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10932 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10933 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10934 + */
10935 +
10936 +#ifndef VCHIQ_MEMDRV_H
10937 +#define VCHIQ_MEMDRV_H
10938 +
10939 +/* ---- Include Files ----------------------------------------------------- */
10940 +
10941 +#include <linux/kernel.h>
10942 +#include "vchiq_if.h"
10943 +
10944 +/* ---- Constants and Types ---------------------------------------------- */
10945 +
10946 +typedef struct {
10947 +        void                   *armSharedMemVirt;
10948 +        dma_addr_t              armSharedMemPhys;
10949 +        size_t                  armSharedMemSize;
10950 +
10951 +        void                   *vcSharedMemVirt;
10952 +        dma_addr_t              vcSharedMemPhys;
10953 +        size_t                  vcSharedMemSize;
10954 +} VCHIQ_SHARED_MEM_INFO_T;
10955 +
10956 +/* ---- Variable Externs ------------------------------------------------- */
10957 +
10958 +/* ---- Function Prototypes ---------------------------------------------- */
10959 +
10960 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
10961 +
10962 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
10963 +
10964 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
10965 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
10966 +
10967 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
10968 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
10969 +
10970 +VCHIQ_STATUS_T vchiq_userdrv_resume(
10971 +       const VCHIQ_PLATFORM_DATA_T * platform_data);
10972 +
10973 +#endif
10974 --- /dev/null
10975 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
10976 @@ -0,0 +1,58 @@
10977 +/**
10978 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10979 + *
10980 + * Redistribution and use in source and binary forms, with or without
10981 + * modification, are permitted provided that the following conditions
10982 + * are met:
10983 + * 1. Redistributions of source code must retain the above copyright
10984 + *    notice, this list of conditions, and the following disclaimer,
10985 + *    without modification.
10986 + * 2. Redistributions in binary form must reproduce the above copyright
10987 + *    notice, this list of conditions and the following disclaimer in the
10988 + *    documentation and/or other materials provided with the distribution.
10989 + * 3. The names of the above-listed copyright holders may not be used
10990 + *    to endorse or promote products derived from this software without
10991 + *    specific prior written permission.
10992 + *
10993 + * ALTERNATIVELY, this software may be distributed under the terms of the
10994 + * GNU General Public License ("GPL") version 2, as published by the Free
10995 + * Software Foundation.
10996 + *
10997 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10998 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10999 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11000 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11001 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11002 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11003 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11004 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11005 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11006 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11007 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11008 + */
11009 +
11010 +#ifndef VCHIQ_PAGELIST_H
11011 +#define VCHIQ_PAGELIST_H
11012 +
11013 +#ifndef PAGE_SIZE
11014 +#define PAGE_SIZE 4096
11015 +#endif
11016 +#define CACHE_LINE_SIZE 32
11017 +#define PAGELIST_WRITE 0
11018 +#define PAGELIST_READ 1
11019 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11020 +
11021 +typedef struct pagelist_struct {
11022 +       unsigned long length;
11023 +       unsigned short type;
11024 +       unsigned short offset;
11025 +       unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11026 +                                  pages at consecutive addresses. */
11027 +} PAGELIST_T;
11028 +
11029 +typedef struct fragments_struct {
11030 +       char headbuf[CACHE_LINE_SIZE];
11031 +       char tailbuf[CACHE_LINE_SIZE];
11032 +} FRAGMENTS_T;
11033 +
11034 +#endif /* VCHIQ_PAGELIST_H */
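Because PAGE_SIZE is 4096, every addrs[] entry has 12 spare low-order bits, which (per the comment above) hold the number of following physically consecutive pages. A small decoding sketch, illustrative only; the entry count is assumed to be supplied by the caller, since the driver derives it from length and offset:

#include "vchiq_pagelist.h"

/* Count the pages described by a packed address list.  Each entry is a
 * page-aligned base address with, in its 12 LSBs, the number of further
 * contiguous pages that follow that base. */
static unsigned long pagelist_total_pages(const PAGELIST_T *pagelist,
	unsigned int num_entries)
{
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		unsigned long entry = pagelist->addrs[i];
		unsigned long run = (entry & (PAGE_SIZE - 1)) + 1;

		/* (entry & ~(PAGE_SIZE - 1)) is the base address of the run */
		total += run;
	}
	return total;
}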
11035 --- /dev/null
11036 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11037 @@ -0,0 +1,254 @@
11038 +/**
11039 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11040 + *
11041 + * Redistribution and use in source and binary forms, with or without
11042 + * modification, are permitted provided that the following conditions
11043 + * are met:
11044 + * 1. Redistributions of source code must retain the above copyright
11045 + *    notice, this list of conditions, and the following disclaimer,
11046 + *    without modification.
11047 + * 2. Redistributions in binary form must reproduce the above copyright
11048 + *    notice, this list of conditions and the following disclaimer in the
11049 + *    documentation and/or other materials provided with the distribution.
11050 + * 3. The names of the above-listed copyright holders may not be used
11051 + *    to endorse or promote products derived from this software without
11052 + *    specific prior written permission.
11053 + *
11054 + * ALTERNATIVELY, this software may be distributed under the terms of the
11055 + * GNU General Public License ("GPL") version 2, as published by the Free
11056 + * Software Foundation.
11057 + *
11058 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11059 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11060 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11061 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11062 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11063 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11064 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11065 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11066 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11067 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11068 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11069 + */
11070 +
11071 +
11072 +#include <linux/proc_fs.h>
11073 +#include "vchiq_core.h"
11074 +#include "vchiq_arm.h"
11075 +
11076 +#if 1 /* proc interface stubbed out for now; full implementation below is compiled out */
11077 +
11078 +int vchiq_proc_init(void)
11079 +{
11080 +       return 0;
11081 +}
11082 +
11083 +void vchiq_proc_deinit(void)
11084 +{
11085 +}
11086 +
11087 +#else
11088 +
11089 +struct vchiq_proc_info {
11090 +       /* Global 'vc' proc entry used by all instances */
11091 +       struct proc_dir_entry *vc_cfg_dir;
11092 +
11093 +       /* one entry per client process */
11094 +       struct proc_dir_entry *clients;
11095 +
11096 +       /* log categories */
11097 +       struct proc_dir_entry *log_categories;
11098 +};
11099 +
11100 +static struct vchiq_proc_info proc_info;
11101 +
11102 +struct proc_dir_entry *vchiq_proc_top(void)
11103 +{
11104 +       BUG_ON(proc_info.vc_cfg_dir == NULL);
11105 +       return proc_info.vc_cfg_dir;
11106 +}
11107 +
11108 +/****************************************************************************
11109 +*
11110 +*   log category entries
11111 +*
11112 +***************************************************************************/
11113 +#define PROC_WRITE_BUF_SIZE 256
11114 +
11115 +#define VCHIQ_LOG_ERROR_STR   "error"
11116 +#define VCHIQ_LOG_WARNING_STR "warning"
11117 +#define VCHIQ_LOG_INFO_STR    "info"
11118 +#define VCHIQ_LOG_TRACE_STR   "trace"
11119 +
11120 +static int log_cfg_read(char *buffer,
11121 +       char **start,
11122 +       off_t off,
11123 +       int count,
11124 +       int *eof,
11125 +       void *data)
11126 +{
11127 +       int len = 0;
11128 +       char *log_value = NULL;
11129 +
11130 +       switch (*((int *)data)) {
11131 +       case VCHIQ_LOG_ERROR:
11132 +               log_value = VCHIQ_LOG_ERROR_STR;
11133 +               break;
11134 +       case VCHIQ_LOG_WARNING:
11135 +               log_value = VCHIQ_LOG_WARNING_STR;
11136 +               break;
11137 +       case VCHIQ_LOG_INFO:
11138 +               log_value = VCHIQ_LOG_INFO_STR;
11139 +               break;
11140 +       case VCHIQ_LOG_TRACE:
11141 +               log_value = VCHIQ_LOG_TRACE_STR;
11142 +               break;
11143 +       default:
11144 +               break;
11145 +       }
11146 +
11147 +       len += sprintf(buffer + len,
11148 +               "%s\n",
11149 +               log_value ? log_value : "(null)");
11150 +
11151 +       return len;
11152 +}
11153 +
11154 +
11155 +static int log_cfg_write(struct file *file,
11156 +       const char __user *buffer,
11157 +       unsigned long count,
11158 +       void *data)
11159 +{
11160 +       int *log_module = data;
11161 +       char kbuf[PROC_WRITE_BUF_SIZE + 1];
11162 +
11163 +       (void)file;
11164 +
11165 +       memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
11166 +       if (count >= PROC_WRITE_BUF_SIZE)
11167 +               count = PROC_WRITE_BUF_SIZE;
11168 +
11169 +       if (copy_from_user(kbuf,
11170 +               buffer,
11171 +               count) != 0)
11172 +               return -EFAULT;
11173 +       kbuf[count - 1] = 0;    /* overwrite the trailing newline with a terminator */
11174 +
11175 +       if (strncmp("error", kbuf, strlen("error")) == 0)
11176 +               *log_module = VCHIQ_LOG_ERROR;
11177 +       else if (strncmp("warning", kbuf, strlen("warning")) == 0)
11178 +               *log_module = VCHIQ_LOG_WARNING;
11179 +       else if (strncmp("info", kbuf, strlen("info")) == 0)
11180 +               *log_module = VCHIQ_LOG_INFO;
11181 +       else if (strncmp("trace", kbuf, strlen("trace")) == 0)
11182 +               *log_module = VCHIQ_LOG_TRACE;
11183 +       else
11184 +               *log_module = VCHIQ_LOG_DEFAULT;
11185 +
11186 +       return count;
11187 +}
11188 +
11189 +/* Log category proc entries */
11190 +struct vchiq_proc_log_entry {
11191 +       const char *name;
11192 +       int *plevel;
11193 +       struct proc_dir_entry *dir;
11194 +};
11195 +
11196 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
11197 +       { "core", &vchiq_core_log_level },
11198 +       { "msg",  &vchiq_core_msg_log_level },
11199 +       { "sync", &vchiq_sync_log_level },
11200 +       { "susp", &vchiq_susp_log_level },
11201 +       { "arm",  &vchiq_arm_log_level },
11202 +};
11203 +static int n_log_entries =
11204 +       ARRAY_SIZE(vchiq_proc_log_entries);
11205 +
11206 +/* create an entry under /proc/vc/log for each log category */
11207 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
11208 +{
11209 +       struct proc_dir_entry *dir;
11210 +       size_t i;
11211 +       int ret = 0;
11212 +       dir = proc_mkdir("log", proc_info.vc_cfg_dir);
11213 +       if (!dir)
11214 +               return -ENOMEM;
11215 +       proc_info.log_categories = dir;
11216 +
11217 +       for (i = 0; i < n_log_entries; i++) {
11218 +               dir = create_proc_entry(vchiq_proc_log_entries[i].name,
11219 +                                       0644,
11220 +                                       proc_info.log_categories);
11221 +               if (!dir) {
11222 +                       ret = -ENOMEM;
11223 +                       break;
11224 +               }
11225 +
11226 +               dir->read_proc = &log_cfg_read;
11227 +               dir->write_proc = &log_cfg_write;
11228 +               dir->data = (void *)vchiq_proc_log_entries[i].plevel;
11229 +
11230 +               vchiq_proc_log_entries[i].dir = dir;
11231 +       }
11232 +       return ret;
11233 +}
11234 +
11235 +
11236 +int vchiq_proc_init(void)
11237 +{
11238 +       BUG_ON(proc_info.vc_cfg_dir != NULL);
11239 +
11240 +       proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
11241 +       if (proc_info.vc_cfg_dir == NULL)
11242 +               goto fail;
11243 +
11244 +       proc_info.clients = proc_mkdir("clients",
11245 +                               proc_info.vc_cfg_dir);
11246 +       if (!proc_info.clients)
11247 +               goto fail;
11248 +
11249 +       if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
11250 +               goto fail;
11251 +
11252 +       return 0;
11253 +
11254 +fail:
11255 +       vchiq_proc_deinit();
11256 +       vchiq_log_error(vchiq_arm_log_level,
11257 +               "%s: failed to create proc directory",
11258 +               __func__);
11259 +
11260 +       return -ENOMEM;
11261 +}
11262 +
11263 +/* remove all the proc entries */
11264 +void vchiq_proc_deinit(void)
11265 +{
11266 +       /* log category entries */
11267 +       if (proc_info.log_categories) {
11268 +               size_t i;
11269 +               for (i = 0; i < n_log_entries; i++)
11270 +                       if (vchiq_proc_log_entries[i].dir)
11271 +                               remove_proc_entry(
11272 +                                       vchiq_proc_log_entries[i].name,
11273 +                                       proc_info.log_categories);
11274 +
11275 +               remove_proc_entry(proc_info.log_categories->name,
11276 +                                 proc_info.vc_cfg_dir);
11277 +       }
11278 +       if (proc_info.clients)
11279 +               remove_proc_entry(proc_info.clients->name,
11280 +                                 proc_info.vc_cfg_dir);
11281 +       if (proc_info.vc_cfg_dir)
11282 +               remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
11283 +}
11284 +
11285 +struct proc_dir_entry *vchiq_clients_top(void)
11286 +{
11287 +       return proc_info.clients;
11288 +}
11289 +
11290 +#endif
11291 +
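Note that with the "#if 1" above, only the stub init/deinit are built and no proc entries are created. The full implementation in the #else branch creates /proc/vc/clients plus one writable entry per log category under /proc/vc/log, each accepting "error", "warning", "info" or "trace" (anything else resets the category to its default). A hypothetical user-space helper, assuming that branch is enabled:

#include <stdio.h>

/* Set one VCHIQ log category, e.g. set_vchiq_log_level("core", "trace"). */
static int set_vchiq_log_level(const char *category, const char *level)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/vc/log/%s", category);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", level);  /* trailing newline is stripped by the driver */
	return fclose(f);
}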
11292 --- /dev/null
11293 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11294 @@ -0,0 +1,815 @@
11295 +/**
11296 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11297 + *
11298 + * Redistribution and use in source and binary forms, with or without
11299 + * modification, are permitted provided that the following conditions
11300 + * are met:
11301 + * 1. Redistributions of source code must retain the above copyright
11302 + *    notice, this list of conditions, and the following disclaimer,
11303 + *    without modification.
11304 + * 2. Redistributions in binary form must reproduce the above copyright
11305 + *    notice, this list of conditions and the following disclaimer in the
11306 + *    documentation and/or other materials provided with the distribution.
11307 + * 3. The names of the above-listed copyright holders may not be used
11308 + *    to endorse or promote products derived from this software without
11309 + *    specific prior written permission.
11310 + *
11311 + * ALTERNATIVELY, this software may be distributed under the terms of the
11312 + * GNU General Public License ("GPL") version 2, as published by the Free
11313 + * Software Foundation.
11314 + *
11315 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11316 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11317 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11318 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11319 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11320 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11321 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11322 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11323 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11324 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11325 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11326 + */
11327 +#include <linux/module.h>
11328 +#include <linux/types.h>
11329 +
11330 +#include "interface/vchi/vchi.h"
11331 +#include "vchiq.h"
11332 +#include "vchiq_core.h"
11333 +
11334 +#include "vchiq_util.h"
11335 +
11336 +#include <stddef.h>
11337 +
11338 +#define vchiq_status_to_vchi(status) ((int32_t)(status))
11339 +
11340 +typedef struct {
11341 +       VCHIQ_SERVICE_HANDLE_T handle;
11342 +
11343 +       VCHIU_QUEUE_T queue;
11344 +
11345 +       VCHI_CALLBACK_T callback;
11346 +       void *callback_param;
11347 +} SHIM_SERVICE_T;
11348 +
11349 +/* ----------------------------------------------------------------------
11350 + * return pointer to the mphi message driver function table
11351 + * -------------------------------------------------------------------- */
11352 +const VCHI_MESSAGE_DRIVER_T *
11353 +vchi_mphi_message_driver_func_table(void)
11354 +{
11355 +       return NULL;
11356 +}
11357 +
11358 +/* ----------------------------------------------------------------------
11359 + * return a pointer to the 'single' connection driver fops
11360 + * -------------------------------------------------------------------- */
11361 +const VCHI_CONNECTION_API_T *
11362 +single_get_func_table(void)
11363 +{
11364 +       return NULL;
11365 +}
11366 +
11367 +VCHI_CONNECTION_T *vchi_create_connection(
11368 +       const VCHI_CONNECTION_API_T *function_table,
11369 +       const VCHI_MESSAGE_DRIVER_T *low_level)
11370 +{
11371 +       (void)function_table;
11372 +       (void)low_level;
11373 +       return NULL;
11374 +}
11375 +
11376 +/***********************************************************
11377 + * Name: vchi_msg_peek
11378 + *
11379 + * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
11380 + *             void **data,
11381 + *             uint32_t *msg_size,
11382 + *
11383 + *
11384 + *             VCHI_FLAGS_T flags
11385 + *
11386 + * Description: Routine to return a pointer to the current message (to allow in
11387 + *              place processing). The message can be removed using
11388 + *              vchi_msg_remove when you're finished
11389 + *
11390 + * Returns: int32_t - success == 0
11391 + *
11392 + ***********************************************************/
11393 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11394 +       void **data,
11395 +       uint32_t *msg_size,
11396 +       VCHI_FLAGS_T flags)
11397 +{
11398 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11399 +       VCHIQ_HEADER_T *header;
11400 +
11401 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
11402 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11403 +
11404 +       if (flags == VCHI_FLAGS_NONE)
11405 +               if (vchiu_queue_is_empty(&service->queue))
11406 +                       return -1;
11407 +
11408 +       header = vchiu_queue_peek(&service->queue);
11409 +
11410 +       *data = header->data;
11411 +       *msg_size = header->size;
11412 +
11413 +       return 0;
11414 +}
11415 +EXPORT_SYMBOL(vchi_msg_peek);
11416 +
11417 +/***********************************************************
11418 + * Name: vchi_msg_remove
11419 + *
11420 + * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
11421 + *
11422 + * Description: Routine to remove a message (after it has been read with
11423 + *              vchi_msg_peek)
11424 + *
11425 + * Returns: int32_t - success == 0
11426 + *
11427 + ***********************************************************/
11428 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11429 +{
11430 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11431 +       VCHIQ_HEADER_T *header;
11432 +
11433 +       header = vchiu_queue_pop(&service->queue);
11434 +
11435 +       vchiq_release_message(service->handle, header);
11436 +
11437 +       return 0;
11438 +}
11439 +EXPORT_SYMBOL(vchi_msg_remove);
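vchi_msg_peek() and vchi_msg_remove() are meant to be used as a pair for in-place processing: with VCHI_FLAGS_NONE, peek returns -1 once the queue is empty, so a consumer loop terminates naturally. A short, hypothetical sketch (the handle is assumed to come from the usual vchi service-open path):

/* Drain every queued message without copying it out of the slot. */
static void example_drain(VCHI_SERVICE_HANDLE_T handle)
{
	void *data;
	uint32_t size;

	while (vchi_msg_peek(handle, &data, &size, VCHI_FLAGS_NONE) == 0) {
		/* ... parse "size" bytes at "data" in place ... */
		vchi_msg_remove(handle);
	}
}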
11440 +
11441 +/***********************************************************
11442 + * Name: vchi_msg_queue
11443 + *
11444 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11445 + *             const void *data,
11446 + *             uint32_t data_size,
11447 + *             VCHI_FLAGS_T flags,
11448 + *             void *msg_handle,
11449 + *
11450 + * Description: Thin wrapper to queue a message onto a connection
11451 + *
11452 + * Returns: int32_t - success == 0
11453 + *
11454 + ***********************************************************/
11455 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11456 +       const void *data,
11457 +       uint32_t data_size,
11458 +       VCHI_FLAGS_T flags,
11459 +       void *msg_handle)
11460 +{
11461 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11462 +       VCHIQ_ELEMENT_T element = {data, data_size};
11463 +       VCHIQ_STATUS_T status;
11464 +
11465 +       (void)msg_handle;
11466 +
11467 +       WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11468 +
11469 +       status = vchiq_queue_message(service->handle, &element, 1);
11470 +
11471 +       /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11472 +        * implement a retry mechanism since this function is supposed
11473 +        * to block until queued.
11474 +        */
11475 +       while (status == VCHIQ_RETRY) {
11476 +               msleep(1);
11477 +               status = vchiq_queue_message(service->handle, &element, 1);
11478 +       }
11479 +
11480 +       return vchiq_status_to_vchi(status);
11481 +}
11482 +EXPORT_SYMBOL(vchi_msg_queue);
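+
+/*
+ * Minimal sketch of queueing a message (illustrative only; "svc" and the
+ * payload are hypothetical). The wrapper retries internally on VCHIQ_RETRY,
+ * so with VCHI_FLAGS_BLOCK_UNTIL_QUEUED it returns only once the message
+ * has been queued (or on error):
+ *
+ *     static const char payload[] = "ping";
+ *
+ *     if (vchi_msg_queue(svc, payload, sizeof(payload),
+ *                     VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL) != 0)
+ *             pr_err("vchi_msg_queue failed\n");
+ *
+ * msg_handle is unused by this implementation and may be NULL.
+ */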
11483 +
11484 +/***********************************************************
11485 + * Name: vchi_bulk_queue_receive
11486 + *
11487 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11488 + *             void *data_dst,
11489 + *             const uint32_t data_size,
11490 + *             VCHI_FLAGS_T flags,
11491 + *             void *bulk_handle
11492 + *
11493 + * Description: Routine to set up a receive buffer
11494 + *
11495 + * Returns: int32_t - success == 0
11496 + *
11497 + ***********************************************************/
11498 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11499 +       void *data_dst,
11500 +       uint32_t data_size,
11501 +       VCHI_FLAGS_T flags,
11502 +       void *bulk_handle)
11503 +{
11504 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11505 +       VCHIQ_BULK_MODE_T mode;
11506 +       VCHIQ_STATUS_T status;
11507 +
11508 +       switch ((int)flags) {
11509 +       case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11510 +               | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11511 +               WARN_ON(!service->callback);
11512 +               mode = VCHIQ_BULK_MODE_CALLBACK;
11513 +               break;
11514 +       case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11515 +               mode = VCHIQ_BULK_MODE_BLOCKING;
11516 +               break;
11517 +       case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11518 +       case VCHI_FLAGS_NONE:
11519 +               mode = VCHIQ_BULK_MODE_NOCALLBACK;
11520 +               break;
11521 +       default:
11522 +               WARN(1, "unsupported message\n");
11523 +               return vchiq_status_to_vchi(VCHIQ_ERROR);
11524 +       }
11525 +
11526 +       status = vchiq_bulk_receive(service->handle, data_dst, data_size,
11527 +               bulk_handle, mode);
11528 +
11529 +       /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
11530 +        * implement a retry mechanism since this function is supposed
11531 +        * to block until queued.
11532 +        */
11533 +       while (status == VCHIQ_RETRY) {
11534 +               msleep(1);
11535 +               status = vchiq_bulk_receive(service->handle, data_dst,
11536 +                       data_size, bulk_handle, mode);
11537 +       }
11538 +
11539 +       return vchiq_status_to_vchi(status);
11540 +}
11541 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
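+
+/*
+ * Blocking receive sketch (illustrative only; the buffer size, service
+ * handle and consumer are hypothetical). VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE
+ * maps to VCHIQ_BULK_MODE_BLOCKING, so the call returns only once the data
+ * has arrived:
+ *
+ *     char *buf = kmalloc(4096, GFP_KERNEL);
+ *
+ *     if (buf && vchi_bulk_queue_receive(svc, buf, 4096,
+ *                     VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE, NULL) == 0)
+ *             consume(buf);            // hypothetical
+ */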
11542 +
11543 +/***********************************************************
11544 + * Name: vchi_bulk_queue_transmit
11545 + *
11546 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11547 + *             const void *data_src,
11548 + *             uint32_t data_size,
11549 + *             VCHI_FLAGS_T flags,
11550 + *             void *bulk_handle
11551 + *
11552 + * Description: Routine to transmit some data
11553 + *
11554 + * Returns: int32_t - success == 0
11555 + *
11556 + ***********************************************************/
11557 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
11558 +       const void *data_src,
11559 +       uint32_t data_size,
11560 +       VCHI_FLAGS_T flags,
11561 +       void *bulk_handle)
11562 +{
11563 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11564 +       VCHIQ_BULK_MODE_T mode;
11565 +       VCHIQ_STATUS_T status;
11566 +
11567 +       switch ((int)flags) {
11568 +       case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11569 +               | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11570 +               WARN_ON(!service->callback);
11571 +               mode = VCHIQ_BULK_MODE_CALLBACK;
11572 +               break;
11573 +       case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
11574 +       case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11575 +               mode = VCHIQ_BULK_MODE_BLOCKING;
11576 +               break;
11577 +       case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11578 +       case VCHI_FLAGS_NONE:
11579 +               mode = VCHIQ_BULK_MODE_NOCALLBACK;
11580 +               break;
11581 +       default:
11582 +               WARN(1, "unsupported message\n");
11583 +               return vchiq_status_to_vchi(VCHIQ_ERROR);
11584 +       }
11585 +
11586 +       status = vchiq_bulk_transmit(service->handle, data_src, data_size,
11587 +               bulk_handle, mode);
11588 +
11589 +       /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
11590 +        * implement a retry mechanism since this function is supposed
11591 +        * to block until queued.
11592 +        */
11593 +       while (status == VCHIQ_RETRY) {
11594 +               msleep(1);
11595 +               status = vchiq_bulk_transmit(service->handle, data_src,
11596 +                       data_size, bulk_handle, mode);
11597 +       }
11598 +
11599 +       return vchiq_status_to_vchi(status);
11600 +}
11601 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
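+
+/*
+ * Callback-mode transmit sketch (illustrative only; tx_buf/tx_len are
+ * hypothetical). With VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ * VCHI_FLAGS_BLOCK_UNTIL_QUEUED the service callback registered at open
+ * time later receives VCHI_CALLBACK_BULK_SENT with the bulk_handle passed
+ * here, so the buffer must stay valid until that callback fires:
+ *
+ *     vchi_bulk_queue_transmit(svc, tx_buf, tx_len,
+ *             VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ *             VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+ *             tx_buf);         // handed back as bulk_user in the callback
+ */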
11602 +
11603 +/***********************************************************
11604 + * Name: vchi_msg_dequeue
11605 + *
11606 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11607 + *             void *data,
11608 + *             uint32_t max_data_size_to_read,
11609 + *             uint32_t *actual_msg_size,
11610 + *             VCHI_FLAGS_T flags
11611 + *
11612 + * Description: Routine to dequeue a message into the supplied buffer
11613 + *
11614 + * Returns: int32_t - success == 0
11615 + *
11616 + ***********************************************************/
11617 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
11618 +       void *data,
11619 +       uint32_t max_data_size_to_read,
11620 +       uint32_t *actual_msg_size,
11621 +       VCHI_FLAGS_T flags)
11622 +{
11623 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11624 +       VCHIQ_HEADER_T *header;
11625 +
11626 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
11627 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11628 +
11629 +       if (flags == VCHI_FLAGS_NONE)
11630 +               if (vchiu_queue_is_empty(&service->queue))
11631 +                       return -1;
11632 +
11633 +       header = vchiu_queue_pop(&service->queue);
11634 +
11635 +       memcpy(data, header->data, header->size < max_data_size_to_read ?
11636 +               header->size : max_data_size_to_read);
11637 +
11638 +       *actual_msg_size = header->size;
11639 +
11640 +       vchiq_release_message(service->handle, header);
11641 +
11642 +       return 0;
11643 +}
11644 +EXPORT_SYMBOL(vchi_msg_dequeue);
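+
+/*
+ * Copy-out sketch (illustrative only; the buffer size and handler are
+ * hypothetical). Unlike vchi_msg_peek/hold, this copies the payload into a
+ * caller buffer and releases the message in one step; if the message is
+ * larger than the buffer the copy is truncated, but *actual_msg_size still
+ * reports the full size:
+ *
+ *     char buf[64];
+ *     uint32_t actual;
+ *
+ *     if (vchi_msg_dequeue(svc, buf, sizeof(buf), &actual,
+ *                     VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE) == 0 &&
+ *                     actual <= sizeof(buf))
+ *             handle_msg(buf, actual);         // hypothetical
+ */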
11645 +
11646 +/***********************************************************
11647 + * Name: vchi_msg_queuev
11648 + *
11649 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11650 + *             VCHI_MSG_VECTOR_T *vector,
11651 + *             uint32_t count,
11652 + *             VCHI_FLAGS_T flags,
11653 + *             void *msg_handle
11654 + *
11655 + * Description: Thin wrapper to queue a message onto a connection
11656 + *
11657 + * Returns: int32_t - success == 0
11658 + *
11659 + ***********************************************************/
11660 +
11661 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
11662 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
11663 +       offsetof(VCHIQ_ELEMENT_T, data));
11664 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
11665 +       offsetof(VCHIQ_ELEMENT_T, size));
11666 +
11667 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
11668 +       VCHI_MSG_VECTOR_T *vector,
11669 +       uint32_t count,
11670 +       VCHI_FLAGS_T flags,
11671 +       void *msg_handle)
11672 +{
11673 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11674 +
11675 +       (void)msg_handle;
11676 +
11677 +       WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11678 +
11679 +       return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
11680 +               (const VCHIQ_ELEMENT_T *)vector, count));
11681 +}
11682 +EXPORT_SYMBOL(vchi_msg_queuev);
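+
+/*
+ * Vector sketch (illustrative only; the header/payload split is
+ * hypothetical). The static asserts above guarantee VCHI_MSG_VECTOR_T and
+ * VCHIQ_ELEMENT_T share a layout, so the vector is passed through unchanged
+ * and its elements are sent as a single message. Note that, unlike
+ * vchi_msg_queue(), a VCHIQ_RETRY result is returned to the caller rather
+ * than retried here:
+ *
+ *     VCHI_MSG_VECTOR_T vec[2] = {
+ *             { .vec_base = &hdr,    .vec_len = sizeof(hdr) },
+ *             { .vec_base = payload, .vec_len = payload_len },
+ *     };
+ *
+ *     vchi_msg_queuev(svc, vec, 2, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+ */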
11683 +
11684 +/***********************************************************
11685 + * Name: vchi_held_msg_release
11686 + *
11687 + * Arguments:  VCHI_HELD_MSG_T *message
11688 + *
11689 + * Description: Routine to release a held message (after it has been read with
11690 + *              vchi_msg_hold)
11691 + *
11692 + * Returns: int32_t - success == 0
11693 + *
11694 + ***********************************************************/
11695 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
11696 +{
11697 +       vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
11698 +               (VCHIQ_HEADER_T *)message->message);
11699 +
11700 +       return 0;
11701 +}
11702 +
11703 +/***********************************************************
11704 + * Name: vchi_msg_hold
11705 + *
11706 + * Arguments:  VCHI_SERVICE_HANDLE_T handle,
11707 + *             void **data,
11708 + *             uint32_t *msg_size,
11709 + *             VCHI_FLAGS_T flags,
11710 + *             VCHI_HELD_MSG_T *message_handle
11711 + *
11712 + * Description: Routine to return a pointer to the current message (to allow
11713 + *              in-place processing). The message is dequeued, so it must be
11714 + *              released with vchi_held_msg_release when you have finished
11715 + *              with it.
11716 + *
11717 + * Returns: int32_t - success == 0
11718 + *
11719 + ***********************************************************/
11720 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
11721 +       void **data,
11722 +       uint32_t *msg_size,
11723 +       VCHI_FLAGS_T flags,
11724 +       VCHI_HELD_MSG_T *message_handle)
11725 +{
11726 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11727 +       VCHIQ_HEADER_T *header;
11728 +
11729 +       WARN_ON((flags != VCHI_FLAGS_NONE) &&
11730 +               (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11731 +
11732 +       if (flags == VCHI_FLAGS_NONE)
11733 +               if (vchiu_queue_is_empty(&service->queue))
11734 +                       return -1;
11735 +
11736 +       header = vchiu_queue_pop(&service->queue);
11737 +
11738 +       *data = header->data;
11739 +       *msg_size = header->size;
11740 +
11741 +       message_handle->service =
11742 +               (struct opaque_vchi_service_t *)service->handle;
11743 +       message_handle->message = header;
11744 +
11745 +       return 0;
11746 +}
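+
+/*
+ * Hold/release sketch (illustrative only; "svc" and the payload consumer
+ * are hypothetical). Unlike vchi_msg_peek, the message is dequeued
+ * immediately, so later messages can be processed while this one is still
+ * held:
+ *
+ *     void *data;
+ *     uint32_t size;
+ *     VCHI_HELD_MSG_T held;
+ *
+ *     if (vchi_msg_hold(svc, &data, &size, VCHI_FLAGS_NONE, &held) == 0) {
+ *             process_payload(data, size);     // hypothetical
+ *             vchi_held_msg_release(&held);
+ *     }
+ */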
11747 +
11748 +/***********************************************************
11749 + * Name: vchi_initialise
11750 + *
11751 + * Arguments: VCHI_INSTANCE_T *instance_handle
11754 + *
11755 + * Description: Initialises the hardware but does not transmit anything.
11756 + *              When run as a host application this will be called twice,
11757 + *              hence the need to malloc the state information.
11758 + *
11759 + * Returns: 0 if successful, failure otherwise
11760 + *
11761 + ***********************************************************/
11762 +
11763 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
11764 +{
11765 +       VCHIQ_INSTANCE_T instance;
11766 +       VCHIQ_STATUS_T status;
11767 +
11768 +       status = vchiq_initialise(&instance);
11769 +
11770 +       *instance_handle = (VCHI_INSTANCE_T)instance;
11771 +
11772 +       return vchiq_status_to_vchi(status);
11773 +}
11774 +EXPORT_SYMBOL(vchi_initialise);
11775 +
11776 +/***********************************************************
11777 + * Name: vchi_connect
11778 + *
11779 + * Arguments: VCHI_CONNECTION_T **connections
11780 + *            const uint32_t num_connections
11781 + *            VCHI_INSTANCE_T instance_handle)
11782 + *
11783 + * Description: Starts the command service on each connection,
11784 + *              causing INIT messages to be pinged back and forth
11785 + *
11786 + * Returns: 0 if successful, failure otherwise
11787 + *
11788 + ***********************************************************/
11789 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
11790 +       const uint32_t num_connections,
11791 +       VCHI_INSTANCE_T instance_handle)
11792 +{
11793 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11794 +
11795 +       (void)connections;
11796 +       (void)num_connections;
11797 +
11798 +       return vchiq_connect(instance);
11799 +}
11800 +EXPORT_SYMBOL(vchi_connect);
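+
+/*
+ * Typical start-up sketch (illustrative only; the error handling is
+ * hypothetical). The connections array is ignored by this shim, so NULL/0
+ * may be passed:
+ *
+ *     VCHI_INSTANCE_T instance;
+ *
+ *     if (vchi_initialise(&instance) != 0 ||
+ *                     vchi_connect(NULL, 0, instance) != 0)
+ *             return -EIO;             // hypothetical error path
+ *
+ * Services can then be opened on "instance" with vchi_service_open().
+ */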
11801 +
11802 +
11803 +/***********************************************************
11804 + * Name: vchi_disconnect
11805 + *
11806 + * Arguments: VCHI_INSTANCE_T instance_handle
11807 + *
11808 + * Description: Stops the command service on each connection,
11809 + *              causing DE-INIT messages to be pinged back and forth
11810 + *
11811 + * Returns: 0 if successful, failure otherwise
11812 + *
11813 + ***********************************************************/
11814 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
11815 +{
11816 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11817 +       return vchiq_status_to_vchi(vchiq_shutdown(instance));
11818 +}
11819 +EXPORT_SYMBOL(vchi_disconnect);
11820 +
11821 +
11822 +/***********************************************************
11823 + * Name: vchi_service_open
11824 + * Name: vchi_service_create
11825 + *
11826 + * Arguments: VCHI_INSTANCE_T *instance_handle
11827 + *            SERVICE_CREATION_T *setup,
11828 + *            VCHI_SERVICE_HANDLE_T *handle
11829 + *
11830 + * Description: Routines to open or create a service
11831 + *
11832 + * Returns: int32_t - success == 0
11833 + *
11834 + ***********************************************************/
11835 +
11836 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
11837 +       VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
11838 +{
11839 +       SHIM_SERVICE_T *service =
11840 +               (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
11841 +
11842 +       switch (reason) {
11843 +       case VCHIQ_MESSAGE_AVAILABLE:
11844 +               vchiu_queue_push(&service->queue, header);
11845 +
11846 +               if (service->callback)
11847 +                       service->callback(service->callback_param,
11848 +                               VCHI_CALLBACK_MSG_AVAILABLE, NULL);
11849 +               break;
11850 +       case VCHIQ_BULK_TRANSMIT_DONE:
11851 +               if (service->callback)
11852 +                       service->callback(service->callback_param,
11853 +                               VCHI_CALLBACK_BULK_SENT, bulk_user);
11854 +               break;
11855 +       case VCHIQ_BULK_RECEIVE_DONE:
11856 +               if (service->callback)
11857 +                       service->callback(service->callback_param,
11858 +                               VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
11859 +               break;
11860 +       case VCHIQ_SERVICE_CLOSED:
11861 +               if (service->callback)
11862 +                       service->callback(service->callback_param,
11863 +                               VCHI_CALLBACK_SERVICE_CLOSED, NULL);
11864 +               break;
11865 +       case VCHIQ_SERVICE_OPENED:
11866 +               /* No equivalent VCHI reason */
11867 +               break;
11868 +       case VCHIQ_BULK_TRANSMIT_ABORTED:
11869 +               if (service->callback)
11870 +                       service->callback(service->callback_param,
11871 +                               VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
11872 +               break;
11873 +       case VCHIQ_BULK_RECEIVE_ABORTED:
11874 +               if (service->callback)
11875 +                       service->callback(service->callback_param,
11876 +                               VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
11877 +               break;
11878 +       default:
11879 +               WARN(1, "not supported\n");
11880 +               break;
11881 +       }
11882 +
11883 +       return VCHIQ_SUCCESS;
11884 +}
11885 +
11886 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
11887 +       SERVICE_CREATION_T *setup)
11888 +{
11889 +       SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
11890 +
11891 +       (void)instance;
11892 +
11893 +       if (service) {
11894 +               if (vchiu_queue_init(&service->queue, 64)) {
11895 +                       service->callback = setup->callback;
11896 +                       service->callback_param = setup->callback_param;
11897 +               } else {
11898 +                       kfree(service);
11899 +                       service = NULL;
11900 +               }
11901 +       }
11902 +
11903 +       return service;
11904 +}
11905 +
11906 +static void service_free(SHIM_SERVICE_T *service)
11907 +{
11908 +       if (service) {
11909 +               vchiu_queue_delete(&service->queue);
11910 +               kfree(service);
11911 +       }
11912 +}
11913 +
11914 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
11915 +       SERVICE_CREATION_T *setup,
11916 +       VCHI_SERVICE_HANDLE_T *handle)
11917 +{
11918 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11919 +       SHIM_SERVICE_T *service = service_alloc(instance, setup);
11920 +       if (service) {
11921 +               VCHIQ_SERVICE_PARAMS_T params;
11922 +               VCHIQ_STATUS_T status;
11923 +
11924 +               memset(&params, 0, sizeof(params));
11925 +               params.fourcc = setup->service_id;
11926 +               params.callback = shim_callback;
11927 +               params.userdata = service;
11928 +               params.version = setup->version.version;
11929 +               params.version_min = setup->version.version_min;
11930 +
11931 +               status = vchiq_open_service(instance, &params,
11932 +                       &service->handle);
11933 +               if (status != VCHIQ_SUCCESS) {
11934 +                       service_free(service);
11935 +                       service = NULL;
11936 +               }
11937 +       }
11938 +
11939 +       *handle = (VCHI_SERVICE_HANDLE_T)service;
11940 +
11941 +       return (service != NULL) ? 0 : -1;
11942 +}
11943 +EXPORT_SYMBOL(vchi_service_open);
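+
+/*
+ * Open sketch (illustrative only: the fourcc, callback, state pointer and
+ * version values are hypothetical, and SERVICE_CREATION_T has further
+ * fields not shown here). Only service_id, callback, callback_param and
+ * the version pair are consumed by this shim:
+ *
+ *     SERVICE_CREATION_T setup;
+ *     VCHI_SERVICE_HANDLE_T svc;
+ *
+ *     memset(&setup, 0, sizeof(setup));
+ *     setup.service_id = my_service_fourcc;    // hypothetical 4cc value
+ *     setup.callback = my_vchi_callback;       // hypothetical
+ *     setup.callback_param = &my_state;        // hypothetical
+ *     setup.version.version = 1;
+ *     setup.version.version_min = 1;
+ *
+ *     if (vchi_service_open(instance, &setup, &svc) != 0)
+ *             return -ENXIO;
+ */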
11944 +
11945 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
11946 +       SERVICE_CREATION_T *setup,
11947 +       VCHI_SERVICE_HANDLE_T *handle)
11948 +{
11949 +       VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11950 +       SHIM_SERVICE_T *service = service_alloc(instance, setup);
11951 +       if (service) {
11952 +               VCHIQ_SERVICE_PARAMS_T params;
11953 +               VCHIQ_STATUS_T status;
11954 +
11955 +               memset(&params, 0, sizeof(params));
11956 +               params.fourcc = setup->service_id;
11957 +               params.callback = shim_callback;
11958 +               params.userdata = service;
11959 +               params.version = setup->version.version;
11960 +               params.version_min = setup->version.version_min;
11961 +               status = vchiq_add_service(instance, &params, &service->handle);
11962 +
11963 +               if (status != VCHIQ_SUCCESS) {
11964 +                       service_free(service);
11965 +                       service = NULL;
11966 +               }
11967 +       }
11968 +
11969 +       *handle = (VCHI_SERVICE_HANDLE_T)service;
11970 +
11971 +       return (service != NULL) ? 0 : -1;
11972 +}
11973 +EXPORT_SYMBOL(vchi_service_create);
11974 +
11975 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
11976 +{
11977 +       int32_t ret = -1;
11978 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11979 +       if (service) {
11980 +               VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
11981 +               if (status == VCHIQ_SUCCESS) {
11982 +                       service_free(service);
11983 +                       service = NULL;
11984 +               }
11985 +
11986 +               ret = vchiq_status_to_vchi(status);
11987 +       }
11988 +       return ret;
11989 +}
11990 +EXPORT_SYMBOL(vchi_service_close);
11991 +
11992 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
11993 +{
11994 +       int32_t ret = -1;
11995 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11996 +       if (service) {
11997 +               VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
11998 +               if (status == VCHIQ_SUCCESS) {
11999 +                       service_free(service);
12000 +                       service = NULL;
12001 +               }
12002 +
12003 +               ret = vchiq_status_to_vchi(status);
12004 +       }
12005 +       return ret;
12006 +}
12007 +EXPORT_SYMBOL(vchi_service_destroy);
12008 +
12009 +int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle,
12010 +       short *peer_version)
12011 +{
12012 +       int32_t ret = -1;
12013 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12014 +
12015 +       if (service)
12016 +               ret = vchiq_status_to_vchi(
12017 +                       vchiq_get_peer_version(service->handle, peer_version));
12018 +       return ret;
12019 +}
12020 +EXPORT_SYMBOL(vchi_get_peer_version);
12021 +
12022 +/* ----------------------------------------------------------------------
12023 + * read a uint32_t from buffer.
12024 + * network format is defined to be little endian
12025 + * -------------------------------------------------------------------- */
12026 +uint32_t
12027 +vchi_readbuf_uint32(const void *_ptr)
12028 +{
12029 +       const unsigned char *ptr = _ptr;
12030 +       return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12031 +}
12032 +
12033 +/* ----------------------------------------------------------------------
12034 + * write a uint32_t to buffer.
12035 + * network format is defined to be little endian
12036 + * -------------------------------------------------------------------- */
12037 +void
12038 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12039 +{
12040 +       unsigned char *ptr = _ptr;
12041 +       ptr[0] = (unsigned char)((value >> 0)  & 0xFF);
12042 +       ptr[1] = (unsigned char)((value >> 8)  & 0xFF);
12043 +       ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12044 +       ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12045 +}
12046 +
12047 +/* ----------------------------------------------------------------------
12048 + * read a uint16_t from buffer.
12049 + * network format is defined to be little endian
12050 + * -------------------------------------------------------------------- */
12051 +uint16_t
12052 +vchi_readbuf_uint16(const void *_ptr)
12053 +{
12054 +       const unsigned char *ptr = _ptr;
12055 +       return ptr[0] | (ptr[1] << 8);
12056 +}
12057 +
12058 +/* ----------------------------------------------------------------------
12059 + * write a uint16_t into the buffer.
12060 + * network format is defined to be little endian
12061 + * -------------------------------------------------------------------- */
12062 +void
12063 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12064 +{
12065 +       unsigned char *ptr = _ptr;
12066 +       ptr[0] = (value >> 0)  & 0xFF;
12067 +       ptr[1] = (value >> 8)  & 0xFF;
12068 +}
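+
+/*
+ * For example, vchi_writebuf_uint32(buf, 0x11223344) stores the bytes
+ * 0x44 0x33 0x22 0x11 regardless of host endianness, and
+ * vchi_readbuf_uint32(buf) recovers 0x11223344 on any architecture.
+ */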
12069 +
12070 +/***********************************************************
12071 + * Name: vchi_service_use
12072 + *
12073 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12074 + *
12075 + * Description: Routine to increment refcount on a service
12076 + *
12077 + * Returns: int32_t - success == 0
12078 + *
12079 + ***********************************************************/
12080 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12081 +{
12082 +       int32_t ret = -1;
12083 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12084 +       if (service)
12085 +               ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12086 +       return ret;
12087 +}
12088 +EXPORT_SYMBOL(vchi_service_use);
12089 +
12090 +/***********************************************************
12091 + * Name: vchi_service_release
12092 + *
12093 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12094 + *
12095 + * Description: Routine to decrement refcount on a service
12096 + *
12097 + * Returns: int32_t - success == 0
12098 + *
12099 + ***********************************************************/
12100 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12101 +{
12102 +       int32_t ret = -1;
12103 +       SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12104 +       if (service)
12105 +               ret = vchiq_status_to_vchi(
12106 +                       vchiq_release_service(service->handle));
12107 +       return ret;
12108 +}
12109 +EXPORT_SYMBOL(vchi_service_release);
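+
+/*
+ * Keep-alive sketch (illustrative only; "cmd" is hypothetical). Bracketing
+ * activity with use/release lets the underlying VCHIQ layer track whether
+ * the service is in use (e.g. for suspend/resume decisions):
+ *
+ *     vchi_service_use(svc);
+ *     vchi_msg_queue(svc, cmd, cmd_len,
+ *             VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+ *     vchi_service_release(svc);
+ */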
12110 --- /dev/null
12111 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12112 @@ -0,0 +1,151 @@
12113 +/**
12114 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12115 + *
12116 + * Redistribution and use in source and binary forms, with or without
12117 + * modification, are permitted provided that the following conditions
12118 + * are met:
12119 + * 1. Redistributions of source code must retain the above copyright
12120 + *    notice, this list of conditions, and the following disclaimer,
12121 + *    without modification.
12122 + * 2. Redistributions in binary form must reproduce the above copyright
12123 + *    notice, this list of conditions and the following disclaimer in the
12124 + *    documentation and/or other materials provided with the distribution.
12125 + * 3. The names of the above-listed copyright holders may not be used
12126 + *    to endorse or promote products derived from this software without
12127 + *    specific prior written permission.
12128 + *
12129 + * ALTERNATIVELY, this software may be distributed under the terms of the
12130 + * GNU General Public License ("GPL") version 2, as published by the Free
12131 + * Software Foundation.
12132 + *
12133 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12134 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12135 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12136 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12137 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12138 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12139 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12140 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12141 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12142 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12143 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12144 + */
12145 +
12146 +#include "vchiq_util.h"
12147 +
12148 +static inline int is_pow2(int i)
12149 +{
12150 +       return i && !(i & (i - 1));
12151 +}
12152 +
12153 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12154 +{
12155 +       WARN_ON(!is_pow2(size));
12156 +
12157 +       queue->size = size;
12158 +       queue->read = 0;
12159 +       queue->write = 0;
12160 +
12161 +       sema_init(&queue->pop, 0);
12162 +       sema_init(&queue->push, 0);
12163 +
12164 +       queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12165 +       if (queue->storage == NULL) {
12166 +               vchiu_queue_delete(queue);
12167 +               return 0;
12168 +       }
12169 +       return 1;
12170 +}
12171 +
12172 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12173 +{
12175 +       kfree(queue->storage);
12176 +}
12177 +
12178 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12179 +{
12180 +       return queue->read == queue->write;
12181 +}
12182 +
12183 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12184 +{
12185 +       return queue->write == queue->read + queue->size;
12186 +}
12187 +
12188 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12189 +{
12190 +       while (queue->write == queue->read + queue->size) {
12191 +               if (down_interruptible(&queue->pop) != 0) {
12192 +                       flush_signals(current);
12193 +               }
12194 +       }
12195 +
12196 +       /*
12197 +        * Write to queue->storage must be visible after read from
12198 +        * queue->read
12199 +        */
12200 +       smp_mb();
12201 +
12202 +       queue->storage[queue->write & (queue->size - 1)] = header;
12203 +
12204 +       /*
12205 +        * Write to queue->storage must be visible before write to
12206 +        * queue->write
12207 +        */
12208 +       smp_wmb();
12209 +
12210 +       queue->write++;
12211 +
12212 +       up(&queue->push);
12213 +}
12214 +
12215 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12216 +{
12217 +       while (queue->write == queue->read) {
12218 +               if (down_interruptible(&queue->push) != 0) {
12219 +                       flush_signals(current);
12220 +               }
12221 +       }
12222 +
12223 +       up(&queue->push); /* we haven't removed anything from the queue */
12224 +
12225 +       /*
12226 +        * Read from queue->storage must be visible after read from
12227 +        * queue->write
12228 +        */
12229 +       smp_rmb();
12230 +
12231 +       return queue->storage[queue->read & (queue->size - 1)];
12232 +}
12233 +
12234 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12235 +{
12236 +       VCHIQ_HEADER_T *header;
12237 +
12238 +       while (queue->write == queue->read) {
12239 +               if (down_interruptible(&queue->push) != 0) {
12240 +                       flush_signals(current);
12241 +               }
12242 +       }
12243 +
12244 +       /*
12245 +        * Read from queue->storage must be visible after read from
12246 +        * queue->write
12247 +        */
12248 +       smp_rmb();
12249 +
12250 +       header = queue->storage[queue->read & (queue->size - 1)];
12251 +
12252 +       /*
12253 +        * Read from queue->storage must be visible before write to
12254 +        * queue->read
12255 +        */
12256 +       smp_mb();
12257 +
12258 +       queue->read++;
12259 +
12260 +       up(&queue->pop);
12261 +
12262 +       return header;
12263 +}
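+
+/*
+ * Usage sketch for the queue above (illustrative only; "header" and the
+ * error path are hypothetical). The size must be a power of two because
+ * indices are masked with (size - 1); push blocks while the queue is full
+ * and peek/pop block while it is empty:
+ *
+ *     VCHIU_QUEUE_T q;
+ *
+ *     if (!vchiu_queue_init(&q, 64))
+ *             return -ENOMEM;          // init returns 0 on failure
+ *     vchiu_queue_push(&q, header);
+ *     header = vchiu_queue_pop(&q);
+ *     vchiu_queue_delete(&q);
+ */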
12264 --- /dev/null
12265 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12266 @@ -0,0 +1,82 @@
12267 +/**
12268 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12269 + *
12270 + * Redistribution and use in source and binary forms, with or without
12271 + * modification, are permitted provided that the following conditions
12272 + * are met:
12273 + * 1. Redistributions of source code must retain the above copyright
12274 + *    notice, this list of conditions, and the following disclaimer,
12275 + *    without modification.
12276 + * 2. Redistributions in binary form must reproduce the above copyright
12277 + *    notice, this list of conditions and the following disclaimer in the
12278 + *    documentation and/or other materials provided with the distribution.
12279 + * 3. The names of the above-listed copyright holders may not be used
12280 + *    to endorse or promote products derived from this software without
12281 + *    specific prior written permission.
12282 + *
12283 + * ALTERNATIVELY, this software may be distributed under the terms of the
12284 + * GNU General Public License ("GPL") version 2, as published by the Free
12285 + * Software Foundation.
12286 + *
12287 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12288 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12289 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12290 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12291 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12292 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12293 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12294 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12295 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12296 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12297 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12298 + */
12299 +
12300 +#ifndef VCHIQ_UTIL_H
12301 +#define VCHIQ_UTIL_H
12302 +
12303 +#include <linux/types.h>
12304 +#include <linux/semaphore.h>
12305 +#include <linux/mutex.h>
12306 +#include <linux/bitops.h>
12307 +#include <linux/kthread.h>
12308 +#include <linux/wait.h>
12309 +#include <linux/vmalloc.h>
12310 +#include <linux/jiffies.h>
12311 +#include <linux/delay.h>
12312 +#include <linux/string.h>
12314 +#include <linux/interrupt.h>
12315 +#include <linux/random.h>
12316 +#include <linux/sched.h>
12317 +#include <linux/ctype.h>
12318 +#include <linux/uaccess.h>
12319 +#include <linux/time.h>  /* for time_t */
12320 +#include <linux/slab.h>
12322 +
12323 +#include "vchiq_if.h"
12324 +
12325 +typedef struct {
12326 +       int size;
12327 +       int read;
12328 +       int write;
12329 +
12330 +       struct semaphore pop;
12331 +       struct semaphore push;
12332 +
12333 +       VCHIQ_HEADER_T **storage;
12334 +} VCHIU_QUEUE_T;
12335 +
12336 +extern int  vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12337 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12338 +
12339 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12340 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12341 +
12342 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12343 +
12344 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12345 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12346 +
12347 +#endif
12348 +
12349 --- /dev/null
12350 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12351 @@ -0,0 +1,59 @@
12352 +/**
12353 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12354 + *
12355 + * Redistribution and use in source and binary forms, with or without
12356 + * modification, are permitted provided that the following conditions
12357 + * are met:
12358 + * 1. Redistributions of source code must retain the above copyright
12359 + *    notice, this list of conditions, and the following disclaimer,
12360 + *    without modification.
12361 + * 2. Redistributions in binary form must reproduce the above copyright
12362 + *    notice, this list of conditions and the following disclaimer in the
12363 + *    documentation and/or other materials provided with the distribution.
12364 + * 3. The names of the above-listed copyright holders may not be used
12365 + *    to endorse or promote products derived from this software without
12366 + *    specific prior written permission.
12367 + *
12368 + * ALTERNATIVELY, this software may be distributed under the terms of the
12369 + * GNU General Public License ("GPL") version 2, as published by the Free
12370 + * Software Foundation.
12371 + *
12372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12373 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12374 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12375 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12376 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12377 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12378 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12379 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12380 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12381 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12382 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12383 + */
12384 +#include "vchiq_build_info.h"
12385 +#include <linux/broadcom/vc_debug_sym.h>
12386 +
12387 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
12388 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
12389 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time,    __TIME__ );
12390 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date,    __DATE__ );
12391 +
12392 +const char *vchiq_get_build_hostname( void )
12393 +{
12394 +   return vchiq_build_hostname;
12395 +}
12396 +
12397 +const char *vchiq_get_build_version( void )
12398 +{
12399 +   return vchiq_build_version;
12400 +}
12401 +
12402 +const char *vchiq_get_build_date( void )
12403 +{
12404 +   return vchiq_build_date;
12405 +}
12406 +
12407 +const char *vchiq_get_build_time( void )
12408 +{
12409 +   return vchiq_build_time;
12410 +}