brcm2708: update linux 4.4 patches to latest version
[openwrt.git] / target / linux / brcm2708 / patches-4.4 / 0091-drm-vc4-Add-suport-for-3D-rendering-using-the-V3D-en.patch
1 From 6d38553e5ce7fb70dd1f89665caf58064a7e97d7 Mon Sep 17 00:00:00 2001
2 From: Eric Anholt <eric@anholt.net>
3 Date: Mon, 2 Mar 2015 13:01:12 -0800
4 Subject: [PATCH 091/170] drm/vc4: Add support for 3D rendering using the V3D
5  engine.
6
7 This is a squash of the out-of-tree development series.  Since that
8 series contained code from the first "get a demo triangle rendered
9 using a hacked up driver using binary shader code" to "plug the last
10 known security hole", it's hard to reconstruct a different series of
11 incremental development that's mergeable without security holes
12 throughout it.
13
14 Signed-off-by: Eric Anholt <eric@anholt.net>
15 ---
16  drivers/gpu/drm/vc4/Makefile               |  11 +-
17  drivers/gpu/drm/vc4/vc4_bo.c               | 476 +++++++++++++-
18  drivers/gpu/drm/vc4/vc4_crtc.c             |  98 ++-
19  drivers/gpu/drm/vc4/vc4_debugfs.c          |   3 +
20  drivers/gpu/drm/vc4/vc4_drv.c              |  45 +-
21  drivers/gpu/drm/vc4/vc4_drv.h              | 317 ++++++++++
22  drivers/gpu/drm/vc4/vc4_gem.c              | 686 +++++++++++++++++++++
23  drivers/gpu/drm/vc4/vc4_irq.c              | 211 +++++++
24  drivers/gpu/drm/vc4/vc4_kms.c              | 148 ++++-
25  drivers/gpu/drm/vc4/vc4_packet.h           | 384 ++++++++++++
26  drivers/gpu/drm/vc4/vc4_plane.c            |  40 ++
27  drivers/gpu/drm/vc4/vc4_qpu_defines.h      | 268 ++++++++
28  drivers/gpu/drm/vc4/vc4_render_cl.c        | 448 ++++++++++++++
29  drivers/gpu/drm/vc4/vc4_trace.h            |  63 ++
30  drivers/gpu/drm/vc4/vc4_trace_points.c     |  14 +
31  drivers/gpu/drm/vc4/vc4_v3d.c              | 268 ++++++++
32  drivers/gpu/drm/vc4/vc4_validate.c         | 958 +++++++++++++++++++++++++++++
33  drivers/gpu/drm/vc4/vc4_validate_shaders.c | 521 ++++++++++++++++
34  include/uapi/drm/vc4_drm.h                 | 229 +++++++
35  19 files changed, 5173 insertions(+), 15 deletions(-)
36  create mode 100644 drivers/gpu/drm/vc4/vc4_gem.c
37  create mode 100644 drivers/gpu/drm/vc4/vc4_irq.c
38  create mode 100644 drivers/gpu/drm/vc4/vc4_packet.h
39  create mode 100644 drivers/gpu/drm/vc4/vc4_qpu_defines.h
40  create mode 100644 drivers/gpu/drm/vc4/vc4_render_cl.c
41  create mode 100644 drivers/gpu/drm/vc4/vc4_trace.h
42  create mode 100644 drivers/gpu/drm/vc4/vc4_trace_points.c
43  create mode 100644 drivers/gpu/drm/vc4/vc4_v3d.c
44  create mode 100644 drivers/gpu/drm/vc4/vc4_validate.c
45  create mode 100644 drivers/gpu/drm/vc4/vc4_validate_shaders.c
46  create mode 100644 include/uapi/drm/vc4_drm.h
47
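For orientation, a minimal userspace sketch of the buffer-management UAPI this patch introduces. The ioctl names and the size/handle/offset fields are those registered in vc4_drv.c and handled in vc4_bo.c below; the include path, the device node, and the helper name are assumptions, so treat this as illustrative rather than as part of the patch.

#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/vc4_drm.h>	/* UAPI header added by this patch */

/* Hypothetical helper: allocate a vc4 BO and map it into this process.
 * "fd" is an open DRM node, e.g. /dev/dri/card0 (assumed path).
 */
static void *vc4_alloc_and_map(int fd, size_t size)
{
	struct drm_vc4_create_bo create;
	struct drm_vc4_mmap_bo map;
	void *ptr;

	memset(&create, 0, sizeof(create));
	create.size = size;
	if (ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create) != 0)
		return NULL;	/* vc4_create_bo_ioctl() failed */

	/* Ask vc4_mmap_bo_ioctl() for the fake mmap offset of the handle. */
	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0)
		return NULL;

	/* vc4_mmap() refuses shader BOs and maps everything else write-combined. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}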
48 --- a/drivers/gpu/drm/vc4/Makefile
49 +++ b/drivers/gpu/drm/vc4/Makefile
50 @@ -8,10 +8,19 @@ vc4-y := \
51         vc4_crtc.o \
52         vc4_drv.o \
53         vc4_kms.o \
54 +       vc4_gem.o \
55         vc4_hdmi.o \
56         vc4_hvs.o \
57 -       vc4_plane.o
58 +       vc4_irq.o \
59 +       vc4_plane.o \
60 +       vc4_render_cl.o \
61 +       vc4_trace_points.o \
62 +       vc4_v3d.o \
63 +       vc4_validate.o \
64 +       vc4_validate_shaders.o
65  
66  vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
67  
68  obj-$(CONFIG_DRM_VC4)  += vc4.o
69 +
70 +CFLAGS_vc4_trace_points.o := -I$(src)
71 --- a/drivers/gpu/drm/vc4/vc4_bo.c
72 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
73 @@ -15,16 +15,174 @@
74   */
75  
76  #include "vc4_drv.h"
77 +#include "uapi/drm/vc4_drm.h"
78  
79 -struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size)
80 +static void vc4_bo_stats_dump(struct vc4_dev *vc4)
81  {
82 +       DRM_INFO("num bos allocated: %d\n",
83 +                vc4->bo_stats.num_allocated);
84 +       DRM_INFO("size bos allocated: %dkb\n",
85 +                vc4->bo_stats.size_allocated / 1024);
86 +       DRM_INFO("num bos used: %d\n",
87 +                vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
88 +       DRM_INFO("size bos used: %dkb\n",
89 +                (vc4->bo_stats.size_allocated -
90 +                 vc4->bo_stats.size_cached) / 1024);
91 +       DRM_INFO("num bos cached: %d\n",
92 +                vc4->bo_stats.num_cached);
93 +       DRM_INFO("size bos cached: %dkb\n",
94 +                vc4->bo_stats.size_cached / 1024);
95 +}
96 +
97 +static uint32_t bo_page_index(size_t size)
98 +{
99 +       return (size / PAGE_SIZE) - 1;
100 +}
101 +
102 +/* Must be called with bo_lock held. */
103 +static void vc4_bo_destroy(struct vc4_bo *bo)
104 +{
105 +       struct drm_gem_object *obj = &bo->base.base;
106 +       struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
107 +
108 +       if (bo->validated_shader) {
109 +               kfree(bo->validated_shader->texture_samples);
110 +               kfree(bo->validated_shader);
111 +               bo->validated_shader = NULL;
112 +       }
113 +
114 +       vc4->bo_stats.num_allocated--;
115 +       vc4->bo_stats.size_allocated -= obj->size;
116 +       drm_gem_cma_free_object(obj);
117 +}
118 +
119 +/* Must be called with bo_lock held. */
120 +static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
121 +{
122 +       struct drm_gem_object *obj = &bo->base.base;
123 +       struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
124 +
125 +       vc4->bo_stats.num_cached--;
126 +       vc4->bo_stats.size_cached -= obj->size;
127 +
128 +       list_del(&bo->unref_head);
129 +       list_del(&bo->size_head);
130 +}
131 +
132 +static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
133 +                                                    size_t size)
134 +{
135 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
136 +       uint32_t page_index = bo_page_index(size);
137 +
138 +       if (vc4->bo_cache.size_list_size <= page_index) {
139 +               uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
140 +                                       page_index + 1);
141 +               struct list_head *new_list;
142 +               uint32_t i;
143 +
144 +               new_list = kmalloc(new_size * sizeof(struct list_head),
145 +                                  GFP_KERNEL);
146 +               if (!new_list)
147 +                       return NULL;
148 +
149 +               /* Rebase the old cached BO lists to their new list
150 +                * head locations.
151 +                */
152 +               for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
153 +                       struct list_head *old_list = &vc4->bo_cache.size_list[i];
154 +                       if (list_empty(old_list))
155 +                               INIT_LIST_HEAD(&new_list[i]);
156 +                       else
157 +                               list_replace(old_list, &new_list[i]);
158 +               }
159 +               /* And initialize the brand new BO list heads. */
160 +               for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
161 +                       INIT_LIST_HEAD(&new_list[i]);
162 +
163 +               kfree(vc4->bo_cache.size_list);
164 +               vc4->bo_cache.size_list = new_list;
165 +               vc4->bo_cache.size_list_size = new_size;
166 +       }
167 +
168 +       return &vc4->bo_cache.size_list[page_index];
169 +}
170 +
171 +void vc4_bo_cache_purge(struct drm_device *dev)
172 +{
173 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
174 +
175 +       spin_lock(&vc4->bo_lock);
176 +       while (!list_empty(&vc4->bo_cache.time_list)) {
177 +               struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
178 +                                                   struct vc4_bo, unref_head);
179 +               vc4_bo_remove_from_cache(bo);
180 +               vc4_bo_destroy(bo);
181 +       }
182 +       spin_unlock(&vc4->bo_lock);
183 +}
184 +
185 +struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
186 +{
187 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
188 +       uint32_t size = roundup(unaligned_size, PAGE_SIZE);
189 +       uint32_t page_index = bo_page_index(size);
190         struct drm_gem_cma_object *cma_obj;
191 +       int pass;
192  
193 -       cma_obj = drm_gem_cma_create(dev, size);
194 -       if (IS_ERR(cma_obj))
195 +       if (size == 0)
196                 return NULL;
197 -       else
198 -               return to_vc4_bo(&cma_obj->base);
199 +
200 +       /* First, try to get a vc4_bo from the kernel BO cache. */
201 +       spin_lock(&vc4->bo_lock);
202 +       if (page_index < vc4->bo_cache.size_list_size &&
203 +           !list_empty(&vc4->bo_cache.size_list[page_index])) {
204 +               struct vc4_bo *bo =
205 +                       list_first_entry(&vc4->bo_cache.size_list[page_index],
206 +                                        struct vc4_bo, size_head);
207 +               vc4_bo_remove_from_cache(bo);
208 +               spin_unlock(&vc4->bo_lock);
209 +               kref_init(&bo->base.base.refcount);
210 +               return bo;
211 +       }
212 +       spin_unlock(&vc4->bo_lock);
213 +
214 +       /* Otherwise, make a new BO. */
215 +       for (pass = 0; ; pass++) {
216 +               cma_obj = drm_gem_cma_create(dev, size);
217 +               if (!IS_ERR(cma_obj))
218 +                       break;
219 +
220 +               switch (pass) {
221 +               case 0:
222 +                       /*
223 +                        * If we've run out of CMA memory, kill the cache of
224 +                        * CMA allocations we've got laying around and try again.
225 +                        */
226 +                       vc4_bo_cache_purge(dev);
227 +                       break;
228 +               case 1:
229 +                       /*
230 +                        * Getting desperate, so try to wait for any
231 +                        * previous rendering to finish, free its
232 +                        * unreferenced BOs to the cache, and then
233 +                        * free the cache.
234 +                        */
235 +                       vc4_wait_for_seqno(dev, vc4->emit_seqno, ~0ull, true);
236 +                       vc4_job_handle_completed(vc4);
237 +                       vc4_bo_cache_purge(dev);
238 +                       break;
239 +               case 3:
240 +                       DRM_ERROR("Failed to allocate from CMA:\n");
241 +                       vc4_bo_stats_dump(vc4);
242 +                       return NULL;
243 +               }
244 +       }
245 +
246 +       vc4->bo_stats.num_allocated++;
247 +       vc4->bo_stats.size_allocated += size;
248 +
249 +       return to_vc4_bo(&cma_obj->base);
250  }
251  
252  int vc4_dumb_create(struct drm_file *file_priv,
253 @@ -41,7 +199,129 @@ int vc4_dumb_create(struct drm_file *fil
254         if (args->size < args->pitch * args->height)
255                 args->size = args->pitch * args->height;
256  
257 -       bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE));
258 +       bo = vc4_bo_create(dev, args->size);
259 +       if (!bo)
260 +               return -ENOMEM;
261 +
262 +       ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
263 +       drm_gem_object_unreference_unlocked(&bo->base.base);
264 +
265 +       return ret;
266 +}
267 +
268 +static void
269 +vc4_bo_cache_free_old(struct drm_device *dev)
270 +{
271 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
272 +       unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
273 +
274 +       spin_lock(&vc4->bo_lock);
275 +       while (!list_empty(&vc4->bo_cache.time_list)) {
276 +               struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
277 +                                                   struct vc4_bo, unref_head);
278 +               if (time_before(expire_time, bo->free_time)) {
279 +                       mod_timer(&vc4->bo_cache.time_timer,
280 +                                 round_jiffies_up(jiffies +
281 +                                                  msecs_to_jiffies(1000)));
282 +                       spin_unlock(&vc4->bo_lock);
283 +                       return;
284 +               }
285 +
286 +               vc4_bo_remove_from_cache(bo);
287 +               vc4_bo_destroy(bo);
288 +       }
289 +       spin_unlock(&vc4->bo_lock);
290 +}
291 +
292 +/* Called on the last userspace/kernel unreference of the BO.  Returns
293 + * it to the BO cache if possible, otherwise frees it.
294 + *
295 + * Note that this is called with the struct_mutex held.
296 + */
297 +void vc4_free_object(struct drm_gem_object *gem_bo)
298 +{
299 +       struct drm_device *dev = gem_bo->dev;
300 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
301 +       struct vc4_bo *bo = to_vc4_bo(gem_bo);
302 +       struct list_head *cache_list;
303 +
304 +       /* If the object references someone else's memory, we can't cache it.
305 +        */
306 +       if (gem_bo->import_attach) {
307 +               vc4_bo_destroy(bo);
308 +               return;
309 +       }
310 +
311 +       /* Don't cache if it was publicly named. */
312 +       if (gem_bo->name) {
313 +               vc4_bo_destroy(bo);
314 +               return;
315 +       }
316 +
317 +       spin_lock(&vc4->bo_lock);
318 +       cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
319 +       if (!cache_list) {
320 +               vc4_bo_destroy(bo);
321 +               spin_unlock(&vc4->bo_lock);
322 +               return;
323 +       }
324 +
325 +       if (bo->validated_shader) {
326 +               kfree(bo->validated_shader->texture_samples);
327 +               kfree(bo->validated_shader);
328 +               bo->validated_shader = NULL;
329 +       }
330 +
331 +       bo->free_time = jiffies;
332 +       list_add(&bo->size_head, cache_list);
333 +       list_add(&bo->unref_head, &vc4->bo_cache.time_list);
334 +
335 +       vc4->bo_stats.num_cached++;
336 +       vc4->bo_stats.size_cached += gem_bo->size;
337 +       spin_unlock(&vc4->bo_lock);
338 +
339 +       vc4_bo_cache_free_old(dev);
340 +}
341 +
342 +static void vc4_bo_cache_time_work(struct work_struct *work)
343 +{
344 +       struct vc4_dev *vc4 =
345 +               container_of(work, struct vc4_dev, bo_cache.time_work);
346 +       struct drm_device *dev = vc4->dev;
347 +
348 +       vc4_bo_cache_free_old(dev);
349 +}
350 +
351 +static void vc4_bo_cache_time_timer(unsigned long data)
352 +{
353 +       struct drm_device *dev = (struct drm_device *)data;
354 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
355 +
356 +       schedule_work(&vc4->bo_cache.time_work);
357 +}
358 +
359 +struct dma_buf *
360 +vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
361 +{
362 +       struct vc4_bo *bo = to_vc4_bo(obj);
363 +
364 +       if (bo->validated_shader) {
365 +               DRM_ERROR("Attempting to export shader BO\n");
366 +               return ERR_PTR(-EINVAL);
367 +       }
368 +
369 +       return drm_gem_prime_export(dev, obj, flags);
370 +}
371 +
372 +int
373 +vc4_create_bo_ioctl(struct drm_device *dev, void *data,
374 +                   struct drm_file *file_priv)
375 +{
376 +       struct drm_vc4_create_bo *args = data;
377 +       struct vc4_bo *bo = NULL;
378 +       int ret;
379 +
380 +       bo = vc4_bo_create(dev, args->size);
381         if (!bo)
382                 return -ENOMEM;
383  
384 @@ -50,3 +330,187 @@ int vc4_dumb_create(struct drm_file *fil
385  
386         return ret;
387  }
388 +
389 +int
390 +vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
391 +                          struct drm_file *file_priv)
392 +{
393 +       struct drm_vc4_create_shader_bo *args = data;
394 +       struct vc4_bo *bo = NULL;
395 +       int ret;
396 +
397 +       if (args->size == 0)
398 +               return -EINVAL;
399 +
400 +       if (args->size % sizeof(u64) != 0)
401 +               return -EINVAL;
402 +
403 +       if (args->flags != 0) {
404 +               DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
405 +               return -EINVAL;
406 +       }
407 +
408 +       if (args->pad != 0) {
409 +               DRM_INFO("Pad set: 0x%08x\n", args->pad);
410 +               return -EINVAL;
411 +       }
412 +
413 +       bo = vc4_bo_create(dev, args->size);
414 +       if (!bo)
415 +               return -ENOMEM;
416 +
417 +       ret = copy_from_user(bo->base.vaddr,
418 +                            (void __user *)(uintptr_t)args->data,
419 +                            args->size);
420 +       if (ret != 0)
421 +               goto fail;
422 +
423 +       bo->validated_shader = vc4_validate_shader(&bo->base);
424 +       if (!bo->validated_shader) {
425 +               ret = -EINVAL;
426 +               goto fail;
427 +       }
428 +
429 +       /* We have to create the handle after validation, to avoid
430 +        * races for users to do things like mmap the shader BO.
431 +        */
432 +       ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
433 +
434 + fail:
435 +       drm_gem_object_unreference_unlocked(&bo->base.base);
436 +
437 +       return ret;
438 +}
439 +
440 +int
441 +vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
442 +                 struct drm_file *file_priv)
443 +{
444 +       struct drm_vc4_mmap_bo *args = data;
445 +       struct drm_gem_object *gem_obj;
446 +
447 +       gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
448 +       if (!gem_obj) {
449 +               DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
450 +               return -EINVAL;
451 +       }
452 +
453 +       /* The mmap offset was set up at BO allocation time. */
454 +       args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
455 +
456 +       drm_gem_object_unreference(gem_obj);
457 +       return 0;
458 +}
459 +
460 +int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
461 +{
462 +       struct drm_gem_object *gem_obj;
463 +       struct vc4_bo *bo;
464 +       int ret;
465 +
466 +       ret = drm_gem_mmap(filp, vma);
467 +       if (ret)
468 +               return ret;
469 +
470 +       gem_obj = vma->vm_private_data;
471 +       bo = to_vc4_bo(gem_obj);
472 +
473 +       if (bo->validated_shader) {
474 +               DRM_ERROR("mmaping of shader BOs not allowed.\n");
475 +               return -EINVAL;
476 +       }
477 +
478 +       /*
479 +        * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
480 +        * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
481 +        * the whole buffer.
482 +        */
483 +       vma->vm_flags &= ~VM_PFNMAP;
484 +       vma->vm_pgoff = 0;
485 +
486 +       ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
487 +                                   bo->base.vaddr, bo->base.paddr,
488 +                                   vma->vm_end - vma->vm_start);
489 +       if (ret)
490 +               drm_gem_vm_close(vma);
491 +
492 +       return ret;
493 +}
494 +
495 +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
496 +{
497 +       struct vc4_bo *bo = to_vc4_bo(obj);
498 +
499 +       if (bo->validated_shader) {
500 +               DRM_ERROR("mmaping of shader BOs not allowed.\n");
501 +               return -EINVAL;
502 +       }
503 +
504 +       return drm_gem_cma_prime_mmap(obj, vma);
505 +}
506 +
507 +void *vc4_prime_vmap(struct drm_gem_object *obj)
508 +{
509 +       struct vc4_bo *bo = to_vc4_bo(obj);
510 +
511 +       if (bo->validated_shader) {
512 +               DRM_ERROR("mmaping of shader BOs not allowed.\n");
513 +               return ERR_PTR(-EINVAL);
514 +       }
515 +
516 +       return drm_gem_cma_prime_vmap(obj);
517 +}
518 +
519 +void vc4_bo_cache_init(struct drm_device *dev)
520 +{
521 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
522 +
523 +       spin_lock_init(&vc4->bo_lock);
524 +
525 +       INIT_LIST_HEAD(&vc4->bo_cache.time_list);
526 +
527 +       INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
528 +       setup_timer(&vc4->bo_cache.time_timer,
529 +                   vc4_bo_cache_time_timer,
530 +                   (unsigned long) dev);
531 +}
532 +
533 +void vc4_bo_cache_destroy(struct drm_device *dev)
534 +{
535 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
536 +
537 +       del_timer(&vc4->bo_cache.time_timer);
538 +       cancel_work_sync(&vc4->bo_cache.time_work);
539 +
540 +       vc4_bo_cache_purge(dev);
541 +
542 +       if (vc4->bo_stats.num_allocated) {
543 +               DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
544 +               vc4_bo_stats_dump(vc4);
545 +       }
546 +}
547 +
548 +#ifdef CONFIG_DEBUG_FS
549 +int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
550 +{
551 +       struct drm_info_node *node = (struct drm_info_node *) m->private;
552 +       struct drm_device *dev = node->minor->dev;
553 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
554 +       struct vc4_bo_stats stats;
555 +
556 +       spin_lock(&vc4->bo_lock);
557 +       stats = vc4->bo_stats;
558 +       spin_unlock(&vc4->bo_lock);
559 +
560 +       seq_printf(m, "num bos allocated: %d\n", stats.num_allocated);
561 +       seq_printf(m, "size bos allocated: %dkb\n", stats.size_allocated / 1024);
562 +       seq_printf(m, "num bos used: %d\n", (stats.num_allocated -
563 +                                            stats.num_cached));
564 +       seq_printf(m, "size bos used: %dkb\n", (stats.size_allocated -
565 +                                               stats.size_cached) / 1024);
566 +       seq_printf(m, "num bos cached: %d\n", stats.num_cached);
567 +       seq_printf(m, "size bos cached: %dkb\n", stats.size_cached / 1024);
568 +
569 +       return 0;
570 +}
571 +#endif
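The shader BO path above enforces a few invariants before any handle exists: the size must be a non-zero multiple of a 64-bit instruction, flags and pad must be zero, and the contents are copied in and validated first so the object can never be remapped or rewritten afterwards. A hedged sketch of the corresponding userspace call (the helper name is invented; the field names match the handler above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>

/* Upload "count" 64-bit QPU instructions and get back a GEM handle,
 * or -1 if the kernel validator rejected the code.
 */
static int vc4_upload_shader(int fd, const uint64_t *insts, uint32_t count,
			     uint32_t *handle)
{
	struct drm_vc4_create_shader_bo args;

	memset(&args, 0, sizeof(args));	/* flags and pad must stay zero */
	args.size = count * sizeof(uint64_t);
	args.data = (uintptr_t)insts;

	if (ioctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &args) != 0)
		return -1;

	*handle = args.handle;
	return 0;
}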
572 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
573 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
574 @@ -35,6 +35,7 @@
575  #include "drm_atomic_helper.h"
576  #include "drm_crtc_helper.h"
577  #include "linux/clk.h"
578 +#include "drm_fb_cma_helper.h"
579  #include "linux/component.h"
580  #include "linux/of_device.h"
581  #include "vc4_drv.h"
582 @@ -476,10 +477,105 @@ static irqreturn_t vc4_crtc_irq_handler(
583         return ret;
584  }
585  
586 +struct vc4_async_flip_state {
587 +       struct drm_crtc *crtc;
588 +       struct drm_framebuffer *fb;
589 +       struct drm_pending_vblank_event *event;
590 +
591 +       struct vc4_seqno_cb cb;
592 +};
593 +
594 +/* Called when the V3D execution for the BO being flipped to is done, so that
595 + * we can actually update the plane's address to point to it.
596 + */
597 +static void
598 +vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
599 +{
600 +       struct vc4_async_flip_state *flip_state =
601 +               container_of(cb, struct vc4_async_flip_state, cb);
602 +       struct drm_crtc *crtc = flip_state->crtc;
603 +       struct drm_device *dev = crtc->dev;
604 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
605 +       struct drm_plane *plane = crtc->primary;
606 +
607 +       vc4_plane_async_set_fb(plane, flip_state->fb);
608 +       if (flip_state->event) {
609 +               unsigned long flags;
610 +               spin_lock_irqsave(&dev->event_lock, flags);
611 +               drm_crtc_send_vblank_event(crtc, flip_state->event);
612 +               spin_unlock_irqrestore(&dev->event_lock, flags);
613 +       }
614 +
615 +       drm_framebuffer_unreference(flip_state->fb);
616 +       kfree(flip_state);
617 +
618 +       up(&vc4->async_modeset);
619 +}
620 +
621 +/* Implements async (non-vblank-synced) page flips.
622 + *
623 + * The page flip ioctl needs to return immediately, so we grab the
624 + * modeset semaphore on the pipe, and queue the address update for
625 + * when V3D is done with the BO being flipped to.
626 + */
627 +static int vc4_async_page_flip(struct drm_crtc *crtc,
628 +                              struct drm_framebuffer *fb,
629 +                              struct drm_pending_vblank_event *event,
630 +                              uint32_t flags)
631 +{
632 +       struct drm_device *dev = crtc->dev;
633 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
634 +       struct drm_plane *plane = crtc->primary;
635 +       int ret = 0;
636 +       struct vc4_async_flip_state *flip_state;
637 +       struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
638 +       struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
639 +
640 +       flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
641 +       if (!flip_state)
642 +               return -ENOMEM;
643 +
644 +       drm_framebuffer_reference(fb);
645 +       flip_state->fb = fb;
646 +       flip_state->crtc = crtc;
647 +       flip_state->event = event;
648 +
649 +       /* Make sure all other async modesets have landed. */
650 +       ret = down_interruptible(&vc4->async_modeset);
651 +       if (ret) {
652 +               kfree(flip_state);
653 +               return ret;
654 +       }
655 +
656 +       /* Immediately update the plane's legacy fb pointer, so that later
657 +        * modeset prep sees the state that will be present when the semaphore
658 +        * is released.
659 +        */
660 +       drm_atomic_set_fb_for_plane(plane->state, fb);
661 +       plane->fb = fb;
662 +
663 +       vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
664 +                          vc4_async_page_flip_complete);
665 +
666 +       /* Driver takes ownership of state on successful async commit. */
667 +       return 0;
668 +}
669 +
670 +static int vc4_page_flip(struct drm_crtc *crtc,
671 +                 struct drm_framebuffer *fb,
672 +                 struct drm_pending_vblank_event *event,
673 +                 uint32_t flags)
674 +{
675 +       if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
676 +               return vc4_async_page_flip(crtc, fb, event, flags);
677 +       else
678 +               return drm_atomic_helper_page_flip(crtc, fb, event, flags);
679 +}
680 +
681  static const struct drm_crtc_funcs vc4_crtc_funcs = {
682         .set_config = drm_atomic_helper_set_config,
683         .destroy = vc4_crtc_destroy,
684 -       .page_flip = drm_atomic_helper_page_flip,
685 +       .page_flip = vc4_page_flip,
686         .set_property = NULL,
687         .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
688         .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
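The async path added to vc4_crtc.c is reached through the standard KMS page-flip ioctl when userspace passes DRM_MODE_PAGE_FLIP_ASYNC (the flag vc4_page_flip() checks above). As a sketch, using libdrm's drmModePageFlip() wrapper, which is assumed here rather than defined by this patch:

#include <stdint.h>
#include <xf86drmMode.h>

/* Queue a flip to fb_id without waiting for vblank; vc4_page_flip()
 * routes this to vc4_async_page_flip(), which defers the scanout
 * address update until V3D has finished rendering to the new BO.
 */
static int flip_async(int fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
{
	return drmModePageFlip(fd, crtc_id, fb_id,
			       DRM_MODE_PAGE_FLIP_ASYNC, user_data);
}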
689 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c
690 +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
691 @@ -16,11 +16,14 @@
692  #include "vc4_regs.h"
693  
694  static const struct drm_info_list vc4_debugfs_list[] = {
695 +       {"bo_stats", vc4_bo_stats_debugfs, 0},
696         {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
697         {"hvs_regs", vc4_hvs_debugfs_regs, 0},
698         {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
699         {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
700         {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
701 +       {"v3d_ident", vc4_v3d_debugfs_ident, 0},
702 +       {"v3d_regs", vc4_v3d_debugfs_regs, 0},
703  };
704  
705  #define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
706 --- a/drivers/gpu/drm/vc4/vc4_drv.c
707 +++ b/drivers/gpu/drm/vc4/vc4_drv.c
708 @@ -14,8 +14,10 @@
709  #include <linux/module.h>
710  #include <linux/of_platform.h>
711  #include <linux/platform_device.h>
712 +#include <soc/bcm2835/raspberrypi-firmware.h>
713  #include "drm_fb_cma_helper.h"
714  
715 +#include "uapi/drm/vc4_drm.h"
716  #include "vc4_drv.h"
717  #include "vc4_regs.h"
718  
719 @@ -63,7 +65,7 @@ static const struct file_operations vc4_
720         .open = drm_open,
721         .release = drm_release,
722         .unlocked_ioctl = drm_ioctl,
723 -       .mmap = drm_gem_cma_mmap,
724 +       .mmap = vc4_mmap,
725         .poll = drm_poll,
726         .read = drm_read,
727  #ifdef CONFIG_COMPAT
728 @@ -73,16 +75,28 @@ static const struct file_operations vc4_
729  };
730  
731  static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
732 +       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
733 +       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
734 +       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
735 +       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
736 +       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
737 +       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
738  };
739  
740  static struct drm_driver vc4_drm_driver = {
741         .driver_features = (DRIVER_MODESET |
742                             DRIVER_ATOMIC |
743                             DRIVER_GEM |
744 +                           DRIVER_HAVE_IRQ |
745                             DRIVER_PRIME),
746         .lastclose = vc4_lastclose,
747         .preclose = vc4_drm_preclose,
748  
749 +       .irq_handler = vc4_irq,
750 +       .irq_preinstall = vc4_irq_preinstall,
751 +       .irq_postinstall = vc4_irq_postinstall,
752 +       .irq_uninstall = vc4_irq_uninstall,
753 +
754         .enable_vblank = vc4_enable_vblank,
755         .disable_vblank = vc4_disable_vblank,
756         .get_vblank_counter = drm_vblank_count,
757 @@ -92,18 +106,18 @@ static struct drm_driver vc4_drm_driver
758         .debugfs_cleanup = vc4_debugfs_cleanup,
759  #endif
760  
761 -       .gem_free_object = drm_gem_cma_free_object,
762 +       .gem_free_object = vc4_free_object,
763         .gem_vm_ops = &drm_gem_cma_vm_ops,
764  
765         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
766         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
767         .gem_prime_import = drm_gem_prime_import,
768 -       .gem_prime_export = drm_gem_prime_export,
769 +       .gem_prime_export = vc4_prime_export,
770         .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
771         .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
772 -       .gem_prime_vmap = drm_gem_cma_prime_vmap,
773 +       .gem_prime_vmap = vc4_prime_vmap,
774         .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
775 -       .gem_prime_mmap = drm_gem_cma_prime_mmap,
776 +       .gem_prime_mmap = vc4_prime_mmap,
777  
778         .dumb_create = vc4_dumb_create,
779         .dumb_map_offset = drm_gem_cma_dumb_map_offset,
780 @@ -113,6 +127,8 @@ static struct drm_driver vc4_drm_driver
781         .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
782         .fops = &vc4_drm_fops,
783  
784 +       .gem_obj_size = sizeof(struct vc4_bo),
785 +
786         .name = DRIVER_NAME,
787         .desc = DRIVER_DESC,
788         .date = DRIVER_DATE,
789 @@ -153,6 +169,7 @@ static int vc4_drm_bind(struct device *d
790         struct drm_device *drm;
791         struct drm_connector *connector;
792         struct vc4_dev *vc4;
793 +       struct device_node *firmware_node;
794         int ret = 0;
795  
796         dev->coherent_dma_mask = DMA_BIT_MASK(32);
797 @@ -161,6 +178,14 @@ static int vc4_drm_bind(struct device *d
798         if (!vc4)
799                 return -ENOMEM;
800  
801 +       firmware_node = of_parse_phandle(dev->of_node, "firmware", 0);
802 +       vc4->firmware = rpi_firmware_get(firmware_node);
803 +       if (!vc4->firmware) {
804 +               DRM_DEBUG("Failed to get Raspberry Pi firmware reference.\n");
805 +               return -EPROBE_DEFER;
806 +       }
807 +       of_node_put(firmware_node);
808 +
809         drm = drm_dev_alloc(&vc4_drm_driver, dev);
810         if (!drm)
811                 return -ENOMEM;
812 @@ -170,13 +195,17 @@ static int vc4_drm_bind(struct device *d
813  
814         drm_dev_set_unique(drm, dev_name(dev));
815  
816 +       vc4_bo_cache_init(drm);
817 +
818         drm_mode_config_init(drm);
819         if (ret)
820                 goto unref;
821  
822 +       vc4_gem_init(drm);
823 +
824         ret = component_bind_all(dev, drm);
825         if (ret)
826 -               goto unref;
827 +               goto gem_destroy;
828  
829         ret = drm_dev_register(drm, 0);
830         if (ret < 0)
831 @@ -200,8 +229,11 @@ unregister:
832         drm_dev_unregister(drm);
833  unbind_all:
834         component_unbind_all(dev, drm);
835 +gem_destroy:
836 +       vc4_gem_destroy(drm);
837  unref:
838         drm_dev_unref(drm);
839 +       vc4_bo_cache_destroy(drm);
840         return ret;
841  }
842  
843 @@ -228,6 +260,7 @@ static struct platform_driver *const com
844         &vc4_hdmi_driver,
845         &vc4_crtc_driver,
846         &vc4_hvs_driver,
847 +       &vc4_v3d_driver,
848  };
849  
850  static int vc4_platform_drm_probe(struct platform_device *pdev)
851 --- a/drivers/gpu/drm/vc4/vc4_drv.h
852 +++ b/drivers/gpu/drm/vc4/vc4_drv.h
853 @@ -15,8 +15,85 @@ struct vc4_dev {
854         struct vc4_hdmi *hdmi;
855         struct vc4_hvs *hvs;
856         struct vc4_crtc *crtc[3];
857 +       struct vc4_v3d *v3d;
858  
859         struct drm_fbdev_cma *fbdev;
860 +       struct rpi_firmware *firmware;
861 +
862 +       /* The kernel-space BO cache.  Tracks buffers that have been
863 +        * unreferenced by all other users (refcounts of 0!) but not
864 +        * yet freed, so we can do cheap allocations.
865 +        */
866 +       struct vc4_bo_cache {
867 +               /* Array of list heads for entries in the BO cache,
868 +                * based on number of pages, so we can do O(1) lookups
869 +                * in the cache when allocating.
870 +                */
871 +               struct list_head *size_list;
872 +               uint32_t size_list_size;
873 +
874 +               /* List of all BOs in the cache, ordered by age, so we
875 +                * can do O(1) lookups when trying to free old
876 +                * buffers.
877 +                */
878 +               struct list_head time_list;
879 +               struct work_struct time_work;
880 +               struct timer_list time_timer;
881 +       } bo_cache;
882 +
883 +       struct vc4_bo_stats {
884 +               u32 num_allocated;
885 +               u32 size_allocated;
886 +               u32 num_cached;
887 +               u32 size_cached;
888 +       } bo_stats;
889 +
890 +       /* Protects bo_cache and the BO stats. */
891 +       spinlock_t bo_lock;
892 +
893 +       /* Sequence number for the last job queued in job_list.
894 +        * Starts at 0 (no jobs emitted).
895 +        */
896 +       uint64_t emit_seqno;
897 +
898 +       /* Sequence number for the last completed job on the GPU.
899 +        * Starts at 0 (no jobs completed).
900 +        */
901 +       uint64_t finished_seqno;
902 +
903 +       /* List of all struct vc4_exec_info for jobs to be executed.
904 +        * The first job in the list is the one currently programmed
905 +        * into ct0ca/ct1ca for execution.
906 +        */
907 +       struct list_head job_list;
908 +       /* List of the finished vc4_exec_infos waiting to be freed by
909 +        * job_done_work.
910 +        */
911 +       struct list_head job_done_list;
912 +       spinlock_t job_lock;
913 +       wait_queue_head_t job_wait_queue;
914 +       struct work_struct job_done_work;
915 +
916 +       /* List of struct vc4_seqno_cb for callbacks to be made from a
917 +        * workqueue when the given seqno is passed.
918 +        */
919 +       struct list_head seqno_cb_list;
920 +
921 +       /* The binner overflow memory that's currently set up in
922 +        * BPOA/BPOS registers.  When overflow occurs and a new one is
923 +        * allocated, the previous one will be moved to
924 +        * vc4->current_exec's free list.
925 +        */
926 +       struct vc4_bo *overflow_mem;
927 +       struct work_struct overflow_mem_work;
928 +
929 +       struct {
930 +               uint32_t last_ct0ca, last_ct1ca;
931 +               struct timer_list timer;
932 +               struct work_struct reset_work;
933 +       } hangcheck;
934 +
935 +       struct semaphore async_modeset;
936  };
937  
938  static inline struct vc4_dev *
939 @@ -27,6 +104,25 @@ to_vc4_dev(struct drm_device *dev)
940  
941  struct vc4_bo {
942         struct drm_gem_cma_object base;
943 +
944 +       /* seqno of the last job to render to this BO. */
945 +       uint64_t seqno;
946 +
947 +       /* List entry for the BO's position in either
948 +        * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
949 +        */
950 +       struct list_head unref_head;
951 +
952 +       /* Time in jiffies when the BO was put in vc4->bo_cache. */
953 +       unsigned long free_time;
954 +
955 +       /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
956 +       struct list_head size_head;
957 +
958 +       /* Struct for shader validation state, if created by
959 +        * DRM_IOCTL_VC4_CREATE_SHADER_BO.
960 +        */
961 +       struct vc4_validated_shader_info *validated_shader;
962  };
963  
964  static inline struct vc4_bo *
965 @@ -35,6 +131,17 @@ to_vc4_bo(struct drm_gem_object *bo)
966         return (struct vc4_bo *)bo;
967  }
968  
969 +struct vc4_seqno_cb {
970 +       struct work_struct work;
971 +       uint64_t seqno;
972 +       void (*func)(struct vc4_seqno_cb *cb);
973 +};
974 +
975 +struct vc4_v3d {
976 +       struct platform_device *pdev;
977 +       void __iomem *regs;
978 +};
979 +
980  struct vc4_hvs {
981         struct platform_device *pdev;
982         void __iomem *regs;
983 @@ -72,9 +179,151 @@ to_vc4_encoder(struct drm_encoder *encod
984         return container_of(encoder, struct vc4_encoder, base);
985  }
986  
987 +#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
988 +#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
989  #define HVS_READ(offset) readl(vc4->hvs->regs + offset)
990  #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
991  
992 +enum vc4_bo_mode {
993 +       VC4_MODE_UNDECIDED,
994 +       VC4_MODE_RENDER,
995 +       VC4_MODE_SHADER,
996 +};
997 +
998 +struct vc4_bo_exec_state {
999 +       struct drm_gem_cma_object *bo;
1000 +       enum vc4_bo_mode mode;
1001 +};
1002 +
1003 +struct vc4_exec_info {
1004 +       /* Sequence number for this bin/render job. */
1005 +       uint64_t seqno;
1006 +
1007 +       /* Kernel-space copy of the ioctl arguments */
1008 +       struct drm_vc4_submit_cl *args;
1009 +
1010 +       /* This is the array of BOs that were looked up at the start of exec.
1011 +        * Command validation will use indices into this array.
1012 +        */
1013 +       struct vc4_bo_exec_state *bo;
1014 +       uint32_t bo_count;
1015 +
1016 +       /* Pointers for our position in vc4->job_list */
1017 +       struct list_head head;
1018 +
1019 +       /* List of other BOs used in the job that need to be released
1020 +        * once the job is complete.
1021 +        */
1022 +       struct list_head unref_list;
1023 +
1024 +       /* Current unvalidated indices into @bo loaded by the non-hardware
1025 +        * VC4_PACKET_GEM_HANDLES.
1026 +        */
1027 +       uint32_t bo_index[2];
1028 +
1029 +       /* This is the BO where we store the validated command lists, shader
1030 +        * records, and uniforms.
1031 +        */
1032 +       struct drm_gem_cma_object *exec_bo;
1033 +
1034 +       /**
1035 +        * This tracks the per-shader-record state (packet 64) that
1036 +        * determines the length of the shader record and the offset
1037 +        * it's expected to be found at.  It gets read in from the
1038 +        * command lists.
1039 +        */
1040 +       struct vc4_shader_state {
1041 +               uint8_t packet;
1042 +               uint32_t addr;
1043 +               /* Maximum vertex index referenced by any primitive using this
1044 +                * shader state.
1045 +                */
1046 +               uint32_t max_index;
1047 +       } *shader_state;
1048 +
1049 +       /** How many shader states the user declared they were using. */
1050 +       uint32_t shader_state_size;
1051 +       /** How many shader state records the validator has seen. */
1052 +       uint32_t shader_state_count;
1053 +
1054 +       bool found_tile_binning_mode_config_packet;
1055 +       bool found_start_tile_binning_packet;
1056 +       bool found_increment_semaphore_packet;
1057 +       uint8_t bin_tiles_x, bin_tiles_y;
1058 +       struct drm_gem_cma_object *tile_bo;
1059 +       uint32_t tile_alloc_offset;
1060 +
1061 +       /**
1062 +        * Computed addresses pointing into exec_bo where we start the
1063 +        * bin thread (ct0) and render thread (ct1).
1064 +        */
1065 +       uint32_t ct0ca, ct0ea;
1066 +       uint32_t ct1ca, ct1ea;
1067 +
1068 +       /* Pointers to the shader recs.  These paddr gets incremented as CL
1069 +        * packets are relocated in validate_gl_shader_state, and the vaddrs
1070 +        * (u and v) get incremented and size decremented as the shader recs
1071 +        * themselves are validated.
1072 +        */
1073 +       void *shader_rec_u;
1074 +       void *shader_rec_v;
1075 +       uint32_t shader_rec_p;
1076 +       uint32_t shader_rec_size;
1077 +
1078 +       /* Pointers to the uniform data.  These pointers are incremented, and
1079 +        * size decremented, as each batch of uniforms is uploaded.
1080 +        */
1081 +       void *uniforms_u;
1082 +       void *uniforms_v;
1083 +       uint32_t uniforms_p;
1084 +       uint32_t uniforms_size;
1085 +};
1086 +
1087 +static inline struct vc4_exec_info *
1088 +vc4_first_job(struct vc4_dev *vc4)
1089 +{
1090 +       if (list_empty(&vc4->job_list))
1091 +               return NULL;
1092 +       return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
1093 +}
1094 +
1095 +/**
1096 + * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
1097 + * setup parameters.
1098 + *
1099 + * This will be used at draw time to relocate the reference to the texture
1100 + * contents in p0, and validate that the offset combined with
1101 + * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
1102 + * Note that the hardware treats unprovided config parameters as 0, so not all
1103 + * of them need to be set up for every texture sample, and we'll store ~0 as
1104 + * the offset to mark the unused ones.
1105 + *
1106 + * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
1107 + * Setup") for definitions of the texture parameters.
1108 + */
1109 +struct vc4_texture_sample_info {
1110 +       bool is_direct;
1111 +       uint32_t p_offset[4];
1112 +};
1113 +
1114 +/**
1115 + * struct vc4_validated_shader_info - information about validated shaders that
1116 + * needs to be used from command list validation.
1117 + *
1118 + * For a given shader, each time a shader state record references it, we need
1119 + * to verify that the shader doesn't read more uniforms than the shader state
1120 + * record's uniform BO pointer can provide, and we need to apply relocations
1121 + * and validate the shader state record's uniforms that define the texture
1122 + * samples.
1123 + */
1124 +struct vc4_validated_shader_info
1125 +{
1126 +       uint32_t uniforms_size;
1127 +       uint32_t uniforms_src_size;
1128 +       uint32_t num_texture_samples;
1129 +       struct vc4_texture_sample_info *texture_samples;
1130 +};
1131 +
1132  /**
1133   * _wait_for - magic (register) wait macro
1134   *
1135 @@ -111,6 +360,18 @@ int vc4_dumb_create(struct drm_file *fil
1136                     struct drm_mode_create_dumb *args);
1137  struct dma_buf *vc4_prime_export(struct drm_device *dev,
1138                                  struct drm_gem_object *obj, int flags);
1139 +int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
1140 +                       struct drm_file *file_priv);
1141 +int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
1142 +                              struct drm_file *file_priv);
1143 +int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
1144 +                     struct drm_file *file_priv);
1145 +int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
1146 +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
1147 +void *vc4_prime_vmap(struct drm_gem_object *obj);
1148 +void vc4_bo_cache_init(struct drm_device *dev);
1149 +void vc4_bo_cache_destroy(struct drm_device *dev);
1150 +int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
1151  
1152  /* vc4_crtc.c */
1153  extern struct platform_driver vc4_crtc_driver;
1154 @@ -126,10 +387,34 @@ void vc4_debugfs_cleanup(struct drm_mino
1155  /* vc4_drv.c */
1156  void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
1157  
1158 +/* vc4_gem.c */
1159 +void vc4_gem_init(struct drm_device *dev);
1160 +void vc4_gem_destroy(struct drm_device *dev);
1161 +int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1162 +                       struct drm_file *file_priv);
1163 +int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1164 +                        struct drm_file *file_priv);
1165 +int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1166 +                     struct drm_file *file_priv);
1167 +void vc4_submit_next_job(struct drm_device *dev);
1168 +int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
1169 +                      uint64_t timeout_ns, bool interruptible);
1170 +void vc4_job_handle_completed(struct vc4_dev *vc4);
1171 +int vc4_queue_seqno_cb(struct drm_device *dev,
1172 +                      struct vc4_seqno_cb *cb, uint64_t seqno,
1173 +                      void (*func)(struct vc4_seqno_cb *cb));
1174 +
1175  /* vc4_hdmi.c */
1176  extern struct platform_driver vc4_hdmi_driver;
1177  int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
1178  
1179 +/* vc4_irq.c */
1180 +irqreturn_t vc4_irq(int irq, void *arg);
1181 +void vc4_irq_preinstall(struct drm_device *dev);
1182 +int vc4_irq_postinstall(struct drm_device *dev);
1183 +void vc4_irq_uninstall(struct drm_device *dev);
1184 +void vc4_irq_reset(struct drm_device *dev);
1185 +
1186  /* vc4_hvs.c */
1187  extern struct platform_driver vc4_hvs_driver;
1188  void vc4_hvs_dump_state(struct drm_device *dev);
1189 @@ -143,3 +428,35 @@ struct drm_plane *vc4_plane_init(struct
1190                                  enum drm_plane_type type);
1191  u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
1192  u32 vc4_plane_dlist_size(struct drm_plane_state *state);
1193 +void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb);
1194 +
1195 +/* vc4_v3d.c */
1196 +extern struct platform_driver vc4_v3d_driver;
1197 +int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
1198 +int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
1199 +int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
1200 +
1201 +/* vc4_validate.c */
1202 +int
1203 +vc4_validate_bin_cl(struct drm_device *dev,
1204 +                   void *validated,
1205 +                   void *unvalidated,
1206 +                   struct vc4_exec_info *exec);
1207 +
1208 +int
1209 +vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
1210 +
1211 +struct vc4_validated_shader_info *
1212 +vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
1213 +
1214 +bool vc4_use_bo(struct vc4_exec_info *exec,
1215 +               uint32_t hindex,
1216 +               enum vc4_bo_mode mode,
1217 +               struct drm_gem_cma_object **obj);
1218 +
1219 +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
1220 +
1221 +bool vc4_check_tex_size(struct vc4_exec_info *exec,
1222 +                       struct drm_gem_cma_object *fbo,
1223 +                       uint32_t offset, uint8_t tiling_format,
1224 +                       uint32_t width, uint32_t height, uint8_t cpp);
1225 --- /dev/null
1226 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
1227 @@ -0,0 +1,686 @@
1228 +/*
1229 + * Copyright © 2014 Broadcom
1230 + *
1231 + * Permission is hereby granted, free of charge, to any person obtaining a
1232 + * copy of this software and associated documentation files (the "Software"),
1233 + * to deal in the Software without restriction, including without limitation
1234 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1235 + * and/or sell copies of the Software, and to permit persons to whom the
1236 + * Software is furnished to do so, subject to the following conditions:
1237 + *
1238 + * The above copyright notice and this permission notice (including the next
1239 + * paragraph) shall be included in all copies or substantial portions of the
1240 + * Software.
1241 + *
1242 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1243 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1244 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1245 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1246 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1247 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1248 + * IN THE SOFTWARE.
1249 + */
1250 +
1251 +#include <linux/module.h>
1252 +#include <linux/platform_device.h>
1253 +#include <linux/device.h>
1254 +#include <linux/io.h>
1255 +
1256 +#include "uapi/drm/vc4_drm.h"
1257 +#include "vc4_drv.h"
1258 +#include "vc4_regs.h"
1259 +#include "vc4_trace.h"
1260 +
1261 +static void
1262 +vc4_queue_hangcheck(struct drm_device *dev)
1263 +{
1264 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1265 +
1266 +       mod_timer(&vc4->hangcheck.timer,
1267 +                 round_jiffies_up(jiffies + msecs_to_jiffies(100)));
1268 +}
1269 +
1270 +static void
1271 +vc4_reset(struct drm_device *dev)
1272 +{
1273 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1274 +
1275 +       DRM_INFO("Resetting GPU.\n");
1276 +       vc4_v3d_set_power(vc4, false);
1277 +       vc4_v3d_set_power(vc4, true);
1278 +
1279 +       vc4_irq_reset(dev);
1280 +
1281 +       /* Rearm the hangcheck -- another job might have been waiting
1282 +        * for our hung one to get kicked off, and vc4_irq_reset()
1283 +        * would have started it.
1284 +        */
1285 +       vc4_queue_hangcheck(dev);
1286 +}
1287 +
1288 +static void
1289 +vc4_reset_work(struct work_struct *work)
1290 +{
1291 +       struct vc4_dev *vc4 =
1292 +               container_of(work, struct vc4_dev, hangcheck.reset_work);
1293 +
1294 +       vc4_reset(vc4->dev);
1295 +}
1296 +
1297 +static void
1298 +vc4_hangcheck_elapsed(unsigned long data)
1299 +{
1300 +       struct drm_device *dev = (struct drm_device *)data;
1301 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1302 +       uint32_t ct0ca, ct1ca;
1303 +
1304 +       /* If idle, we can stop watching for hangs. */
1305 +       if (list_empty(&vc4->job_list))
1306 +               return;
1307 +
1308 +       ct0ca = V3D_READ(V3D_CTNCA(0));
1309 +       ct1ca = V3D_READ(V3D_CTNCA(1));
1310 +
1311 +       /* If we've made any progress in execution, rearm the timer
1312 +        * and wait.
1313 +        */
1314 +       if (ct0ca != vc4->hangcheck.last_ct0ca ||
1315 +           ct1ca != vc4->hangcheck.last_ct1ca) {
1316 +               vc4->hangcheck.last_ct0ca = ct0ca;
1317 +               vc4->hangcheck.last_ct1ca = ct1ca;
1318 +               vc4_queue_hangcheck(dev);
1319 +               return;
1320 +       }
1321 +
1322 +       /* We've gone too long with no progress, reset.  This has to
1323 +        * be done from a work struct, since resetting can sleep and
1324 +        * this timer hook isn't allowed to.
1325 +        */
1326 +       schedule_work(&vc4->hangcheck.reset_work);
1327 +}
1328 +
1329 +static void
1330 +submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
1331 +{
1332 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1333 +
1334 +       /* Stop any existing thread and set state to "stopped at halt" */
1335 +       V3D_WRITE(V3D_CTNCS(thread), V3D_CTRUN);
1336 +       barrier();
1337 +
1338 +       V3D_WRITE(V3D_CTNCA(thread), start);
1339 +       barrier();
1340 +
1341 +       /* Set the end address of the control list.  Writing this
1342 +        * register is what starts the job.
1343 +        */
1344 +       V3D_WRITE(V3D_CTNEA(thread), end);
1345 +       barrier();
1346 +}
1347 +
1348 +int
1349 +vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
1350 +                  bool interruptible)
1351 +{
1352 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1353 +       int ret = 0;
1354 +       unsigned long timeout_expire;
1355 +       DEFINE_WAIT(wait);
1356 +
1357 +       if (vc4->finished_seqno >= seqno)
1358 +               return 0;
1359 +
1360 +       if (timeout_ns == 0)
1361 +               return -ETIME;
1362 +
1363 +       timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
1364 +
1365 +       trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
1366 +       for (;;) {
1367 +               prepare_to_wait(&vc4->job_wait_queue, &wait,
1368 +                               interruptible ? TASK_INTERRUPTIBLE :
1369 +                               TASK_UNINTERRUPTIBLE);
1370 +
1371 +               if (interruptible && signal_pending(current)) {
1372 +                       ret = -ERESTARTSYS;
1373 +                       break;
1374 +               }
1375 +
1376 +               if (vc4->finished_seqno >= seqno)
1377 +                       break;
1378 +
1379 +               if (timeout_ns != ~0ull) {
1380 +                       if (time_after_eq(jiffies, timeout_expire)) {
1381 +                               ret = -ETIME;
1382 +                               break;
1383 +                       }
1384 +                       schedule_timeout(timeout_expire - jiffies);
1385 +               } else {
1386 +                       schedule();
1387 +               }
1388 +       }
1389 +
1390 +       finish_wait(&vc4->job_wait_queue, &wait);
1391 +       trace_vc4_wait_for_seqno_end(dev, seqno);
1392 +
1393 +       if (ret && ret != -ERESTARTSYS) {
1394 +               DRM_ERROR("timeout waiting for render thread idle\n");
1395 +               return ret;
1396 +       }
1397 +
1398 +       return 0;
1399 +}
1400 +
1401 +static void
1402 +vc4_flush_caches(struct drm_device *dev)
1403 +{
1404 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1405 +
1406 +       /* Flush the GPU L2 caches.  These caches sit on top of system
1407 +        * L3 (the 128kb or so shared with the CPU), and are
1408 +        * non-allocating in the L3.
1409 +        */
1410 +       V3D_WRITE(V3D_L2CACTL,
1411 +                 V3D_L2CACTL_L2CCLR);
1412 +
1413 +       V3D_WRITE(V3D_SLCACTL,
1414 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
1415 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
1416 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
1417 +                 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
1418 +}
1419 +
1420 +/* Sets the registers for the next job to actually be executed in
1421 + * the hardware.
1422 + *
1423 + * The job_lock should be held during this.
1424 + */
1425 +void
1426 +vc4_submit_next_job(struct drm_device *dev)
1427 +{
1428 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1429 +       struct vc4_exec_info *exec = vc4_first_job(vc4);
1430 +
1431 +       if (!exec)
1432 +               return;
1433 +
1434 +       vc4_flush_caches(dev);
1435 +
1436 +       /* Disable the binner's pre-loaded overflow memory address */
1437 +       V3D_WRITE(V3D_BPOA, 0);
1438 +       V3D_WRITE(V3D_BPOS, 0);
1439 +
1440 +       if (exec->ct0ca != exec->ct0ea)
1441 +               submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
1442 +       submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
1443 +}
1444 +
1445 +static void
1446 +vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
1447 +{
1448 +       struct vc4_bo *bo;
1449 +       unsigned i;
1450 +
1451 +       for (i = 0; i < exec->bo_count; i++) {
1452 +               bo = to_vc4_bo(&exec->bo[i].bo->base);
1453 +               bo->seqno = seqno;
1454 +       }
1455 +
1456 +       list_for_each_entry(bo, &exec->unref_list, unref_head) {
1457 +               bo->seqno = seqno;
1458 +       }
1459 +}
1460 +
1461 +/* Queues a struct vc4_exec_info for execution.  If no job is
1462 + * currently executing, then submits it.
1463 + *
1464 + * Unlike most GPUs, our hardware only handles one command list at a
1465 + * time.  To queue multiple jobs at once, we'd need to edit the
1466 + * previous command list to have a jump to the new one at the end, and
1467 + * then bump the end address.  That's a change for a later date,
1468 + * though.
1469 + */
1470 +static void
1471 +vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
1472 +{
1473 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1474 +       uint64_t seqno = ++vc4->emit_seqno;
1475 +       unsigned long irqflags;
1476 +
1477 +       exec->seqno = seqno;
1478 +       vc4_update_bo_seqnos(exec, seqno);
1479 +
1480 +       spin_lock_irqsave(&vc4->job_lock, irqflags);
1481 +       list_add_tail(&exec->head, &vc4->job_list);
1482 +
1483 +       /* If no job was executing, kick ours off.  Otherwise, it'll
1484 +        * get started when the previous job's frame done interrupt
1485 +        * occurs.
1486 +        */
1487 +       if (vc4_first_job(vc4) == exec) {
1488 +               vc4_submit_next_job(dev);
1489 +               vc4_queue_hangcheck(dev);
1490 +       }
1491 +
1492 +       spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1493 +}
1494 +
1495 +/**
1496 + * Looks up a bunch of GEM handles for BOs and stores the array for
1497 + * use in the command validator that actually writes relocated
1498 + * addresses pointing to them.
1499 + */
1500 +static int
1501 +vc4_cl_lookup_bos(struct drm_device *dev,
1502 +                 struct drm_file *file_priv,
1503 +                 struct vc4_exec_info *exec)
1504 +{
1505 +       struct drm_vc4_submit_cl *args = exec->args;
1506 +       uint32_t *handles;
1507 +       int ret = 0;
1508 +       int i;
1509 +
1510 +       exec->bo_count = args->bo_handle_count;
1511 +
1512 +       if (!exec->bo_count) {
1513 +               /* See comment on bo_index for why we have to check
1514 +                * this.
1515 +                */
1516 +               DRM_ERROR("Rendering requires BOs to validate\n");
1517 +               return -EINVAL;
1518 +       }
1519 +
1520 +       exec->bo = kcalloc(exec->bo_count, sizeof(struct vc4_bo_exec_state),
1521 +                          GFP_KERNEL);
1522 +       if (!exec->bo) {
1523 +               DRM_ERROR("Failed to allocate validated BO pointers\n");
1524 +               return -ENOMEM;
1525 +       }
1526 +
1527 +       handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
1528 +       if (!handles) {
+               ret = -ENOMEM;
1529 +               DRM_ERROR("Failed to allocate incoming GEM handles\n");
1530 +               goto fail;
1531 +       }
1532 +
1533 +       if (copy_from_user(handles,
1534 +                          (void __user *)(uintptr_t)args->bo_handles,
1535 +                          exec->bo_count * sizeof(uint32_t))) {
1536 +               DRM_ERROR("Failed to copy in GEM handles\n");
1537 +               ret = -EFAULT;
1538 +               goto fail;
1539 +       }
1540 +
1541 +       spin_lock(&file_priv->table_lock);
1542 +       for (i = 0; i < exec->bo_count; i++) {
1543 +               struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
1544 +                                                    handles[i]);
1545 +               if (!bo) {
1546 +                       DRM_ERROR("Failed to look up GEM BO %d: %d\n",
1547 +                                 i, handles[i]);
1548 +                       ret = -EINVAL;
1549 +                       spin_unlock(&file_priv->table_lock);
1550 +                       goto fail;
1551 +               }
1552 +               drm_gem_object_reference(bo);
1553 +               exec->bo[i].bo = (struct drm_gem_cma_object *)bo;
1554 +       }
1555 +       spin_unlock(&file_priv->table_lock);
1556 +
1557 +fail:
1558 +       kfree(handles);
1559 +       return ret;
1560 +}
1561 +
1562 +static int
1563 +vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
1564 +{
1565 +       struct drm_vc4_submit_cl *args = exec->args;
1566 +       void *temp = NULL;
1567 +       void *bin;
1568 +       int ret = 0;
1569 +       uint32_t bin_offset = 0;
1570 +       uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
1571 +                                            16);
1572 +       uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
1573 +       uint32_t exec_size = uniforms_offset + args->uniforms_size;
1574 +       uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
1575 +                                         args->shader_rec_count);
1576 +       struct vc4_bo *bo;
1577 +
1578 +       if (uniforms_offset < shader_rec_offset ||
1579 +           exec_size < uniforms_offset ||
1580 +           args->shader_rec_count >= (UINT_MAX /
1581 +                                         sizeof(struct vc4_shader_state)) ||
1582 +           temp_size < exec_size) {
1583 +               DRM_ERROR("overflow in exec arguments\n");
+               ret = -EINVAL;
1584 +               goto fail;
1585 +       }
1586 +
1587 +       /* Allocate space where we'll store the copied in user command lists
1588 +        * and shader records.
1589 +        *
1590 +        * We don't just copy directly into the BOs because we need to
1591 +        * read the contents back for validation, and accesses through
1592 +        * bo->vaddr are likely uncached.
1593 +        */
1594 +       temp = kmalloc(temp_size, GFP_KERNEL);
1595 +       if (!temp) {
1596 +               DRM_ERROR("Failed to allocate storage for copying "
1597 +                         "in bin/render CLs.\n");
1598 +               ret = -ENOMEM;
1599 +               goto fail;
1600 +       }
1601 +       bin = temp + bin_offset;
1602 +       exec->shader_rec_u = temp + shader_rec_offset;
1603 +       exec->uniforms_u = temp + uniforms_offset;
1604 +       exec->shader_state = temp + exec_size;
1605 +       exec->shader_state_size = args->shader_rec_count;
1606 +
1607 +       if (copy_from_user(bin,
1608 +                          (void __user *)(uintptr_t)args->bin_cl,
1609 +                          args->bin_cl_size)) {
1610 +               DRM_ERROR("Failed to copy in bin cl\n");
1611 +               ret = -EFAULT;
1612 +               goto fail;
1613 +       }
1614 +
1615 +       if (copy_from_user(exec->shader_rec_u,
1616 +                          (void __user *)(uintptr_t)args->shader_rec,
1617 +                          args->shader_rec_size)) {
1618 +               DRM_ERROR("Failed to copy in shader recs\n");
1619 +               ret = -EFAULT;
1620 +               goto fail;
1621 +       }
1622 +
1623 +       if (copy_from_user(exec->uniforms_u,
1624 +                          (void __user *)(uintptr_t)args->uniforms,
1625 +                          args->uniforms_size)) {
1626 +               DRM_ERROR("Failed to copy in uniforms cl\n");
1627 +               ret = -EFAULT;
1628 +               goto fail;
1629 +       }
1630 +
1631 +       bo = vc4_bo_create(dev, exec_size);
1632 +       if (!bo) {
1633 +               DRM_ERROR("Couldn't allocate BO for binning\n");
1634 +               ret = -ENOMEM;
1635 +               goto fail;
1636 +       }
1637 +       exec->exec_bo = &bo->base;
1638 +
1639 +       list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
1640 +                     &exec->unref_list);
1641 +
1642 +       exec->ct0ca = exec->exec_bo->paddr + bin_offset;
1643 +
1644 +       exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
1645 +       exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
1646 +       exec->shader_rec_size = args->shader_rec_size;
1647 +
1648 +       exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
1649 +       exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
1650 +       exec->uniforms_size = args->uniforms_size;
1651 +
1652 +       ret = vc4_validate_bin_cl(dev,
1653 +                                 exec->exec_bo->vaddr + bin_offset,
1654 +                                 bin,
1655 +                                 exec);
1656 +       if (ret)
1657 +               goto fail;
1658 +
1659 +       ret = vc4_validate_shader_recs(dev, exec);
1660 +
1661 +fail:
1662 +       kfree(temp);
1663 +       return ret;
1664 +}
1665 +
1666 +static void
1667 +vc4_complete_exec(struct vc4_exec_info *exec)
1668 +{
1669 +       unsigned i;
1670 +
1671 +       if (exec->bo) {
1672 +               for (i = 0; i < exec->bo_count; i++)
1673 +                       drm_gem_object_unreference(&exec->bo[i].bo->base);
1674 +               kfree(exec->bo);
1675 +       }
1676 +
1677 +       while (!list_empty(&exec->unref_list)) {
1678 +               struct vc4_bo *bo = list_first_entry(&exec->unref_list,
1679 +                                                    struct vc4_bo, unref_head);
1680 +               list_del(&bo->unref_head);
1681 +               drm_gem_object_unreference(&bo->base.base);
1682 +       }
1683 +
1684 +       kfree(exec);
1685 +}
1686 +
1687 +void
1688 +vc4_job_handle_completed(struct vc4_dev *vc4)
1689 +{
1690 +       unsigned long irqflags;
1691 +       struct vc4_seqno_cb *cb, *cb_temp;
1692 +
1693 +       spin_lock_irqsave(&vc4->job_lock, irqflags);
1694 +       while (!list_empty(&vc4->job_done_list)) {
1695 +               struct vc4_exec_info *exec =
1696 +                       list_first_entry(&vc4->job_done_list,
1697 +                                        struct vc4_exec_info, head);
1698 +               list_del(&exec->head);
1699 +
1700 +               spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1701 +               vc4_complete_exec(exec);
1702 +               spin_lock_irqsave(&vc4->job_lock, irqflags);
1703 +       }
1704 +       spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1705 +
1706 +       list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
1707 +               if (cb->seqno <= vc4->finished_seqno) {
1708 +                       list_del_init(&cb->work.entry);
1709 +                       schedule_work(&cb->work);
1710 +               }
1711 +       }
1712 +}
1713 +
1714 +static void vc4_seqno_cb_work(struct work_struct *work)
1715 +{
1716 +       struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
1717 +       cb->func(cb);
1718 +}
1719 +
1720 +int vc4_queue_seqno_cb(struct drm_device *dev,
1721 +                      struct vc4_seqno_cb *cb, uint64_t seqno,
1722 +                      void (*func)(struct vc4_seqno_cb *cb))
1723 +{
1724 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1725 +       int ret = 0;
1726 +
1727 +       cb->func = func;
1728 +       INIT_WORK(&cb->work, vc4_seqno_cb_work);
1729 +
1730 +       mutex_lock(&dev->struct_mutex);
1731 +       if (seqno > vc4->finished_seqno) {
1732 +               cb->seqno = seqno;
1733 +               list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1734 +       } else {
1735 +               schedule_work(&cb->work);
1736 +       }
1737 +       mutex_unlock(&dev->struct_mutex);
1738 +
1739 +       return ret;
1740 +}
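
A brief usage sketch for this helper (an editorial illustration, not part of the patch): callers embed struct vc4_seqno_cb in their own state and recover it with container_of() inside the callback, which is the same pattern vc4_kms.c follows further down for asynchronous commits. The struct my_flip and the surrounding names are hypothetical.

	struct my_flip {
		struct vc4_seqno_cb cb;		/* embedded so container_of() works */
		struct drm_framebuffer *fb;
	};

	static void my_flip_done(struct vc4_seqno_cb *cb)
	{
		/* Runs from a workqueue once vc4->finished_seqno reaches cb->seqno. */
		struct my_flip *flip = container_of(cb, struct my_flip, cb);

		/* ... complete the scanout update using flip->fb ... */
	}

	/* Queue the callback; it fires immediately if the seqno is already done. */
	vc4_queue_seqno_cb(dev, &flip->cb, seqno, my_flip_done);
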
1741 +
1742 +/* Scheduled when any job has been completed, this walks the list of
1743 + * jobs that had completed and unrefs their BOs and frees their exec
1744 + * structs.
1745 + */
1746 +static void
1747 +vc4_job_done_work(struct work_struct *work)
1748 +{
1749 +       struct vc4_dev *vc4 =
1750 +               container_of(work, struct vc4_dev, job_done_work);
1751 +       struct drm_device *dev = vc4->dev;
1752 +
1753 +       /* Need the struct lock for drm_gem_object_unreference(). */
1754 +       mutex_lock(&dev->struct_mutex);
1755 +       vc4_job_handle_completed(vc4);
1756 +       mutex_unlock(&dev->struct_mutex);
1757 +}
1758 +
1759 +static int
1760 +vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
1761 +                               uint64_t seqno,
1762 +                               uint64_t *timeout_ns)
1763 +{
1764 +       unsigned long start = jiffies;
1765 +       int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
1766 +
1767 +       if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
1768 +               uint64_t delta = jiffies_to_nsecs(jiffies - start);
1769 +               if (*timeout_ns >= delta)
1770 +                       *timeout_ns -= delta;
1771 +       }
1772 +
1773 +       return ret;
1774 +}
1775 +
1776 +int
1777 +vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1778 +                    struct drm_file *file_priv)
1779 +{
1780 +       struct drm_vc4_wait_seqno *args = data;
1781 +
1782 +       return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
1783 +                                              &args->timeout_ns);
1784 +}
1785 +
1786 +int
1787 +vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1788 +                 struct drm_file *file_priv)
1789 +{
1790 +       int ret;
1791 +       struct drm_vc4_wait_bo *args = data;
1792 +       struct drm_gem_object *gem_obj;
1793 +       struct vc4_bo *bo;
1794 +
1795 +       gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1796 +       if (!gem_obj) {
1797 +               DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
1798 +               return -EINVAL;
1799 +       }
1800 +       bo = to_vc4_bo(gem_obj);
1801 +
1802 +       ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns);
1803 +
1804 +       drm_gem_object_unreference(gem_obj);
1805 +       return ret;
1806 +}
1807 +
1808 +/**
1809 + * Submits a command list to the VC4.
1810 + *
1811 + * This is what is called batchbuffer emitting on other hardware.
1812 + */
1813 +int
1814 +vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1815 +                   struct drm_file *file_priv)
1816 +{
1817 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1818 +       struct drm_vc4_submit_cl *args = data;
1819 +       struct vc4_exec_info *exec;
1820 +       int ret;
1821 +
1822 +       if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
1823 +               DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
1824 +               return -EINVAL;
1825 +       }
1826 +
1827 +       exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
1828 +       if (!exec) {
1829 +               DRM_ERROR("malloc failure on exec struct\n");
1830 +               return -ENOMEM;
1831 +       }
1832 +
1833 +       exec->args = args;
1834 +       INIT_LIST_HEAD(&exec->unref_list);
1835 +
1836 +       mutex_lock(&dev->struct_mutex);
1837 +
1838 +       ret = vc4_cl_lookup_bos(dev, file_priv, exec);
1839 +       if (ret)
1840 +               goto fail;
1841 +
1842 +       if (exec->args->bin_cl_size != 0) {
1843 +               ret = vc4_get_bcl(dev, exec);
1844 +               if (ret)
1845 +                       goto fail;
1846 +       } else {
1847 +               exec->ct0ca = exec->ct0ea = 0;
1848 +       }
1849 +
1850 +       ret = vc4_get_rcl(dev, exec);
1851 +       if (ret)
1852 +               goto fail;
1853 +
1854 +       /* Clear this out of the struct we'll be putting in the queue,
1855 +        * since it's part of our stack.
1856 +        */
1857 +       exec->args = NULL;
1858 +
1859 +       vc4_queue_submit(dev, exec);
1860 +
1861 +       /* Return the seqno for our job. */
1862 +       args->seqno = vc4->emit_seqno;
1863 +
1864 +       mutex_unlock(&dev->struct_mutex);
1865 +
1866 +       return 0;
1867 +
1868 +fail:
1869 +       vc4_complete_exec(exec);
1870 +
1871 +       mutex_unlock(&dev->struct_mutex);
1872 +
1873 +       return ret;
1874 +}
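
A rough userspace sketch of driving this ioctl (editorial, not part of the patch): the DRM_IOCTL_VC4_SUBMIT_CL and DRM_IOCTL_VC4_WAIT_SEQNO request numbers and the full struct layout come from include/uapi/drm/vc4_drm.h added by this patch, so treat those names as assumptions; only the fields read by the kernel code above are shown, and the render-target setup consumed by vc4_get_rcl() is left out. drmIoctl() is the usual libdrm wrapper.

	struct drm_vc4_submit_cl submit = {
		.bin_cl = (uintptr_t)bin_cl,		/* binner command list bytes */
		.bin_cl_size = bin_cl_size,
		.shader_rec = (uintptr_t)shader_recs,
		.shader_rec_size = shader_rec_size,
		.shader_rec_count = shader_rec_count,
		.uniforms = (uintptr_t)uniform_data,
		.uniforms_size = uniforms_size,
		.bo_handles = (uintptr_t)bo_handles,	/* array of u32 GEM handles */
		.bo_handle_count = bo_handle_count,
		.flags = 0,
		/* ... render CL / clear color fields omitted ... */
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) == 0) {
		struct drm_vc4_wait_seqno wait = {
			.seqno = submit.seqno,		/* written back by the kernel */
			.timeout_ns = ~0ull,
		};
		drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
	}
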
1875 +
1876 +void
1877 +vc4_gem_init(struct drm_device *dev)
1878 +{
1879 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1880 +
1881 +       INIT_LIST_HEAD(&vc4->job_list);
1882 +       INIT_LIST_HEAD(&vc4->job_done_list);
1883 +       INIT_LIST_HEAD(&vc4->seqno_cb_list);
1884 +       spin_lock_init(&vc4->job_lock);
1885 +
1886 +       INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1887 +       setup_timer(&vc4->hangcheck.timer,
1888 +                   vc4_hangcheck_elapsed,
1889 +                   (unsigned long) dev);
1890 +
1891 +       INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1892 +}
1893 +
1894 +void
1895 +vc4_gem_destroy(struct drm_device *dev)
1896 +{
1897 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
1898 +
1899 +       /* Waiting for exec to finish would need to be done before
1900 +        * unregistering V3D.
1901 +        */
1902 +       WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1903 +
1904 +       /* V3D should already have disabled its interrupt and cleared
1905 +        * the overflow allocation registers.  Now free the object.
1906 +        */
1907 +       if (vc4->overflow_mem) {
1908 +               drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
1909 +               vc4->overflow_mem = NULL;
1910 +       }
1911 +
1912 +       vc4_bo_cache_destroy(dev);
1913 +}
1914 --- /dev/null
1915 +++ b/drivers/gpu/drm/vc4/vc4_irq.c
1916 @@ -0,0 +1,211 @@
1917 +/*
1918 + * Copyright © 2014 Broadcom
1919 + *
1920 + * Permission is hereby granted, free of charge, to any person obtaining a
1921 + * copy of this software and associated documentation files (the "Software"),
1922 + * to deal in the Software without restriction, including without limitation
1923 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1924 + * and/or sell copies of the Software, and to permit persons to whom the
1925 + * Software is furnished to do so, subject to the following conditions:
1926 + *
1927 + * The above copyright notice and this permission notice (including the next
1928 + * paragraph) shall be included in all copies or substantial portions of the
1929 + * Software.
1930 + *
1931 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1932 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1933 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1934 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1935 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1936 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1937 + * IN THE SOFTWARE.
1938 + */
1939 +
1940 +/** DOC: Interrupt management for the V3D engine.
1941 + *
1942 + * We have an interrupt status register (V3D_INTCTL) which reports
1943 + * interrupts, and where writing 1 bits clears those interrupts.
1944 + * There is also a pair of interrupt registers
1945 + * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
1946 + * disables that specific interrupt, and 0s written are ignored
1947 + * (reading either one returns the set of enabled interrupts).
1948 + *
1949 + * When we take a render frame interrupt, we need to wake the
1950 + * processes waiting for some frame to be done, and get the next frame
1951 + * submitted ASAP (so the hardware doesn't sit idle when there's work
1952 + * to do).
1953 + *
1954 + * When we take the binner out of memory interrupt, we need to
1955 + * allocate some new memory and pass it to the binner so that the
1956 + * current job can make progress.
1957 + */
1958 +
1959 +#include "vc4_drv.h"
1960 +#include "vc4_regs.h"
1961 +
1962 +#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
1963 +                        V3D_INT_FRDONE)
1964 +
1965 +DECLARE_WAIT_QUEUE_HEAD(render_wait);
1966 +
1967 +static void
1968 +vc4_overflow_mem_work(struct work_struct *work)
1969 +{
1970 +       struct vc4_dev *vc4 =
1971 +               container_of(work, struct vc4_dev, overflow_mem_work);
1972 +       struct drm_device *dev = vc4->dev;
1973 +       struct vc4_bo *bo;
1974 +
1975 +       bo = vc4_bo_create(dev, 256 * 1024);
1976 +       if (!bo) {
1977 +               DRM_ERROR("Couldn't allocate binner overflow mem\n");
1978 +               return;
1979 +       }
1980 +
1981 +       /* If there's a job executing currently, then our previous
1982 +        * overflow allocation is getting used in that job and we need
1983 +        * to queue it to be released when the job is done.  But if no
1984 +        * job is executing at all, then we can free the old overflow
1985 +        * object directly.
1986 +        *
1987 +        * No lock necessary for this pointer since we're the only
1988 +        * ones that update the pointer, and our workqueue won't
1989 +        * reenter.
1990 +        */
1991 +       if (vc4->overflow_mem) {
1992 +               struct vc4_exec_info *current_exec;
1993 +               unsigned long irqflags;
1994 +
1995 +               spin_lock_irqsave(&vc4->job_lock, irqflags);
1996 +               current_exec = vc4_first_job(vc4);
1997 +               if (current_exec) {
1998 +                       vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
1999 +                       list_add_tail(&vc4->overflow_mem->unref_head,
2000 +                                     &current_exec->unref_list);
2001 +                       vc4->overflow_mem = NULL;
2002 +               }
2003 +               spin_unlock_irqrestore(&vc4->job_lock, irqflags);
2004 +       }
2005 +
2006 +       if (vc4->overflow_mem) {
2007 +               drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
2008 +       }
2009 +       vc4->overflow_mem = bo;
2010 +
2011 +       V3D_WRITE(V3D_BPOA, bo->base.paddr);
2012 +       V3D_WRITE(V3D_BPOS, bo->base.base.size);
2013 +       V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
2014 +       V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
2015 +}
2016 +
2017 +static void
2018 +vc4_irq_finish_job(struct drm_device *dev)
2019 +{
2020 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2021 +       struct vc4_exec_info *exec = vc4_first_job(vc4);
2022 +
2023 +       if (!exec)
2024 +               return;
2025 +
2026 +       vc4->finished_seqno++;
2027 +       list_move_tail(&exec->head, &vc4->job_done_list);
2028 +       vc4_submit_next_job(dev);
2029 +
2030 +       wake_up_all(&vc4->job_wait_queue);
2031 +       schedule_work(&vc4->job_done_work);
2032 +}
2033 +
2034 +irqreturn_t
2035 +vc4_irq(int irq, void *arg)
2036 +{
2037 +       struct drm_device *dev = arg;
2038 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2039 +       uint32_t intctl;
2040 +       irqreturn_t status = IRQ_NONE;
2041 +
2042 +       barrier();
2043 +       intctl = V3D_READ(V3D_INTCTL);
2044 +
2045 +       /* Acknowledge the interrupts we're handling here. The render
2046 +        * frame done interrupt will be cleared, while OUTOMEM will
2047 +        * stay high until the underlying cause is cleared.
2048 +        */
2049 +       V3D_WRITE(V3D_INTCTL, intctl);
2050 +
2051 +       if (intctl & V3D_INT_OUTOMEM) {
2052 +               /* Disable OUTOMEM until the work is done. */
2053 +               V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
2054 +               schedule_work(&vc4->overflow_mem_work);
2055 +               status = IRQ_HANDLED;
2056 +       }
2057 +
2058 +       if (intctl & V3D_INT_FRDONE) {
2059 +               spin_lock(&vc4->job_lock);
2060 +               vc4_irq_finish_job(dev);
2061 +               spin_unlock(&vc4->job_lock);
2062 +               status = IRQ_HANDLED;
2063 +       }
2064 +
2065 +       return status;
2066 +}
2067 +
2068 +void
2069 +vc4_irq_preinstall(struct drm_device *dev)
2070 +{
2071 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2072 +
2073 +       init_waitqueue_head(&vc4->job_wait_queue);
2074 +       INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
2075 +
2076 +       /* Clear any pending interrupts someone might have left around
2077 +        * for us.
2078 +        */
2079 +       V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2080 +}
2081 +
2082 +int
2083 +vc4_irq_postinstall(struct drm_device *dev)
2084 +{
2085 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2086 +
2087 +       /* Enable both the render done and out of memory interrupts. */
2088 +       V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
2089 +
2090 +       return 0;
2091 +}
2092 +
2093 +void
2094 +vc4_irq_uninstall(struct drm_device *dev)
2095 +{
2096 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2097 +
2098 +       /* Disable sending interrupts for our driver's IRQs. */
2099 +       V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
2100 +
2101 +       /* Clear any pending interrupts we might have left. */
2102 +       V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2103 +
2104 +       cancel_work_sync(&vc4->overflow_mem_work);
2105 +}
2106 +
2107 +/** Reinitializes interrupt registers when a GPU reset is performed. */
2108 +void vc4_irq_reset(struct drm_device *dev)
2109 +{
2110 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2111 +       unsigned long irqflags;
2112 +
2113 +       /* Acknowledge any stale IRQs. */
2114 +       V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2115 +
2116 +       /*
2117 +        * Turn all our interrupts on.  Binner out of memory is the
2118 +        * only one we expect to trigger at this point, since we've
2119 +        * just come from poweron and haven't supplied any overflow
2120 +        * memory yet.
2121 +        */
2122 +       V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
2123 +
2124 +       spin_lock_irqsave(&vc4->job_lock, irqflags);
2125 +       vc4_irq_finish_job(dev);
2126 +       spin_unlock_irqrestore(&vc4->job_lock, irqflags);
2127 +}
2128 --- a/drivers/gpu/drm/vc4/vc4_kms.c
2129 +++ b/drivers/gpu/drm/vc4/vc4_kms.c
2130 @@ -15,6 +15,7 @@
2131   */
2132  
2133  #include "drm_crtc.h"
2134 +#include "drm_atomic.h"
2135  #include "drm_atomic_helper.h"
2136  #include "drm_crtc_helper.h"
2137  #include "drm_plane_helper.h"
2138 @@ -29,10 +30,151 @@ static void vc4_output_poll_changed(stru
2139                 drm_fbdev_cma_hotplug_event(vc4->fbdev);
2140  }
2141  
2142 +struct vc4_commit {
2143 +       struct drm_device *dev;
2144 +       struct drm_atomic_state *state;
2145 +       struct vc4_seqno_cb cb;
2146 +};
2147 +
2148 +static void
2149 +vc4_atomic_complete_commit(struct vc4_commit *c)
2150 +{
2151 +       struct drm_atomic_state *state = c->state;
2152 +       struct drm_device *dev = state->dev;
2153 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2154 +
2155 +       drm_atomic_helper_commit_modeset_disables(dev, state);
2156 +
2157 +       drm_atomic_helper_commit_planes(dev, state);
2158 +
2159 +       drm_atomic_helper_commit_modeset_enables(dev, state);
2160 +
2161 +       drm_atomic_helper_wait_for_vblanks(dev, state);
2162 +
2163 +       drm_atomic_helper_cleanup_planes(dev, state);
2164 +
2165 +       drm_atomic_state_free(state);
2166 +
2167 +       up(&vc4->async_modeset);
2168 +
2169 +       kfree(c);
2170 +}
2171 +
2172 +static void
2173 +vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
2174 +{
2175 +       struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
2176 +
2177 +       vc4_atomic_complete_commit(c);
2178 +}
2179 +
2180 +static struct vc4_commit *commit_init(struct drm_atomic_state *state)
2181 +{
2182 +       struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
2183 +
2184 +       if (!c)
2185 +               return NULL;
2186 +       c->dev = state->dev;
2187 +       c->state = state;
2188 +
2189 +       return c;
2190 +}
2191 +
2192 +/**
2193 + * vc4_atomic_commit - commit validated state object
2194 + * @dev: DRM device
2195 + * @state: the driver state object
2196 + * @async: asynchronous commit
2197 + *
2198 + * This function commits a state object that has been pre-validated with
2199 + * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
2200 + * reservation fails. For now this doesn't implement asynchronous commits.
2201 + *
2202 + * RETURNS
2203 + * Zero for success or -errno.
2204 + */
2205 +static int vc4_atomic_commit(struct drm_device *dev,
2206 +                            struct drm_atomic_state *state,
2207 +                            bool async)
2208 +{
2209 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
2210 +       int ret;
2211 +       int i;
2212 +       uint64_t wait_seqno = 0;
2213 +       struct vc4_commit *c;
2214 +
2215 +       c = commit_init(state);
2216 +       if (!c)
2217 +               return -ENOMEM;
2218 +
2219 +       /* Make sure that any outstanding modesets have finished. */
2220 +       ret = down_interruptible(&vc4->async_modeset);
2221 +       if (ret) {
2222 +               kfree(c);
2223 +               return ret;
2224 +       }
2225 +
2226 +       ret = drm_atomic_helper_prepare_planes(dev, state);
2227 +       if (ret) {
2228 +               kfree(c);
2229 +               up(&vc4->async_modeset);
2230 +               return ret;
2231 +       }
2232 +
2233 +       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
2234 +               struct drm_plane *plane = state->planes[i];
2235 +               struct drm_plane_state *new_state = state->plane_states[i];
2236 +
2237 +               if (!plane)
2238 +                       continue;
2239 +
2240 +               if ((plane->state->fb != new_state->fb) && new_state->fb) {
2241 +                       struct drm_gem_cma_object *cma_bo =
2242 +                               drm_fb_cma_get_gem_obj(new_state->fb, 0);
2243 +                       struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
2244 +                       wait_seqno = max(bo->seqno, wait_seqno);
2245 +               }
2246 +       }
2247 +
2248 +       /*
2249 +        * This is the point of no return - everything below never fails except
2250 +        * when the hw goes bonghits. Which means we can commit the new state on
2251 +        * the software side now.
2252 +        */
2253 +
2254 +       drm_atomic_helper_swap_state(dev, state);
2255 +
2256 +       /*
2257 +        * Everything below can be run asynchronously without the need to grab
2258 +        * any modeset locks at all under one condition: It must be guaranteed
2259 +        * that the asynchronous work has either been cancelled (if the driver
2260 +        * supports it, which at least requires that the framebuffers get
2261 +        * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2262 +        * before the new state gets committed on the software side with
2263 +        * drm_atomic_helper_swap_state().
2264 +        *
2265 +        * This scheme allows new atomic state updates to be prepared and
2266 +        * checked in parallel to the asynchronous completion of the previous
2267 +        * update. Which is important since compositors need to figure out the
2268 +        * composition of the next frame right after having submitted the
2269 +        * current layout.
2270 +        */
2271 +
2272 +       if (async) {
2273 +               vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
2274 +                                  vc4_atomic_complete_commit_seqno_cb);
2275 +       } else {
2276 +               vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
2277 +               vc4_atomic_complete_commit(c);
2278 +       }
2279 +
2280 +       return 0;
2281 +}
2282 +
2283  static const struct drm_mode_config_funcs vc4_mode_funcs = {
2284         .output_poll_changed = vc4_output_poll_changed,
2285         .atomic_check = drm_atomic_helper_check,
2286 -       .atomic_commit = drm_atomic_helper_commit,
2287 +       .atomic_commit = vc4_atomic_commit,
2288         .fb_create = drm_fb_cma_create,
2289  };
2290  
2291 @@ -41,6 +183,8 @@ int vc4_kms_load(struct drm_device *dev)
2292         struct vc4_dev *vc4 = to_vc4_dev(dev);
2293         int ret;
2294  
2295 +       sema_init(&vc4->async_modeset, 1);
2296 +
2297         ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
2298         if (ret < 0) {
2299                 dev_err(dev->dev, "failed to initialize vblank\n");
2300 @@ -51,6 +195,8 @@ int vc4_kms_load(struct drm_device *dev)
2301         dev->mode_config.max_height = 2048;
2302         dev->mode_config.funcs = &vc4_mode_funcs;
2303         dev->mode_config.preferred_depth = 24;
2304 +       dev->mode_config.async_page_flip = true;
2305 +
2306         dev->vblank_disable_allowed = true;
2307  
2308         drm_mode_config_reset(dev);
2309 --- /dev/null
2310 +++ b/drivers/gpu/drm/vc4/vc4_packet.h
2311 @@ -0,0 +1,384 @@
2312 +/*
2313 + * Copyright © 2014 Broadcom
2314 + *
2315 + * Permission is hereby granted, free of charge, to any person obtaining a
2316 + * copy of this software and associated documentation files (the "Software"),
2317 + * to deal in the Software without restriction, including without limitation
2318 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2319 + * and/or sell copies of the Software, and to permit persons to whom the
2320 + * Software is furnished to do so, subject to the following conditions:
2321 + *
2322 + * The above copyright notice and this permission notice (including the next
2323 + * paragraph) shall be included in all copies or substantial portions of the
2324 + * Software.
2325 + *
2326 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2327 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2328 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
2329 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2330 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2331 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2332 + * IN THE SOFTWARE.
2333 + */
2334 +
2335 +#ifndef VC4_PACKET_H
2336 +#define VC4_PACKET_H
2337 +
2338 +#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
2339 +
2340 +enum vc4_packet {
2341 +        VC4_PACKET_HALT = 0,
2342 +        VC4_PACKET_NOP = 1,
2343 +
2344 +        VC4_PACKET_FLUSH = 4,
2345 +        VC4_PACKET_FLUSH_ALL = 5,
2346 +        VC4_PACKET_START_TILE_BINNING = 6,
2347 +        VC4_PACKET_INCREMENT_SEMAPHORE = 7,
2348 +        VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
2349 +
2350 +        VC4_PACKET_BRANCH = 16,
2351 +        VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
2352 +
2353 +        VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
2354 +        VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
2355 +        VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
2356 +        VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
2357 +        VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
2358 +        VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
2359 +
2360 +        VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
2361 +        VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
2362 +
2363 +        VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
2364 +        VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
2365 +
2366 +        VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
2367 +
2368 +        VC4_PACKET_GL_SHADER_STATE = 64,
2369 +        VC4_PACKET_NV_SHADER_STATE = 65,
2370 +        VC4_PACKET_VG_SHADER_STATE = 66,
2371 +
2372 +        VC4_PACKET_CONFIGURATION_BITS = 96,
2373 +        VC4_PACKET_FLAT_SHADE_FLAGS = 97,
2374 +        VC4_PACKET_POINT_SIZE = 98,
2375 +        VC4_PACKET_LINE_WIDTH = 99,
2376 +        VC4_PACKET_RHT_X_BOUNDARY = 100,
2377 +        VC4_PACKET_DEPTH_OFFSET = 101,
2378 +        VC4_PACKET_CLIP_WINDOW = 102,
2379 +        VC4_PACKET_VIEWPORT_OFFSET = 103,
2380 +        VC4_PACKET_Z_CLIPPING = 104,
2381 +        VC4_PACKET_CLIPPER_XY_SCALING = 105,
2382 +        VC4_PACKET_CLIPPER_Z_SCALING = 106,
2383 +
2384 +        VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
2385 +        VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
2386 +        VC4_PACKET_CLEAR_COLORS = 114,
2387 +        VC4_PACKET_TILE_COORDINATES = 115,
2388 +
2389 +        /* Not an actual hardware packet -- this is what we use to put
2390 +         * references to GEM bos in the command stream, since we need the u32
2391 +         * in the actual address packet in order to store the offset from the
2392 +         * start of the BO.
2393 +         */
2394 +        VC4_PACKET_GEM_HANDLES = 254,
2395 +} __attribute__ ((__packed__));
2396 +
2397 +#define VC4_PACKET_HALT_SIZE                                           1
2398 +#define VC4_PACKET_NOP_SIZE                                            1
2399 +#define VC4_PACKET_FLUSH_SIZE                                          1
2400 +#define VC4_PACKET_FLUSH_ALL_SIZE                                      1
2401 +#define VC4_PACKET_START_TILE_BINNING_SIZE                             1
2402 +#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE                            1
2403 +#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE                              1
2404 +#define VC4_PACKET_BRANCH_SIZE                                         5
2405 +#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE                             5
2406 +#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE                           1
2407 +#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE                   1
2408 +#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE                     5
2409 +#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE                      5
2410 +#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE                      7
2411 +#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE                       7
2412 +#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE                           14
2413 +#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE                             10
2414 +#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE                           1
2415 +#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE                   1
2416 +#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE                          2
2417 +#define VC4_PACKET_GL_SHADER_STATE_SIZE                                        5
2418 +#define VC4_PACKET_NV_SHADER_STATE_SIZE                                        5
2419 +#define VC4_PACKET_VG_SHADER_STATE_SIZE                                        5
2420 +#define VC4_PACKET_CONFIGURATION_BITS_SIZE                             4
2421 +#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE                               5
2422 +#define VC4_PACKET_POINT_SIZE_SIZE                                     5
2423 +#define VC4_PACKET_LINE_WIDTH_SIZE                                     5
2424 +#define VC4_PACKET_RHT_X_BOUNDARY_SIZE                                 3
2425 +#define VC4_PACKET_DEPTH_OFFSET_SIZE                                   5
2426 +#define VC4_PACKET_CLIP_WINDOW_SIZE                                    9
2427 +#define VC4_PACKET_VIEWPORT_OFFSET_SIZE                                        5
2428 +#define VC4_PACKET_Z_CLIPPING_SIZE                                     9
2429 +#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE                             9
2430 +#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE                              9
2431 +#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE                       16
2432 +#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE                     11
2433 +#define VC4_PACKET_CLEAR_COLORS_SIZE                                   14
2434 +#define VC4_PACKET_TILE_COORDINATES_SIZE                               3
2435 +#define VC4_PACKET_GEM_HANDLES_SIZE                                    9
2436 +
2437 +/** @{
2438 + * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2439 + * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
2440 + */
2441 +#define VC4_TILING_FORMAT_LINEAR    0
2442 +#define VC4_TILING_FORMAT_T         1
2443 +#define VC4_TILING_FORMAT_LT        2
2444 +/** @} */
2445 +
2446 +/** @{
2447 + *
2448 + * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
2449 + * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
2450 + */
2451 +#define VC4_LOADSTORE_FULL_RES_EOF                     (1 << 3)
2452 +#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL       (1 << 2)
2453 +#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS              (1 << 1)
2454 +#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR           (1 << 0)
2455 +
2456 +/** @{
2457 + *
2458 + * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2459 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
2460 + */
2461 +
2462 +#define VC4_LOADSTORE_TILE_BUFFER_EOF                  (1 << 3)
2463 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK (1 << 2)
2464 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS      (1 << 1)
2465 +#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR   (1 << 0)
2466 +
2467 +/** @} */
2468 +
2469 +/** @{
2470 + *
2471 + * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2472 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
2473 + */
2474 +#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR (1 << 15)
2475 +#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR     (1 << 14)
2476 +#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR  (1 << 13)
2477 +#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP         (1 << 12)
2478 +
2479 +#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK      VC4_MASK(9, 8)
2480 +#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT     8
2481 +#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888         0
2482 +#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER    1
2483 +#define VC4_LOADSTORE_TILE_BUFFER_BGR565           2
2484 +/** @} */
2485 +
2486 +/** @{
2487 + *
2488 + * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
2489 + * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
2490 + */
2491 +#define VC4_STORE_TILE_BUFFER_MODE_MASK            VC4_MASK(7, 6)
2492 +#define VC4_STORE_TILE_BUFFER_MODE_SHIFT           6
2493 +#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0         (0 << 6)
2494 +#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4     (1 << 6)
2495 +#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16    (2 << 6)
2496 +
2497 +/** The values of the field are VC4_TILING_FORMAT_* */
2498 +#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK      VC4_MASK(5, 4)
2499 +#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT     4
2500 +
2501 +#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK      VC4_MASK(2, 0)
2502 +#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT     0
2503 +#define VC4_LOADSTORE_TILE_BUFFER_NONE             0
2504 +#define VC4_LOADSTORE_TILE_BUFFER_COLOR            1
2505 +#define VC4_LOADSTORE_TILE_BUFFER_ZS               2
2506 +#define VC4_LOADSTORE_TILE_BUFFER_Z                3
2507 +#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK          4
2508 +#define VC4_LOADSTORE_TILE_BUFFER_FULL             5
2509 +/** @} */
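
An illustrative sketch of how these definitions combine (editorial; it assumes the VC4_SET_FIELD(value, FIELD) helper from vc4_regs.h shifts and masks with FIELD_MASK/FIELD_SHIFT, as in the V3D_SLCACTL writes earlier in this patch): composing the low byte of a VC4_PACKET_STORE_TILE_BUFFER_GENERAL packet that stores the color buffer in T-tiled format.

	uint8_t byte0 =
		VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_COLOR,
			      VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
		VC4_SET_FIELD(VC4_TILING_FORMAT_T,
			      VC4_LOADSTORE_TILE_BUFFER_TILING);
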
2510 +
2511 +#define VC4_INDEX_BUFFER_U8                        (0 << 4)
2512 +#define VC4_INDEX_BUFFER_U16                       (1 << 4)
2513 +
2514 +/* This flag is only present in NV shader state. */
2515 +#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS         (1 << 3)
2516 +#define VC4_SHADER_FLAG_ENABLE_CLIPPING            (1 << 2)
2517 +#define VC4_SHADER_FLAG_VS_POINT_SIZE              (1 << 1)
2518 +#define VC4_SHADER_FLAG_FS_SINGLE_THREAD           (1 << 0)
2519 +
2520 +/** @{ byte 2 of config bits. */
2521 +#define VC4_CONFIG_BITS_EARLY_Z_UPDATE             (1 << 1)
2522 +#define VC4_CONFIG_BITS_EARLY_Z                    (1 << 0)
2523 +/** @} */
2524 +
2525 +/** @{ byte 1 of config bits. */
2526 +#define VC4_CONFIG_BITS_Z_UPDATE                   (1 << 7)
2527 +/** same values in this 3-bit field as PIPE_FUNC_* */
2528 +#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT           4
2529 +#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE        (1 << 3)
2530 +
2531 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO    (0 << 1)
2532 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD        (1 << 1)
2533 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR         (2 << 1)
2534 +#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO       (3 << 1)
2535 +
2536 +#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT       (1 << 0)
2537 +/** @} */
2538 +
2539 +/** @{ byte 0 of config bits. */
2540 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
2541 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X   (1 << 6)
2542 +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X  (2 << 6)
2543 +
2544 +#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES        (1 << 4)
2545 +#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET        (1 << 3)
2546 +#define VC4_CONFIG_BITS_CW_PRIMITIVES              (1 << 2)
2547 +#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK           (1 << 1)
2548 +#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT          (1 << 0)
2549 +/** @} */
2550 +
2551 +/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
2552 +#define VC4_BIN_CONFIG_DB_NON_MS                   (1 << 7)
2553 +
2554 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK       VC4_MASK(6, 5)
2555 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT      5
2556 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32         0
2557 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64         1
2558 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128        2
2559 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256        3
2560 +
2561 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK  VC4_MASK(4, 3)
2562 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
2563 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32    0
2564 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64    1
2565 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128   2
2566 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256   3
2567 +
2568 +#define VC4_BIN_CONFIG_AUTO_INIT_TSDA              (1 << 2)
2569 +#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT           (1 << 1)
2570 +#define VC4_BIN_CONFIG_MS_MODE_4X                  (1 << 0)
2571 +/** @} */
2572 +
2573 +/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
2574 +#define VC4_RENDER_CONFIG_DB_NON_MS                (1 << 12)
2575 +#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE (1 << 11)
2576 +#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G      (1 << 10)
2577 +#define VC4_RENDER_CONFIG_COVERAGE_MODE            (1 << 9)
2578 +#define VC4_RENDER_CONFIG_ENABLE_VG_MASK           (1 << 8)
2579 +
2580 +/** The values of the field are VC4_TILING_FORMAT_* */
2581 +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK       VC4_MASK(7, 6)
2582 +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT      6
2583 +
2584 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X         (0 << 4)
2585 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X         (1 << 4)
2586 +#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X        (2 << 4)
2587 +
2588 +#define VC4_RENDER_CONFIG_FORMAT_MASK              VC4_MASK(3, 2)
2589 +#define VC4_RENDER_CONFIG_FORMAT_SHIFT             2
2590 +#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED   0
2591 +#define VC4_RENDER_CONFIG_FORMAT_RGBA8888          1
2592 +#define VC4_RENDER_CONFIG_FORMAT_BGR565            2
2593 +
2594 +#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT        (1 << 1)
2595 +#define VC4_RENDER_CONFIG_MS_MODE_4X               (1 << 0)
2596 +
2597 +#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX         (1 << 4)
2598 +#define VC4_PRIMITIVE_LIST_FORMAT_32_XY            (3 << 4)
2599 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS      (0 << 0)
2600 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES       (1 << 0)
2601 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES   (2 << 0)
2602 +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT         (3 << 0)
2603 +
2604 +enum vc4_texture_data_type {
2605 +        VC4_TEXTURE_TYPE_RGBA8888 = 0,
2606 +        VC4_TEXTURE_TYPE_RGBX8888 = 1,
2607 +        VC4_TEXTURE_TYPE_RGBA4444 = 2,
2608 +        VC4_TEXTURE_TYPE_RGBA5551 = 3,
2609 +        VC4_TEXTURE_TYPE_RGB565 = 4,
2610 +        VC4_TEXTURE_TYPE_LUMINANCE = 5,
2611 +        VC4_TEXTURE_TYPE_ALPHA = 6,
2612 +        VC4_TEXTURE_TYPE_LUMALPHA = 7,
2613 +        VC4_TEXTURE_TYPE_ETC1 = 8,
2614 +        VC4_TEXTURE_TYPE_S16F = 9,
2615 +        VC4_TEXTURE_TYPE_S8 = 10,
2616 +        VC4_TEXTURE_TYPE_S16 = 11,
2617 +        VC4_TEXTURE_TYPE_BW1 = 12,
2618 +        VC4_TEXTURE_TYPE_A4 = 13,
2619 +        VC4_TEXTURE_TYPE_A1 = 14,
2620 +        VC4_TEXTURE_TYPE_RGBA64 = 15,
2621 +        VC4_TEXTURE_TYPE_RGBA32R = 16,
2622 +        VC4_TEXTURE_TYPE_YUV422R = 17,
2623 +};
2624 +
2625 +#define VC4_TEX_P0_OFFSET_MASK                     VC4_MASK(31, 12)
2626 +#define VC4_TEX_P0_OFFSET_SHIFT                    12
2627 +#define VC4_TEX_P0_CSWIZ_MASK                      VC4_MASK(11, 10)
2628 +#define VC4_TEX_P0_CSWIZ_SHIFT                     10
2629 +#define VC4_TEX_P0_CMMODE_MASK                     VC4_MASK(9, 9)
2630 +#define VC4_TEX_P0_CMMODE_SHIFT                    9
2631 +#define VC4_TEX_P0_FLIPY_MASK                      VC4_MASK(8, 8)
2632 +#define VC4_TEX_P0_FLIPY_SHIFT                     8
2633 +#define VC4_TEX_P0_TYPE_MASK                       VC4_MASK(7, 4)
2634 +#define VC4_TEX_P0_TYPE_SHIFT                      4
2635 +#define VC4_TEX_P0_MIPLVLS_MASK                    VC4_MASK(3, 0)
2636 +#define VC4_TEX_P0_MIPLVLS_SHIFT                   0
2637 +
2638 +#define VC4_TEX_P1_TYPE4_MASK                      VC4_MASK(31, 31)
2639 +#define VC4_TEX_P1_TYPE4_SHIFT                     31
2640 +#define VC4_TEX_P1_HEIGHT_MASK                     VC4_MASK(30, 20)
2641 +#define VC4_TEX_P1_HEIGHT_SHIFT                    20
2642 +#define VC4_TEX_P1_ETCFLIP_MASK                    VC4_MASK(19, 19)
2643 +#define VC4_TEX_P1_ETCFLIP_SHIFT                   19
2644 +#define VC4_TEX_P1_WIDTH_MASK                      VC4_MASK(18, 8)
2645 +#define VC4_TEX_P1_WIDTH_SHIFT                     8
2646 +
2647 +#define VC4_TEX_P1_MAGFILT_MASK                    VC4_MASK(7, 7)
2648 +#define VC4_TEX_P1_MAGFILT_SHIFT                   7
2649 +# define VC4_TEX_P1_MAGFILT_LINEAR                 0
2650 +# define VC4_TEX_P1_MAGFILT_NEAREST                1
2651 +
2652 +#define VC4_TEX_P1_MINFILT_MASK                    VC4_MASK(6, 4)
2653 +#define VC4_TEX_P1_MINFILT_SHIFT                   4
2654 +# define VC4_TEX_P1_MINFILT_LINEAR                 0
2655 +# define VC4_TEX_P1_MINFILT_NEAREST                1
2656 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR          2
2657 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN           3
2658 +# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR           4
2659 +# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN            5
2660 +
2661 +#define VC4_TEX_P1_WRAP_T_MASK                     VC4_MASK(3, 2)
2662 +#define VC4_TEX_P1_WRAP_T_SHIFT                    2
2663 +#define VC4_TEX_P1_WRAP_S_MASK                     VC4_MASK(1, 0)
2664 +#define VC4_TEX_P1_WRAP_S_SHIFT                    0
2665 +# define VC4_TEX_P1_WRAP_REPEAT                    0
2666 +# define VC4_TEX_P1_WRAP_CLAMP                     1
2667 +# define VC4_TEX_P1_WRAP_MIRROR                    2
2668 +# define VC4_TEX_P1_WRAP_BORDER                    3
2669 +
2670 +#define VC4_TEX_P2_PTYPE_MASK                      VC4_MASK(31, 30)
2671 +#define VC4_TEX_P2_PTYPE_SHIFT                     30
2672 +# define VC4_TEX_P2_PTYPE_IGNORED                  0
2673 +# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE          1
2674 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS   2
2675 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS      3
2676 +
2677 +/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
2678 +#define VC4_TEX_P2_CMST_MASK                       VC4_MASK(29, 12)
2679 +#define VC4_TEX_P2_CMST_SHIFT                      12
2680 +#define VC4_TEX_P2_BSLOD_MASK                      VC4_MASK(0, 0)
2681 +#define VC4_TEX_P2_BSLOD_SHIFT                     0
2682 +
2683 +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
2684 +#define VC4_TEX_P2_CHEIGHT_MASK                    VC4_MASK(22, 12)
2685 +#define VC4_TEX_P2_CHEIGHT_SHIFT                   12
2686 +#define VC4_TEX_P2_CWIDTH_MASK                     VC4_MASK(10, 0)
2687 +#define VC4_TEX_P2_CWIDTH_SHIFT                    0
2688 +
2689 +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
2690 +#define VC4_TEX_P2_CYOFF_MASK                      VC4_MASK(22, 12)
2691 +#define VC4_TEX_P2_CYOFF_SHIFT                     12
2692 +#define VC4_TEX_P2_CXOFF_MASK                      VC4_MASK(10, 0)
2693 +#define VC4_TEX_P2_CXOFF_SHIFT                     0
2694 +
2695 +#endif /* VC4_PACKET_H */
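
To show how the texture parameter fields above are meant to be consumed (an editorial sketch; it assumes VC4_GET_FIELD(word, FIELD) from vc4_regs.h extracts a field using FIELD_MASK/FIELD_SHIFT), decoding the texture type and dimensions from the first two parameter words p0 and p1 would look roughly like:

	uint32_t type   = VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
			  (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4);
	uint32_t width  = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
	uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
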
2696 --- a/drivers/gpu/drm/vc4/vc4_plane.c
2697 +++ b/drivers/gpu/drm/vc4/vc4_plane.c
2698 @@ -29,6 +29,14 @@ struct vc4_plane_state {
2699         u32 *dlist;
2700         u32 dlist_size; /* Number of dwords in allocated for the display list */
2701         u32 dlist_count; /* Number of used dwords in the display list. */
2702 +
2703 +       /* Offset in the dlist to pointer word 0. */
2704 +       u32 pw0_offset;
2705 +
2706 +       /* Where in the hardware dlist the plane's dlist was last
2707 +        * written at vc4_crtc_atomic_flush() time.
2708 +        */
2709 +       u32 *hw_dlist;
2710  };
2711  
2712  static inline struct vc4_plane_state *
2713 @@ -207,6 +215,8 @@ static int vc4_plane_mode_set(struct drm
2714         /* Position Word 3: Context.  Written by the HVS. */
2715         vc4_dlist_write(vc4_state, 0xc0c0c0c0);
2716  
2717 +       vc4_state->pw0_offset = vc4_state->dlist_count;
2718 +
2719         /* Pointer Word 0: RGB / Y Pointer */
2720         vc4_dlist_write(vc4_state, bo->paddr + offset);
2721  
2722 @@ -258,6 +268,8 @@ u32 vc4_plane_write_dlist(struct drm_pla
2723         struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
2724         int i;
2725  
2726 +       vc4_state->hw_dlist = dlist;
2727 +
2728         /* Can't memcpy_toio() because it needs to be 32-bit writes. */
2729         for (i = 0; i < vc4_state->dlist_count; i++)
2730                 writel(vc4_state->dlist[i], &dlist[i]);
2731 @@ -272,6 +284,34 @@ u32 vc4_plane_dlist_size(struct drm_plan
2732         return vc4_state->dlist_count;
2733  }
2734  
2735 +/* Updates the plane to immediately (well, once the FIFO needs
2736 + * refilling) scan out from a new framebuffer.
2737 + */
2738 +void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
2739 +{
2740 +       struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
2741 +       struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
2742 +       uint32_t addr;
2743 +
2744 +       /* We're skipping the address adjustment for negative origin,
2745 +        * because this is only called on the primary plane.
2746 +        */
2747 +       WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
2748 +       addr = bo->paddr + fb->offsets[0];
2749 +
2750 +       /* Write the new address into the hardware immediately.  The
2751 +        * scanout will start from this address as soon as the FIFO
2752 +        * needs to refill with pixels.
2753 +        */
2754 +       writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
2755 +
2756 +       /* Also update the CPU-side dlist copy, so that any later
2757 +        * atomic updates that don't do a new modeset on our plane
2758 +        * also use our updated address.
2759 +        */
2760 +       vc4_state->dlist[vc4_state->pw0_offset] = addr;
2761 +}
2762 +
2763  static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
2764         .prepare_fb = NULL,
2765         .cleanup_fb = NULL,
2766 --- /dev/null
2767 +++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
2768 @@ -0,0 +1,268 @@
2769 +/*
2770 + * Copyright © 2014 Broadcom
2771 + *
2772 + * Permission is hereby granted, free of charge, to any person obtaining a
2773 + * copy of this software and associated documentation files (the "Software"),
2774 + * to deal in the Software without restriction, including without limitation
2775 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2776 + * and/or sell copies of the Software, and to permit persons to whom the
2777 + * Software is furnished to do so, subject to the following conditions:
2778 + *
2779 + * The above copyright notice and this permission notice (including the next
2780 + * paragraph) shall be included in all copies or substantial portions of the
2781 + * Software.
2782 + *
2783 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2784 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2785 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
2786 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2787 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2788 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2789 + * IN THE SOFTWARE.
2790 + */
2791 +
2792 +#ifndef VC4_QPU_DEFINES_H
2793 +#define VC4_QPU_DEFINES_H
2794 +
2795 +enum qpu_op_add {
2796 +        QPU_A_NOP,
2797 +        QPU_A_FADD,
2798 +        QPU_A_FSUB,
2799 +        QPU_A_FMIN,
2800 +        QPU_A_FMAX,
2801 +        QPU_A_FMINABS,
2802 +        QPU_A_FMAXABS,
2803 +        QPU_A_FTOI,
2804 +        QPU_A_ITOF,
2805 +        QPU_A_ADD = 12,
2806 +        QPU_A_SUB,
2807 +        QPU_A_SHR,
2808 +        QPU_A_ASR,
2809 +        QPU_A_ROR,
2810 +        QPU_A_SHL,
2811 +        QPU_A_MIN,
2812 +        QPU_A_MAX,
2813 +        QPU_A_AND,
2814 +        QPU_A_OR,
2815 +        QPU_A_XOR,
2816 +        QPU_A_NOT,
2817 +        QPU_A_CLZ,
2818 +        QPU_A_V8ADDS = 30,
2819 +        QPU_A_V8SUBS = 31,
2820 +};
2821 +
2822 +enum qpu_op_mul {
2823 +        QPU_M_NOP,
2824 +        QPU_M_FMUL,
2825 +        QPU_M_MUL24,
2826 +        QPU_M_V8MULD,
2827 +        QPU_M_V8MIN,
2828 +        QPU_M_V8MAX,
2829 +        QPU_M_V8ADDS,
2830 +        QPU_M_V8SUBS,
2831 +};
2832 +
2833 +enum qpu_raddr {
2834 +        QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
2835 +        /* 0-31 are the plain regfile a or b fields */
2836 +        QPU_R_UNIF = 32,
2837 +        QPU_R_VARY = 35,
2838 +        QPU_R_ELEM_QPU = 38,
2839 +        QPU_R_NOP,
2840 +        QPU_R_XY_PIXEL_COORD = 41,
2841 +        QPU_R_MS_REV_FLAGS = 41,
2842 +        QPU_R_VPM = 48,
2843 +        QPU_R_VPM_LD_BUSY,
2844 +        QPU_R_VPM_LD_WAIT,
2845 +        QPU_R_MUTEX_ACQUIRE,
2846 +};
2847 +
2848 +enum qpu_waddr {
2849 +        /* 0-31 are the plain regfile a or b fields */
2850 +        QPU_W_ACC0 = 32, /* aka r0 */
2851 +        QPU_W_ACC1,
2852 +        QPU_W_ACC2,
2853 +        QPU_W_ACC3,
2854 +        QPU_W_TMU_NOSWAP,
2855 +        QPU_W_ACC5,
2856 +        QPU_W_HOST_INT,
2857 +        QPU_W_NOP,
2858 +        QPU_W_UNIFORMS_ADDRESS,
2859 +        QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
2860 +        QPU_W_MS_FLAGS = 42,
2861 +        QPU_W_REV_FLAG = 42,
2862 +        QPU_W_TLB_STENCIL_SETUP = 43,
2863 +        QPU_W_TLB_Z,
2864 +        QPU_W_TLB_COLOR_MS,
2865 +        QPU_W_TLB_COLOR_ALL,
2866 +        QPU_W_TLB_ALPHA_MASK,
2867 +        QPU_W_VPM,
2868 +        QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
2869 +        QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
2870 +        QPU_W_MUTEX_RELEASE,
2871 +        QPU_W_SFU_RECIP,
2872 +        QPU_W_SFU_RECIPSQRT,
2873 +        QPU_W_SFU_EXP,
2874 +        QPU_W_SFU_LOG,
2875 +        QPU_W_TMU0_S,
2876 +        QPU_W_TMU0_T,
2877 +        QPU_W_TMU0_R,
2878 +        QPU_W_TMU0_B,
2879 +        QPU_W_TMU1_S,
2880 +        QPU_W_TMU1_T,
2881 +        QPU_W_TMU1_R,
2882 +        QPU_W_TMU1_B,
2883 +};
2884 +
2885 +enum qpu_sig_bits {
2886 +        QPU_SIG_SW_BREAKPOINT,
2887 +        QPU_SIG_NONE,
2888 +        QPU_SIG_THREAD_SWITCH,
2889 +        QPU_SIG_PROG_END,
2890 +        QPU_SIG_WAIT_FOR_SCOREBOARD,
2891 +        QPU_SIG_SCOREBOARD_UNLOCK,
2892 +        QPU_SIG_LAST_THREAD_SWITCH,
2893 +        QPU_SIG_COVERAGE_LOAD,
2894 +        QPU_SIG_COLOR_LOAD,
2895 +        QPU_SIG_COLOR_LOAD_END,
2896 +        QPU_SIG_LOAD_TMU0,
2897 +        QPU_SIG_LOAD_TMU1,
2898 +        QPU_SIG_ALPHA_MASK_LOAD,
2899 +        QPU_SIG_SMALL_IMM,
2900 +        QPU_SIG_LOAD_IMM,
2901 +        QPU_SIG_BRANCH
2902 +};
2903 +
2904 +enum qpu_mux {
2905 +        /* hardware mux values */
2906 +        QPU_MUX_R0,
2907 +        QPU_MUX_R1,
2908 +        QPU_MUX_R2,
2909 +        QPU_MUX_R3,
2910 +        QPU_MUX_R4,
2911 +        QPU_MUX_R5,
2912 +        QPU_MUX_A,
2913 +        QPU_MUX_B,
2914 +
2915 +        /* non-hardware mux values */
2916 +        QPU_MUX_IMM,
2917 +};
2918 +
2919 +enum qpu_cond {
2920 +        QPU_COND_NEVER,
2921 +        QPU_COND_ALWAYS,
2922 +        QPU_COND_ZS,
2923 +        QPU_COND_ZC,
2924 +        QPU_COND_NS,
2925 +        QPU_COND_NC,
2926 +        QPU_COND_CS,
2927 +        QPU_COND_CC,
2928 +};
2929 +
2930 +enum qpu_pack_mul {
2931 +        QPU_PACK_MUL_NOP,
2932 +        QPU_PACK_MUL_8888 = 3, /* replicated to each 8 bits of the 32-bit dst. */
2933 +        QPU_PACK_MUL_8A,
2934 +        QPU_PACK_MUL_8B,
2935 +        QPU_PACK_MUL_8C,
2936 +        QPU_PACK_MUL_8D,
2937 +};
2938 +
2939 +enum qpu_pack_a {
2940 +        QPU_PACK_A_NOP,
2941 +        /* convert to 16 bit float if float input, or to int16. */
2942 +        QPU_PACK_A_16A,
2943 +        QPU_PACK_A_16B,
2944 +        /* replicated to each 8 bits of the 32-bit dst. */
2945 +        QPU_PACK_A_8888,
2946 +        /* Convert to 8-bit unsigned int. */
2947 +        QPU_PACK_A_8A,
2948 +        QPU_PACK_A_8B,
2949 +        QPU_PACK_A_8C,
2950 +        QPU_PACK_A_8D,
2951 +
2952 +        /* Saturating variants of the previous instructions. */
2953 +        QPU_PACK_A_32_SAT, /* int-only */
2954 +        QPU_PACK_A_16A_SAT, /* int or float */
2955 +        QPU_PACK_A_16B_SAT,
2956 +        QPU_PACK_A_8888_SAT,
2957 +        QPU_PACK_A_8A_SAT,
2958 +        QPU_PACK_A_8B_SAT,
2959 +        QPU_PACK_A_8C_SAT,
2960 +        QPU_PACK_A_8D_SAT,
2961 +};
2962 +
2963 +enum qpu_unpack_r4 {
2964 +        QPU_UNPACK_R4_NOP,
2965 +        QPU_UNPACK_R4_F16A_TO_F32,
2966 +        QPU_UNPACK_R4_F16B_TO_F32,
2967 +        QPU_UNPACK_R4_8D_REP,
2968 +        QPU_UNPACK_R4_8A,
2969 +        QPU_UNPACK_R4_8B,
2970 +        QPU_UNPACK_R4_8C,
2971 +        QPU_UNPACK_R4_8D,
2972 +};
2973 +
2974 +#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
2975 +/* Using the GNU statement expression extension */
2976 +#define QPU_SET_FIELD(value, field)                                       \
2977 +        ({                                                                \
2978 +                uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
2979 +                assert((fieldval & ~ field ## _MASK) == 0);               \
2980 +                fieldval & field ## _MASK;                                \
2981 +         })
2982 +
2983 +#define QPU_GET_FIELD(word, field) ((uint32_t)(((word)  & field ## _MASK) >> field ## _SHIFT))
2984 +
2985 +#define QPU_SIG_SHIFT                   60
2986 +#define QPU_SIG_MASK                    QPU_MASK(63, 60)
2987 +
2988 +#define QPU_UNPACK_SHIFT                57
2989 +#define QPU_UNPACK_MASK                 QPU_MASK(59, 57)
2990 +
2991 +/**
2992 + * If set, the pack field means PACK_MUL or R4 packing, instead of normal
2993 + * regfile a packing.
2994 + */
2995 +#define QPU_PM                          ((uint64_t)1 << 56)
2996 +
2997 +#define QPU_PACK_SHIFT                  52
2998 +#define QPU_PACK_MASK                   QPU_MASK(55, 52)
2999 +
3000 +#define QPU_COND_ADD_SHIFT              49
3001 +#define QPU_COND_ADD_MASK               QPU_MASK(51, 49)
3002 +#define QPU_COND_MUL_SHIFT              46
3003 +#define QPU_COND_MUL_MASK               QPU_MASK(48, 46)
3004 +
3005 +#define QPU_SF                          ((uint64_t)1 << 45)
3006 +
3007 +#define QPU_WADDR_ADD_SHIFT             38
3008 +#define QPU_WADDR_ADD_MASK              QPU_MASK(43, 38)
3009 +#define QPU_WADDR_MUL_SHIFT             32
3010 +#define QPU_WADDR_MUL_MASK              QPU_MASK(37, 32)
3011 +
3012 +#define QPU_OP_MUL_SHIFT                29
3013 +#define QPU_OP_MUL_MASK                 QPU_MASK(31, 29)
3014 +
3015 +#define QPU_RADDR_A_SHIFT               18
3016 +#define QPU_RADDR_A_MASK                QPU_MASK(23, 18)
3017 +#define QPU_RADDR_B_SHIFT               12
3018 +#define QPU_RADDR_B_MASK                QPU_MASK(17, 12)
3019 +#define QPU_SMALL_IMM_SHIFT             12
3020 +#define QPU_SMALL_IMM_MASK              QPU_MASK(17, 12)
3021 +
3022 +#define QPU_ADD_A_SHIFT                 9
3023 +#define QPU_ADD_A_MASK                  QPU_MASK(11, 9)
3024 +#define QPU_ADD_B_SHIFT                 6
3025 +#define QPU_ADD_B_MASK                  QPU_MASK(8, 6)
3026 +#define QPU_MUL_A_SHIFT                 3
3027 +#define QPU_MUL_A_MASK                  QPU_MASK(5, 3)
3028 +#define QPU_MUL_B_SHIFT                 0
3029 +#define QPU_MUL_B_MASK                  QPU_MASK(2, 0)
3030 +
3031 +#define QPU_WS                          ((uint64_t)1 << 44)
3032 +
3033 +#define QPU_OP_ADD_SHIFT                24
3034 +#define QPU_OP_ADD_MASK                 QPU_MASK(28, 24)
3035 +
3036 +#endif /* VC4_QPU_DEFINES_H */
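
As a quick illustration of the field helpers above (a userspace-style sketch, not part of the patch, and the instruction value is made up), the shift/mask pairs let you assemble and pick apart a 64-bit QPU instruction word:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include "vc4_qpu_defines.h"

	int main(void)
	{
		uint64_t inst = 0;

		/* Assemble a minimal add-pipe instruction: no signal, "always"
		 * condition, FADD opcode.  QPU_SET_FIELD() asserts that the
		 * value fits within the field's mask.
		 */
		inst |= QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG);
		inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_ADD);
		inst |= QPU_SET_FIELD(QPU_A_FADD, QPU_OP_ADD);

		/* And pull the fields back out. */
		printf("sig=%u cond_add=%u op_add=%u\n",
		       QPU_GET_FIELD(inst, QPU_SIG),
		       QPU_GET_FIELD(inst, QPU_COND_ADD),
		       QPU_GET_FIELD(inst, QPU_OP_ADD));
		return 0;
	}
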
3037 --- /dev/null
3038 +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
3039 @@ -0,0 +1,448 @@
3040 +/*
3041 + * Copyright © 2014-2015 Broadcom
3042 + *
3043 + * Permission is hereby granted, free of charge, to any person obtaining a
3044 + * copy of this software and associated documentation files (the "Software"),
3045 + * to deal in the Software without restriction, including without limitation
3046 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3047 + * and/or sell copies of the Software, and to permit persons to whom the
3048 + * Software is furnished to do so, subject to the following conditions:
3049 + *
3050 + * The above copyright notice and this permission notice (including the next
3051 + * paragraph) shall be included in all copies or substantial portions of the
3052 + * Software.
3053 + *
3054 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3055 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3056 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
3057 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3058 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3059 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
3060 + * IN THE SOFTWARE.
3061 + */
3062 +
3063 +/**
3064 + * DOC: Render command list generation
3065 + *
3066 + * In the VC4 driver, render command list generation is performed by the
3067 + * kernel instead of userspace.  We do this because validating a
3068 + * user-submitted command list is hard to get right and has high CPU overhead,
3069 + * while the number of valid configurations for render command lists is
3070 + * actually fairly low.
3071 + */
3072 +
3073 +#include "uapi/drm/vc4_drm.h"
3074 +#include "vc4_drv.h"
3075 +#include "vc4_packet.h"
3076 +
3077 +struct vc4_rcl_setup {
3078 +       struct drm_gem_cma_object *color_read;
3079 +       struct drm_gem_cma_object *color_ms_write;
3080 +       struct drm_gem_cma_object *zs_read;
3081 +       struct drm_gem_cma_object *zs_write;
3082 +
3083 +       struct drm_gem_cma_object *rcl;
3084 +       u32 next_offset;
3085 +};
3086 +
3087 +static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
3088 +{
3089 +       *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
3090 +       setup->next_offset += 1;
3091 +}
3092 +
3093 +static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
3094 +{
3095 +       *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
3096 +       setup->next_offset += 2;
3097 +}
3098 +
3099 +static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
3100 +{
3101 +       *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
3102 +       setup->next_offset += 4;
3103 +}
3104 +
3105 +
3106 +/*
3107 + * Emits a no-op STORE_TILE_BUFFER_GENERAL.
3108 + *
3109 + * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
3110 + * some sort before another load is triggered.
3111 + */
3112 +static void vc4_store_before_load(struct vc4_rcl_setup *setup)
3113 +{
3114 +       rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3115 +       rcl_u16(setup,
3116 +               VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
3117 +                             VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
3118 +               VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
3119 +               VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
3120 +               VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
3121 +       rcl_u32(setup, 0); /* no address, since we're in None mode */
3122 +}
3123 +
3124 +/*
3125 + * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
3126 + *
3127 + * The tile coordinates packet triggers a pending load if there is one, is
3128 + * used for clipping during rendering, and determines where loads/stores
3129 + * happen relative to their base address.
3130 + */
3131 +static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
3132 +                                uint32_t x, uint32_t y)
3133 +{
3134 +       rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
3135 +       rcl_u8(setup, x);
3136 +       rcl_u8(setup, y);
3137 +}
3138 +
3139 +static void emit_tile(struct vc4_exec_info *exec,
3140 +                     struct vc4_rcl_setup *setup,
3141 +                     uint8_t x, uint8_t y, bool first, bool last)
3142 +{
3143 +       struct drm_vc4_submit_cl *args = exec->args;
3144 +       bool has_bin = args->bin_cl_size != 0;
3145 +
3146 +       /* Note that the load doesn't actually occur until the
3147 +        * tile coords packet is processed, and only one load
3148 +        * may be outstanding at a time.
3149 +        */
3150 +       if (setup->color_read) {
3151 +               rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
3152 +               rcl_u16(setup, args->color_read.bits);
3153 +               rcl_u32(setup,
3154 +                       setup->color_read->paddr + args->color_read.offset);
3155 +       }
3156 +
3157 +       if (setup->zs_read) {
3158 +               if (setup->color_read) {
3159 +                       /* Exec previous load. */
3160 +                       vc4_tile_coordinates(setup, x, y);
3161 +                       vc4_store_before_load(setup);
3162 +               }
3163 +
3164 +               rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
3165 +               rcl_u16(setup, args->zs_read.bits);
3166 +               rcl_u32(setup, setup->zs_read->paddr + args->zs_read.offset);
3167 +       }
3168 +
3169 +       /* Clipping depends on tile coordinates having been
3170 +        * emitted, so we always need one here.
3171 +        */
3172 +       vc4_tile_coordinates(setup, x, y);
3173 +
3174 +       /* Wait for the binner before jumping to the first
3175 +        * tile's lists.
3176 +        */
3177 +       if (first && has_bin)
3178 +               rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
3179 +
3180 +       if (has_bin) {
3181 +               rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
3182 +               rcl_u32(setup, (exec->tile_bo->paddr +
3183 +                               exec->tile_alloc_offset +
3184 +                               (y * exec->bin_tiles_x + x) * 32));
3185 +       }
3186 +
3187 +       if (setup->zs_write) {
3188 +               rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3189 +               rcl_u16(setup, args->zs_write.bits |
3190 +                       (setup->color_ms_write ?
3191 +                        VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR : 0));
3192 +               rcl_u32(setup,
3193 +                       (setup->zs_write->paddr + args->zs_write.offset) |
3194 +                       ((last && !setup->color_ms_write) ?
3195 +                        VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
3196 +       }
3197 +
3198 +       if (setup->color_ms_write) {
3199 +               if (setup->zs_write) {
3200 +                       /* Reset after previous store */
3201 +                       vc4_tile_coordinates(setup, x, y);
3202 +               }
3203 +
3204 +               if (last)
3205 +                       rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
3206 +               else
3207 +                       rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
3208 +       }
3209 +}
3210 +
3211 +static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
3212 +                            struct vc4_rcl_setup *setup)
3213 +{
3214 +       struct drm_vc4_submit_cl *args = exec->args;
3215 +       bool has_bin = args->bin_cl_size != 0;
3216 +       uint8_t min_x_tile = args->min_x_tile;
3217 +       uint8_t min_y_tile = args->min_y_tile;
3218 +       uint8_t max_x_tile = args->max_x_tile;
3219 +       uint8_t max_y_tile = args->max_y_tile;
3220 +       uint8_t xtiles = max_x_tile - min_x_tile + 1;
3221 +       uint8_t ytiles = max_y_tile - min_y_tile + 1;
3222 +       uint8_t x, y;
3223 +       uint32_t size, loop_body_size;
3224 +
3225 +       size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
3226 +       loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
3227 +
3228 +       if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
3229 +               size += VC4_PACKET_CLEAR_COLORS_SIZE +
3230 +                       VC4_PACKET_TILE_COORDINATES_SIZE +
3231 +                       VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3232 +       }
3233 +
3234 +       if (setup->color_read) {
3235 +               loop_body_size += (VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE);
3236 +       }
3237 +       if (setup->zs_read) {
3238 +               if (setup->color_read) {
3239 +                       loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
3240 +                       loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3241 +               }
3242 +               loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
3243 +       }
3244 +
3245 +       if (has_bin) {
3246 +               size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
3247 +               loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
3248 +       }
3249 +
3250 +       if (setup->zs_write)
3251 +               loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
3252 +       if (setup->color_ms_write) {
3253 +               if (setup->zs_write)
3254 +                       loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
3255 +               loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
3256 +       }
3257 +       size += xtiles * ytiles * loop_body_size;
3258 +
3259 +       setup->rcl = &vc4_bo_create(dev, size)->base;
3260 +       if (!setup->rcl)
3261 +               return -ENOMEM;
3262 +       list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
3263 +                     &exec->unref_list);
3264 +
3265 +       rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
3266 +       rcl_u32(setup,
3267 +               (setup->color_ms_write ?
3268 +                (setup->color_ms_write->paddr +
3269 +                 args->color_ms_write.offset) :
3270 +                0));
3271 +       rcl_u16(setup, args->width);
3272 +       rcl_u16(setup, args->height);
3273 +       rcl_u16(setup, args->color_ms_write.bits);
3274 +
3275 +       /* The tile buffer gets cleared when the previous tile is stored.  If
3276 +        * the clear values changed between frames, then the tile buffer has
3277 +        * stale clear values in it, so we have to do a store in None mode (no
3278 +        * writes) so that we trigger the tile buffer clear.
3279 +        */
3280 +       if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
3281 +               rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
3282 +               rcl_u32(setup, args->clear_color[0]);
3283 +               rcl_u32(setup, args->clear_color[1]);
3284 +               rcl_u32(setup, args->clear_z);
3285 +               rcl_u8(setup, args->clear_s);
3286 +
3287 +               vc4_tile_coordinates(setup, 0, 0);
3288 +
3289 +               rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
3290 +               rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
3291 +               rcl_u32(setup, 0); /* no address, since we're in None mode */
3292 +       }
3293 +
3294 +       for (y = min_y_tile; y <= max_y_tile; y++) {
3295 +               for (x = min_x_tile; x <= max_x_tile; x++) {
3296 +                       bool first = (x == min_x_tile && y == min_y_tile);
3297 +                       bool last = (x == max_x_tile && y == max_y_tile);
3298 +                       emit_tile(exec, setup, x, y, first, last);
3299 +               }
3300 +       }
3301 +
3302 +       BUG_ON(setup->next_offset != size);
3303 +       exec->ct1ca = setup->rcl->paddr;
3304 +       exec->ct1ea = setup->rcl->paddr + setup->next_offset;
3305 +
3306 +       return 0;
3307 +}
3308 +
3309 +static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
3310 +                                struct drm_gem_cma_object **obj,
3311 +                                struct drm_vc4_submit_rcl_surface *surf)
3312 +{
3313 +       uint8_t tiling = VC4_GET_FIELD(surf->bits,
3314 +                                      VC4_LOADSTORE_TILE_BUFFER_TILING);
3315 +       uint8_t buffer = VC4_GET_FIELD(surf->bits,
3316 +                                      VC4_LOADSTORE_TILE_BUFFER_BUFFER);
3317 +       uint8_t format = VC4_GET_FIELD(surf->bits,
3318 +                                      VC4_LOADSTORE_TILE_BUFFER_FORMAT);
3319 +       int cpp;
3320 +
3321 +       if (surf->pad != 0) {
3322 +               DRM_ERROR("Padding unset\n");
3323 +               return -EINVAL;
3324 +       }
3325 +
3326 +       if (surf->hindex == ~0)
3327 +               return 0;
3328 +
3329 +       if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
3330 +               return -EINVAL;
3331 +
3332 +       if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
3333 +                          VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
3334 +                          VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
3335 +               DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
3336 +                         surf->bits);
3337 +               return -EINVAL;
3338 +       }
3339 +
3340 +       if (tiling > VC4_TILING_FORMAT_LT) {
3341 +               DRM_ERROR("Bad tiling format\n");
3342 +               return -EINVAL;
3343 +       }
3344 +
3345 +       if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
3346 +               if (format != 0) {
3347 +                       DRM_ERROR("No color format should be set for ZS\n");
3348 +                       return -EINVAL;
3349 +               }
3350 +               cpp = 4;
3351 +       } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
3352 +               switch (format) {
3353 +               case VC4_LOADSTORE_TILE_BUFFER_BGR565:
3354 +               case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
3355 +                       cpp = 2;
3356 +                       break;
3357 +               case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
3358 +                       cpp = 4;
3359 +                       break;
3360 +               default:
3361 +                       DRM_ERROR("Bad tile buffer format\n");
3362 +                       return -EINVAL;
3363 +               }
3364 +       } else {
3365 +               DRM_ERROR("Bad load/store buffer %d.\n", buffer);
3366 +               return -EINVAL;
3367 +       }
3368 +
3369 +       if (surf->offset & 0xf) {
3370 +               DRM_ERROR("load/store buffer must be 16-byte aligned.\n");
3371 +               return -EINVAL;
3372 +       }
3373 +
3374 +       if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
3375 +                               exec->args->width, exec->args->height, cpp)) {
3376 +               return -EINVAL;
3377 +       }
3378 +
3379 +       return 0;
3380 +}
3381 +
3382 +static int
3383 +vc4_rcl_ms_surface_setup(struct vc4_exec_info *exec,
3384 +                        struct drm_gem_cma_object **obj,
3385 +                        struct drm_vc4_submit_rcl_surface *surf)
3386 +{
3387 +       uint8_t tiling = VC4_GET_FIELD(surf->bits,
3388 +                                      VC4_RENDER_CONFIG_MEMORY_FORMAT);
3389 +       uint8_t format = VC4_GET_FIELD(surf->bits,
3390 +                                      VC4_RENDER_CONFIG_FORMAT);
3391 +       int cpp;
3392 +
3393 +       if (surf->pad != 0) {
3394 +               DRM_ERROR("Padding unset\n");
3395 +               return -EINVAL;
3396 +       }
3397 +
3398 +       if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
3399 +                          VC4_RENDER_CONFIG_FORMAT_MASK)) {
3400 +               DRM_ERROR("Unknown bits in render config: 0x%04x\n",
3401 +                         surf->bits);
3402 +               return -EINVAL;
3403 +       }
3404 +
3405 +       if (surf->hindex == ~0)
3406 +               return 0;
3407 +
3408 +       if (!vc4_use_bo(exec, surf->hindex, VC4_MODE_RENDER, obj))
3409 +               return -EINVAL;
3410 +
3411 +       if (tiling > VC4_TILING_FORMAT_LT) {
3412 +               DRM_ERROR("Bad tiling format\n");
3413 +               return -EINVAL;
3414 +       }
3415 +
3416 +       switch (format) {
3417 +       case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
3418 +       case VC4_RENDER_CONFIG_FORMAT_BGR565:
3419 +               cpp = 2;
3420 +               break;
3421 +       case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
3422 +               cpp = 4;
3423 +               break;
3424 +       default:
3425 +               DRM_ERROR("Bad tile buffer format\n");
3426 +               return -EINVAL;
3427 +       }
3428 +
3429 +       if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
3430 +                               exec->args->width, exec->args->height, cpp)) {
3431 +               return -EINVAL;
3432 +       }
3433 +
3434 +       return 0;
3435 +}
3436 +
3437 +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
3438 +{
3439 +       struct vc4_rcl_setup setup = {0};
3440 +       struct drm_vc4_submit_cl *args = exec->args;
3441 +       bool has_bin = args->bin_cl_size != 0;
3442 +       int ret;
3443 +
3444 +       if (args->min_x_tile > args->max_x_tile ||
3445 +           args->min_y_tile > args->max_y_tile) {
3446 +               DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
3447 +                         args->min_x_tile, args->min_y_tile,
3448 +                         args->max_x_tile, args->max_y_tile);
3449 +               return -EINVAL;
3450 +       }
3451 +
3452 +       if (has_bin &&
3453 +           (args->max_x_tile > exec->bin_tiles_x ||
3454 +            args->max_y_tile > exec->bin_tiles_y)) {
3455 +               DRM_ERROR("Render tiles (%d,%d) outside of bin config (%d,%d)\n",
3456 +                         args->max_x_tile, args->max_y_tile,
3457 +                         exec->bin_tiles_x, exec->bin_tiles_y);
3458 +               return -EINVAL;
3459 +       }
3460 +
3461 +       ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
3462 +       if (ret)
3463 +               return ret;
3464 +
3465 +       ret = vc4_rcl_ms_surface_setup(exec, &setup.color_ms_write,
3466 +                                      &args->color_ms_write);
3467 +       if (ret)
3468 +               return ret;
3469 +
3470 +       ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
3471 +       if (ret)
3472 +               return ret;
3473 +
3474 +       ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
3475 +       if (ret)
3476 +               return ret;
3477 +
3478 +       /* We shouldn't even have the job submitted to us if there's no
3479 +        * surface to write out.
3480 +        */
3481 +       if (!setup.color_ms_write && !setup.zs_write) {
3482 +               DRM_ERROR("RCL requires color or Z/S write\n");
3483 +               return -EINVAL;
3484 +       }
3485 +
3486 +       return vc4_create_rcl_bo(dev, exec, &setup);
3487 +}
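
To make the size bookkeeping in vc4_create_rcl_bo() concrete, here is a stand-alone sketch (not part of the patch) of the same arithmetic for a hypothetical job that has a binner list and only a color_ms_write surface. The per-packet byte counts are read off the rcl_u8()/rcl_u16()/rcl_u32() calls above; the 64x64 tile size is an assumption made for this example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 1920x1080 with assumed 64x64 tiles -> 30x17 tiles. */
		uint32_t xtiles = 30, ytiles = 17;

		uint32_t config   = 1 + 4 + 2 + 2 + 2; /* TILE_RENDERING_MODE_CONFIG */
		uint32_t wait_sem = 1;                  /* WAIT_ON_SEMAPHORE (has_bin) */
		uint32_t coords   = 1 + 1 + 1;          /* TILE_COORDINATES */
		uint32_t branch   = 1 + 4;              /* BRANCH_TO_SUB_LIST (has_bin) */
		uint32_t store_ms = 1;                  /* STORE_MS_TILE_BUFFER[_AND_EOF] */

		uint32_t loop_body = coords + branch + store_ms;
		uint32_t size = config + wait_sem + xtiles * ytiles * loop_body;

		printf("RCL BO size: %u bytes\n", size); /* 12 + 510 * 9 = 4602 */
		return 0;
	}
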
3488 --- /dev/null
3489 +++ b/drivers/gpu/drm/vc4/vc4_trace.h
3490 @@ -0,0 +1,63 @@
3491 +/*
3492 + * Copyright (C) 2015 Broadcom
3493 + *
3494 + * This program is free software; you can redistribute it and/or modify
3495 + * it under the terms of the GNU General Public License version 2 as
3496 + * published by the Free Software Foundation.
3497 + */
3498 +
3499 +#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
3500 +#define _VC4_TRACE_H_
3501 +
3502 +#include <linux/stringify.h>
3503 +#include <linux/types.h>
3504 +#include <linux/tracepoint.h>
3505 +
3506 +#undef TRACE_SYSTEM
3507 +#define TRACE_SYSTEM vc4
3508 +#define TRACE_INCLUDE_FILE vc4_trace
3509 +
3510 +TRACE_EVENT(vc4_wait_for_seqno_begin,
3511 +           TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
3512 +           TP_ARGS(dev, seqno, timeout),
3513 +
3514 +           TP_STRUCT__entry(
3515 +                            __field(u32, dev)
3516 +                            __field(u64, seqno)
3517 +                            __field(u64, timeout)
3518 +                            ),
3519 +
3520 +           TP_fast_assign(
3521 +                          __entry->dev = dev->primary->index;
3522 +                          __entry->seqno = seqno;
3523 +                          __entry->timeout = timeout;
3524 +                          ),
3525 +
3526 +           TP_printk("dev=%u, seqno=%llu, timeout=%llu",
3527 +                     __entry->dev, __entry->seqno, __entry->timeout)
3528 +);
3529 +
3530 +TRACE_EVENT(vc4_wait_for_seqno_end,
3531 +           TP_PROTO(struct drm_device *dev, uint64_t seqno),
3532 +           TP_ARGS(dev, seqno),
3533 +
3534 +           TP_STRUCT__entry(
3535 +                            __field(u32, dev)
3536 +                            __field(u64, seqno)
3537 +                            ),
3538 +
3539 +           TP_fast_assign(
3540 +                          __entry->dev = dev->primary->index;
3541 +                          __entry->seqno = seqno;
3542 +                          ),
3543 +
3544 +           TP_printk("dev=%u, seqno=%llu",
3545 +                     __entry->dev, __entry->seqno)
3546 +);
3547 +
3548 +#endif /* _VC4_TRACE_H_ */
3549 +
3550 +/* This part must be outside protection */
3551 +#undef TRACE_INCLUDE_PATH
3552 +#define TRACE_INCLUDE_PATH .
3553 +#include <trace/define_trace.h>
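
For orientation (not part of the patch): TRACE_EVENT(vc4_wait_for_seqno_begin, ...) generates a trace_vc4_wait_for_seqno_begin() helper that the driver calls around its seqno wait, roughly as in the simplified sketch below; the real call sites are added elsewhere in this series. Once the module is loaded, the events appear under /sys/kernel/debug/tracing/events/vc4/ and can be enabled from there.

	/* Simplified sketch of a caller; everything except the trace_*()
	 * helpers generated above is a placeholder.
	 */
	static int vc4_wait_for_seqno_sketch(struct drm_device *dev,
					     uint64_t seqno, uint64_t timeout_ns)
	{
		trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);

		/* ... block until the GPU's finished seqno reaches 'seqno',
		 * or until timeout_ns elapses ...
		 */

		trace_vc4_wait_for_seqno_end(dev, seqno);
		return 0;
	}
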
3554 --- /dev/null
3555 +++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
3556 @@ -0,0 +1,14 @@
3557 +/*
3558 + * Copyright (C) 2015 Broadcom
3559 + *
3560 + * This program is free software; you can redistribute it and/or modify
3561 + * it under the terms of the GNU General Public License version 2 as
3562 + * published by the Free Software Foundation.
3563 + */
3564 +
3565 +#include "vc4_drv.h"
3566 +
3567 +#ifndef __CHECKER__
3568 +#define CREATE_TRACE_POINTS
3569 +#include "vc4_trace.h"
3570 +#endif
3571 --- /dev/null
3572 +++ b/drivers/gpu/drm/vc4/vc4_v3d.c
3573 @@ -0,0 +1,268 @@
3574 +/*
3575 + * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3576 + * Copyright (C) 2013 Red Hat
3577 + * Author: Rob Clark <robdclark@gmail.com>
3578 + *
3579 + * This program is free software; you can redistribute it and/or modify it
3580 + * under the terms of the GNU General Public License version 2 as published by
3581 + * the Free Software Foundation.
3582 + *
3583 + * This program is distributed in the hope that it will be useful, but WITHOUT
3584 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3585 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
3586 + * more details.
3587 + *
3588 + * You should have received a copy of the GNU General Public License along with
3589 + * this program.  If not, see <http://www.gnu.org/licenses/>.
3590 + */
3591 +
3592 +#include "linux/component.h"
3593 +#include "soc/bcm2835/raspberrypi-firmware.h"
3594 +#include "vc4_drv.h"
3595 +#include "vc4_regs.h"
3596 +
3597 +#ifdef CONFIG_DEBUG_FS
3598 +#define REGDEF(reg) { reg, #reg }
3599 +static const struct {
3600 +       uint32_t reg;
3601 +       const char *name;
3602 +} vc4_reg_defs[] = {
3603 +       REGDEF(V3D_IDENT0),
3604 +       REGDEF(V3D_IDENT1),
3605 +       REGDEF(V3D_IDENT2),
3606 +       REGDEF(V3D_SCRATCH),
3607 +       REGDEF(V3D_L2CACTL),
3608 +       REGDEF(V3D_SLCACTL),
3609 +       REGDEF(V3D_INTCTL),
3610 +       REGDEF(V3D_INTENA),
3611 +       REGDEF(V3D_INTDIS),
3612 +       REGDEF(V3D_CT0CS),
3613 +       REGDEF(V3D_CT1CS),
3614 +       REGDEF(V3D_CT0EA),
3615 +       REGDEF(V3D_CT1EA),
3616 +       REGDEF(V3D_CT0CA),
3617 +       REGDEF(V3D_CT1CA),
3618 +       REGDEF(V3D_CT00RA0),
3619 +       REGDEF(V3D_CT01RA0),
3620 +       REGDEF(V3D_CT0LC),
3621 +       REGDEF(V3D_CT1LC),
3622 +       REGDEF(V3D_CT0PC),
3623 +       REGDEF(V3D_CT1PC),
3624 +       REGDEF(V3D_PCS),
3625 +       REGDEF(V3D_BFC),
3626 +       REGDEF(V3D_RFC),
3627 +       REGDEF(V3D_BPCA),
3628 +       REGDEF(V3D_BPCS),
3629 +       REGDEF(V3D_BPOA),
3630 +       REGDEF(V3D_BPOS),
3631 +       REGDEF(V3D_BXCF),
3632 +       REGDEF(V3D_SQRSV0),
3633 +       REGDEF(V3D_SQRSV1),
3634 +       REGDEF(V3D_SQCNTL),
3635 +       REGDEF(V3D_SRQPC),
3636 +       REGDEF(V3D_SRQUA),
3637 +       REGDEF(V3D_SRQUL),
3638 +       REGDEF(V3D_SRQCS),
3639 +       REGDEF(V3D_VPACNTL),
3640 +       REGDEF(V3D_VPMBASE),
3641 +       REGDEF(V3D_PCTRC),
3642 +       REGDEF(V3D_PCTRE),
3643 +       REGDEF(V3D_PCTR0),
3644 +       REGDEF(V3D_PCTRS0),
3645 +       REGDEF(V3D_PCTR1),
3646 +       REGDEF(V3D_PCTRS1),
3647 +       REGDEF(V3D_PCTR2),
3648 +       REGDEF(V3D_PCTRS2),
3649 +       REGDEF(V3D_PCTR3),
3650 +       REGDEF(V3D_PCTRS3),
3651 +       REGDEF(V3D_PCTR4),
3652 +       REGDEF(V3D_PCTRS4),
3653 +       REGDEF(V3D_PCTR5),
3654 +       REGDEF(V3D_PCTRS5),
3655 +       REGDEF(V3D_PCTR6),
3656 +       REGDEF(V3D_PCTRS6),
3657 +       REGDEF(V3D_PCTR7),
3658 +       REGDEF(V3D_PCTRS7),
3659 +       REGDEF(V3D_PCTR8),
3660 +       REGDEF(V3D_PCTRS8),
3661 +       REGDEF(V3D_PCTR9),
3662 +       REGDEF(V3D_PCTRS9),
3663 +       REGDEF(V3D_PCTR10),
3664 +       REGDEF(V3D_PCTRS10),
3665 +       REGDEF(V3D_PCTR11),
3666 +       REGDEF(V3D_PCTRS11),
3667 +       REGDEF(V3D_PCTR12),
3668 +       REGDEF(V3D_PCTRS12),
3669 +       REGDEF(V3D_PCTR13),
3670 +       REGDEF(V3D_PCTRS13),
3671 +       REGDEF(V3D_PCTR14),
3672 +       REGDEF(V3D_PCTRS14),
3673 +       REGDEF(V3D_PCTR15),
3674 +       REGDEF(V3D_PCTRS15),
3675 +       REGDEF(V3D_BGE),
3676 +       REGDEF(V3D_FDBGO),
3677 +       REGDEF(V3D_FDBGB),
3678 +       REGDEF(V3D_FDBGR),
3679 +       REGDEF(V3D_FDBGS),
3680 +       REGDEF(V3D_ERRSTAT),
3681 +};
3682 +
3683 +int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
3684 +{
3685 +       struct drm_info_node *node = (struct drm_info_node *) m->private;
3686 +       struct drm_device *dev = node->minor->dev;
3687 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
3688 +       int i;
3689 +
3690 +       for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
3691 +               seq_printf(m, "%s (0x%04x): 0x%08x\n",
3692 +                          vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
3693 +                          V3D_READ(vc4_reg_defs[i].reg));
3694 +       }
3695 +
3696 +       return 0;
3697 +}
3698 +
3699 +int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
3700 +{
3701 +       struct drm_info_node *node = (struct drm_info_node *) m->private;
3702 +       struct drm_device *dev = node->minor->dev;
3703 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
3704 +       uint32_t ident1 = V3D_READ(V3D_IDENT1);
3705 +       uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
3706 +       uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
3707 +       uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
3708 +
3709 +       seq_printf(m, "Revision:   %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
3710 +       seq_printf(m, "Slices:     %d\n", nslc);
3711 +       seq_printf(m, "TMUs:       %d\n", nslc * tups);
3712 +       seq_printf(m, "QPUs:       %d\n", nslc * qups);
3713 +       seq_printf(m, "Semaphores: %d\n", VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
3714 +
3715 +       return 0;
3716 +}
3717 +#endif /* CONFIG_DEBUG_FS */
3718 +
3719 +/*
3720 + * Asks the firmware to turn on power to the V3D engine.
3721 + *
3722 + * This may be doable with just the clocks interface, though this
3723 + * packet does some other register setup from the firmware, too.
3724 + */
3725 +int
3726 +vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
3727 +{
3728 +       u32 packet = on;
3729 +
3730 +       return rpi_firmware_property(vc4->firmware,
3731 +                                    RPI_FIRMWARE_SET_ENABLE_QPU,
3732 +                                    &packet, sizeof(packet));
3733 +}
3734 +
3735 +static void vc4_v3d_init_hw(struct drm_device *dev)
3736 +{
3737 +       struct vc4_dev *vc4 = to_vc4_dev(dev);
3738 +
3739 +       /* Take all the memory that would have been reserved for user
3740 +        * QPU programs, since we don't have an interface for running
3741 +        * them, anyway.
3742 +        */
3743 +       V3D_WRITE(V3D_VPMBASE, 0);
3744 +}
3745 +
3746 +static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
3747 +{
3748 +       struct platform_device *pdev = to_platform_device(dev);
3749 +       struct drm_device *drm = dev_get_drvdata(master);
3750 +       struct vc4_dev *vc4 = to_vc4_dev(drm);
3751 +       struct vc4_v3d *v3d = NULL;
3752 +       int ret;
3753 +
3754 +       v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
3755 +       if (!v3d)
3756 +               return -ENOMEM;
3757 +
3758 +       v3d->pdev = pdev;
3759 +
3760 +       v3d->regs = vc4_ioremap_regs(pdev, 0);
3761 +       if (IS_ERR(v3d->regs))
3762 +               return PTR_ERR(v3d->regs);
3763 +
3764 +       vc4->v3d = v3d;
3765 +
3766 +       ret = vc4_v3d_set_power(vc4, true);
3767 +       if (ret)
3768 +               return ret;
3769 +
3770 +       if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
3771 +               DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
3772 +                         V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
3773 +               return -EINVAL;
3774 +       }
3775 +
3776 +       /* Reset the binner overflow address/size at setup, to be sure
3777 +        * we don't reuse an old one.
3778 +        */
3779 +       V3D_WRITE(V3D_BPOA, 0);
3780 +       V3D_WRITE(V3D_BPOS, 0);
3781 +
3782 +       vc4_v3d_init_hw(drm);
3783 +
3784 +       ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
3785 +       if (ret) {
3786 +               DRM_ERROR("Failed to install IRQ handler\n");
3787 +               return ret;
3788 +       }
3789 +
3790 +       return 0;
3791 +}
3792 +
3793 +static void vc4_v3d_unbind(struct device *dev, struct device *master,
3794 +                           void *data)
3795 +{
3796 +       struct drm_device *drm = dev_get_drvdata(master);
3797 +       struct vc4_dev *vc4 = to_vc4_dev(drm);
3798 +
3799 +       drm_irq_uninstall(drm);
3800 +
3801 +       /* Disable the binner's overflow memory address, so the next
3802 +        * driver probe (if any) doesn't try to reuse our old
3803 +        * allocation.
3804 +        */
3805 +       V3D_WRITE(V3D_BPOA, 0);
3806 +       V3D_WRITE(V3D_BPOS, 0);
3807 +
3808 +       vc4_v3d_set_power(vc4, false);
3809 +
3810 +       vc4->v3d = NULL;
3811 +}
3812 +
3813 +static const struct component_ops vc4_v3d_ops = {
3814 +       .bind   = vc4_v3d_bind,
3815 +       .unbind = vc4_v3d_unbind,
3816 +};
3817 +
3818 +static int vc4_v3d_dev_probe(struct platform_device *pdev)
3819 +{
3820 +       return component_add(&pdev->dev, &vc4_v3d_ops);
3821 +}
3822 +
3823 +static int vc4_v3d_dev_remove(struct platform_device *pdev)
3824 +{
3825 +       component_del(&pdev->dev, &vc4_v3d_ops);
3826 +       return 0;
3827 +}
3828 +
3829 +static const struct of_device_id vc4_v3d_dt_match[] = {
3830 +       { .compatible = "brcm,vc4-v3d" },
3831 +       {}
3832 +};
3833 +
3834 +struct platform_driver vc4_v3d_driver = {
3835 +       .probe = vc4_v3d_dev_probe,
3836 +       .remove = vc4_v3d_dev_remove,
3837 +       .driver = {
3838 +               .name = "vc4_v3d",
3839 +               .of_match_table = vc4_v3d_dt_match,
3840 +       },
3841 +};
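
As context for the register list above (an illustrative sketch, not part of the patch): the ct1ca/ct1ea addresses computed by vc4_get_rcl() are ultimately written to the thread-1 control-list registers to kick off rendering; the real kick-off lives in the GEM/IRQ code added elsewhere in this series.

	/* Hypothetical helper; register names are from the REGDEF list above. */
	static void vc4_kick_render_sketch(struct vc4_dev *vc4,
					   struct vc4_exec_info *exec)
	{
		V3D_WRITE(V3D_CT1CA, exec->ct1ca); /* control list start */
		V3D_WRITE(V3D_CT1EA, exec->ct1ea); /* end address; writing it starts the thread */
	}
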
3842 --- /dev/null
3843 +++ b/drivers/gpu/drm/vc4/vc4_validate.c
3844 @@ -0,0 +1,958 @@
3845 +/*
3846 + * Copyright © 2014 Broadcom
3847 + *
3848 + * Permission is hereby granted, free of charge, to any person obtaining a
3849 + * copy of this software and associated documentation files (the "Software"),
3850 + * to deal in the Software without restriction, including without limitation
3851 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3852 + * and/or sell copies of the Software, and to permit persons to whom the
3853 + * Software is furnished to do so, subject to the following conditions:
3854 + *
3855 + * The above copyright notice and this permission notice (including the next
3856 + * paragraph) shall be included in all copies or substantial portions of the
3857 + * Software.
3858 + *
3859 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3860 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3861 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
3862 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3863 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3864 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
3865 + * IN THE SOFTWARE.
3866 + */
3867 +
3868 +/**
3869 + * Command list validator for VC4.
3870 + *
3871 + * The VC4 has no IOMMU between it and system memory.  So, a user with
3872 + * access to execute command lists could escalate privilege by
3873 + * overwriting system memory (drawing to it as a framebuffer) or
3874 + * reading system memory it shouldn't (reading it as a texture, or
3875 + * uniform data, or vertex data).
3876 + *
3877 + * This validates command lists to ensure that all accesses are within
3878 + * the bounds of the GEM objects referenced.  It explicitly whitelists
3879 + * packets, and looks at the offsets in any address fields to make
3880 + * sure they're constrained within the BOs they reference.
3881 + *
3882 + * Note that because of the validation that's happening anyway, this
3883 + * is where GEM relocation processing happens.
3884 + */
3885 +
3886 +#include "uapi/drm/vc4_drm.h"
3887 +#include "vc4_drv.h"
3888 +#include "vc4_packet.h"
3889 +
3890 +#define VALIDATE_ARGS \
3891 +       struct vc4_exec_info *exec,                     \
3892 +       void *validated,                                \
3893 +       void *untrusted
3894 +
3895 +
3896 +/** Return the width in pixels of a 64-byte microtile. */
3897 +static uint32_t
3898 +utile_width(int cpp)
3899 +{
3900 +       switch (cpp) {
3901 +       case 1:
3902 +       case 2:
3903 +               return 8;
3904 +       case 4:
3905 +               return 4;
3906 +       case 8:
3907 +               return 2;
3908 +       default:
3909 +               DRM_ERROR("unknown cpp: %d\n", cpp);
3910 +               return 1;
3911 +       }
3912 +}
3913 +
3914 +/** Return the height in pixels of a 64-byte microtile. */
3915 +static uint32_t
3916 +utile_height(int cpp)
3917 +{
3918 +       switch (cpp) {
3919 +       case 1:
3920 +               return 8;
3921 +       case 2:
3922 +       case 4:
3923 +       case 8:
3924 +               return 4;
3925 +       default:
3926 +               DRM_ERROR("unknown cpp: %d\n", cpp);
3927 +               return 1;
3928 +       }
3929 +}
3930 +
3931 +/**
3932 + * The texture unit uses this rule to decide what tiling format a particular
3933 + * miplevel is in, so we lay out our miptrees accordingly.
3934 + */
3935 +static bool
3936 +size_is_lt(uint32_t width, uint32_t height, int cpp)
3937 +{
3938 +       return (width <= 4 * utile_width(cpp) ||
3939 +               height <= 4 * utile_height(cpp));
3940 +}
3941 +
3942 +bool
3943 +vc4_use_bo(struct vc4_exec_info *exec,
3944 +          uint32_t hindex,
3945 +          enum vc4_bo_mode mode,
3946 +          struct drm_gem_cma_object **obj)
3947 +{
3948 +       *obj = NULL;
3949 +
3950 +       if (hindex >= exec->bo_count) {
3951 +               DRM_ERROR("BO index %d greater than BO count %d\n",
3952 +                         hindex, exec->bo_count);
3953 +               return false;
3954 +       }
3955 +
3956 +       if (exec->bo[hindex].mode != mode) {
3957 +               if (exec->bo[hindex].mode == VC4_MODE_UNDECIDED) {
3958 +                       exec->bo[hindex].mode = mode;
3959 +               } else {
3960 +                       DRM_ERROR("BO index %d reused with mode %d vs %d\n",
3961 +                                 hindex, exec->bo[hindex].mode, mode);
3962 +                       return false;
3963 +               }
3964 +       }
3965 +
3966 +       *obj = exec->bo[hindex].bo;
3967 +       return true;
3968 +}
3969 +
3970 +static bool
3971 +vc4_use_handle(struct vc4_exec_info *exec,
3972 +              uint32_t gem_handles_packet_index,
3973 +              enum vc4_bo_mode mode,
3974 +              struct drm_gem_cma_object **obj)
3975 +{
3976 +       return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index],
3977 +                         mode, obj);
3978 +}
3979 +
3980 +static uint32_t
3981 +gl_shader_rec_size(uint32_t pointer_bits)
3982 +{
3983 +       uint32_t attribute_count = pointer_bits & 7;
3984 +       bool extended = pointer_bits & 8;
3985 +
3986 +       if (attribute_count == 0)
3987 +               attribute_count = 8;
3988 +
3989 +       if (extended)
3990 +               return 100 + attribute_count * 4;
3991 +       else
3992 +               return 36 + attribute_count * 8;
3993 +}
3994 +
3995 +bool
3996 +vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
3997 +                  uint32_t offset, uint8_t tiling_format,
3998 +                  uint32_t width, uint32_t height, uint8_t cpp)
3999 +{
4000 +       uint32_t aligned_width, aligned_height, stride, size;
4001 +       uint32_t utile_w = utile_width(cpp);
4002 +       uint32_t utile_h = utile_height(cpp);
4003 +
4004 +       /* The shaded vertex format stores signed 12.4 fixed point
4005 +        * (-2048,2047) offsets from the viewport center, so we should
4006 +        * never have a render target larger than 4096.  The texture
4007 +        * unit can only sample from 2048x2048, so it's even more
4008 +        * restricted.  This lets us avoid worrying about overflow in
4009 +        * our math.
4010 +        */
4011 +       if (width > 4096 || height > 4096) {
4012 +               DRM_ERROR("Surface dimensions (%d,%d) too large\n", width, height);
4013 +               return false;
4014 +       }
4015 +
4016 +       switch (tiling_format) {
4017 +       case VC4_TILING_FORMAT_LINEAR:
4018 +               aligned_width = round_up(width, utile_w);
4019 +               aligned_height = height;
4020 +               break;
4021 +       case VC4_TILING_FORMAT_T:
4022 +               aligned_width = round_up(width, utile_w * 8);
4023 +               aligned_height = round_up(height, utile_h * 8);
4024 +               break;
4025 +       case VC4_TILING_FORMAT_LT:
4026 +               aligned_width = round_up(width, utile_w);
4027 +               aligned_height = round_up(height, utile_h);
4028 +               break;
4029 +       default:
4030 +               DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
4031 +               return false;
4032 +       }
4033 +
4034 +       stride = aligned_width * cpp;
4035 +       size = stride * aligned_height;
4036 +
4037 +       if (size + offset < size ||
4038 +           size + offset > fbo->base.size) {
4039 +               DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %d)\n",
4040 +                         width, height,
4041 +                         aligned_width, aligned_height,
4042 +                         size, offset, fbo->base.size);
4043 +               return false;
4044 +       }
4045 +
4046 +       return true;
4047 +}
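
A quick worked instance of the math above (illustrative, not part of the patch), for a hypothetical 100x100 RGBA8888 (cpp = 4) surface in T tiling, where a microtile is 4x4 pixels and T format aligns each dimension to 8 microtiles:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t aligned_width  = 128;           /* roundup(100, 4 * 8) */
		uint32_t aligned_height = 128;           /* roundup(100, 4 * 8) */
		uint32_t stride = aligned_width * 4;     /* 512 bytes per row */
		uint32_t size = stride * aligned_height; /* 65536 bytes */

		/* The referenced BO must cover offset + size, or validation fails. */
		printf("required size: %u bytes\n", size);
		return 0;
	}
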
4048 +
4049 +static int
4050 +validate_flush_all(VALIDATE_ARGS)
4051 +{
4052 +       if (exec->found_increment_semaphore_packet) {
4053 +               DRM_ERROR("VC4_PACKET_FLUSH_ALL after "
4054 +                         "VC4_PACKET_INCREMENT_SEMAPHORE\n");
4055 +               return -EINVAL;
4056 +       }
4057 +
4058 +       return 0;
4059 +}
4060 +
4061 +static int
4062 +validate_start_tile_binning(VALIDATE_ARGS)
4063 +{
4064 +       if (exec->found_start_tile_binning_packet) {
4065 +               DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
4066 +               return -EINVAL;
4067 +       }
4068 +       exec->found_start_tile_binning_packet = true;
4069 +
4070 +       if (!exec->found_tile_binning_mode_config_packet) {
4071 +               DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
4072 +               return -EINVAL;
4073 +       }
4074 +
4075 +       return 0;
4076 +}
4077 +
4078 +static int
4079 +validate_increment_semaphore(VALIDATE_ARGS)
4080 +{
4081 +       if (exec->found_increment_semaphore_packet) {
4082 +               DRM_ERROR("Duplicate VC4_PACKET_INCREMENT_SEMAPHORE\n");
4083 +               return -EINVAL;
4084 +       }
4085 +       exec->found_increment_semaphore_packet = true;
4086 +
4087 +       /* Once we've found the semaphore increment, there should be one FLUSH
4088 +        * then the end of the command list.  The FLUSH actually triggers the
4089 +        * increment, so we only need to make sure there is a FLUSH afterwards.
4090 +        */
4091 +
4092 +       return 0;
4093 +}
4094 +
4095 +static int
4096 +validate_indexed_prim_list(VALIDATE_ARGS)
4097 +{
4098 +       struct drm_gem_cma_object *ib;
4099 +       uint32_t length = *(uint32_t *)(untrusted + 1);
4100 +       uint32_t offset = *(uint32_t *)(untrusted + 5);
4101 +       uint32_t max_index = *(uint32_t *)(untrusted + 9);
4102 +       uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
4103 +       struct vc4_shader_state *shader_state;
4104 +
4105 +       if (exec->found_increment_semaphore_packet) {
4106 +               DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
4107 +               return -EINVAL;
4108 +       }
4109 +
4110 +       /* Check overflow condition */
4111 +       if (exec->shader_state_count == 0) {
4112 +               DRM_ERROR("shader state must precede primitives\n");
4113 +               return -EINVAL;
4114 +       }
4115 +       shader_state = &exec->shader_state[exec->shader_state_count - 1];
4116 +
4117 +       if (max_index > shader_state->max_index)
4118 +               shader_state->max_index = max_index;
4119 +
4120 +       if (!vc4_use_handle(exec, 0, VC4_MODE_RENDER, &ib))
4121 +               return -EINVAL;
4122 +
4123 +       if (offset > ib->base.size ||
4124 +           (ib->base.size - offset) / index_size < length) {
4125 +               DRM_ERROR("IB access overflow (%d + %d*%d > %d)\n",
4126 +                         offset, length, index_size, ib->base.size);
4127 +               return -EINVAL;
4128 +       }
4129 +
4130 +       *(uint32_t *)(validated + 5) = ib->paddr + offset;
4131 +
4132 +       return 0;
4133 +}
4134 +
4135 +static int
4136 +validate_gl_array_primitive(VALIDATE_ARGS)
4137 +{
4138 +       uint32_t length = *(uint32_t *)(untrusted + 1);
4139 +       uint32_t base_index = *(uint32_t *)(untrusted + 5);
4140 +       uint32_t max_index;
4141 +       struct vc4_shader_state *shader_state;
4142 +
4143 +       if (exec->found_increment_semaphore_packet) {
4144 +               DRM_ERROR("Drawing after VC4_PACKET_INCREMENT_SEMAPHORE\n");
4145 +               return -EINVAL;
4146 +       }
4147 +
4148 +       /* Check overflow condition */
4149 +       if (exec->shader_state_count == 0) {
4150 +               DRM_ERROR("shader state must precede primitives\n");
4151 +               return -EINVAL;
4152 +       }
4153 +       shader_state = &exec->shader_state[exec->shader_state_count - 1];
4154 +
4155 +       if (length + base_index < length) {
4156 +               DRM_ERROR("primitive vertex count overflow\n");
4157 +               return -EINVAL;
4158 +       }
4159 +       max_index = length + base_index - 1;
4160 +
4161 +       if (max_index > shader_state->max_index)
4162 +               shader_state->max_index = max_index;
4163 +
4164 +       return 0;
4165 +}
4166 +
4167 +static int
4168 +validate_gl_shader_state(VALIDATE_ARGS)
4169 +{
4170 +       uint32_t i = exec->shader_state_count++;
4171 +
4172 +       if (i >= exec->shader_state_size) {
4173 +               DRM_ERROR("More requests for shader states than declared\n");
4174 +               return -EINVAL;
4175 +       }
4176 +
4177 +       exec->shader_state[i].packet = VC4_PACKET_GL_SHADER_STATE;
4178 +       exec->shader_state[i].addr = *(uint32_t *)untrusted;
4179 +       exec->shader_state[i].max_index = 0;
4180 +
4181 +       if (exec->shader_state[i].addr & ~0xf) {
4182 +               DRM_ERROR("high bits set in GL shader rec reference\n");
4183 +               return -EINVAL;
4184 +       }
4185 +
4186 +       *(uint32_t *)validated = (exec->shader_rec_p +
4187 +                                 exec->shader_state[i].addr);
4188 +
4189 +       exec->shader_rec_p +=
4190 +               roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
4191 +
4192 +       return 0;
4193 +}
4194 +
4195 +static int
4196 +validate_nv_shader_state(VALIDATE_ARGS)
4197 +{
4198 +       uint32_t i = exec->shader_state_count++;
4199 +
4200 +       if (i >= exec->shader_state_size) {
4201 +               DRM_ERROR("More requests for shader states than declared\n");
4202 +               return -EINVAL;
4203 +       }
4204 +
4205 +       exec->shader_state[i].packet = VC4_PACKET_NV_SHADER_STATE;
4206 +       exec->shader_state[i].addr = *(uint32_t *)untrusted;
4207 +
4208 +       if (exec->shader_state[i].addr & 15) {
4209 +               DRM_ERROR("NV shader state address 0x%08x misaligned\n",
4210 +                         exec->shader_state[i].addr);
4211 +               return -EINVAL;
4212 +       }
4213 +
4214 +       *(uint32_t *)validated = (exec->shader_state[i].addr +
4215 +                                 exec->shader_rec_p);
4216 +
4217 +       return 0;
4218 +}
4219 +
4220 +static int
4221 +validate_tile_binning_config(VALIDATE_ARGS)
4222 +{
4223 +       struct drm_device *dev = exec->exec_bo->base.dev;
4224 +       uint8_t flags;
4225 +       uint32_t tile_state_size, tile_alloc_size;
4226 +       uint32_t tile_count;
4227 +
4228 +       if (exec->found_tile_binning_mode_config_packet) {
4229 +               DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
4230 +               return -EINVAL;
4231 +       }
4232 +       exec->found_tile_binning_mode_config_packet = true;
4233 +
4234 +       exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
4235 +       exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
4236 +       tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
4237 +       flags = *(uint8_t *)(untrusted + 14);
4238 +
4239 +       if (exec->bin_tiles_x == 0 ||
4240 +           exec->bin_tiles_y == 0) {
4241 +               DRM_ERROR("Tile binning config of %dx%d too small\n",
4242 +                         exec->bin_tiles_x, exec->bin_tiles_y);
4243 +               return -EINVAL;
4244 +       }
4245 +
4246 +       if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
4247 +                    VC4_BIN_CONFIG_TILE_BUFFER_64BIT |
4248 +                    VC4_BIN_CONFIG_MS_MODE_4X)) {
4249 +               DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
4250 +               return -EINVAL;
4251 +       }
4252 +
4253 +       /* The tile state data array is 48 bytes per tile, and we put it at
4254 +        * the start of a BO containing both it and the tile alloc.
4255 +        */
4256 +       tile_state_size = 48 * tile_count;
4257 +
4258 +       /* Since the tile alloc array will follow us, align. */
4259 +       exec->tile_alloc_offset = roundup(tile_state_size, 4096);
4260 +
4261 +       *(uint8_t *)(validated + 14) =
4262 +               ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
4263 +                           VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
4264 +                VC4_BIN_CONFIG_AUTO_INIT_TSDA |
4265 +                VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
4266 +                              VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
4267 +                VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
4268 +                              VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
4269 +
4270 +       /* Initial block size. */
4271 +       tile_alloc_size = 32 * tile_count;
4272 +
4273 +       /*
4274 +        * The initial allocation gets rounded to the next 256 bytes before
4275 +        * the hardware starts fulfilling further allocations.
4276 +        */
4277 +       tile_alloc_size = roundup(tile_alloc_size, 256);
4278 +
4279 +       /* Add space for the extra allocations.  This is what gets used first,
4280 +        * before overflow memory.  It must have at least 4096 bytes, but we
4281 +        * want to avoid overflow memory usage if possible.
4282 +        */
4283 +       tile_alloc_size += 1024 * 1024;
4284 +
4285 +       exec->tile_bo = &vc4_bo_create(dev, exec->tile_alloc_offset +
4286 +                                      tile_alloc_size)->base;
4287 +       if (!exec->tile_bo)
4288 +               return -ENOMEM;
4289 +       list_add_tail(&to_vc4_bo(&exec->tile_bo->base)->unref_head,
4290 +                    &exec->unref_list);
4291 +
4292 +       /* tile alloc address. */
4293 +       *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
4294 +                                       exec->tile_alloc_offset);
4295 +       /* tile alloc size. */
4296 +       *(uint32_t *)(validated + 4) = tile_alloc_size;
4297 +       /* tile state address. */
4298 +       *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
4299 +
4300 +       return 0;
4301 +}
4302 +
4303 +static int
4304 +validate_gem_handles(VALIDATE_ARGS)
4305 +{
4306 +       memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
4307 +       return 0;
4308 +}
4309 +
4310 +#define VC4_DEFINE_PACKET(packet, name, func) \
4311 +       [packet] = { packet ## _SIZE, name, func }
4312 +
4313 +static const struct cmd_info {
4314 +       uint16_t len;
4315 +       const char *name;
4316 +       int (*func)(struct vc4_exec_info *exec, void *validated,
4317 +                   void *untrusted);
4318 +} cmd_info[] = {
4319 +       VC4_DEFINE_PACKET(VC4_PACKET_HALT, "halt", NULL),
4320 +       VC4_DEFINE_PACKET(VC4_PACKET_NOP, "nop", NULL),
4321 +       VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, "flush", NULL),
4322 +       VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, "flush all state", validate_flush_all),
4323 +       VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, "start tile binning", validate_start_tile_binning),
4324 +       VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, "increment semaphore", validate_increment_semaphore),
4325 +
4326 +       VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, "Indexed Primitive List", validate_indexed_prim_list),
4327 +
4328 +       VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, "Vertex Array Primitives", validate_gl_array_primitive),
4329 +
4330 +       /* This is only used by clipped primitives (packets 48 and 49), which
4331 +        * we don't support parsing yet.
4332 +        */
4333 +       VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, "primitive list format", NULL),
4334 +
4335 +       VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, "GL Shader State", validate_gl_shader_state),
4336 +       VC4_DEFINE_PACKET(VC4_PACKET_NV_SHADER_STATE, "NV Shader State", validate_nv_shader_state),
4337 +
4338 +       VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, "configuration bits", NULL),
4339 +       VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, "flat shade flags", NULL),
4340 +       VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, "point size", NULL),
4341 +       VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, "line width", NULL),
4342 +       VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, "RHT X boundary", NULL),
4343 +       VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, "Depth Offset", NULL),
4344 +       VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, "Clip Window", NULL),
4345 +       VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, "Viewport Offset", NULL),
4346 +       VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, "Clipper XY Scaling", NULL),
4347 +       /* Note: The docs say this was also 105, but it was 106 in the
4348 +        * initial userland code drop.
4349 +        */
4350 +       VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, "Clipper Z Scale and Offset", NULL),
4351 +
4352 +       VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, "tile binning configuration", validate_tile_binning_config),
4353 +
4354 +       VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, "GEM handles", validate_gem_handles),
4355 +};
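For reference, VC4_DEFINE_PACKET() is just a designated initializer, so VC4_DEFINE_PACKET(VC4_PACKET_HALT, "halt", NULL) expands to [VC4_PACKET_HALT] = { VC4_PACKET_HALT_SIZE, "halt", NULL }.  The packet opcode doubles as the index into cmd_info[], and any opcode without an entry is left zeroed and rejected by the !info->name check in vc4_validate_bin_cl() below.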
4356 +
4357 +int
4358 +vc4_validate_bin_cl(struct drm_device *dev,
4359 +                   void *validated,
4360 +                   void *unvalidated,
4361 +                   struct vc4_exec_info *exec)
4362 +{
4363 +       uint32_t len = exec->args->bin_cl_size;
4364 +       uint32_t dst_offset = 0;
4365 +       uint32_t src_offset = 0;
4366 +
4367 +       while (src_offset < len) {
4368 +               void *dst_pkt = validated + dst_offset;
4369 +               void *src_pkt = unvalidated + src_offset;
4370 +               u8 cmd = *(uint8_t *)src_pkt;
4371 +               const struct cmd_info *info;
4372 +
4373 +               if (cmd >= ARRAY_SIZE(cmd_info)) {
4374 +                       DRM_ERROR("0x%08x: packet %d out of bounds\n",
4375 +                                 src_offset, cmd);
4376 +                       return -EINVAL;
4377 +               }
4378 +
4379 +               info = &cmd_info[cmd];
4380 +               if (!info->name) {
4381 +                       DRM_ERROR("0x%08x: packet %d invalid\n",
4382 +                                 src_offset, cmd);
4383 +                       return -EINVAL;
4384 +               }
4385 +
4386 +#if 0
4387 +               DRM_INFO("0x%08x: packet %d (%s) size %d processing...\n",
4388 +                        src_offset, cmd, info->name, info->len);
4389 +#endif
4390 +
4391 +               if (src_offset + info->len > len) {
4392 +                       DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
4393 +                                 "exceeds bounds (0x%08x)\n",
4394 +                                 src_offset, cmd, info->name, info->len,
4395 +                                 src_offset + len);
4396 +                       return -EINVAL;
4397 +               }
4398 +
4399 +               if (cmd != VC4_PACKET_GEM_HANDLES)
4400 +                       memcpy(dst_pkt, src_pkt, info->len);
4401 +
4402 +               if (info->func && info->func(exec,
4403 +                                            dst_pkt + 1,
4404 +                                            src_pkt + 1)) {
4405 +                       DRM_ERROR("0x%08x: packet %d (%s) failed to "
4406 +                                 "validate\n",
4407 +                                 src_offset, cmd, info->name);
4408 +                       return -EINVAL;
4409 +               }
4410 +
4411 +               src_offset += info->len;
4412 +               /* GEM handle loading doesn't produce HW packets. */
4413 +               if (cmd != VC4_PACKET_GEM_HANDLES)
4414 +                       dst_offset += info->len;
4415 +
4416 +               /* When the CL hits halt, it'll stop reading anything else. */
4417 +               if (cmd == VC4_PACKET_HALT)
4418 +                       break;
4419 +       }
4420 +
4421 +       exec->ct0ea = exec->ct0ca + dst_offset;
4422 +
4423 +       if (!exec->found_start_tile_binning_packet) {
4424 +               DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
4425 +               return -EINVAL;
4426 +       }
4427 +
4428 +       if (!exec->found_increment_semaphore_packet) {
4429 +               DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE\n");
4430 +               return -EINVAL;
4431 +       }
4432 +
4433 +       return 0;
4434 +}
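To illustrate the copy loop above: for a source CL consisting of a GEM_HANDLES packet, the tile binning mode config, START_TILE_BINNING, the draw packets, INCREMENT_SEMAPHORE, and a trailing HALT, the GEM_HANDLES bytes advance src_offset (stashing the handle indices in exec->bo_index) but are never copied, so dst_offset lags behind by that packet's length and the validated CL that the hardware walks from exec->ct0ca to exec->ct0ea contains only real control-list packets.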
4435 +
4436 +static bool
4437 +reloc_tex(struct vc4_exec_info *exec,
4438 +         void *uniform_data_u,
4439 +         struct vc4_texture_sample_info *sample,
4440 +         uint32_t texture_handle_index)
4441 +
4442 +{
4443 +       struct drm_gem_cma_object *tex;
4444 +       uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
4445 +       uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
4446 +       uint32_t p2 = (sample->p_offset[2] != ~0 ?
4447 +                      *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
4448 +       uint32_t p3 = (sample->p_offset[3] != ~0 ?
4449 +                      *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
4450 +       uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
4451 +       uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
4452 +       uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
4453 +       uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
4454 +       uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
4455 +       uint32_t cpp, tiling_format, utile_w, utile_h;
4456 +       uint32_t i;
4457 +       uint32_t cube_map_stride = 0;
4458 +       enum vc4_texture_data_type type;
4459 +
4460 +       if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
4461 +               return false;
4462 +
4463 +       if (sample->is_direct) {
4464 +               uint32_t remaining_size = tex->base.size - p0;
4465 +               if (p0 > tex->base.size - 4) {
4466 +                       DRM_ERROR("UBO offset greater than UBO size\n");
4467 +                       goto fail;
4468 +               }
4469 +               if (p1 > remaining_size - 4) {
4470 +                       DRM_ERROR("UBO clamp would allow reads outside of UBO\n");
4471 +                       goto fail;
4472 +               }
4473 +               *validated_p0 = tex->paddr + p0;
4474 +               return true;
4475 +       }
4476 +
4477 +       if (width == 0)
4478 +               width = 2048;
4479 +       if (height == 0)
4480 +               height = 2048;
4481 +
4482 +       if (p0 & VC4_TEX_P0_CMMODE_MASK) {
4483 +               if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
4484 +                   VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
4485 +                       cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
4486 +               if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
4487 +                   VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
4488 +                       if (cube_map_stride) {
4489 +                               DRM_ERROR("Cube map stride set twice\n");
4490 +                               goto fail;
4491 +                       }
4492 +
4493 +                       cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
4494 +               }
4495 +               if (!cube_map_stride) {
4496 +                       DRM_ERROR("Cube map stride not set\n");
4497 +                       goto fail;
4498 +               }
4499 +       }
4500 +
4501 +       type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
4502 +               (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
4503 +
4504 +       switch (type) {
4505 +       case VC4_TEXTURE_TYPE_RGBA8888:
4506 +       case VC4_TEXTURE_TYPE_RGBX8888:
4507 +       case VC4_TEXTURE_TYPE_RGBA32R:
4508 +               cpp = 4;
4509 +               break;
4510 +       case VC4_TEXTURE_TYPE_RGBA4444:
4511 +       case VC4_TEXTURE_TYPE_RGBA5551:
4512 +       case VC4_TEXTURE_TYPE_RGB565:
4513 +       case VC4_TEXTURE_TYPE_LUMALPHA:
4514 +       case VC4_TEXTURE_TYPE_S16F:
4515 +       case VC4_TEXTURE_TYPE_S16:
4516 +               cpp = 2;
4517 +               break;
4518 +       case VC4_TEXTURE_TYPE_LUMINANCE:
4519 +       case VC4_TEXTURE_TYPE_ALPHA:
4520 +       case VC4_TEXTURE_TYPE_S8:
4521 +               cpp = 1;
4522 +               break;
4523 +       case VC4_TEXTURE_TYPE_ETC1:
4524 +       case VC4_TEXTURE_TYPE_BW1:
4525 +       case VC4_TEXTURE_TYPE_A4:
4526 +       case VC4_TEXTURE_TYPE_A1:
4527 +       case VC4_TEXTURE_TYPE_RGBA64:
4528 +       case VC4_TEXTURE_TYPE_YUV422R:
4529 +       default:
4530 +               DRM_ERROR("Texture format %d unsupported\n", type);
4531 +               goto fail;
4532 +       }
4533 +       utile_w = utile_width(cpp);
4534 +       utile_h = utile_height(cpp);
4535 +
4536 +       if (type == VC4_TEXTURE_TYPE_RGBA32R) {
4537 +               tiling_format = VC4_TILING_FORMAT_LINEAR;
4538 +       } else {
4539 +               if (size_is_lt(width, height, cpp))
4540 +                       tiling_format = VC4_TILING_FORMAT_LT;
4541 +               else
4542 +                       tiling_format = VC4_TILING_FORMAT_T;
4543 +       }
4544 +
4545 +       if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
4546 +                               tiling_format, width, height, cpp)) {
4547 +               goto fail;
4548 +       }
4549 +
4550 +       /* The mipmap levels are stored before the base of the texture.  Make
4551 +        * sure there is actually space in the BO.
4552 +        */
4553 +       for (i = 1; i <= miplevels; i++) {
4554 +               uint32_t level_width = max(width >> i, 1u);
4555 +               uint32_t level_height = max(height >> i, 1u);
4556 +               uint32_t aligned_width, aligned_height;
4557 +               uint32_t level_size;
4558 +
4559 +               /* Once the levels get small enough, they drop from T to LT. */
4560 +               if (tiling_format == VC4_TILING_FORMAT_T &&
4561 +                   size_is_lt(level_width, level_height, cpp)) {
4562 +                       tiling_format = VC4_TILING_FORMAT_LT;
4563 +               }
4564 +
4565 +               switch (tiling_format) {
4566 +               case VC4_TILING_FORMAT_T:
4567 +                       aligned_width = round_up(level_width, utile_w * 8);
4568 +                       aligned_height = round_up(level_height, utile_h * 8);
4569 +                       break;
4570 +               case VC4_TILING_FORMAT_LT:
4571 +                       aligned_width = round_up(level_width, utile_w);
4572 +                       aligned_height = round_up(level_height, utile_h);
4573 +                       break;
4574 +               default:
4575 +                       aligned_width = round_up(level_width, utile_w);
4576 +                       aligned_height = level_height;
4577 +                       break;
4578 +               }
4579 +
4580 +               level_size = aligned_width * cpp * aligned_height;
4581 +
4582 +               if (offset < level_size) {
4583 +                       DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
4584 +                                 "overflowed buffer bounds (offset %d)\n",
4585 +                                 i, level_width, level_height,
4586 +                                 aligned_width, aligned_height,
4587 +                                 level_size, offset);
4588 +                       goto fail;
4589 +               }
4590 +
4591 +               offset -= level_size;
4592 +       }
4593 +
4594 +       *validated_p0 = tex->paddr + p0;
4595 +
4596 +       return true;
4597 + fail:
4598 +       DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
4599 +       DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
4600 +       DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
4601 +       DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
4602 +       return false;
4603 +}
4604 +
4605 +static int
4606 +validate_shader_rec(struct drm_device *dev,
4607 +                   struct vc4_exec_info *exec,
4608 +                   struct vc4_shader_state *state)
4609 +{
4610 +       uint32_t *src_handles;
4611 +       void *pkt_u, *pkt_v;
4612 +       enum shader_rec_reloc_type {
4613 +               RELOC_CODE,
4614 +               RELOC_VBO,
4615 +       };
4616 +       struct shader_rec_reloc {
4617 +               enum shader_rec_reloc_type type;
4618 +               uint32_t offset;
4619 +       };
4620 +       static const struct shader_rec_reloc gl_relocs[] = {
4621 +               { RELOC_CODE, 4 },  /* fs */
4622 +               { RELOC_CODE, 16 }, /* vs */
4623 +               { RELOC_CODE, 28 }, /* cs */
4624 +       };
4625 +       static const struct shader_rec_reloc nv_relocs[] = {
4626 +               { RELOC_CODE, 4 }, /* fs */
4627 +               { RELOC_VBO, 12 }
4628 +       };
4629 +       const struct shader_rec_reloc *relocs;
4630 +       struct drm_gem_cma_object *bo[ARRAY_SIZE(gl_relocs) + 8];
4631 +       uint32_t nr_attributes = 0, nr_fixed_relocs, nr_relocs, packet_size;
4632 +       int i;
4633 +       struct vc4_validated_shader_info *validated_shader;
4634 +
4635 +       if (state->packet == VC4_PACKET_NV_SHADER_STATE) {
4636 +               relocs = nv_relocs;
4637 +               nr_fixed_relocs = ARRAY_SIZE(nv_relocs);
4638 +
4639 +               packet_size = 16;
4640 +       } else {
4641 +               relocs = gl_relocs;
4642 +               nr_fixed_relocs = ARRAY_SIZE(gl_relocs);
4643 +
4644 +               nr_attributes = state->addr & 0x7;
4645 +               if (nr_attributes == 0)
4646 +                       nr_attributes = 8;
4647 +               packet_size = gl_shader_rec_size(state->addr);
4648 +       }
4649 +       nr_relocs = nr_fixed_relocs + nr_attributes;
4650 +
4651 +       if (nr_relocs * 4 > exec->shader_rec_size) {
4652 +               DRM_ERROR("overflowed shader recs reading %d handles "
4653 +                         "from %d bytes left\n",
4654 +                         nr_relocs, exec->shader_rec_size);
4655 +               return -EINVAL;
4656 +       }
4657 +       src_handles = exec->shader_rec_u;
4658 +       exec->shader_rec_u += nr_relocs * 4;
4659 +       exec->shader_rec_size -= nr_relocs * 4;
4660 +
4661 +       if (packet_size > exec->shader_rec_size) {
4662 +               DRM_ERROR("overflowed shader recs copying %db packet "
4663 +                         "from %d bytes left\n",
4664 +                         packet_size, exec->shader_rec_size);
4665 +               return -EINVAL;
4666 +       }
4667 +       pkt_u = exec->shader_rec_u;
4668 +       pkt_v = exec->shader_rec_v;
4669 +       memcpy(pkt_v, pkt_u, packet_size);
4670 +       exec->shader_rec_u += packet_size;
4671 +       /* Shader recs have to be aligned to 16 bytes (due to the attribute
4672 +        * flags being in the low bytes), so round the next validated shader
4673 +        * rec address up.  This should be safe, since we've got so many
4674 +        * relocations in a shader rec packet.
4675 +        */
4676 +       BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
4677 +       exec->shader_rec_v += roundup(packet_size, 16);
4678 +       exec->shader_rec_size -= packet_size;
4679 +
4680 +       for (i = 0; i < nr_relocs; i++) {
4681 +               enum vc4_bo_mode mode;
4682 +
4683 +               if (i < nr_fixed_relocs && relocs[i].type == RELOC_CODE)
4684 +                       mode = VC4_MODE_SHADER;
4685 +               else
4686 +                       mode = VC4_MODE_RENDER;
4687 +
4688 +               if (!vc4_use_bo(exec, src_handles[i], mode, &bo[i])) {
4689 +                       return -EINVAL;
4690 +               }
4691 +       }
4692 +
4693 +       for (i = 0; i < nr_fixed_relocs; i++) {
4694 +               uint32_t o = relocs[i].offset;
4695 +               uint32_t src_offset = *(uint32_t *)(pkt_u + o);
4696 +               uint32_t *texture_handles_u;
4697 +               void *uniform_data_u;
4698 +               uint32_t tex;
4699 +
4700 +               *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
4701 +
4702 +               switch (relocs[i].type) {
4703 +               case RELOC_CODE:
4704 +                       if (src_offset != 0) {
4705 +                               DRM_ERROR("Shaders must be at offset 0 of "
4706 +                                         "the BO.\n");
4707 +                               goto fail;
4708 +                       }
4709 +
4710 +                       validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
4711 +                       if (!validated_shader)
4712 +                               goto fail;
4713 +
4714 +                       if (validated_shader->uniforms_src_size >
4715 +                           exec->uniforms_size) {
4716 +                               DRM_ERROR("Uniforms src buffer overflow\n");
4717 +                               goto fail;
4718 +                       }
4719 +
4720 +                       texture_handles_u = exec->uniforms_u;
4721 +                       uniform_data_u = (texture_handles_u +
4722 +                                         validated_shader->num_texture_samples);
4723 +
4724 +                       memcpy(exec->uniforms_v, uniform_data_u,
4725 +                              validated_shader->uniforms_size);
4726 +
4727 +                       for (tex = 0;
4728 +                            tex < validated_shader->num_texture_samples;
4729 +                            tex++) {
4730 +                               if (!reloc_tex(exec,
4731 +                                              uniform_data_u,
4732 +                                              &validated_shader->texture_samples[tex],
4733 +                                              texture_handles_u[tex])) {
4734 +                                       goto fail;
4735 +                               }
4736 +                       }
4737 +
4738 +                       *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
4739 +
4740 +                       exec->uniforms_u += validated_shader->uniforms_src_size;
4741 +                       exec->uniforms_v += validated_shader->uniforms_size;
4742 +                       exec->uniforms_p += validated_shader->uniforms_size;
4743 +
4744 +                       break;
4745 +
4746 +               case RELOC_VBO:
4747 +                       break;
4748 +               }
4749 +       }
4750 +
4751 +       for (i = 0; i < nr_attributes; i++) {
4752 +               struct drm_gem_cma_object *vbo = bo[nr_fixed_relocs + i];
4753 +               uint32_t o = 36 + i * 8;
4754 +               uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
4755 +               uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
4756 +               uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
4757 +               uint32_t max_index;
4758 +
4759 +               if (state->addr & 0x8)
4760 +                       stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
4761 +
4762 +               if (vbo->base.size < offset ||
4763 +                   vbo->base.size - offset < attr_size) {
4764 +                       DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
4765 +                                 offset, attr_size, vbo->base.size);
4766 +                       return -EINVAL;
4767 +               }
4768 +
4769 +               if (stride != 0) {
4770 +                       max_index = ((vbo->base.size - offset - attr_size) /
4771 +                                    stride);
4772 +                       if (state->max_index > max_index) {
4773 +                               DRM_ERROR("primitives use index %d out of supplied %d\n",
4774 +                                         state->max_index, max_index);
4775 +                               return -EINVAL;
4776 +                       }
4777 +               }
4778 +
4779 +               *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
4780 +       }
4781 +
4782 +       return 0;
4783 +
4784 +fail:
4785 +       return -EINVAL;
4786 +}
4787 +
4788 +int
4789 +vc4_validate_shader_recs(struct drm_device *dev,
4790 +                        struct vc4_exec_info *exec)
4791 +{
4792 +       uint32_t i;
4793 +       int ret = 0;
4794 +
4795 +       for (i = 0; i < exec->shader_state_count; i++) {
4796 +               ret = validate_shader_rec(dev, exec, &exec->shader_state[i]);
4797 +               if (ret)
4798 +                       return ret;
4799 +       }
4800 +
4801 +       return ret;
4802 +}
4803 --- /dev/null
4804 +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
4805 @@ -0,0 +1,521 @@
4806 +/*
4807 + * Copyright © 2014 Broadcom
4808 + *
4809 + * Permission is hereby granted, free of charge, to any person obtaining a
4810 + * copy of this software and associated documentation files (the "Software"),
4811 + * to deal in the Software without restriction, including without limitation
4812 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4813 + * and/or sell copies of the Software, and to permit persons to whom the
4814 + * Software is furnished to do so, subject to the following conditions:
4815 + *
4816 + * The above copyright notice and this permission notice (including the next
4817 + * paragraph) shall be included in all copies or substantial portions of the
4818 + * Software.
4819 + *
4820 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4821 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4822 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
4823 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4824 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4825 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4826 + * IN THE SOFTWARE.
4827 + */
4828 +
4829 +/**
4830 + * DOC: Shader validator for VC4.
4831 + *
4832 + * The VC4 has no IOMMU between it and system memory.  So, a user with access
4833 + * to execute shaders could escalate privilege by overwriting system memory
4834 + * (using the VPM write address register in the general-purpose DMA mode) or
4835 + * reading system memory it shouldn't (reading it as a texture, or uniform
4836 + * data, or vertex data).
4837 + *
4838 + * This walks over a shader starting from some offset within a BO, ensuring
4839 + * that its accesses are appropriately bounded, and recording how many texture
4840 + * accesses are made and where so that we can do relocations for them in the
4841 + * uniform stream.
4842 + *
4843 + * The kernel API has shaders stored in user-mapped BOs.  The BOs will be
4844 + * forcibly unmapped from the process before validation, and any cache of
4845 + * validated state will be flushed if the mapping is faulted back in.
4846 + *
4847 + * Storing the shaders in BOs means that the validation process will be slow
4848 + * due to uncached reads, but since shaders are long-lived and shader BOs are
4849 + * never actually modified, this shouldn't be a problem.
4850 + */
4851 +
4852 +#include "vc4_drv.h"
4853 +#include "vc4_qpu_defines.h"
4854 +
4855 +struct vc4_shader_validation_state {
4856 +       struct vc4_texture_sample_info tmu_setup[2];
4857 +       int tmu_write_count[2];
4858 +
4859 +       /* For registers that were last written to by a MIN instruction with
4860 +        * one argument being a uniform, the address of the uniform.
4861 +        * Otherwise, ~0.
4862 +        *
4863 +        * This is used for the validation of direct address memory reads.
4864 +        */
4865 +       uint32_t live_min_clamp_offsets[32 + 32 + 4];
4866 +       bool live_max_clamp_regs[32 + 32 + 4];
4867 +};
4868 +
4869 +static uint32_t
4870 +waddr_to_live_reg_index(uint32_t waddr, bool is_b)
4871 +{
4872 +       if (waddr < 32) {
4873 +               if (is_b)
4874 +                       return 32 + waddr;
4875 +               else
4876 +                       return waddr;
4877 +       } else if (waddr <= QPU_W_ACC3) {
4878 +
4879 +               return 64 + waddr - QPU_W_ACC0;
4880 +       } else {
4881 +               return ~0;
4882 +       }
4883 +}
4884 +
4885 +static uint32_t
4886 +raddr_add_a_to_live_reg_index(uint64_t inst)
4887 +{
4888 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
4889 +       uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
4890 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
4891 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
4892 +
4893 +       if (add_a == QPU_MUX_A) {
4894 +               return raddr_a;
4895 +       } else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM) {
4896 +               return 32 + raddr_b;
4897 +       } else if (add_a <= QPU_MUX_R3) {
4898 +               return 64 + add_a;
4899 +       } else {
4900 +               return ~0;
4901 +       }
4902 +}
4903 +
4904 +static bool
4905 +is_tmu_submit(uint32_t waddr)
4906 +{
4907 +       return (waddr == QPU_W_TMU0_S ||
4908 +               waddr == QPU_W_TMU1_S);
4909 +}
4910 +
4911 +static bool
4912 +is_tmu_write(uint32_t waddr)
4913 +{
4914 +       return (waddr >= QPU_W_TMU0_S &&
4915 +               waddr <= QPU_W_TMU1_B);
4916 +}
4917 +
4918 +static bool
4919 +record_validated_texture_sample(struct vc4_validated_shader_info *validated_shader,
4920 +                               struct vc4_shader_validation_state *validation_state,
4921 +                               int tmu)
4922 +{
4923 +       uint32_t s = validated_shader->num_texture_samples;
4924 +       int i;
4925 +       struct vc4_texture_sample_info *temp_samples;
4926 +
4927 +       temp_samples = krealloc(validated_shader->texture_samples,
4928 +                               (s + 1) * sizeof(*temp_samples),
4929 +                               GFP_KERNEL);
4930 +       if (!temp_samples)
4931 +               return false;
4932 +
4933 +       memcpy(&temp_samples[s],
4934 +              &validation_state->tmu_setup[tmu],
4935 +              sizeof(*temp_samples));
4936 +
4937 +       validated_shader->num_texture_samples = s + 1;
4938 +       validated_shader->texture_samples = temp_samples;
4939 +
4940 +       for (i = 0; i < 4; i++)
4941 +               validation_state->tmu_setup[tmu].p_offset[i] = ~0;
4942 +
4943 +       return true;
4944 +}
4945 +
4946 +static bool
4947 +check_tmu_write(uint64_t inst,
4948 +               struct vc4_validated_shader_info *validated_shader,
4949 +               struct vc4_shader_validation_state *validation_state,
4950 +               bool is_mul)
4951 +{
4952 +       uint32_t waddr = (is_mul ?
4953 +                         QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
4954 +                         QPU_GET_FIELD(inst, QPU_WADDR_ADD));
4955 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
4956 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
4957 +       int tmu = waddr > QPU_W_TMU0_B;
4958 +       bool submit = is_tmu_submit(waddr);
4959 +       bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
4960 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
4961 +
4962 +       if (is_direct) {
4963 +               uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
4964 +               uint32_t clamp_reg, clamp_offset;
4965 +
4966 +               if (sig == QPU_SIG_SMALL_IMM) {
4967 +                       DRM_ERROR("direct TMU read used small immediate\n");
4968 +                       return false;
4969 +               }
4970 +
4971 +               /* Make sure that this texture load is an add of the base
4972 +                * address of the UBO to a clamped offset within the UBO.
4973 +                */
4974 +               if (is_mul ||
4975 +                   QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
4976 +                       DRM_ERROR("direct TMU load wasn't an add\n");
4977 +                       return false;
4978 +               }
4979 +
4980 +               /* We assert that the clamped address is the first
4981 +                * argument, and the UBO base address is the second argument.
4982 +                * This is arbitrary, but simpler than supporting flipping the
4983 +                * two either way.
4984 +                */
4985 +               clamp_reg = raddr_add_a_to_live_reg_index(inst);
4986 +               if (clamp_reg == ~0) {
4987 +                       DRM_ERROR("direct TMU load wasn't clamped\n");
4988 +                       return false;
4989 +               }
4990 +
4991 +               clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
4992 +               if (clamp_offset == ~0) {
4993 +                       DRM_ERROR("direct TMU load wasn't clamped\n");
4994 +                       return false;
4995 +               }
4996 +
4997 +               /* Store the clamp value's offset in p1 (see reloc_tex() in
4998 +                * vc4_validate.c).
4999 +                */
5000 +               validation_state->tmu_setup[tmu].p_offset[1] =
5001 +                       clamp_offset;
5002 +
5003 +               if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
5004 +                   !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
5005 +                       DRM_ERROR("direct TMU load didn't add to a uniform\n");
5006 +                       return false;
5007 +               }
5008 +
5009 +               validation_state->tmu_setup[tmu].is_direct = true;
5010 +       } else {
5011 +               if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
5012 +                                             raddr_b == QPU_R_UNIF)) {
5013 +                       DRM_ERROR("uniform read in the same instruction as "
5014 +                                 "texture setup.\n");
5015 +                       return false;
5016 +               }
5017 +       }
5018 +
5019 +       if (validation_state->tmu_write_count[tmu] >= 4) {
5020 +               DRM_ERROR("TMU%d got too many parameters before dispatch\n",
5021 +                         tmu);
5022 +               return false;
5023 +       }
5024 +       validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
5025 +               validated_shader->uniforms_size;
5026 +       validation_state->tmu_write_count[tmu]++;
5027 +       /* Since direct uses a RADDR uniform reference, it will get counted in
5028 +        * check_instruction_reads()
5029 +        */
5030 +       if (!is_direct)
5031 +               validated_shader->uniforms_size += 4;
5032 +
5033 +       if (submit) {
5034 +               if (!record_validated_texture_sample(validated_shader,
5035 +                                                    validation_state, tmu)) {
5036 +                       return false;
5037 +               }
5038 +
5039 +               validation_state->tmu_write_count[tmu] = 0;
5040 +       }
5041 +
5042 +       return true;
5043 +}
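Concretely, the shape this accepts for a direct read is, in rough QPU terms (not exact assembler syntax): an ADD-pipe min of a previously max-clamped value against a uniform, which track_live_clamps() below records so that the uniform's offset can be stored in p_offset[1] here, followed by an ADD-pipe add writing TMUn_S whose other operand is another uniform, the UBO base address that reloc_tex() later rewrites to a physical address.  A single-parameter TMU submit that doesn't match this shape fails one of the checks above.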
5044 +
5045 +static bool
5046 +check_register_write(uint64_t inst,
5047 +                    struct vc4_validated_shader_info *validated_shader,
5048 +                    struct vc4_shader_validation_state *validation_state,
5049 +                    bool is_mul)
5050 +{
5051 +       uint32_t waddr = (is_mul ?
5052 +                         QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
5053 +                         QPU_GET_FIELD(inst, QPU_WADDR_ADD));
5054 +
5055 +       switch (waddr) {
5056 +       case QPU_W_UNIFORMS_ADDRESS:
5057 +               /* XXX: We'll probably need to support this for reladdr, but
5058 +                * it's definitely a security-related one.
5059 +                */
5060 +               DRM_ERROR("uniforms address load unsupported\n");
5061 +               return false;
5062 +
5063 +       case QPU_W_TLB_COLOR_MS:
5064 +       case QPU_W_TLB_COLOR_ALL:
5065 +       case QPU_W_TLB_Z:
5066 +               /* These only interact with the tile buffer, not main memory,
5067 +                * so they're safe.
5068 +                */
5069 +               return true;
5070 +
5071 +       case QPU_W_TMU0_S:
5072 +       case QPU_W_TMU0_T:
5073 +       case QPU_W_TMU0_R:
5074 +       case QPU_W_TMU0_B:
5075 +       case QPU_W_TMU1_S:
5076 +       case QPU_W_TMU1_T:
5077 +       case QPU_W_TMU1_R:
5078 +       case QPU_W_TMU1_B:
5079 +               return check_tmu_write(inst, validated_shader, validation_state,
5080 +                                      is_mul);
5081 +
5082 +       case QPU_W_HOST_INT:
5083 +       case QPU_W_TMU_NOSWAP:
5084 +       case QPU_W_TLB_ALPHA_MASK:
5085 +       case QPU_W_MUTEX_RELEASE:
5086 +               /* XXX: I haven't thought about these, so don't support them
5087 +                * for now.
5088 +                */
5089 +               DRM_ERROR("Unsupported waddr %d\n", waddr);
5090 +               return false;
5091 +
5092 +       case QPU_W_VPM_ADDR:
5093 +               DRM_ERROR("General VPM DMA unsupported\n");
5094 +               return false;
5095 +
5096 +       case QPU_W_VPM:
5097 +       case QPU_W_VPMVCD_SETUP:
5098 +               /* We allow VPM setup in general, even including VPM DMA
5099 +                * configuration setup, because the (unsafe) DMA can only be
5100 +                * triggered by QPU_W_VPM_ADDR writes.
5101 +                */
5102 +               return true;
5103 +
5104 +       case QPU_W_TLB_STENCIL_SETUP:
5105 +               return true;
5106 +       }
5107 +
5108 +       return true;
5109 +}
5110 +
5111 +static void
5112 +track_live_clamps(uint64_t inst,
5113 +                 struct vc4_validated_shader_info *validated_shader,
5114 +                 struct vc4_shader_validation_state *validation_state)
5115 +{
5116 +       uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
5117 +       uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
5118 +       uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
5119 +       uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
5120 +       uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
5121 +       uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
5122 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
5123 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
5124 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5125 +       bool ws = inst & QPU_WS;
5126 +       uint32_t lri_add_a, lri_add, lri_mul;
5127 +       bool add_a_is_min_0;
5128 +
5129 +       /* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
5130 +        * before we clear previous live state.
5131 +        */
5132 +       lri_add_a = raddr_add_a_to_live_reg_index(inst);
5133 +       add_a_is_min_0 = (lri_add_a != ~0 &&
5134 +                         validation_state->live_max_clamp_regs[lri_add_a]);
5135 +
5136 +       /* Clear live state for registers written by our instruction. */
5137 +       lri_add = waddr_to_live_reg_index(waddr_add, ws);
5138 +       lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
5139 +       if (lri_mul != ~0) {
5140 +               validation_state->live_max_clamp_regs[lri_mul] = false;
5141 +               validation_state->live_min_clamp_offsets[lri_mul] = ~0;
5142 +       }
5143 +       if (lri_add != ~0) {
5144 +               validation_state->live_max_clamp_regs[lri_add] = false;
5145 +               validation_state->live_min_clamp_offsets[lri_add] = ~0;
5146 +       } else {
5147 +               /* Nothing further to do for live tracking, since only ADDs
5148 +                * generate new live clamp registers.
5149 +                */
5150 +               return;
5151 +       }
5152 +
5153 +       /* Now, handle remaining live clamp tracking for the ADD operation. */
5154 +
5155 +       if (cond_add != QPU_COND_ALWAYS)
5156 +               return;
5157 +
5158 +       if (op_add == QPU_A_MAX) {
5159 +               /* Track live clamps of a value to a minimum of 0 (in either
5160 +                * arg).
5161 +                */
5162 +               if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
5163 +                   (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
5164 +                       return;
5165 +               }
5166 +
5167 +               validation_state->live_max_clamp_regs[lri_add] = true;
5168 +       } else if (op_add == QPU_A_MIN) {
5169 +               /* Track live clamps of a value clamped to a minimum of 0 and
5170 +                * a maximum of some uniform's offset.
5171 +                */
5172 +               if (!add_a_is_min_0)
5173 +                       return;
5174 +
5175 +               if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
5176 +                   !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
5177 +                     sig != QPU_SIG_SMALL_IMM)) {
5178 +                       return;
5179 +               }
5180 +
5181 +               validation_state->live_min_clamp_offsets[lri_add] =
5182 +                       validated_shader->uniforms_size;
5183 +       }
5184 +}
5185 +
5186 +static bool
5187 +check_instruction_writes(uint64_t inst,
5188 +                        struct vc4_validated_shader_info *validated_shader,
5189 +                        struct vc4_shader_validation_state *validation_state)
5190 +{
5191 +       uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
5192 +       uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
5193 +       bool ok;
5194 +
5195 +       if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
5196 +               DRM_ERROR("ADD and MUL both set up textures\n");
5197 +               return false;
5198 +       }
5199 +
5200 +       ok = (check_register_write(inst, validated_shader, validation_state, false) &&
5201 +             check_register_write(inst, validated_shader, validation_state, true));
5202 +
5203 +       track_live_clamps(inst, validated_shader, validation_state);
5204 +
5205 +       return ok;
5206 +}
5207 +
5208 +static bool
5209 +check_instruction_reads(uint64_t inst,
5210 +                       struct vc4_validated_shader_info *validated_shader)
5211 +{
5212 +       uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
5213 +       uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
5214 +       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5215 +
5216 +       if (raddr_a == QPU_R_UNIF ||
5217 +           (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
5218 +               /* This can't overflow the uint32_t, because we're reading 8
5219 +                * bytes of instruction to increment by 4 here, so we'd
5220 +                * already be OOM.
5221 +                */
5222 +               validated_shader->uniforms_size += 4;
5223 +       }
5224 +
5225 +       return true;
5226 +}
5227 +
5228 +struct vc4_validated_shader_info *
5229 +vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
5230 +{
5231 +       bool found_shader_end = false;
5232 +       int shader_end_ip = 0;
5233 +       uint32_t ip, max_ip;
5234 +       uint64_t *shader;
5235 +       struct vc4_validated_shader_info *validated_shader;
5236 +       struct vc4_shader_validation_state validation_state;
5237 +       int i;
5238 +
5239 +       memset(&validation_state, 0, sizeof(validation_state));
5240 +
5241 +       for (i = 0; i < 8; i++)
5242 +               validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
5243 +       for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
5244 +               validation_state.live_min_clamp_offsets[i] = ~0;
5245 +
5246 +       shader = shader_obj->vaddr;
5247 +       max_ip = shader_obj->base.size / sizeof(uint64_t);
5248 +
5249 +       validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
5250 +       if (!validated_shader)
5251 +               return NULL;
5252 +
5253 +       for (ip = 0; ip < max_ip; ip++) {
5254 +               uint64_t inst = shader[ip];
5255 +               uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
5256 +
5257 +               switch (sig) {
5258 +               case QPU_SIG_NONE:
5259 +               case QPU_SIG_WAIT_FOR_SCOREBOARD:
5260 +               case QPU_SIG_SCOREBOARD_UNLOCK:
5261 +               case QPU_SIG_COLOR_LOAD:
5262 +               case QPU_SIG_LOAD_TMU0:
5263 +               case QPU_SIG_LOAD_TMU1:
5264 +               case QPU_SIG_PROG_END:
5265 +               case QPU_SIG_SMALL_IMM:
5266 +                       if (!check_instruction_writes(inst, validated_shader,
5267 +                                                     &validation_state)) {
5268 +                               DRM_ERROR("Bad write at ip %d\n", ip);
5269 +                               goto fail;
5270 +                       }
5271 +
5272 +                       if (!check_instruction_reads(inst, validated_shader))
5273 +                               goto fail;
5274 +
5275 +                       if (sig == QPU_SIG_PROG_END) {
5276 +                               found_shader_end = true;
5277 +                               shader_end_ip = ip;
5278 +                       }
5279 +
5280 +                       break;
5281 +
5282 +               case QPU_SIG_LOAD_IMM:
5283 +                       if (!check_instruction_writes(inst, validated_shader,
5284 +                                                     &validation_state)) {
5285 +                               DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
5286 +                               goto fail;
5287 +                       }
5288 +                       break;
5289 +
5290 +               default:
5291 +                       DRM_ERROR("Unsupported QPU signal %d at "
5292 +                                 "instruction %d\n", sig, ip);
5293 +                       goto fail;
5294 +               }
5295 +
5296 +               /* There are two delay slots after program end is signaled
5297 +                * that are still executed, then we're finished.
5298 +                */
5299 +               if (found_shader_end && ip == shader_end_ip + 2)
5300 +                       break;
5301 +       }
5302 +
5303 +       if (ip == max_ip) {
5304 +               DRM_ERROR("shader failed to terminate before "
5305 +                         "shader BO end at %d\n",
5306 +                         shader_obj->base.size);
5307 +               goto fail;
5308 +       }
5309 +
5310 +       /* Again, no chance of integer overflow here because the worst case
5311 +        * scenario is 8 bytes of uniforms plus handles per 8-byte
5312 +        * instruction.
5313 +        */
5314 +       validated_shader->uniforms_src_size =
5315 +               (validated_shader->uniforms_size +
5316 +                4 * validated_shader->num_texture_samples);
5317 +
5318 +       return validated_shader;
5319 +
5320 +fail:
5321 +       if (validated_shader) {
5322 +               kfree(validated_shader->texture_samples);
5323 +               kfree(validated_shader);
5324 +       }
5325 +       return NULL;
5326 +}
5327 --- /dev/null
5328 +++ b/include/uapi/drm/vc4_drm.h
5329 @@ -0,0 +1,229 @@
5330 +/*
5331 + * Copyright © 2014-2015 Broadcom
5332 + *
5333 + * Permission is hereby granted, free of charge, to any person obtaining a
5334 + * copy of this software and associated documentation files (the "Software"),
5335 + * to deal in the Software without restriction, including without limitation
5336 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
5337 + * and/or sell copies of the Software, and to permit persons to whom the
5338 + * Software is furnished to do so, subject to the following conditions:
5339 + *
5340 + * The above copyright notice and this permission notice (including the next
5341 + * paragraph) shall be included in all copies or substantial portions of the
5342 + * Software.
5343 + *
5344 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5345 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5346 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
5347 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
5348 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
5349 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
5350 + * IN THE SOFTWARE.
5351 + */
5352 +
5353 +#ifndef _UAPI_VC4_DRM_H_
5354 +#define _UAPI_VC4_DRM_H_
5355 +
5356 +#include <drm/drm.h>
5357 +
5358 +#define DRM_VC4_SUBMIT_CL                         0x00
5359 +#define DRM_VC4_WAIT_SEQNO                        0x01
5360 +#define DRM_VC4_WAIT_BO                           0x02
5361 +#define DRM_VC4_CREATE_BO                         0x03
5362 +#define DRM_VC4_MMAP_BO                           0x04
5363 +#define DRM_VC4_CREATE_SHADER_BO                  0x05
5364 +
5365 +#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
5366 +#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
5367 +#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
5368 +#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
5369 +#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
5370 +#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
5371 +
5372 +struct drm_vc4_submit_rcl_surface {
5373 +       uint32_t hindex; /* Handle index, or ~0 if not present. */
5374 +       uint32_t offset; /* Offset to start of buffer. */
5375 +       /*
5376 +        * Bits for either render config (color_ms_write) or load/store packet.
5377 +        */
5378 +       uint16_t bits;
5379 +       uint16_t pad;
5380 +};
5381 +
5382 +/**
5383 + * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
5384 + * engine.
5385 + *
5386 + * Drivers typically use GPU BOs to store batchbuffers / command lists and
5387 + * their associated state.  However, because the VC4 lacks an MMU, we have to
5388 + * do validation of memory accesses by the GPU commands.  If we were to store
5389 + * our commands in BOs, we'd need to do uncached readback from them to do the
5390 + * validation process, which is too expensive.  Instead, userspace accumulates
5391 + * commands and associated state in plain memory, then the kernel copies the
5392 + * data to its own address space, and then validates and stores it in a GPU
5393 + * BO.
5394 + */
5395 +struct drm_vc4_submit_cl {
5396 +       /* Pointer to the binner command list.
5397 +        *
5398 +        * This is the first set of commands executed, which runs the
5399 +        * coordinate shader to determine where primitives land on the screen,
5400 +        * then writes out the state updates and draw calls necessary per tile
5401 +        * to the tile allocation BO.
5402 +        */
5403 +       uint64_t bin_cl;
5404 +
5405 +       /* Pointer to the shader records.
5406 +        *
5407 +        * Shader records are the structures read by the hardware that contain
5408 +        * pointers to uniforms, shaders, and vertex attributes.  The
5409 +        * reference to the shader record has enough information to determine
5410 +        * how many pointers are necessary (fixed number for shaders/uniforms,
5411 +        * and an attribute count), so those BO indices into bo_handles are
5412 +        * just stored as uint32_ts before each shader record passed in.
5413 +        */
5414 +       uint64_t shader_rec;
5415 +
5416 +       /* Pointer to uniform data and texture handles for the textures
5417 +        * referenced by the shader.
5418 +        *
5419 +        * For each shader state record, there is a set of uniform data in the
5420 +        * order referenced by the record (FS, VS, then CS).  Each set of
5421 +        * uniform data has a uint32_t index into bo_handles per texture
5422 +        * sample operation, in the order the QPU_W_TMUn_S writes appear in
5423 +        * the program.  Following the texture BO handle indices is the actual
5424 +        * uniform data.
5425 +        *
5426 +        * The individual uniform state blocks don't have sizes passed in,
5427 +        * because the kernel has to determine the sizes anyway during shader
5428 +        * code validation.
5429 +        */
5430 +       uint64_t uniforms;
5431 +       uint64_t bo_handles;
5432 +
5433 +       /* Size in bytes of the binner command list. */
5434 +       uint32_t bin_cl_size;
5435 +       /* Size in bytes of the set of shader records. */
5436 +       uint32_t shader_rec_size;
5437 +       /* Number of shader records.
5438 +        *
5439 +        * This could just be computed from the contents of shader_records and
5440 +        * the address bits of references to them from the bin CL, but it
5441 +        * keeps the kernel from having to resize some allocations it makes.
5442 +        */
5443 +       uint32_t shader_rec_count;
5444 +       /* Size in bytes of the uniform state. */
5445 +       uint32_t uniforms_size;
5446 +
5447 +       /* Number of BO handles passed in (size is that times 4). */
5448 +       uint32_t bo_handle_count;
5449 +
5450 +       /* RCL setup: */
5451 +       uint16_t width;
5452 +       uint16_t height;
5453 +       uint8_t min_x_tile;
5454 +       uint8_t min_y_tile;
5455 +       uint8_t max_x_tile;
5456 +       uint8_t max_y_tile;
5457 +       struct drm_vc4_submit_rcl_surface color_read;
5458 +       struct drm_vc4_submit_rcl_surface color_ms_write;
5459 +       struct drm_vc4_submit_rcl_surface zs_read;
5460 +       struct drm_vc4_submit_rcl_surface zs_write;
5461 +       uint32_t clear_color[2];
5462 +       uint32_t clear_z;
5463 +       uint8_t clear_s;
5464 +
5465 +       uint32_t pad:24;
5466 +
5467 +#define VC4_SUBMIT_CL_USE_CLEAR_COLOR                  (1 << 0)
5468 +       uint32_t flags;
5469 +
5470 +       /* Returned value of the seqno of this render job (for the
5471 +        * wait ioctl).
5472 +        */
5473 +       uint64_t seqno;
5474 +};
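As a hypothetical example of the uniforms layout described above: a job with a single GL shader record whose fragment shader samples two textures (and whose vertex and coordinate shaders sample none) supplies, in order, two uint32_t indices into bo_handles (one per FS sample, in QPU_W_TMUn_S write order), then the FS uniform words, the VS uniform words, and finally the CS uniform words.  The kernel learns each block's size from shader validation (uniforms_size and uniforms_src_size in vc4_validate_shader()), which is why no per-block sizes appear in this struct.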
5475 +
5476 +/**
5477 + * struct drm_vc4_wait_seqno - ioctl argument for waiting for
5478 + * DRM_VC4_SUBMIT_CL completion using its returned seqno.
5479 + *
5480 + * timeout_ns is the timeout in nanoseconds, where "0" means "don't
5481 + * block, just return the status."
5482 + */
5483 +struct drm_vc4_wait_seqno {
5484 +       uint64_t seqno;
5485 +       uint64_t timeout_ns;
5486 +};
5487 +
5488 +/**
5489 + * struct drm_vc4_wait_bo - ioctl argument for waiting for
5490 + * completion of the last DRM_VC4_SUBMIT_CL on a BO.
5491 + *
5492 + * This is useful for cases where multiple processes might be
5493 + * rendering to a BO and you want to wait for all rendering to be
5494 + * completed.
5495 + */
5496 +struct drm_vc4_wait_bo {
5497 +       uint32_t handle;
5498 +       uint32_t pad;
5499 +       uint64_t timeout_ns;
5500 +};
5501 +
5502 +/**
5503 + * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
5504 + *
5505 + * There are currently no values for the flags argument, but it may be
5506 + * used in a future extension.
5507 + */
5508 +struct drm_vc4_create_bo {
5509 +       uint32_t size;
5510 +       uint32_t flags;
5511 +       /** Returned GEM handle for the BO. */
5512 +       uint32_t handle;
5513 +       uint32_t pad;
5514 +};
5515 +
5516 +/**
5517 + * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
5518 + * shader BOs.
5519 + *
5520 + * Since allowing a shader to be overwritten while it's also being
5521 + * executed from would allow privilege escalation, shaders must be
5522 + * created using this ioctl, and they can't be mmapped later.
5523 + */
5524 +struct drm_vc4_create_shader_bo {
5525 +       /* Size of the data argument. */
5526 +       uint32_t size;
5527 +       /* Flags, currently must be 0. */
5528 +       uint32_t flags;
5529 +
5530 +       /* Pointer to the data. */
5531 +       uint64_t data;
5532 +
5533 +       /** Returned GEM handle for the BO. */
5534 +       uint32_t handle;
5535 +       /* Pad, must be 0. */
5536 +       uint32_t pad;
5537 +};
5538 +
5539 +/**
5540 + * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
5541 + *
5542 + * This doesn't actually perform an mmap.  Instead, it returns the
5543 + * offset you need to use in an mmap on the DRM device node.  This
5544 + * means that tools like valgrind end up knowing about the mapped
5545 + * memory.
5546 + *
5547 + * There are currently no values for the flags argument, but it may be
5548 + * used in a future extension.
5549 + */
5550 +struct drm_vc4_mmap_bo {
5551 +       /** Handle for the object being mapped. */
5552 +       uint32_t handle;
5553 +       uint32_t flags;
5554 +       /** offset into the drm node to use for subsequent mmap call. */
5555 +       uint64_t offset;
5556 +};
5557 +
5558 +#endif /* _UAPI_VC4_DRM_H_ */
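A rough userspace sketch of how the BO ioctls above fit together, under the assumptions that this header is installed as <drm/vc4_drm.h> and that the device node is /dev/dri/card0 (both are illustrative choices, not guaranteed by this patch):

/* Create a BO, map it through the fake-mmap offset, and wait on it. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/vc4_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node path */
	if (fd < 0)
		return 1;

	/* Allocate a 64 KiB BO; the kernel returns a GEM handle. */
	struct drm_vc4_create_bo create;
	memset(&create, 0, sizeof(create));
	create.size = 64 * 1024;
	if (ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create))
		return 1;

	/* Ask for the mmap offset, then do the real mmap on the DRM node. */
	struct drm_vc4_mmap_bo map;
	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map))
		return 1;

	void *vaddr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, map.offset);
	if (vaddr == MAP_FAILED)
		return 1;

	/* ... fill vaddr with vertex/uniform data, reference the handle from
	 * a drm_vc4_submit_cl, then wait for any rendering to the BO ...
	 */
	struct drm_vc4_wait_bo wait;
	memset(&wait, 0, sizeof(wait));
	wait.handle = create.handle;
	wait.timeout_ns = 1000000000ull;	/* 1 second */
	ioctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);

	munmap(vaddr, create.size);
	close(fd);
	return 0;
}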