drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

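/* Order (log2 of the bucket count) of the execbuf resource/buffer hash table. */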
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}

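/*
 * Usage sketch (illustrative only): the command verifier table is built
 * from entries like the following, indexed by SVGA command id. The exact
 * flags per command are defined where the table itself is declared.
 *
 *   static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *           VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *                       true, false, false),
 *   };
 */
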
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it's pointing to,
 * to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

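        /*
         * Relocations with a NULL resource pointer are patched to
         * SVGA_3D_CMD_NOP instead of a resource id, typically used to
         * turn the owning command into a no-op.
         */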
        list_for_each_entry(rel, list, head) {
                if (likely(rel->res != NULL))
                        cb[rel->offset] = rel->res->id;
                else
                        cb[rel->offset] = SVGA_3D_CMD_NOP;
        }
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
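        /*
         * Note: this uses GCC's two-operand "?:" extension, so it
         * evaluates to capable(CAP_SYS_ADMIN) (i.e. 1) for privileged
         * callers and to -EINVAL otherwise. Either value is non-zero,
         * so callers treating non-zero as failure reject the command
         * in both cases.
         */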
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        if (p_val)
                *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id_loc - sw_context->buf_start);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id_loc - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
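        /*
         * The device-visible MOB id of a buffer in MOB placement is its
         * TTM memory start (page offset), hence base.mem.start below.
         */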
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
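                /*
                 * Context nodes are kept first on the resource list, so
                 * the first node without staged bindings marks the end
                 * of the contexts.
                 */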
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We also pin the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_dx_define_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineQuery q;
        } *cmd;
        int ret;
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *cotable_res;

        if (ctx_node == NULL) {
                DRM_ERROR("DX Context not set for query.\n");
                return -EINVAL;
        }

        cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

        if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
            cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
        ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
        vmw_resource_unreference(&cotable_res);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
1386 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1387                                  struct vmw_sw_context *sw_context,
1388                                  SVGA3dCmdHeader *header)
1389 {
1390         struct vmw_dx_bind_query_cmd {
1391                 SVGA3dCmdHeader header;
1392                 SVGA3dCmdDXBindQuery q;
1393         } *cmd;
1394
1395         struct vmw_dma_buffer *vmw_bo;
1396         int ret;
1397
1399         cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1400
1401         /*
1402          * Look up the buffer pointed to by q.mobid, put it on the relocation
1403          * list so its kernel mode MOB ID can be filled in later
1404          */
1405         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1406                                     &vmw_bo);
1407
1408         if (ret != 0)
1409                 return ret;
1410
1411         sw_context->dx_query_mob = vmw_bo;
1412         sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1413
1414         vmw_dmabuf_unreference(&vmw_bo);
1415
1416         return ret;
1417 }
1418
1421 /**
1422  * vmw_cmd_begin_gb_query - Validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
1423  *
1424  * @dev_priv: Pointer to a device private struct.
1425  * @sw_context: The software context used for this command submission.
1426  * @header: Pointer to the command header in the command stream.
1427  */
1428 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1429                                   struct vmw_sw_context *sw_context,
1430                                   SVGA3dCmdHeader *header)
1431 {
1432         struct vmw_begin_gb_query_cmd {
1433                 SVGA3dCmdHeader header;
1434                 SVGA3dCmdBeginGBQuery q;
1435         } *cmd;
1436
1437         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1438                            header);
1439
1440         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1441                                  user_context_converter, &cmd->q.cid,
1442                                  NULL);
1443 }
1444
1445 /**
1446  * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
1447  *
1448  * @dev_priv: Pointer to a device private struct.
1449  * @sw_context: The software context used for this command submission.
1450  * @header: Pointer to the command header in the command stream.
1451  */
1452 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1453                                struct vmw_sw_context *sw_context,
1454                                SVGA3dCmdHeader *header)
1455 {
1456         struct vmw_begin_query_cmd {
1457                 SVGA3dCmdHeader header;
1458                 SVGA3dCmdBeginQuery q;
1459         } *cmd;
1460
1461         cmd = container_of(header, struct vmw_begin_query_cmd,
1462                            header);
1463
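             /*
              * On guest-backed devices the legacy query command is rewritten
              * in place into its guest-backed equivalent and then revalidated.
              * The END and WAIT query commands below follow the same pattern.
              */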
1464         if (dev_priv->has_mob) {
1465                 struct {
1466                         SVGA3dCmdHeader header;
1467                         SVGA3dCmdBeginGBQuery q;
1468                 } gb_cmd;
1469
1470                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1471
1472                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1473                 gb_cmd.header.size = cmd->header.size;
1474                 gb_cmd.q.cid = cmd->q.cid;
1475                 gb_cmd.q.type = cmd->q.type;
1476
1477                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1478                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1479         }
1480
1481         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1482                                  user_context_converter, &cmd->q.cid,
1483                                  NULL);
1484 }
1485
1486 /**
1487  * vmw_cmd_end_gb_query - Validate an SVGA_3D_CMD_END_GB_QUERY command.
1488  *
1489  * @dev_priv: Pointer to a device private struct.
1490  * @sw_context: The software context used for this command submission.
1491  * @header: Pointer to the command header in the command stream.
1492  */
1493 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1494                                 struct vmw_sw_context *sw_context,
1495                                 SVGA3dCmdHeader *header)
1496 {
1497         struct vmw_dma_buffer *vmw_bo;
1498         struct vmw_query_cmd {
1499                 SVGA3dCmdHeader header;
1500                 SVGA3dCmdEndGBQuery q;
1501         } *cmd;
1502         int ret;
1503
1504         cmd = container_of(header, struct vmw_query_cmd, header);
1505         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1506         if (unlikely(ret != 0))
1507                 return ret;
1508
1509         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1510                                     &cmd->q.mobid,
1511                                     &vmw_bo);
1512         if (unlikely(ret != 0))
1513                 return ret;
1514
1515         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1516
1517         vmw_dmabuf_unreference(&vmw_bo);
1518         return ret;
1519 }
1520
1521 /**
1522  * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
1523  *
1524  * @dev_priv: Pointer to a device private struct.
1525  * @sw_context: The software context used for this command submission.
1526  * @header: Pointer to the command header in the command stream.
1527  */
1528 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1529                              struct vmw_sw_context *sw_context,
1530                              SVGA3dCmdHeader *header)
1531 {
1532         struct vmw_dma_buffer *vmw_bo;
1533         struct vmw_query_cmd {
1534                 SVGA3dCmdHeader header;
1535                 SVGA3dCmdEndQuery q;
1536         } *cmd;
1537         int ret;
1538
1539         cmd = container_of(header, struct vmw_query_cmd, header);
1540         if (dev_priv->has_mob) {
1541                 struct {
1542                         SVGA3dCmdHeader header;
1543                         SVGA3dCmdEndGBQuery q;
1544                 } gb_cmd;
1545
1546                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1547
1548                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1549                 gb_cmd.header.size = cmd->header.size;
1550                 gb_cmd.q.cid = cmd->q.cid;
1551                 gb_cmd.q.type = cmd->q.type;
1552                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1553                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1554
1555                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1556                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1557         }
1558
1559         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1560         if (unlikely(ret != 0))
1561                 return ret;
1562
1563         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1564                                       &cmd->q.guestResult,
1565                                       &vmw_bo);
1566         if (unlikely(ret != 0))
1567                 return ret;
1568
1569         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1570
1571         vmw_dmabuf_unreference(&vmw_bo);
1572         return ret;
1573 }
1574
1575 /**
1576  * vmw_cmd_wait_gb_query - Validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1577  *
1578  * @dev_priv: Pointer to a device private struct.
1579  * @sw_context: The software context used for this command submission.
1580  * @header: Pointer to the command header in the command stream.
1581  */
1582 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1583                                  struct vmw_sw_context *sw_context,
1584                                  SVGA3dCmdHeader *header)
1585 {
1586         struct vmw_dma_buffer *vmw_bo;
1587         struct vmw_query_cmd {
1588                 SVGA3dCmdHeader header;
1589                 SVGA3dCmdWaitForGBQuery q;
1590         } *cmd;
1591         int ret;
1592
1593         cmd = container_of(header, struct vmw_query_cmd, header);
1594         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1595         if (unlikely(ret != 0))
1596                 return ret;
1597
1598         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1599                                     &cmd->q.mobid,
1600                                     &vmw_bo);
1601         if (unlikely(ret != 0))
1602                 return ret;
1603
1604         vmw_dmabuf_unreference(&vmw_bo);
1605         return 0;
1606 }
1607
1608 /**
1609  * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
1610  *
1611  * @dev_priv: Pointer to a device private struct.
1612  * @sw_context: The software context used for this command submission.
1613  * @header: Pointer to the command header in the command stream.
1614  */
1615 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1616                               struct vmw_sw_context *sw_context,
1617                               SVGA3dCmdHeader *header)
1618 {
1619         struct vmw_dma_buffer *vmw_bo;
1620         struct vmw_query_cmd {
1621                 SVGA3dCmdHeader header;
1622                 SVGA3dCmdWaitForQuery q;
1623         } *cmd;
1624         int ret;
1625
1626         cmd = container_of(header, struct vmw_query_cmd, header);
1627         if (dev_priv->has_mob) {
1628                 struct {
1629                         SVGA3dCmdHeader header;
1630                         SVGA3dCmdWaitForGBQuery q;
1631                 } gb_cmd;
1632
1633                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1634
1635                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1636                 gb_cmd.header.size = cmd->header.size;
1637                 gb_cmd.q.cid = cmd->q.cid;
1638                 gb_cmd.q.type = cmd->q.type;
1639                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1640                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1641
1642                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1643                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1644         }
1645
1646         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1647         if (unlikely(ret != 0))
1648                 return ret;
1649
1650         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1651                                       &cmd->q.guestResult,
1652                                       &vmw_bo);
1653         if (unlikely(ret != 0))
1654                 return ret;
1655
1656         vmw_dmabuf_unreference(&vmw_bo);
1657         return 0;
1658 }
1659
1660 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1661                        struct vmw_sw_context *sw_context,
1662                        SVGA3dCmdHeader *header)
1663 {
1664         struct vmw_dma_buffer *vmw_bo = NULL;
1665         struct vmw_surface *srf = NULL;
1666         struct vmw_dma_cmd {
1667                 SVGA3dCmdHeader header;
1668                 SVGA3dCmdSurfaceDMA dma;
1669         } *cmd;
1670         int ret;
1671         SVGA3dCmdSurfaceDMASuffix *suffix;
1672         uint32_t bo_size;
1673
1674         cmd = container_of(header, struct vmw_dma_cmd, header);
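             /* The DMA suffix occupies the last bytes of the command body. */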
1675         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1676                                                header->size - sizeof(*suffix));
1677
1678         /* Make sure device and verifier stay in sync. */
1679         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1680                 DRM_ERROR("Invalid DMA suffix size.\n");
1681                 return -EINVAL;
1682         }
1683
1684         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1685                                       &cmd->dma.guest.ptr,
1686                                       &vmw_bo);
1687         if (unlikely(ret != 0))
1688                 return ret;
1689
1690         /* Make sure DMA doesn't cross BO boundaries. */
1691         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1692         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1693                 DRM_ERROR("Invalid DMA offset.\n");
1694                 ret = -EINVAL;
1695                 goto out_no_surface;
1696         }
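
             /* Clamp the transfer so it cannot extend past the end of the BO. */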
1697         bo_size -= cmd->dma.guest.ptr.offset;
1698         if (unlikely(suffix->maximumOffset > bo_size))
1699                 suffix->maximumOffset = bo_size;
1700
1701         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1702                                 user_surface_converter, &cmd->dma.host.sid,
1703                                 NULL);
1704         if (unlikely(ret != 0)) {
1705                 if (unlikely(ret != -ERESTARTSYS))
1706                         DRM_ERROR("Could not find surface for DMA.\n");
1707                 goto out_no_surface;
1708         }
1709
1710         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1711
1712         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1713                              header);
1714
1715 out_no_surface:
1716         vmw_dmabuf_unreference(&vmw_bo);
1717         return ret;
1718 }
1719
1720 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1721                         struct vmw_sw_context *sw_context,
1722                         SVGA3dCmdHeader *header)
1723 {
1724         struct vmw_draw_cmd {
1725                 SVGA3dCmdHeader header;
1726                 SVGA3dCmdDrawPrimitives body;
1727         } *cmd;
1728         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1729                 (unsigned long)header + sizeof(*cmd));
1730         SVGA3dPrimitiveRange *range;
1731         uint32_t i;
1732         uint32_t maxnum;
1733         int ret;
1734
1735         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1736         if (unlikely(ret != 0))
1737                 return ret;
1738
1739         cmd = container_of(header, struct vmw_draw_cmd, header);
1740         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1741
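             /*
              * Bound the variable-length declaration and range arrays by
              * what actually fits in the submitted command body.
              */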
1742         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1743                 DRM_ERROR("Illegal number of vertex declarations.\n");
1744                 return -EINVAL;
1745         }
1746
1747         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1748                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1749                                         user_surface_converter,
1750                                         &decl->array.surfaceId, NULL);
1751                 if (unlikely(ret != 0))
1752                         return ret;
1753         }
1754
1755         maxnum = (header->size - sizeof(cmd->body) -
1756                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1757         if (unlikely(cmd->body.numRanges > maxnum)) {
1758                 DRM_ERROR("Illegal number of index ranges.\n");
1759                 return -EINVAL;
1760         }
1761
1762         range = (SVGA3dPrimitiveRange *) decl;
1763         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1764                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1765                                         user_surface_converter,
1766                                         &range->indexArray.surfaceId, NULL);
1767                 if (unlikely(ret != 0))
1768                         return ret;
1769         }
1770         return 0;
1771 }
1772
1773
1774 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1775                              struct vmw_sw_context *sw_context,
1776                              SVGA3dCmdHeader *header)
1777 {
1778         struct vmw_tex_state_cmd {
1779                 SVGA3dCmdHeader header;
1780                 SVGA3dCmdSetTextureState state;
1781         } *cmd;
1782
1783         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1784           ((unsigned long) header + header->size + sizeof(*header));
1785         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1786                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1787         struct vmw_resource_val_node *ctx_node;
1788         struct vmw_resource_val_node *res_node;
1789         int ret;
1790
1791         cmd = container_of(header, struct vmw_tex_state_cmd,
1792                            header);
1793
1794         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1795                                 user_context_converter, &cmd->state.cid,
1796                                 &ctx_node);
1797         if (unlikely(ret != 0))
1798                 return ret;
1799
1800         for (; cur_state < last_state; ++cur_state) {
1801                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1802                         continue;
1803
1804                 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1805                         DRM_ERROR("Illegal texture/sampler unit %u.\n",
1806                                   (unsigned) cur_state->stage);
1807                         return -EINVAL;
1808                 }
1809
1810                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1811                                         user_surface_converter,
1812                                         &cur_state->value, &res_node);
1813                 if (unlikely(ret != 0))
1814                         return ret;
1815
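                     /*
                      * Guest-backed contexts have their bindings tracked by
                      * the kernel; record this texture binding.
                      */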
1816                 if (dev_priv->has_mob) {
1817                         struct vmw_ctx_bindinfo_tex binding;
1818
1819                         binding.bi.ctx = ctx_node->res;
1820                         binding.bi.res = res_node ? res_node->res : NULL;
1821                         binding.bi.bt = vmw_ctx_binding_tex;
1822                         binding.texture_stage = cur_state->stage;
1823                         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1824                                         0, binding.texture_stage);
1825                 }
1826         }
1827
1828         return 0;
1829 }
1830
1831 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1832                                       struct vmw_sw_context *sw_context,
1833                                       void *buf)
1834 {
1835         struct vmw_dma_buffer *vmw_bo;
1836         int ret;
1837
1838         struct {
1839                 uint32_t header;
1840                 SVGAFifoCmdDefineGMRFB body;
1841         } *cmd = buf;
1842
1843         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1844                                       &cmd->body.ptr,
1845                                       &vmw_bo);
1846         if (unlikely(ret != 0))
1847                 return ret;
1848
1849         vmw_dmabuf_unreference(&vmw_bo);
1850
1851         return ret;
1852 }
1853
1855 /**
1856  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1857  * switching
1858  *
1859  * @dev_priv: Pointer to a device private struct.
1860  * @sw_context: The software context being used for this batch.
1861  * @val_node: The validation node representing the resource.
1862  * @buf_id: Pointer to the user-space backup buffer handle in the command
1863  * stream.
1864  * @backup_offset: Offset of backup into MOB.
1865  *
1866  * This function prepares for registering a switch of backup buffers
1867  * in the resource metadata just prior to unreserving. The new backup
1868  * buffer is looked up here and staged in the validation node.
1869  */
1870 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1871                                      struct vmw_sw_context *sw_context,
1872                                      struct vmw_resource_val_node *val_node,
1873                                      uint32_t *buf_id,
1874                                      unsigned long backup_offset)
1875 {
1876         struct vmw_dma_buffer *dma_buf;
1877         int ret;
1878
1879         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1880         if (ret)
1881                 return ret;
1882
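             /*
              * The buffer reference returned by vmw_translate_mob_ptr() is
              * handed over to the validation node, replacing any previously
              * staged backup buffer.
              */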
1883         val_node->switching_backup = true;
1884         if (val_node->first_usage)
1885                 val_node->no_buffer_needed = true;
1886
1887         vmw_dmabuf_unreference(&val_node->new_backup);
1888         val_node->new_backup = dma_buf;
1889         val_node->new_backup_offset = backup_offset;
1890
1891         return 0;
1892 }
1893
1895 /**
1896  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1897  *
1898  * @dev_priv: Pointer to a device private struct.
1899  * @sw_context: The software context being used for this batch.
1900  * @res_type: The resource type.
1901  * @converter: Information about user-space binding for this resource type.
1902  * @res_id: Pointer to the user-space resource handle in the command stream.
1903  * @buf_id: Pointer to the user-space backup buffer handle in the command
1904  * stream.
1905  * @backup_offset: Offset of backup into MOB.
1906  *
1907  * This function prepares for registering a switch of backup buffers
1908  * in the resource metadata just prior to unreserving. It's basically a wrapper
1909  * around vmw_cmd_res_switch_backup with a different interface.
1910  */
1911 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1912                                  struct vmw_sw_context *sw_context,
1913                                  enum vmw_res_type res_type,
1914                                  const struct vmw_user_resource_conv
1915                                  *converter,
1916                                  uint32_t *res_id,
1917                                  uint32_t *buf_id,
1918                                  unsigned long backup_offset)
1919 {
1920         struct vmw_resource_val_node *val_node;
1921         int ret;
1922
1923         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1924                                 converter, res_id, &val_node);
1925         if (ret)
1926                 return ret;
1927
1928         return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1929                                          buf_id, backup_offset);
1930 }
1931
1932 /**
1933  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1934  * command
1935  *
1936  * @dev_priv: Pointer to a device private struct.
1937  * @sw_context: The software context being used for this batch.
1938  * @header: Pointer to the command header in the command stream.
1939  */
1940 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1941                                    struct vmw_sw_context *sw_context,
1942                                    SVGA3dCmdHeader *header)
1943 {
1944         struct vmw_bind_gb_surface_cmd {
1945                 SVGA3dCmdHeader header;
1946                 SVGA3dCmdBindGBSurface body;
1947         } *cmd;
1948
1949         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1950
1951         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1952                                      user_surface_converter,
1953                                      &cmd->body.sid, &cmd->body.mobid,
1954                                      0);
1955 }
1956
1957 /**
1958  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1959  * command
1960  *
1961  * @dev_priv: Pointer to a device private struct.
1962  * @sw_context: The software context being used for this batch.
1963  * @header: Pointer to the command header in the command stream.
1964  */
1965 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1966                                    struct vmw_sw_context *sw_context,
1967                                    SVGA3dCmdHeader *header)
1968 {
1969         struct vmw_gb_surface_cmd {
1970                 SVGA3dCmdHeader header;
1971                 SVGA3dCmdUpdateGBImage body;
1972         } *cmd;
1973
1974         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1975
1976         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1977                                  user_surface_converter,
1978                                  &cmd->body.image.sid, NULL);
1979 }
1980
1981 /**
1982  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1983  * command
1984  *
1985  * @dev_priv: Pointer to a device private struct.
1986  * @sw_context: The software context being used for this batch.
1987  * @header: Pointer to the command header in the command stream.
1988  */
1989 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1990                                      struct vmw_sw_context *sw_context,
1991                                      SVGA3dCmdHeader *header)
1992 {
1993         struct vmw_gb_surface_cmd {
1994                 SVGA3dCmdHeader header;
1995                 SVGA3dCmdUpdateGBSurface body;
1996         } *cmd;
1997
1998         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1999
2000         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2001                                  user_surface_converter,
2002                                  &cmd->body.sid, NULL);
2003 }
2004
2005 /**
2006  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2007  * command
2008  *
2009  * @dev_priv: Pointer to a device private struct.
2010  * @sw_context: The software context being used for this batch.
2011  * @header: Pointer to the command header in the command stream.
2012  */
2013 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2014                                      struct vmw_sw_context *sw_context,
2015                                      SVGA3dCmdHeader *header)
2016 {
2017         struct vmw_gb_surface_cmd {
2018                 SVGA3dCmdHeader header;
2019                 SVGA3dCmdReadbackGBImage body;
2020         } *cmd;
2021
2022         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2023
2024         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2025                                  user_surface_converter,
2026                                  &cmd->body.image.sid, NULL);
2027 }
2028
2029 /**
2030  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2031  * command
2032  *
2033  * @dev_priv: Pointer to a device private struct.
2034  * @sw_context: The software context being used for this batch.
2035  * @header: Pointer to the command header in the command stream.
2036  */
2037 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2038                                        struct vmw_sw_context *sw_context,
2039                                        SVGA3dCmdHeader *header)
2040 {
2041         struct vmw_gb_surface_cmd {
2042                 SVGA3dCmdHeader header;
2043                 SVGA3dCmdReadbackGBSurface body;
2044         } *cmd;
2045
2046         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2047
2048         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2049                                  user_surface_converter,
2050                                  &cmd->body.sid, NULL);
2051 }
2052
2053 /**
2054  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2055  * command
2056  *
2057  * @dev_priv: Pointer to a device private struct.
2058  * @sw_context: The software context being used for this batch.
2059  * @header: Pointer to the command header in the command stream.
2060  */
2061 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2062                                        struct vmw_sw_context *sw_context,
2063                                        SVGA3dCmdHeader *header)
2064 {
2065         struct vmw_gb_surface_cmd {
2066                 SVGA3dCmdHeader header;
2067                 SVGA3dCmdInvalidateGBImage body;
2068         } *cmd;
2069
2070         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2071
2072         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2073                                  user_surface_converter,
2074                                  &cmd->body.image.sid, NULL);
2075 }
2076
2077 /**
2078  * vmw_cmd_invalidate_gb_surface - Validate an
2079  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2080  *
2081  * @dev_priv: Pointer to a device private struct.
2082  * @sw_context: The software context being used for this batch.
2083  * @header: Pointer to the command header in the command stream.
2084  */
2085 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2086                                          struct vmw_sw_context *sw_context,
2087                                          SVGA3dCmdHeader *header)
2088 {
2089         struct vmw_gb_surface_cmd {
2090                 SVGA3dCmdHeader header;
2091                 SVGA3dCmdInvalidateGBSurface body;
2092         } *cmd;
2093
2094         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2095
2096         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2097                                  user_surface_converter,
2098                                  &cmd->body.sid, NULL);
2099 }
2100
2102 /**
2103  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2104  * command
2105  *
2106  * @dev_priv: Pointer to a device private struct.
2107  * @sw_context: The software context being used for this batch.
2108  * @header: Pointer to the command header in the command stream.
2109  */
2110 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2111                                  struct vmw_sw_context *sw_context,
2112                                  SVGA3dCmdHeader *header)
2113 {
2114         struct vmw_shader_define_cmd {
2115                 SVGA3dCmdHeader header;
2116                 SVGA3dCmdDefineShader body;
2117         } *cmd;
2118         int ret;
2119         size_t size;
2120         struct vmw_resource_val_node *val;
2121
2122         cmd = container_of(header, struct vmw_shader_define_cmd,
2123                            header);
2124
2125         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2126                                 user_context_converter, &cmd->body.cid,
2127                                 &val);
2128         if (unlikely(ret != 0))
2129                 return ret;
2130
2131         if (unlikely(!dev_priv->has_mob))
2132                 return 0;
2133
2134         size = cmd->header.size - sizeof(cmd->body);
2135         ret = vmw_compat_shader_add(dev_priv,
2136                                     vmw_context_res_man(val->res),
2137                                     cmd->body.shid, cmd + 1,
2138                                     cmd->body.type, size,
2139                                     &sw_context->staged_cmd_res);
2140         if (unlikely(ret != 0))
2141                 return ret;
2142
2143         return vmw_resource_relocation_add(&sw_context->res_relocations,
2144                                            NULL, &cmd->header.id -
2145                                            sw_context->buf_start);
2148 }
2149
2150 /**
2151  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2152  * command
2153  *
2154  * @dev_priv: Pointer to a device private struct.
2155  * @sw_context: The software context being used for this batch.
2156  * @header: Pointer to the command header in the command stream.
2157  */
2158 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2159                                   struct vmw_sw_context *sw_context,
2160                                   SVGA3dCmdHeader *header)
2161 {
2162         struct vmw_shader_destroy_cmd {
2163                 SVGA3dCmdHeader header;
2164                 SVGA3dCmdDestroyShader body;
2165         } *cmd;
2166         int ret;
2167         struct vmw_resource_val_node *val;
2168
2169         cmd = container_of(header, struct vmw_shader_destroy_cmd,
2170                            header);
2171
2172         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2173                                 user_context_converter, &cmd->body.cid,
2174                                 &val);
2175         if (unlikely(ret != 0))
2176                 return ret;
2177
2178         if (unlikely(!dev_priv->has_mob))
2179                 return 0;
2180
2181         ret = vmw_shader_remove(vmw_context_res_man(val->res),
2182                                 cmd->body.shid,
2183                                 cmd->body.type,
2184                                 &sw_context->staged_cmd_res);
2185         if (unlikely(ret != 0))
2186                 return ret;
2187
2188         return vmw_resource_relocation_add(&sw_context->res_relocations,
2189                                            NULL, &cmd->header.id -
2190                                            sw_context->buf_start);
2193 }
2194
2195 /**
2196  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2197  * command
2198  *
2199  * @dev_priv: Pointer to a device private struct.
2200  * @sw_context: The software context being used for this batch.
2201  * @header: Pointer to the command header in the command stream.
2202  */
2203 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2204                               struct vmw_sw_context *sw_context,
2205                               SVGA3dCmdHeader *header)
2206 {
2207         struct vmw_set_shader_cmd {
2208                 SVGA3dCmdHeader header;
2209                 SVGA3dCmdSetShader body;
2210         } *cmd;
2211         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2212         struct vmw_ctx_bindinfo_shader binding;
2213         struct vmw_resource *res = NULL;
2214         int ret;
2215
2216         cmd = container_of(header, struct vmw_set_shader_cmd,
2217                            header);
2218
2219         if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2220                 DRM_ERROR("Illegal shader type %u.\n",
2221                           (unsigned) cmd->body.type);
2222                 return -EINVAL;
2223         }
2224
2225         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2226                                 user_context_converter, &cmd->body.cid,
2227                                 &ctx_node);
2228         if (unlikely(ret != 0))
2229                 return ret;
2230
2231         if (!dev_priv->has_mob)
2232                 return 0;
2233
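             /*
              * Look for a compat shader in the context's resource manager
              * first; fall back to a user-space shader object if none is
              * found.
              */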
2234         if (cmd->body.shid != SVGA3D_INVALID_ID) {
2235                 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2236                                         cmd->body.shid,
2237                                         cmd->body.type);
2238
2239                 if (!IS_ERR(res)) {
2240                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2241                                                     &cmd->body.shid, res,
2242                                                     &res_node);
2243                         vmw_resource_unreference(&res);
2244                         if (unlikely(ret != 0))
2245                                 return ret;
2246                 }
2247         }
2248
2249         if (!res_node) {
2250                 ret = vmw_cmd_res_check(dev_priv, sw_context,
2251                                         vmw_res_shader,
2252                                         user_shader_converter,
2253                                         &cmd->body.shid, &res_node);
2254                 if (unlikely(ret != 0))
2255                         return ret;
2256         }
2257
2258         binding.bi.ctx = ctx_node->res;
2259         binding.bi.res = res_node ? res_node->res : NULL;
2260         binding.bi.bt = vmw_ctx_binding_shader;
2261         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2262         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2263                         binding.shader_slot, 0);
2264         return 0;
2265 }
2266
2267 /**
2268  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2269  * command
2270  *
2271  * @dev_priv: Pointer to a device private struct.
2272  * @sw_context: The software context being used for this batch.
2273  * @header: Pointer to the command header in the command stream.
2274  */
2275 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2276                                     struct vmw_sw_context *sw_context,
2277                                     SVGA3dCmdHeader *header)
2278 {
2279         struct vmw_set_shader_const_cmd {
2280                 SVGA3dCmdHeader header;
2281                 SVGA3dCmdSetShaderConst body;
2282         } *cmd;
2283         int ret;
2284
2285         cmd = container_of(header, struct vmw_set_shader_const_cmd,
2286                            header);
2287
2288         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2289                                 user_context_converter, &cmd->body.cid,
2290                                 NULL);
2291         if (unlikely(ret != 0))
2292                 return ret;
2293
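             /* On guest-backed devices, redirect to the inline GB variant. */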
2294         if (dev_priv->has_mob)
2295                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2296
2297         return 0;
2298 }
2299
2300 /**
2301  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2302  * command
2303  *
2304  * @dev_priv: Pointer to a device private struct.
2305  * @sw_context: The software context being used for this batch.
2306  * @header: Pointer to the command header in the command stream.
2307  */
2308 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2309                                   struct vmw_sw_context *sw_context,
2310                                   SVGA3dCmdHeader *header)
2311 {
2312         struct vmw_bind_gb_shader_cmd {
2313                 SVGA3dCmdHeader header;
2314                 SVGA3dCmdBindGBShader body;
2315         } *cmd;
2316
2317         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2318                            header);
2319
2320         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2321                                      user_shader_converter,
2322                                      &cmd->body.shid, &cmd->body.mobid,
2323                                      cmd->body.offsetInBytes);
2324 }
2325
2326 /**
2327  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2328  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2329  *
2330  * @dev_priv: Pointer to a device private struct.
2331  * @sw_context: The software context being used for this batch.
2332  * @header: Pointer to the command header in the command stream.
2333  */
2334 static int
2335 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2336                                       struct vmw_sw_context *sw_context,
2337                                       SVGA3dCmdHeader *header)
2338 {
2339         struct {
2340                 SVGA3dCmdHeader header;
2341                 SVGA3dCmdDXSetSingleConstantBuffer body;
2342         } *cmd;
2343         struct vmw_resource_val_node *res_node = NULL;
2344         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2345         struct vmw_ctx_bindinfo_cb binding;
2346         int ret;
2347
2348         if (unlikely(ctx_node == NULL)) {
2349                 DRM_ERROR("DX Context not set.\n");
2350                 return -EINVAL;
2351         }
2352
2353         cmd = container_of(header, typeof(*cmd), header);
2354         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2355                                 user_surface_converter,
2356                                 &cmd->body.sid, &res_node);
2357         if (unlikely(ret != 0))
2358                 return ret;
2359
2360         binding.bi.ctx = ctx_node->res;
2361         binding.bi.res = res_node ? res_node->res : NULL;
2362         binding.bi.bt = vmw_ctx_binding_cb;
2363         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2364         binding.offset = cmd->body.offsetInBytes;
2365         binding.size = cmd->body.sizeInBytes;
2366         binding.slot = cmd->body.slot;
2367
2368         if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2369             binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2370                 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2371                           (unsigned) cmd->body.type,
2372                           (unsigned) binding.slot);
2373                 return -EINVAL;
2374         }
2375
2376         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2377                         binding.shader_slot, binding.slot);
2378
2379         return 0;
2380 }
2381
2382 /**
2383  * vmw_cmd_dx_set_shader_res - Validate an
2384  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2385  *
2386  * @dev_priv: Pointer to a device private struct.
2387  * @sw_context: The software context being used for this batch.
2388  * @header: Pointer to the command header in the command stream.
2389  */
2390 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2391                                      struct vmw_sw_context *sw_context,
2392                                      SVGA3dCmdHeader *header)
2393 {
2394         struct {
2395                 SVGA3dCmdHeader header;
2396                 SVGA3dCmdDXSetShaderResources body;
2397         } *cmd = container_of(header, typeof(*cmd), header);
2398         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2399                 sizeof(SVGA3dShaderResourceViewId);
2400
2401         if ((u64) cmd->body.startView + (u64) num_sr_view >
2402             (u64) SVGA3D_DX_MAX_SRVIEWS ||
2403             cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2404                 DRM_ERROR("Invalid shader binding.\n");
2405                 return -EINVAL;
2406         }
2407
2408         return vmw_view_bindings_add(sw_context, vmw_view_sr,
2409                                      vmw_ctx_binding_sr,
2410                                      cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2411                                      (void *) &cmd[1], num_sr_view,
2412                                      cmd->body.startView);
2413 }
2414
2415 /**
2416  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2417  * command
2418  *
2419  * @dev_priv: Pointer to a device private struct.
2420  * @sw_context: The software context being used for this batch.
2421  * @header: Pointer to the command header in the command stream.
2422  */
2423 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2424                                  struct vmw_sw_context *sw_context,
2425                                  SVGA3dCmdHeader *header)
2426 {
2427         struct {
2428                 SVGA3dCmdHeader header;
2429                 SVGA3dCmdDXSetShader body;
2430         } *cmd;
2431         struct vmw_resource *res = NULL;
2432         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2433         struct vmw_ctx_bindinfo_shader binding;
2434         int ret = 0;
2435
2436         if (unlikely(ctx_node == NULL)) {
2437                 DRM_ERROR("DX Context not set.\n");
2438                 return -EINVAL;
2439         }
2440
2441         cmd = container_of(header, typeof(*cmd), header);
2442
2443         if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2444                 DRM_ERROR("Illegal shader type %u.\n",
2445                           (unsigned) cmd->body.type);
2446                 return -EINVAL;
2447         }
2448
2449         if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2450                 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2451                 if (IS_ERR(res)) {
2452                         DRM_ERROR("Could not find shader for binding.\n");
2453                         return PTR_ERR(res);
2454                 }
2455
2456                 ret = vmw_resource_val_add(sw_context, res, NULL);
2457                 if (ret)
2458                         goto out_unref;
2459         }
2460
2461         binding.bi.ctx = ctx_node->res;
2462         binding.bi.res = res;
2463         binding.bi.bt = vmw_ctx_binding_dx_shader;
2464         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2465
2466         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2467                         binding.shader_slot, 0);
2468 out_unref:
2469         if (res)
2470                 vmw_resource_unreference(&res);
2471
2472         return ret;
2473 }
2474
2475 /**
2476  * vmw_cmd_dx_set_vertex_buffers - Validate an
2477  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2478  *
2479  * @dev_priv: Pointer to a device private struct.
2480  * @sw_context: The software context being used for this batch.
2481  * @header: Pointer to the command header in the command stream.
2482  */
2483 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2484                                          struct vmw_sw_context *sw_context,
2485                                          SVGA3dCmdHeader *header)
2486 {
2487         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2488         struct vmw_ctx_bindinfo_vb binding;
2489         struct vmw_resource_val_node *res_node;
2490         struct {
2491                 SVGA3dCmdHeader header;
2492                 SVGA3dCmdDXSetVertexBuffers body;
2493                 SVGA3dVertexBuffer buf[];
2494         } *cmd;
2495         int i, ret, num;
2496
2497         if (unlikely(ctx_node == NULL)) {
2498                 DRM_ERROR("DX Context not set.\n");
2499                 return -EINVAL;
2500         }
2501
2502         cmd = container_of(header, typeof(*cmd), header);
2503         num = (cmd->header.size - sizeof(cmd->body)) /
2504                 sizeof(SVGA3dVertexBuffer);
2505         if ((u64)num + (u64)cmd->body.startBuffer >
2506             (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2507                 DRM_ERROR("Invalid number of vertex buffers.\n");
2508                 return -EINVAL;
2509         }
2510
2511         for (i = 0; i < num; i++) {
2512                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2513                                         user_surface_converter,
2514                                         &cmd->buf[i].sid, &res_node);
2515                 if (unlikely(ret != 0))
2516                         return ret;
2517
2518                 binding.bi.ctx = ctx_node->res;
2519                 binding.bi.bt = vmw_ctx_binding_vb;
2520                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2521                 binding.offset = cmd->buf[i].offset;
2522                 binding.stride = cmd->buf[i].stride;
2523                 binding.slot = i + cmd->body.startBuffer;
2524
2525                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2526                                 0, binding.slot);
2527         }
2528
2529         return 0;
2530 }
2531
2532 /**
2533  * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
2534  * vmw_cmd_dx_set_index_buffer - Validate an
2535  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2536  * @dev_priv: Pointer to a device private struct.
2537  * @sw_context: The software context being used for this batch.
2538  * @header: Pointer to the command header in the command stream.
2539  */
2540 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2541                                        struct vmw_sw_context *sw_context,
2542                                        SVGA3dCmdHeader *header)
2543 {
2544         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2545         struct vmw_ctx_bindinfo_ib binding;
2546         struct vmw_resource_val_node *res_node;
2547         struct {
2548                 SVGA3dCmdHeader header;
2549                 SVGA3dCmdDXSetIndexBuffer body;
2550         } *cmd;
2551         int ret;
2552
2553         if (unlikely(ctx_node == NULL)) {
2554                 DRM_ERROR("DX Context not set.\n");
2555                 return -EINVAL;
2556         }
2557
2558         cmd = container_of(header, typeof(*cmd), header);
2559         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2560                                 user_surface_converter,
2561                                 &cmd->body.sid, &res_node);
2562         if (unlikely(ret != 0))
2563                 return ret;
2564
2565         binding.bi.ctx = ctx_node->res;
2566         binding.bi.res = ((res_node) ? res_node->res : NULL);
2567         binding.bi.bt = vmw_ctx_binding_ib;
2568         binding.offset = cmd->body.offset;
2569         binding.format = cmd->body.format;
2570
2571         vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2572
2573         return 0;
2574 }
2575
2576 /**
2577  * vmw_cmd_dx_set_rendertargets - Validate an
2578  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2579  *
2580  * @dev_priv: Pointer to a device private struct.
2581  * @sw_context: The software context being used for this batch.
2582  * @header: Pointer to the command header in the command stream.
2583  */
2584 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2585                                         struct vmw_sw_context *sw_context,
2586                                         SVGA3dCmdHeader *header)
2587 {
2588         struct {
2589                 SVGA3dCmdHeader header;
2590                 SVGA3dCmdDXSetRenderTargets body;
2591         } *cmd = container_of(header, typeof(*cmd), header);
2592         int ret;
2593         u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2594                 sizeof(SVGA3dRenderTargetViewId);
2595
2596         if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2597                 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2598                 return -EINVAL;
2599         }
2600
2601         ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2602                                     vmw_ctx_binding_ds, 0,
2603                                     &cmd->body.depthStencilViewId, 1, 0);
2604         if (ret)
2605                 return ret;
2606
2607         return vmw_view_bindings_add(sw_context, vmw_view_rt,
2608                                      vmw_ctx_binding_dx_rt, 0,
2609                                      (void *)&cmd[1], num_rt_view, 0);
2610 }
2611
2612 /**
2613  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2614  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2615  *
2616  * @dev_priv: Pointer to a device private struct.
2617  * @sw_context: The software context being used for this batch.
2618  * @header: Pointer to the command header in the command stream.
2619  */
2620 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2621                                               struct vmw_sw_context *sw_context,
2622                                               SVGA3dCmdHeader *header)
2623 {
2624         struct {
2625                 SVGA3dCmdHeader header;
2626                 SVGA3dCmdDXClearRenderTargetView body;
2627         } *cmd = container_of(header, typeof(*cmd), header);
2628
2629         return vmw_view_id_val_add(sw_context, vmw_view_rt,
2630                                    cmd->body.renderTargetViewId);
2631 }
2632
2633 /**
2634  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2635  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2636  *
2637  * @dev_priv: Pointer to a device private struct.
2638  * @sw_context: The software context being used for this batch.
2639  * @header: Pointer to the command header in the command stream.
2640  */
2641 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2642                                               struct vmw_sw_context *sw_context,
2643                                               SVGA3dCmdHeader *header)
2644 {
2645         struct {
2646                 SVGA3dCmdHeader header;
2647                 SVGA3dCmdDXClearDepthStencilView body;
2648         } *cmd = container_of(header, typeof(*cmd), header);
2649
2650         return vmw_view_id_val_add(sw_context, vmw_view_ds,
2651                                    cmd->body.depthStencilViewId);
2652 }
2653
2654 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2655                                   struct vmw_sw_context *sw_context,
2656                                   SVGA3dCmdHeader *header)
2657 {
2658         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2659         struct vmw_resource_val_node *srf_node;
2660         struct vmw_resource *res;
2661         enum vmw_view_type view_type;
2662         int ret;
2663         /*
2664          * This is based on the fact that all affected define commands have
2665          * the same initial command body layout.
2666          */
2667         struct {
2668                 SVGA3dCmdHeader header;
2669                 uint32 defined_id;
2670                 uint32 sid;
2671         } *cmd;
2672
2673         if (unlikely(ctx_node == NULL)) {
2674                 DRM_ERROR("DX Context not set.\n");
2675                 return -EINVAL;
2676         }
2677
2678         view_type = vmw_view_cmd_to_type(header->id);
2679         cmd = container_of(header, typeof(*cmd), header);
2680         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2681                                 user_surface_converter,
2682                                 &cmd->sid, &srf_node);
2683         if (unlikely(ret != 0))
2684                 return ret;
2685
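             /*
              * Notify the view cotable of the new id so that it can be
              * resized to fit the view if necessary.
              */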
2686         res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2687         ret = vmw_cotable_notify(res, cmd->defined_id);
2688         vmw_resource_unreference(&res);
2689         if (unlikely(ret != 0))
2690                 return ret;
2691
2692         return vmw_view_add(sw_context->man,
2693                             ctx_node->res,
2694                             srf_node->res,
2695                             view_type,
2696                             cmd->defined_id,
2697                             header,
2698                             header->size + sizeof(*header),
2699                             &sw_context->staged_cmd_res);
2700 }
2701
2702 /**
2703  * vmw_cmd_dx_set_so_targets - Validate an
2704  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2705  *
2706  * @dev_priv: Pointer to a device private struct.
2707  * @sw_context: The software context being used for this batch.
2708  * @header: Pointer to the command header in the command stream.
2709  */
2710 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2711                                      struct vmw_sw_context *sw_context,
2712                                      SVGA3dCmdHeader *header)
2713 {
2714         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2715         struct vmw_ctx_bindinfo_so binding;
2716         struct vmw_resource_val_node *res_node;
2717         struct {
2718                 SVGA3dCmdHeader header;
2719                 SVGA3dCmdDXSetSOTargets body;
2720                 SVGA3dSoTarget targets[];
2721         } *cmd;
2722         int i, ret, num;
2723
2724         if (unlikely(ctx_node == NULL)) {
2725                 DRM_ERROR("DX Context not set.\n");
2726                 return -EINVAL;
2727         }
2728
2729         cmd = container_of(header, typeof(*cmd), header);
2730         num = (cmd->header.size - sizeof(cmd->body)) /
2731                 sizeof(SVGA3dSoTarget);
2732
2733         if (num > SVGA3D_DX_MAX_SOTARGETS) {
2734                 DRM_ERROR("Invalid DX SO binding.\n");
2735                 return -EINVAL;
2736         }
2737
2738         for (i = 0; i < num; i++) {
2739                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2740                                         user_surface_converter,
2741                                         &cmd->targets[i].sid, &res_node);
2742                 if (unlikely(ret != 0))
2743                         return ret;
2744
2745                 binding.bi.ctx = ctx_node->res;
2746                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2747                 binding.bi.bt = vmw_ctx_binding_so;
2748                 binding.offset = cmd->targets[i].offset;
2749                 binding.size = cmd->targets[i].sizeInBytes;
2750                 binding.slot = i;
2751
2752                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2753                                 0, binding.slot);
2754         }
2755
2756         return 0;
2757 }
2758
2759 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2760                                 struct vmw_sw_context *sw_context,
2761                                 SVGA3dCmdHeader *header)
2762 {
2763         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2764         struct vmw_resource *res;
2765         /*
2766          * This is based on the fact that all affected define commands have
2767          * the same initial command body layout.
2768          */
2769         struct {
2770                 SVGA3dCmdHeader header;
2771                 uint32 defined_id;
2772         } *cmd;
2773         enum vmw_so_type so_type;
2774         int ret;
2775
2776         if (unlikely(ctx_node == NULL)) {
2777                 DRM_ERROR("DX Context not set.\n");
2778                 return -EINVAL;
2779         }
2780
2781         so_type = vmw_so_cmd_to_type(header->id);
2782         res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2783         cmd = container_of(header, typeof(*cmd), header);
2784         ret = vmw_cotable_notify(res, cmd->defined_id);
2785         vmw_resource_unreference(&res);
2786
2787         return ret;
2788 }
2789
2790 /**
2791  * vmw_cmd_dx_check_subresource - Validate an
2792  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2793  *
2794  * @dev_priv: Pointer to a device private struct.
2795  * @sw_context: The software context being used for this batch.
2796  * @header: Pointer to the command header in the command stream.
2797  */
2798 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2799                                         struct vmw_sw_context *sw_context,
2800                                         SVGA3dCmdHeader *header)
2801 {
2802         struct {
2803                 SVGA3dCmdHeader header;
2804                 union {
2805                         SVGA3dCmdDXReadbackSubResource r_body;
2806                         SVGA3dCmdDXInvalidateSubResource i_body;
2807                         SVGA3dCmdDXUpdateSubResource u_body;
2808                         SVGA3dSurfaceId sid;
2809                 };
2810         } *cmd;
2811
2812         BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2813                      offsetof(typeof(*cmd), sid));
2814         BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2815                      offsetof(typeof(*cmd), sid));
2816         BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2817                      offsetof(typeof(*cmd), sid));
2818
2819         cmd = container_of(header, typeof(*cmd), header);
2820
2821         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2822                                  user_surface_converter,
2823                                  &cmd->sid, NULL);
2824 }
2825
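/**
 * vmw_cmd_dx_cid_check - Validate a command that only needs a DX context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks that a DX context has been set for the current command batch.
 */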
2826 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2827                                 struct vmw_sw_context *sw_context,
2828                                 SVGA3dCmdHeader *header)
2829 {
2830         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2831
2832         if (unlikely(ctx_node == NULL)) {
2833                 DRM_ERROR("DX Context not set.\n");
2834                 return -EINVAL;
2835         }
2836
2837         return 0;
2838 }
2839
2840 /**
2841  * vmw_cmd_dx_view_remove - Validate a view remove command and
2842  * schedule the view resource for removal.
2843  *
2844  * @dev_priv: Pointer to a device private struct.
2845  * @sw_context: The software context being used for this batch.
2846  * @header: Pointer to the command header in the command stream.
2847  *
2848  * Check that the view exists, and if it was not created using this
2849  * command batch, make sure it's validated (present in the device) so that
2850  * the remove command will not confuse the device.
2851  */
2852 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2853                                   struct vmw_sw_context *sw_context,
2854                                   SVGA3dCmdHeader *header)
2855 {
2856         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2857         struct {
2858                 SVGA3dCmdHeader header;
2859                 union vmw_view_destroy body;
2860         } *cmd = container_of(header, typeof(*cmd), header);
2861         enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2862         struct vmw_resource *view;
2863         int ret;
2864
2865         if (!ctx_node) {
2866                 DRM_ERROR("DX Context not set.\n");
2867                 return -EINVAL;
2868         }
2869
2870         ret = vmw_view_remove(sw_context->man,
2871                               cmd->body.view_id, view_type,
2872                               &sw_context->staged_cmd_res,
2873                               &view);
2874         if (ret || !view)
2875                 return ret;
2876
2877         /*
2878          * Add view to the validate list iff it was not created using this
2879          * command batch.
2880          */
2881         return vmw_view_res_val_add(sw_context, view);
2882 }
2883
2884 /**
2885  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2886  * command
2887  *
2888  * @dev_priv: Pointer to a device private struct.
2889  * @sw_context: The software context being used for this batch.
2890  * @header: Pointer to the command header in the command stream.
2891  */
2892 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2893                                     struct vmw_sw_context *sw_context,
2894                                     SVGA3dCmdHeader *header)
2895 {
2896         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2897         struct vmw_resource *res;
2898         struct {
2899                 SVGA3dCmdHeader header;
2900                 SVGA3dCmdDXDefineShader body;
2901         } *cmd = container_of(header, typeof(*cmd), header);
2902         int ret;
2903
2904         if (!ctx_node) {
2905                 DRM_ERROR("DX Context not set.\n");
2906                 return -EINVAL;
2907         }
2908
2909         res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2910         ret = vmw_cotable_notify(res, cmd->body.shaderId);
2911         vmw_resource_unreference(&res);
2912         if (ret)
2913                 return ret;
2914
2915         return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2916                                  cmd->body.shaderId, cmd->body.type,
2917                                  &sw_context->staged_cmd_res);
2918 }
2919
2920 /**
2921  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2922  * command
2923  *
2924  * @dev_priv: Pointer to a device private struct.
2925  * @sw_context: The software context being used for this batch.
2926  * @header: Pointer to the command header in the command stream.
2927  */
2928 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2929                                      struct vmw_sw_context *sw_context,
2930                                      SVGA3dCmdHeader *header)
2931 {
2932         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2933         struct {
2934                 SVGA3dCmdHeader header;
2935                 SVGA3dCmdDXDestroyShader body;
2936         } *cmd = container_of(header, typeof(*cmd), header);
2937         int ret;
2938
2939         if (!ctx_node) {
2940                 DRM_ERROR("DX Context not set.\n");
2941                 return -EINVAL;
2942         }
2943
2944         ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2945                                 &sw_context->staged_cmd_res);
2946         if (ret)
2947                 DRM_ERROR("Could not find shader to remove.\n");
2948
2949         return ret;
2950 }
2951
2952 /**
2953  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2954  * command
2955  *
2956  * @dev_priv: Pointer to a device private struct.
2957  * @sw_context: The software context being used for this batch.
2958  * @header: Pointer to the command header in the command stream.
2959  */
2960 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2961                                   struct vmw_sw_context *sw_context,
2962                                   SVGA3dCmdHeader *header)
2963 {
2964         struct vmw_resource_val_node *ctx_node;
2965         struct vmw_resource_val_node *res_node;
2966         struct vmw_resource *res;
2967         struct {
2968                 SVGA3dCmdHeader header;
2969                 SVGA3dCmdDXBindShader body;
2970         } *cmd = container_of(header, typeof(*cmd), header);
2971         int ret;
2972
2973         if (cmd->body.cid != SVGA3D_INVALID_ID) {
2974                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2975                                         user_context_converter,
2976                                         &cmd->body.cid, &ctx_node);
2977                 if (ret)
2978                         return ret;
2979         } else {
2980                 ctx_node = sw_context->dx_ctx_node;
2981                 if (!ctx_node) {
2982                         DRM_ERROR("DX Context not set.\n");
2983                         return -EINVAL;
2984                 }
2985         }
2986
2987         res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2988                                 cmd->body.shid, 0);
2989         if (IS_ERR(res)) {
2990                 DRM_ERROR("Could not find shader to bind.\n");
2991                 return PTR_ERR(res);
2992         }
2993
2994         ret = vmw_resource_val_add(sw_context, res, &res_node);
2995         if (ret) {
2996                 DRM_ERROR("Error creating resource validation node.\n");
2997                 goto out_unref;
2998         }
2999
3000
3001         ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3002                                         &cmd->body.mobid,
3003                                         cmd->body.offsetInBytes);
3004 out_unref:
3005         vmw_resource_unreference(&res);
3006
3007         return ret;
3008 }
3009
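/**
 * vmw_cmd_check_not_3d - Validate a non-3D SVGA fifo command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: remaining stream size. Out: size of the validated command.
 *
 * Non-3D commands are kernel-only; user-space batches containing them
 * are rejected with -EPERM.
 */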
3010 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3011                                 struct vmw_sw_context *sw_context,
3012                                 void *buf, uint32_t *size)
3013 {
3014         uint32_t size_remaining = *size;
3015         uint32_t cmd_id;
3016
3017         cmd_id = ((uint32_t *)buf)[0];
3018         switch (cmd_id) {
3019         case SVGA_CMD_UPDATE:
3020                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3021                 break;
3022         case SVGA_CMD_DEFINE_GMRFB:
3023                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3024                 break;
3025         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3026                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3027                 break;
3028         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3029                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3030                 break;
3031         default:
3032                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3033                 return -EINVAL;
3034         }
3035
3036         if (*size > size_remaining) {
3037                 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
3038                           cmd_id);
3039                 return -EINVAL;
3040         }
3041
3042         if (unlikely(!sw_context->kernel)) {
3043                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3044                 return -EPERM;
3045         }
3046
3047         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3048                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3049
3050         return 0;
3051 }
3052
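/*
 * Per-command validation table. Each VMW_CMD_DEF entry pairs an SVGA3D
 * command id with its validation function and three flags: whether
 * user-space may submit the command, whether it is disallowed when
 * guest-backed objects are enabled, and whether it requires them.
 */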
3053 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3054         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3055                     false, false, false),
3056         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3057                     false, false, false),
3058         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3059                     true, false, false),
3060         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3061                     true, false, false),
3062         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3063                     true, false, false),
3064         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3065                     false, false, false),
3066         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3067                     false, false, false),
3068         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3069                     true, false, false),
3070         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3071                     true, false, false),
3072         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3073                     true, false, false),
3074         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3075                     &vmw_cmd_set_render_target_check, true, false, false),
3076         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3077                     true, false, false),
3078         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3079                     true, false, false),
3080         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3081                     true, false, false),
3082         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3083                     true, false, false),
3084         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3085                     true, false, false),
3086         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3087                     true, false, false),
3088         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3089                     true, false, false),
3090         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3091                     false, false, false),
3092         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3093                     true, false, false),
3094         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3095                     true, false, false),
3096         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3097                     true, false, false),
3098         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3099                     true, false, false),
3100         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3101                     true, false, false),
3102         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3103                     true, false, false),
3104         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3105                     true, false, false),
3106         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3107                     true, false, false),
3108         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3109                     true, false, false),
3110         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3111                     true, false, false),
3112         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3113                     &vmw_cmd_blt_surf_screen_check, false, false, false),
3114         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3115                     false, false, false),
3116         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3117                     false, false, false),
3118         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3119                     false, false, false),
3120         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3121                     false, false, false),
3122         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3123                     false, false, false),
3124         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3125                     false, false, false),
3126         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3127                     false, false, false),
3128         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3129                     false, false, false),
3130         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3131                     false, false, false),
3132         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3133                     false, false, false),
3134         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3135                     false, false, false),
3136         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3137                     false, false, false),
3138         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3139                     false, false, false),
3140         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3141                     false, false, true),
3142         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3143                     false, false, true),
3144         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3145                     false, false, true),
3146         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3147                     false, false, true),
3148         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3149                     false, false, true),
3150         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3151                     false, false, true),
3152         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3153                     false, false, true),
3154         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3155                     false, false, true),
3156         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3157                     true, false, true),
3158         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3159                     false, false, true),
3160         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3161                     true, false, true),
3162         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3163                     &vmw_cmd_update_gb_surface, true, false, true),
3164         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3165                     &vmw_cmd_readback_gb_image, true, false, true),
3166         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3167                     &vmw_cmd_readback_gb_surface, true, false, true),
3168         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3169                     &vmw_cmd_invalidate_gb_image, true, false, true),
3170         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3171                     &vmw_cmd_invalidate_gb_surface, true, false, true),
3172         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3173                     false, false, true),
3174         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3175                     false, false, true),
3176         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3177                     false, false, true),
3178         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3179                     false, false, true),
3180         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3181                     false, false, true),
3182         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3183                     false, false, true),
3184         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3185                     true, false, true),
3186         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3187                     false, false, true),
3188         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3189                     false, false, false),
3190         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3191                     true, false, true),
3192         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3193                     true, false, true),
3194         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3195                     true, false, true),
3196         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3197                     true, false, true),
3198         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3199                     false, false, true),
3200         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3201                     false, false, true),
3202         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3203                     false, false, true),
3204         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3205                     false, false, true),
3206         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3207                     false, false, true),
3208         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3209                     false, false, true),
3210         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3211                     false, false, true),
3212         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3213                     false, false, true),
3214         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3215                     false, false, true),
3216         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3217                     false, false, true),
3218         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3219                     true, false, true),
3220         VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3221                     false, false, true),
3222         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3223                     false, false, true),
3224         VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3225                     false, false, true),
3226         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3227                     false, false, true),
3228
3229         /*
3230          * DX commands
3231          */
3232         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3233                     false, false, true),
3234         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3235                     false, false, true),
3236         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3237                     false, false, true),
3238         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3239                     false, false, true),
3240         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3241                     false, false, true),
3242         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3243                     &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3244         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3245                     &vmw_cmd_dx_set_shader_res, true, false, true),
3246         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3247                     true, false, true),
3248         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3249                     true, false, true),
3250         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3251                     true, false, true),
3252         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3253                     true, false, true),
3254         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3255                     true, false, true),
3256         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3257                     &vmw_cmd_dx_cid_check, true, false, true),
3258         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3259                     true, false, true),
3260         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3261                     &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3262         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3263                     &vmw_cmd_dx_set_index_buffer, true, false, true),
3264         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3265                     &vmw_cmd_dx_set_rendertargets, true, false, true),
3266         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3267                     true, false, true),
3268         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3269                     &vmw_cmd_dx_cid_check, true, false, true),
3270         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3271                     &vmw_cmd_dx_cid_check, true, false, true),
3272         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3273                     true, false, true),
3274         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
3275                     true, false, true),
3276         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3277                     true, false, true),
3278         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3279                     &vmw_cmd_ok, true, false, true),
3280         VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
3281                     true, false, true),
3282         VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
3283                     true, false, true),
3284         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3285                     true, false, true),
3286         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
3287                     true, false, true),
3288         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3289                     true, false, true),
3290         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3291                     true, false, true),
3292         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3293                     &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3294         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3295                     &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3296         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3297                     true, false, true),
3298         VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
3299                     true, false, true),
3300         VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3301                     &vmw_cmd_dx_check_subresource, true, false, true),
3302         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3303                     &vmw_cmd_dx_check_subresource, true, false, true),
3304         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3305                     &vmw_cmd_dx_check_subresource, true, false, true),
3306         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3307                     &vmw_cmd_dx_view_define, true, false, true),
3308         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3309                     &vmw_cmd_dx_view_remove, true, false, true),
3310         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3311                     &vmw_cmd_dx_view_define, true, false, true),
3312         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3313                     &vmw_cmd_dx_view_remove, true, false, true),
3314         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3315                     &vmw_cmd_dx_view_define, true, false, true),
3316         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3317                     &vmw_cmd_dx_view_remove, true, false, true),
3318         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3319                     &vmw_cmd_dx_so_define, true, false, true),
3320         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3321                     &vmw_cmd_dx_cid_check, true, false, true),
3322         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3323                     &vmw_cmd_dx_so_define, true, false, true),
3324         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3325                     &vmw_cmd_dx_cid_check, true, false, true),
3326         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3327                     &vmw_cmd_dx_so_define, true, false, true),
3328         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3329                     &vmw_cmd_dx_cid_check, true, false, true),
3330         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3331                     &vmw_cmd_dx_so_define, true, false, true),
3332         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3333                     &vmw_cmd_dx_cid_check, true, false, true),
3334         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3335                     &vmw_cmd_dx_so_define, true, false, true),
3336         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3337                     &vmw_cmd_dx_cid_check, true, false, true),
3338         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3339                     &vmw_cmd_dx_define_shader, true, false, true),
3340         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3341                     &vmw_cmd_dx_destroy_shader, true, false, true),
3342         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3343                     &vmw_cmd_dx_bind_shader, true, false, true),
3344         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3345                     &vmw_cmd_dx_so_define, true, false, true),
3346         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3347                     &vmw_cmd_dx_cid_check, true, false, true),
3348         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3349                     true, false, true),
3350         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3351                     &vmw_cmd_dx_set_so_targets, true, false, true),
3352         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3353                     &vmw_cmd_dx_cid_check, true, false, true),
3354         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3355                     &vmw_cmd_dx_cid_check, true, false, true),
3356         VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3357                     &vmw_cmd_buffer_copy_check, true, false, true),
3358         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3359                     &vmw_cmd_pred_copy_check, true, false, true),
3360 };
3361
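/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: remaining stream size. Out: size of the validated command.
 *
 * Dispatches to the entry in @vmw_cmd_entries matching the command id,
 * after checking size, privilege and guest-backed object constraints.
 */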
3362 static int vmw_cmd_check(struct vmw_private *dev_priv,
3363                          struct vmw_sw_context *sw_context,
3364                          void *buf, uint32_t *size)
3365 {
3366         uint32_t cmd_id;
3367         uint32_t size_remaining = *size;
3368         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3369         int ret;
3370         const struct vmw_cmd_entry *entry;
3371         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3372
3373         cmd_id = ((uint32_t *)buf)[0];
3374         /* Handle any non-3D commands */
3375         if (unlikely(cmd_id < SVGA_CMD_MAX))
3376                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3377
3378
3379         cmd_id = header->id;
3380         *size = header->size + sizeof(SVGA3dCmdHeader);
3381
3382         cmd_id -= SVGA_3D_CMD_BASE;
3383         if (unlikely(*size > size_remaining))
3384                 goto out_invalid;
3385
3386         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3387                 goto out_invalid;
3388
3389         entry = &vmw_cmd_entries[cmd_id];
3390         if (unlikely(!entry->func))
3391                 goto out_invalid;
3392
3393         if (unlikely(!entry->user_allow && !sw_context->kernel))
3394                 goto out_privileged;
3395
3396         if (unlikely(entry->gb_disable && gb))
3397                 goto out_old;
3398
3399         if (unlikely(entry->gb_enable && !gb))
3400                 goto out_new;
3401
3402         ret = entry->func(dev_priv, sw_context, header);
3403         if (unlikely(ret != 0))
3404                 goto out_invalid;
3405
3406         return 0;
3407 out_invalid:
3408         DRM_ERROR("Invalid SVGA3D command: %d\n",
3409                   cmd_id + SVGA_3D_CMD_BASE);
3410         return -EINVAL;
3411 out_privileged:
3412         DRM_ERROR("Privileged SVGA3D command: %d\n",
3413                   cmd_id + SVGA_3D_CMD_BASE);
3414         return -EPERM;
3415 out_old:
3416         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3417                   cmd_id + SVGA_3D_CMD_BASE);
3418         return -EINVAL;
3419 out_new:
3420         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3421                   cmd_id + SVGA_3D_CMD_BASE);
3422         return -EINVAL;
3423 }
3424
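/**
 * vmw_cmd_check_all - Validate all commands in a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Walks the batch, validating one command at a time until the batch is
 * exhausted or a command fails validation.
 */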
3425 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3426                              struct vmw_sw_context *sw_context,
3427                              void *buf,
3428                              uint32_t size)
3429 {
3430         int32_t cur_size = size;
3431         int ret;
3432
3433         sw_context->buf_start = buf;
3434
3435         while (cur_size > 0) {
3436                 size = cur_size;
3437                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3438                 if (unlikely(ret != 0))
3439                         return ret;
3440                 buf = (void *)((unsigned long) buf + size);
3441                 cur_size -= size;
3442         }
3443
3444         if (unlikely(cur_size != 0)) {
3445                 DRM_ERROR("Command verifier out of sync.\n");
3446                 return -EINVAL;
3447         }
3448
3449         return 0;
3450 }
3451
3452 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3453 {
3454         sw_context->cur_reloc = 0;
3455 }
3456
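/**
 * vmw_apply_relocations - Patch buffer locations into the command stream
 *
 * @sw_context: The software context being used for this batch.
 *
 * For each recorded relocation, writes the final VRAM offset, GMR id or
 * MOB id of the validated buffer object into the command stream.
 */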
3457 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3458 {
3459         uint32_t i;
3460         struct vmw_relocation *reloc;
3461         struct ttm_validate_buffer *validate;
3462         struct ttm_buffer_object *bo;
3463
3464         for (i = 0; i < sw_context->cur_reloc; ++i) {
3465                 reloc = &sw_context->relocs[i];
3466                 validate = &sw_context->val_bufs[reloc->index].base;
3467                 bo = validate->bo;
3468                 switch (bo->mem.mem_type) {
3469                 case TTM_PL_VRAM:
3470                         reloc->location->offset += bo->offset;
3471                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3472                         break;
3473                 case VMW_PL_GMR:
3474                         reloc->location->gmrId = bo->mem.start;
3475                         break;
3476                 case VMW_PL_MOB:
3477                         *reloc->mob_loc = bo->mem.start;
3478                         break;
3479                 default:
3480                         BUG();
3481                 }
3482         }
3483         vmw_free_relocations(sw_context);
3484 }
3485
3486 /**
3487  * vmw_resource_list_unreference - Free up a resource list and unreference
3488  * all resources referenced by it.
3489  *
 * @sw_context: The software context being used for this batch.
3490  * @list: The resource list.
3491  */
3492 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3493                                           struct list_head *list)
3494 {
3495         struct vmw_resource_val_node *val, *val_next;
3496
3497         /*
3498          * Drop references to resources held during command submission.
3499          */
3500
3501         list_for_each_entry_safe(val, val_next, list, head) {
3502                 list_del_init(&val->head);
3503                 vmw_resource_unreference(&val->res);
3504
3505                 if (val->staged_bindings) {
3506                         if (val->staged_bindings != sw_context->staged_bindings)
3507                                 vmw_binding_state_free(val->staged_bindings);
3508                         else
3509                                 sw_context->staged_bindings_inuse = false;
3510                         val->staged_bindings = NULL;
3511                 }
3512
3513                 kfree(val);
3514         }
3515 }
3516
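/**
 * vmw_clear_validations - Drop validation-list references
 *
 * @sw_context: The software context being used for this batch.
 *
 * Drops references to the DMA buffers on the validation list and removes
 * the buffer and resource hash table entries.
 */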
3517 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3518 {
3519         struct vmw_validate_buffer *entry, *next;
3520         struct vmw_resource_val_node *val;
3521
3522         /*
3523          * Drop references to DMA buffers held during command submission.
3524          */
3525         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3526                                  base.head) {
3527                 list_del(&entry->base.head);
3528                 ttm_bo_unref(&entry->base.bo);
3529                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3530                 sw_context->cur_val_buf--;
3531         }
3532         BUG_ON(sw_context->cur_val_buf != 0);
3533
3534         list_for_each_entry(val, &sw_context->resource_list, head)
3535                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3536 }
3537
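/**
 * vmw_validate_single_buffer - Validate a buffer object to a placement
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @interruptible: Whether to perform waits interruptibly.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers are left where they are. Otherwise the buffer is
 * validated as a MOB, or into VRAM or a GMR as described below.
 */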
3538 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3539                                struct ttm_buffer_object *bo,
3540                                bool interruptible,
3541                                bool validate_as_mob)
3542 {
3543         struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3544                                                   base);
3545         int ret;
3546
3547         if (vbo->pin_count > 0)
3548                 return 0;
3549
3550         if (validate_as_mob)
3551                 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3552                                        false);
3553
3554         /*
3555          * Put BO in VRAM if there is space, otherwise as a GMR.
3556          * If there is no space in VRAM and GMR ids are all used up,
3557          * start evicting GMRs to make room. If the DMA buffer can't be
3558          * used as a GMR, this will return -ENOMEM.
3559          */
3560
3561         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3562                               false);
3563         if (likely(ret == 0 || ret == -ERESTARTSYS))
3564                 return ret;
3565
3566         /*
3567          * If that failed, try VRAM again, this time evicting
3568          * previous contents.
3569          */
3570
3571         ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3572         return ret;
3573 }
3574
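/**
 * vmw_validate_buffers - Validate all buffers on the validation list
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 */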
3575 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3576                                 struct vmw_sw_context *sw_context)
3577 {
3578         struct vmw_validate_buffer *entry;
3579         int ret;
3580
3581         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3582                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3583                                                  true,
3584                                                  entry->validate_as_mob);
3585                 if (unlikely(ret != 0))
3586                         return ret;
3587         }
3588         return 0;
3589 }
3590
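/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context being used for this batch.
 * @size: Required bounce buffer size in bytes.
 *
 * Grows the buffer geometrically (by about 50% per step, page-aligned).
 * The previous contents are not preserved.
 */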
3591 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3592                                  uint32_t size)
3593 {
3594         if (likely(sw_context->cmd_bounce_size >= size))
3595                 return 0;
3596
3597         if (sw_context->cmd_bounce_size == 0)
3598                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3599
3600         while (sw_context->cmd_bounce_size < size) {
3601                 sw_context->cmd_bounce_size =
3602                         PAGE_ALIGN(sw_context->cmd_bounce_size +
3603                                    (sw_context->cmd_bounce_size >> 1));
3604         }
3605
3606         if (sw_context->cmd_bounce != NULL)
3607                 vfree(sw_context->cmd_bounce);
3608
3609         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3610
3611         if (sw_context->cmd_bounce == NULL) {
3612                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3613                 sw_context->cmd_bounce_size = 0;
3614                 return -ENOMEM;
3615         }
3616
3617         return 0;
3618 }
3619
3620 /**
3621  * vmw_execbuf_fence_commands - create and submit a command stream fence
3622  *
3623  * Creates a fence object and submits a command stream marker.
3624  * If this fails for some reason, we sync the fifo and return NULL.
3625  * It is then safe to fence buffers with a NULL pointer.
3626  *
3627  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3628  * user-space handle is created for the fence; otherwise none is.
3629  */
3630
3631 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3632                                struct vmw_private *dev_priv,
3633                                struct vmw_fence_obj **p_fence,
3634                                uint32_t *p_handle)
3635 {
3636         uint32_t sequence;
3637         int ret;
3638         bool synced = false;
3639
3640         /* p_handle implies file_priv. */
3641         BUG_ON(p_handle != NULL && file_priv == NULL);
3642
3643         ret = vmw_fifo_send_fence(dev_priv, &sequence);
3644         if (unlikely(ret != 0)) {
3645                 DRM_ERROR("Fence submission error. Syncing.\n");
3646                 synced = true;
3647         }
3648
3649         if (p_handle != NULL)
3650                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3651                                             sequence, p_fence, p_handle);
3652         else
3653                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3654
3655         if (unlikely(ret != 0 && !synced)) {
3656                 (void) vmw_fallback_wait(dev_priv, false, false,
3657                                          sequence, false,
3658                                          VMW_FENCE_WAIT_TIMEOUT);
3659                 *p_fence = NULL;
3660         }
3661
3662         return 0;
3663 }
3664
3665 /**
3666  * vmw_execbuf_copy_fence_user - copy fence object information to
3667  * user-space.
3668  *
3669  * @dev_priv: Pointer to a vmw_private struct.
3670  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3671  * @ret: Return value from fence object creation.
3672  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3673  * which the information should be copied.
3674  * @fence: Pointer to the fence object.
3675  * @fence_handle: User-space fence handle.
3676  *
3677  * This function copies fence information to user-space. If copying fails,
3678  * the user-space struct drm_vmw_fence_rep::error member should be left
3679  * untouched; user-space typically preloads it with -EFAULT so that a
3680  * failed copy can still be detected.
3681  * Also, if copying fails, user-space will be unable to signal the fence
3682  * object, so we wait for it immediately and then unreference the
3683  * user-space reference.
3684  */
3685 void
3686 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3687                             struct vmw_fpriv *vmw_fp,
3688                             int ret,
3689                             struct drm_vmw_fence_rep __user *user_fence_rep,
3690                             struct vmw_fence_obj *fence,
3691                             uint32_t fence_handle)
3692 {
3693         struct drm_vmw_fence_rep fence_rep;
3694
3695         if (user_fence_rep == NULL)
3696                 return;
3697
3698         memset(&fence_rep, 0, sizeof(fence_rep));
3699
3700         fence_rep.error = ret;
3701         if (ret == 0) {
3702                 BUG_ON(fence == NULL);
3703
3704                 fence_rep.handle = fence_handle;
3705                 fence_rep.seqno = fence->base.seqno;
3706                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3707                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3708         }
3709
3710         /*
3711          * copy_to_user errors will be detected by user space not
3712          * seeing fence_rep::error filled in. Typically
3713          * user-space would have pre-set that member to -EFAULT.
3714          */
3715         ret = copy_to_user(user_fence_rep, &fence_rep,
3716                            sizeof(fence_rep));
3717
3718         /*
3719          * User-space lost the fence object. We need to sync
3720          * and unreference the handle.
3721          */
3722         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3723                 ttm_ref_object_base_unref(vmw_fp->tfile,
3724                                           fence_handle, TTM_REF_USAGE);
3725                 DRM_ERROR("Fence copy error. Syncing.\n");
3726                 (void) vmw_fence_obj_wait(fence, false, false,
3727                                           VMW_FENCE_WAIT_TIMEOUT);
3728         }
3729 }
3730
3731 /**
3732  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3733  * the fifo.
3734  *
3735  * @dev_priv: Pointer to a device private structure.
3736  * @kernel_commands: Pointer to the unpatched command batch.
3737  * @command_size: Size of the unpatched command batch.
3738  * @sw_context: Structure holding the relocation lists.
3739  *
3740  * Side effects: If this function returns 0, then the command batch
3741  * pointed to by @kernel_commands will have been modified.
3742  */
3743 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3744                                    void *kernel_commands,
3745                                    u32 command_size,
3746                                    struct vmw_sw_context *sw_context)
3747 {
3748         void *cmd;
3749
3750         if (sw_context->dx_ctx_node)
3751                 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3752                                           sw_context->dx_ctx_node->res->id);
3753         else
3754                 cmd = vmw_fifo_reserve(dev_priv, command_size);
3755         if (!cmd) {
3756                 DRM_ERROR("Failed reserving fifo space for commands.\n");
3757                 return -ENOMEM;
3758         }
3759
3760         vmw_apply_relocations(sw_context);
3761         memcpy(cmd, kernel_commands, command_size);
3762         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3763         vmw_resource_relocations_free(&sw_context->res_relocations);
3764         vmw_fifo_commit(dev_priv, command_size);
3765
3766         return 0;
3767 }
3768
3769 /**
3770  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3771  * the command buffer manager.
3772  *
3773  * @dev_priv: Pointer to a device private structure.
3774  * @header: Opaque handle to the command buffer allocation.
3775  * @command_size: Size of the unpatched command batch.
3776  * @sw_context: Structure holding the relocation lists.
3777  *
3778  * Side effects: If this function returns 0, then the command buffer
3779  * represented by @header will have been modified.
3780  */
3781 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3782                                      struct vmw_cmdbuf_header *header,
3783                                      u32 command_size,
3784                                      struct vmw_sw_context *sw_context)
3785 {
3786         u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3787                   SVGA3D_INVALID_ID);
3788         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3789                                        id, false, header);
3790
3791         vmw_apply_relocations(sw_context);
3792         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3793         vmw_resource_relocations_free(&sw_context->res_relocations);
3794         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3795
3796         return 0;
3797 }
3798
3799 /**
3800  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3801  * submission using a command buffer.
3802  *
3803  * @dev_priv: Pointer to a device private structure.
3804  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to an already resident copy of the
 * commands, or NULL.
3805  * @command_size: Size of the unpatched command batch.
3806  * @header: Out parameter returning the opaque pointer to the command buffer.
3807  *
3808  * This function checks whether we can use the command buffer manager for
3809  * submission and if so, creates a command buffer of suitable size and
3810  * copies the user data into that buffer.
3811  *
3812  * On successful return, the function returns a pointer to the data in the
3813  * command buffer and *@header is set to non-NULL.
3814  * If command buffers could not be used, the function will return the value
3815  * of @kernel_commands on function call. That value may be NULL. In that case,
3816  * the value of *@header will be set to NULL.
3817  * If an error is encountered, the function will return a pointer error value.
3818  * If the function is interrupted by a signal while sleeping, it will return
3819  * -ERESTARTSYS cast to a pointer error value.
3820  */
3821 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3822                                 void __user *user_commands,
3823                                 void *kernel_commands,
3824                                 u32 command_size,
3825                                 struct vmw_cmdbuf_header **header)
3826 {
3827         size_t cmdbuf_size;
3828         int ret;
3829
3830         *header = NULL;
3831         if (!dev_priv->cman || kernel_commands)
3832                 return kernel_commands;
3833
3834         if (command_size > SVGA_CB_MAX_SIZE) {
3835                 DRM_ERROR("Command buffer is too large.\n");
3836                 return ERR_PTR(-EINVAL);
3837         }
3838
3839         /* If possible, add a little space for fencing. */
3840         cmdbuf_size = command_size + 512;
3841         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3842         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3843                                            true, header);
3844         if (IS_ERR(kernel_commands))
3845                 return kernel_commands;
3846
3847         ret = copy_from_user(kernel_commands, user_commands,
3848                              command_size);
3849         if (ret) {
3850                 DRM_ERROR("Failed copying commands.\n");
3851                 vmw_cmdbuf_header_free(*header);
3852                 *header = NULL;
3853                 return ERR_PTR(-EFAULT);
3854         }
3855
3856         return kernel_commands;
3857 }
3858
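/**
 * vmw_execbuf_tie_context - Set up the DX context for a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 *
 * Looks up the context, adds it to the resource validation list and makes
 * its resource manager the command buffer resource manager for the batch.
 */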
3859 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3860                                    struct vmw_sw_context *sw_context,
3861                                    uint32_t handle)
3862 {
3863         struct vmw_resource_val_node *ctx_node;
3864         struct vmw_resource *res;
3865         int ret;
3866
3867         if (handle == SVGA3D_INVALID_ID)
3868                 return 0;
3869
3870         ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3871                                               handle, user_context_converter,
3872                                               &res);
3873         if (unlikely(ret != 0)) {
3874                 DRM_ERROR("Could not find or use DX context 0x%08x.\n",
3875                           (unsigned) handle);
3876                 return ret;
3877         }
3878
3879         ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3880         if (unlikely(ret != 0))
3881                 goto out_err;
3882
3883         sw_context->dx_ctx_node = ctx_node;
3884         sw_context->man = vmw_context_res_man(res);
3885 out_err:
3886         vmw_resource_unreference(&res);
3887         return ret;
3888 }
3889
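/**
 * vmw_execbuf_process - Validate and submit a command batch
 *
 * @file_priv: Pointer to the calling struct drm_file.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Optional lag threshold to throttle against, or 0.
 * @dx_context_handle: Handle of the DX context, or SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to copy fence information to, or NULL.
 * @out_fence: If non-NULL, returns a reference to the created fence.
 *
 * This is the main command submission path: the batch is copied in if
 * needed, validated, patched and submitted, and a fence is created to
 * track its execution.
 */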
3890 int vmw_execbuf_process(struct drm_file *file_priv,
3891                         struct vmw_private *dev_priv,
3892                         void __user *user_commands,
3893                         void *kernel_commands,
3894                         uint32_t command_size,
3895                         uint64_t throttle_us,
3896                         uint32_t dx_context_handle,
3897                         struct drm_vmw_fence_rep __user *user_fence_rep,
3898                         struct vmw_fence_obj **out_fence)
3899 {
3900         struct vmw_sw_context *sw_context = &dev_priv->ctx;
3901         struct vmw_fence_obj *fence = NULL;
3902         struct vmw_resource *error_resource;
3903         struct list_head resource_list;
3904         struct vmw_cmdbuf_header *header;
3905         struct ww_acquire_ctx ticket;
3906         uint32_t handle;
3907         int ret;
3908
3909         if (throttle_us) {
3910                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3911                                    throttle_us);
3912
3913                 if (ret)
3914                         return ret;
3915         }
3916
3917         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3918                                              kernel_commands, command_size,
3919                                              &header);
3920         if (IS_ERR(kernel_commands))
3921                 return PTR_ERR(kernel_commands);
3922
3923         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3924         if (ret) {
3925                 ret = -ERESTARTSYS;
3926                 goto out_free_header;
3927         }
3928
3929         sw_context->kernel = false;
3930         if (kernel_commands == NULL) {
3931                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3932                 if (unlikely(ret != 0))
3933                         goto out_unlock;
3934
3935
3936                 ret = copy_from_user(sw_context->cmd_bounce,
3937                                      user_commands, command_size);
3938
3939                 if (unlikely(ret != 0)) {
3940                         ret = -EFAULT;
3941                         DRM_ERROR("Failed copying commands.\n");
3942                         goto out_unlock;
3943                 }
3944                 kernel_commands = sw_context->cmd_bounce;
3945         } else if (!header)
3946                 sw_context->kernel = true;
3947
3948         sw_context->fp = vmw_fpriv(file_priv);
3949         sw_context->cur_reloc = 0;
3950         sw_context->cur_val_buf = 0;
3951         INIT_LIST_HEAD(&sw_context->resource_list);
3952         INIT_LIST_HEAD(&sw_context->ctx_resource_list);
3953         sw_context->cur_query_bo = dev_priv->pinned_bo;
3954         sw_context->last_query_ctx = NULL;
3955         sw_context->needs_post_query_barrier = false;
3956         sw_context->dx_ctx_node = NULL;
3957         sw_context->dx_query_mob = NULL;
3958         sw_context->dx_query_ctx = NULL;
3959         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3960         INIT_LIST_HEAD(&sw_context->validate_nodes);
3961         INIT_LIST_HEAD(&sw_context->res_relocations);
3962         if (sw_context->staged_bindings)
3963                 vmw_binding_state_reset(sw_context->staged_bindings);
3964
3965         if (!sw_context->res_ht_initialized) {
3966                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3967                 if (unlikely(ret != 0))
3968                         goto out_unlock;
3969                 sw_context->res_ht_initialized = true;
3970         }
3971         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3972         INIT_LIST_HEAD(&resource_list);
3973         ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3974         if (unlikely(ret != 0)) {
3975                 list_splice_init(&sw_context->ctx_resource_list,
3976                                  &sw_context->resource_list);
3977                 goto out_err_nores;
3978         }
3979
3980         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3981                                 command_size);
3982         /*
3983          * Merge the resource lists before checking the return status
3984          * from vmw_cmd_check_all so that all the open hashtabs will
3985          * be handled properly even if vmw_cmd_check_all fails.
3986          */
3987         list_splice_init(&sw_context->ctx_resource_list,
3988                          &sw_context->resource_list);
3989
3990         if (unlikely(ret != 0))
3991                 goto out_err_nores;
3992
3993         ret = vmw_resources_reserve(sw_context);
3994         if (unlikely(ret != 0))
3995                 goto out_err_nores;
3996
3997         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
3998                                      true, NULL);
3999         if (unlikely(ret != 0))
4000                 goto out_err_nores;
4001
4002         ret = vmw_validate_buffers(dev_priv, sw_context);
4003         if (unlikely(ret != 0))
4004                 goto out_err;
4005
4006         ret = vmw_resources_validate(sw_context);
4007         if (unlikely(ret != 0))
4008                 goto out_err;
4009
4010         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4011         if (unlikely(ret != 0)) {
4012                 ret = -ERESTARTSYS;
4013                 goto out_err;
4014         }
4015
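        /*
         * With guest-backed (MOB) resources, context bindings may have
         * been scrubbed while a context was evicted; rebind them before
         * the command stream is submitted.
         */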
4016         if (dev_priv->has_mob) {
4017                 ret = vmw_rebind_contexts(sw_context);
4018                 if (unlikely(ret != 0))
4019                         goto out_unlock_binding;
4020         }
4021
4022         if (!header) {
4023                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4024                                               command_size, sw_context);
4025         } else {
4026                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4027                                                 sw_context);
4028                 header = NULL;
4029         }
4030         mutex_unlock(&dev_priv->binding_mutex);
4031         if (ret)
4032                 goto out_err;
4033
4034         vmw_query_bo_switch_commit(dev_priv, sw_context);
4035         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4036                                          &fence,
4037                                          (user_fence_rep) ? &handle : NULL);
4038         /*
4039          * This error is harmless, because if fence submission fails,
4040          * vmw_fifo_send_fence will sync. The error will be propagated to
4041          * user-space in @user_fence_rep.
4042          */
4043
4044         if (ret != 0)
4045                 DRM_ERROR("Fence submission error. Syncing.\n");
4046
4047         vmw_resources_unreserve(sw_context, false);
4048
4049         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4050                                     (void *) fence);
4051
4052         if (unlikely(dev_priv->pinned_bo != NULL &&
4053                      !dev_priv->query_cid_valid))
4054                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4055
4056         vmw_clear_validations(sw_context);
4057         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4058                                     user_fence_rep, fence, handle);
4059
4060         /* Don't unreference when handing fence out */
4061         if (unlikely(out_fence != NULL)) {
4062                 *out_fence = fence;
4063                 fence = NULL;
4064         } else if (likely(fence != NULL)) {
4065                 vmw_fence_obj_unreference(&fence);
4066         }
4067
4068         list_splice_init(&sw_context->resource_list, &resource_list);
4069         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4070         mutex_unlock(&dev_priv->cmdbuf_mutex);
4071
4072         /*
4073          * Unreference resources outside of the cmdbuf_mutex to
4074          * avoid deadlocks in resource destruction paths.
4075          */
4076         vmw_resource_list_unreference(sw_context, &resource_list);
4077
4078         return 0;
4079
4080 out_unlock_binding:
4081         mutex_unlock(&dev_priv->binding_mutex);
4082 out_err:
4083         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4084 out_err_nores:
4085         vmw_resources_unreserve(sw_context, true);
4086         vmw_resource_relocations_free(&sw_context->res_relocations);
4087         vmw_free_relocations(sw_context);
4088         vmw_clear_validations(sw_context);
4089         if (unlikely(dev_priv->pinned_bo != NULL &&
4090                      !dev_priv->query_cid_valid))
4091                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4092 out_unlock:
4093         list_splice_init(&sw_context->resource_list, &resource_list);
4094         error_resource = sw_context->error_resource;
4095         sw_context->error_resource = NULL;
4096         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4097         mutex_unlock(&dev_priv->cmdbuf_mutex);
4098
4099         /*
4100          * Unreference resources outside of the cmdbuf_mutex to
4101          * avoid deadlocks in resource destruction paths.
4102          */
4103         vmw_resource_list_unreference(sw_context, &resource_list);
4104         if (unlikely(error_resource != NULL))
4105                 vmw_resource_unreference(&error_resource);
4106 out_free_header:
4107         if (header)
4108                 vmw_cmdbuf_header_free(header);
4109
4110         return ret;
4111 }
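
/*
 * A minimal usage sketch (illustrative only; @cmds and @size are
 * placeholders for the caller's command buffer and its byte size):
 * kernel-internal callers can pass the command stream in
 * @kernel_commands with a NULL @user_commands to skip the
 * bounce-buffer copy above, and supply a non-NULL @out_fence to take
 * over the fence reference:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	int ret;
 *
 *	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmds, size,
 *				  0, (uint32_t) -1, NULL, &fence);
 *	if (ret == 0 && fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */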
4112
4113 /**
4114  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4115  *
4116  * @dev_priv: The device private structure.
4117  *
4118  * This function is called to idle the fifo and unpin the query buffer
4119  * if the normal way to do this hits an error, which should typically be
4120  * extremely rare.
4121  */
4122 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4123 {
4124         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4125
4126         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4127         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4128         if (dev_priv->dummy_query_bo_pinned) {
4129                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4130                 dev_priv->dummy_query_bo_pinned = false;
4131         }
4132 }
4133
4135 /**
4136  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4137  * query bo.
4138  *
4139  * @dev_priv: The device private structure.
4140  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4141  * _after_ a query barrier that flushes all queries touching the current
4142  * buffer pointed to by @dev_priv->pinned_bo.
4143  *
4144  * This function should be used to unpin the pinned query bo, or
4145  * as a query barrier when we need to make sure that all queries have
4146  * finished before the next fifo command. (For example on hardware
4147  * context destruction where the hardware may otherwise leak unfinished
4148  * queries).
4149  *
4150  * This function does not return any failure codes, but makes an attempt
4151  * to do safe unpinning in case of errors.
4152  *
4153  * The function will synchronize on the previous query barrier, and will
4154  * thus not finish until that barrier has executed.
4155  *
4156  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4157  * before calling this function.
4158  */
4159 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4160                                      struct vmw_fence_obj *fence)
4161 {
4162         int ret = 0;
4163         struct list_head validate_list;
4164         struct ttm_validate_buffer pinned_val, query_val;
4165         struct vmw_fence_obj *lfence = NULL;
4166         struct ww_acquire_ctx ticket;
4167
4168         if (dev_priv->pinned_bo == NULL)
4169                 goto out_unlock;
4170
4171         INIT_LIST_HEAD(&validate_list);
4172
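        /*
         * Build a two-entry validation list holding the pinned query bo
         * and the dummy query bo, both reserved for exclusive access.
         */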
4173         pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4174         pinned_val.shared = false;
4175         list_add_tail(&pinned_val.head, &validate_list);
4176
4177         query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4178         query_val.shared = false;
4179         list_add_tail(&query_val.head, &validate_list);
4180
4181         ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4182                                      false, NULL);
4183         if (unlikely(ret != 0)) {
4184                 vmw_execbuf_unpin_panic(dev_priv);
4185                 goto out_no_reserve;
4186         }
4187
4188         if (dev_priv->query_cid_valid) {
4189                 BUG_ON(fence != NULL);
4190                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4191                 if (unlikely(ret != 0)) {
4192                         vmw_execbuf_unpin_panic(dev_priv);
4193                         goto out_no_emit;
4194                 }
4195                 dev_priv->query_cid_valid = false;
4196         }
4197
4198         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4199         if (dev_priv->dummy_query_bo_pinned) {
4200                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4201                 dev_priv->dummy_query_bo_pinned = false;
4202         }
4203         if (fence == NULL) {
4204                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4205                                                   NULL);
4206                 fence = lfence;
4207         }
4208         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4209         if (lfence != NULL)
4210                 vmw_fence_obj_unreference(&lfence);
4211
4212         ttm_bo_unref(&query_val.bo);
4213         ttm_bo_unref(&pinned_val.bo);
4214         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4215         DRM_INFO("Dummy query bo pin count: %d\n",
4216                  dev_priv->dummy_query_bo->pin_count);
4217
4218 out_unlock:
4219         return;
4220
4221 out_no_emit:
4222         ttm_eu_backoff_reservation(&ticket, &validate_list);
4223 out_no_reserve:
4224         ttm_bo_unref(&query_val.bo);
4225         ttm_bo_unref(&pinned_val.bo);
4226         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4227 }
4228
4229 /**
4230  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4231  * query bo.
4232  *
4233  * @dev_priv: The device private structure.
4234  *
4235  * This function should be used to unpin the pinned query bo, or
4236  * as a query barrier when we need to make sure that all queries have
4237  * finished before the next fifo command. (For example on hardware
4238  * context destruction where the hardware may otherwise leak unfinished
4239  * queries).
4240  *
4241  * This function does not return any failure codes, but makes an attempt
4242  * to do safe unpinning in case of errors.
4243  *
4244  * The function will synchronize on the previous query barrier, and will
4245  * thus not finish until that barrier has executed.
4246  */
4247 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4248 {
4249         mutex_lock(&dev_priv->cmdbuf_mutex);
4250         if (dev_priv->query_cid_valid)
4251                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4252         mutex_unlock(&dev_priv->cmdbuf_mutex);
4253 }
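
/*
 * A hypothetical call-site sketch (actual callers live outside this
 * file): issue a query barrier before destroying a hardware context so
 * that no unfinished queries can leak into the destroyed context:
 *
 *	vmw_execbuf_release_pinned_bo(dev_priv);
 *	... emit the context destruction command ...
 */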
4254
4255 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4256                       struct drm_file *file_priv, size_t size)
4257 {
4258         struct vmw_private *dev_priv = vmw_priv(dev);
4259         struct drm_vmw_execbuf_arg arg;
4260         int ret;
4261         static const size_t copy_offset[] = {
4262                 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4263                 sizeof(struct drm_vmw_execbuf_arg)};
4264
4265         if (unlikely(size < copy_offset[0])) {
4266                 DRM_ERROR("Invalid command size, ioctl %d\n",
4267                           DRM_VMW_EXECBUF);
4268                 return -EINVAL;
4269         }
4270
4271         if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4272                 return -EFAULT;
4273
4274         /*
4275          * The ioctl argument has been extended over time while
4276          * maintaining backwards compatibility:
4277          * we take different code paths depending on the value of
4278          * arg.version.
4279          */
4280
4281         if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4282                      arg.version == 0)) {
4283                 DRM_ERROR("Incorrect execbuf version.\n");
4284                 return -EINVAL;
4285         }
4286
4287         if (arg.version > 1 &&
4288             copy_from_user(&arg.context_handle,
4289                            (void __user *) (data + copy_offset[0]),
4290                            copy_offset[arg.version - 1] -
4291                            copy_offset[0]) != 0)
4292                 return -EFAULT;
4293
4294         switch (arg.version) {
4295         case 1:
4296                 arg.context_handle = (uint32_t) -1;
4297                 break;
4298         case 2:
4299                 if (arg.pad64 != 0) {
4300                         DRM_ERROR("Unused IOCTL data not set to zero.\n");
4301                         return -EINVAL;
4302                 }
4303                 break;
4304         default:
4305                 break;
4306         }
4307
4308         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4309         if (unlikely(ret != 0))
4310                 return ret;
4311
4312         ret = vmw_execbuf_process(file_priv, dev_priv,
4313                                   (void __user *)(unsigned long)arg.commands,
4314                                   NULL, arg.command_size, arg.throttle_us,
4315                                   arg.context_handle,
4316                                   (void __user *)(unsigned long)arg.fence_rep,
4317                                   NULL);
4318         ttm_read_unlock(&dev_priv->reservation_sem);
4319         if (unlikely(ret != 0))
4320                 return ret;
4321
4322         vmw_kms_cursor_post_execbuf(dev_priv);
4323
4324         return 0;
4325 }
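
/*
 * Layout note implied by copy_offset[] above: a version 1
 * struct drm_vmw_execbuf_arg ends just before @context_handle, so
 * version 1 callers get an implicit context handle of (uint32_t) -1,
 * while version 2 callers must also pass @pad64 as zero.
 */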