/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
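
/*
 * Execbuf command verification and submission.
 *
 * User-space command streams are copied into a kernel bounce buffer,
 * verified one command at a time, patched with device resource ids and
 * buffer placements, and finally copied into the device FIFO.
 */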

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"
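
/*
 * Verifier callback for commands that user-space must not submit
 * through execbuf (e.g. surface and context define/destroy, which have
 * dedicated ioctls).
 */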
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
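
/*
 * Add @*p_res to the software context's list of referenced resources,
 * consuming the caller's reference: on first sight the resource is
 * stored (and the reference kept) on the list; otherwise, or on error,
 * the reference is dropped.
 */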
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					 struct vmw_resource **p_res)
{
	int ret = 0;
	struct vmw_resource *res = *p_res;

	if (!res->on_validate_list) {
		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
			DRM_ERROR("Too many resources referenced in "
				  "command stream.\n");
			ret = -ENOMEM;
			goto out;
		}

		sw_context->resources[sw_context->num_ref_resources++] = res;
		res->on_validate_list = true;
		return 0;
	}

out:
	vmw_resource_unreference(p_res);
	return ret;
}
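
/*
 * Check that the context id in a command is valid for this client.
 * The last successfully looked-up id is cached, so streams that keep
 * using one context only pay for a single lookup.
 */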
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;
	return vmw_resource_to_validate_list(sw_context, &ctx);
}
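
/*
 * Check a surface id and patch it in place with the device surface id
 * (sid translation). As with contexts, the most recent translation is
 * cached.
 */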
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	struct vmw_surface *srf;
	int ret;
	struct vmw_resource *res;

	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (likely((sw_context->sid_valid &&
		    *sid == sw_context->last_sid))) {
		*sid = sw_context->sid_translation;
		return 0;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     *sid, &srf);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use surface 0x%08x "
			  "address 0x%08lx\n",
			  (unsigned int) *sid,
			  (unsigned long) sid);
		return ret;
	}

	sw_context->last_sid = *sid;
	sw_context->sid_valid = true;
	sw_context->sid_translation = srf->res.id;
	*sid = sw_context->sid_translation;

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);
}
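
/*
 * The per-command checkers below unpack the command body and run
 * vmw_cmd_cid_check / vmw_cmd_sid_check on the ids it contains.
 */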
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
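
/*
 * Translate a guest pointer (GMR id + offset) into a reference to the
 * backing DMA buffer, record a relocation so the location can be
 * patched once the buffer has a placement, and add the buffer to the
 * validation list on first use. On success, a reference is returned
 * in @vmw_bo_p.
 */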
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->usage = TTM_USAGE_READWRITE;
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
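
/*
 * Query commands carry a guest pointer to the result structure, which
 * is translated and validated like any other guest pointer.
 */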
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
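
/*
 * SURFACE_DMA moves data between a guest buffer and a host surface,
 * so both the guest pointer and the surface id must check out. The
 * command stream is patched with the device surface id, and cursor
 * surface contents are snooped for KMS.
 */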
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	struct vmw_resource *res;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

	vmw_dmabuf_unreference(&vmw_bo);

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
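
/*
 * DRAW_PRIMITIVES is variable-length: the fixed body is followed by an
 * array of vertex declarations and then an array of primitive ranges,
 * each referencing a surface. Both counts are bounded by the command
 * size before the arrays are walked.
 */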
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
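
/*
 * SETTEXTURESTATE carries a variable-length array of texture states;
 * only SVGA3D_TS_BIND_TEXTURE entries carry a surface id that needs
 * checking.
 */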
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
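
/*
 * Verifier dispatch: each SVGA 3D command id maps, via VMW_CMD_DEF,
 * to a vmw_cmd_func entry in vmw_cmd_funcs[]. Privileged commands map
 * to vmw_cmd_invalid; harmless ones to vmw_cmd_ok.
 */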
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[(cmd) - SVGA_3D_CMD_BASE] = (func)

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};
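
/*
 * Verify a single command: bound its size against the bytes remaining
 * in the stream, look up its verifier in the dispatch table and run
 * it. On return, *size holds the number of bytes the command occupies.
 */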
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t size)
{
	void *buf = sw_context->cmd_bounce;
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Relocations live in a fixed array; resetting the count frees them. */
	sw_context->cur_reloc = 0;
}
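
/*
 * Patch recorded relocations with final placements: buffers validated
 * into VRAM are addressed through the special framebuffer GMR, others
 * through the GMR id assigned at validation time.
 */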
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;
	uint32_t i = sw_context->num_ref_resources;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	/*
	 * Drop references to resources held during command submission.
	 */
	while (i-- > 0) {
		sw_context->resources[i]->on_validate_list = false;
		vmw_resource_unreference(&sw_context->resources[i]);
	}
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
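
/*
 * Make sure the command bounce buffer can hold at least @size bytes,
 * growing by roughly 50% per step to a page-aligned size. Old contents
 * are discarded, so this must happen before the stream is copied in.
 */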
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
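
/*
 * vmw_execbuf_ioctl - main command submission entry point.
 *
 * Copies the user command stream into the bounce buffer, verifies and
 * patches it, reserves and validates all referenced buffers, applies
 * relocations, copies the stream into the FIFO and fences the
 * submission, reporting the fence sequence back to user-space.
 */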
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	ret = vmw_resize_cmd_bounce(sw_context, arg->command_size);
	if (unlikely(ret != 0))
		goto out_unlock;

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(sw_context->cmd_bounce,
			     user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_unlock;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->num_ref_resources = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;
	fence_rep.pad64 = 0;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
	    (unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;

out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}