drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

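/**
 * vmw_resource_reference - take a reference on a resource
 *
 * @res: Pointer to the resource.
 *
 * Returns @res with its refcount incremented.
 */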
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

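/**
 * vmw_resource_release - final release of a resource
 *
 * @kref: Embedded kref of the resource whose refcount reached zero.
 *
 * Called by kref_put() with the resource_lock write-held; temporarily
 * drops the lock while unbinding and unreferencing any backup buffer,
 * then re-takes it before removing the resource id.
 */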
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, 0);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

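/**
 * vmw_resource_unreference - drop a reference on a resource
 *
 * @p_res: Pointer to the pointer to the resource. Set to NULL on return.
 *
 * Drops the reference under the resource_lock, so a potential final
 * release runs with that lock held.
 */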
void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - make a resource visible to lookups
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

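/**
 * vmw_resource_lookup - look up an activated resource by device id
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr:      The idr to search, typically one of @dev_priv->res_idr.
 * @id:       The resource id.
 *
 * Returns a refcounted pointer to the resource, or NULL if the id is
 * unknown or the resource hasn't been activated yet.
 */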
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_user_lookup_handle - helper that looks up either a surface or a dmabuf
 *
 * The pointers pointed at by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

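/**
 * vmw_dmabuf_init - initialize a struct vmw_dma_buffer
 *
 * @dev_priv:      Pointer to a device private struct.
 * @vmw_bo:        Buffer object to initialize.
 * @size:          Size of the buffer in bytes.
 * @placement:     Initial TTM placement of the buffer.
 * @interruptible: Whether waits during creation should be interruptible.
 * @bo_free:       Destructor. On failure, ttm_bo_init() destroys the
 *                 buffer through this callback, which is why callers in
 *                 this file don't free @vmw_bo themselves on error.
 */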
int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);
        return (vmw_user_bo->prime.base.tfile == tfile ||
                vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

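/**
 * vmw_dmabuf_alloc_ioctl - ioctl handler allocating a user dma buffer
 *
 * Allocates a buffer of the user-requested size and returns its handle
 * and mmap offset in the reply part of the argument union.
 */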
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

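/**
 * vmw_user_dmabuf_lookup - look up a dma buffer by user-space handle
 *
 * @tfile:  Pointer to a struct ttm_object_file identifying the caller.
 * @handle: The TTM user-space handle.
 * @out:    On success, assigned a pointer to the referenced buffer.
 *
 * Returns -ESRCH if the handle doesn't exist, and -EINVAL if it names
 * an object that isn't a buffer.
 */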
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

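/**
 * vmw_user_dmabuf_reference - add a usage reference to a user dma buffer
 *
 * @tfile:   Pointer to a struct ttm_object_file identifying the caller.
 * @dma_buf: The buffer to reference.
 *
 * Returns -EINVAL if @dma_buf is not a user dma buffer.
 */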
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

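/**
 * vmw_stream_init - initialize a stream resource and claim an overlay unit
 *
 * @dev_priv: Pointer to a device private struct.
 * @stream:   The stream to initialize.
 * @res_free: Destructor, or NULL to kfree() the stream on init failure.
 *
 * On success the stream is activated with vmw_stream_destroy() as its
 * hardware destroy hook, which returns the overlay unit on destruction.
 */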
static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * vmw_user_stream_base_release - release hook for the stream base object
 *
 * Called when user space has no more references on the base object.
 * Releases the base object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }

        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

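/**
 * vmw_user_stream_lookup - look up a user stream by resource id
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile:    Pointer to a struct ttm_object_file identifying the caller.
 * @inout_id: The resource id on input; the stream id on output.
 * @out:      On success, assigned a referenced pointer to the resource.
 *
 * Returns -EINVAL if the id doesn't name a user stream, and -EPERM if
 * the stream belongs to another file.
 */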
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}

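/**
 * vmw_dumb_create - implement the DRM dumb-buffer create callback
 *
 * @file_priv: Identifies the caller.
 * @dev:       The DRM device.
 * @args:      Width, height and bpp on input; pitch, size and handle of
 *             the new buffer on output.
 *
 * The pitch is the width times the bpp rounded up to whole bytes, and
 * the size is the pitch times the height.
 */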
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (vmw_user_bo == NULL)
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (ret != 0) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (ret != 0)
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
                                    args->size,
                                    &vmw_user_bo->prime,
                                    false,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        args->handle = vmw_user_bo->prime.base.hash.key;

out_no_base_object:
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

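/**
 * vmw_dumb_map_offset - implement the DRM dumb-buffer map_offset callback
 *
 * @file_priv: Identifies the caller.
 * @dev:       The DRM device.
 * @handle:    Handle of the dumb buffer.
 * @offset:    On success, assigned the buffer's mmap offset.
 */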
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

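/**
 * vmw_dumb_destroy - implement the DRM dumb-buffer destroy callback
 *
 * Drops the user-space usage reference on the buffer's base object.
 */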
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @no_backup:      Whether to skip backup buffer allocation.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}