/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256-byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - the drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

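/*
 * Allocate a qxl_release and an idr handle for it.  Returns the handle
 * (>= 1) on success or a negative error code from idr_alloc() on
 * failure; on failure the release itself is freed again.  The new
 * release is handed back to the caller through @ret.
 */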
static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
{
        struct qxl_release *release;
        int handle;
        size_t size = sizeof(*release);
        int idr_ret;

        release = kmalloc(size, GFP_KERNEL);
        if (!release) {
                DRM_ERROR("Out of memory\n");
                return 0;
        }
        release->type = type;
        release->release_offset = 0;
        release->surface_release_id = 0;
        INIT_LIST_HEAD(&release->bos);

        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
        idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();
        handle = idr_ret;
        if (idr_ret < 0) {
                /* don't leak the release when the idr allocation fails */
                kfree(release);
                return handle;
        }
        *ret = release;
        QXL_INFO(qdev, "allocated release %d\n", handle);
        release->id = handle;
        return handle;
}

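/*
 * Free a release: drop its surface id if one was allocated, remove the
 * release id from each BO's fence and drop the BO references taken by
 * qxl_release_list_add(), then remove the release from the idr and
 * free the structure itself.
 */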
void
qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
{
        struct qxl_bo_list *entry, *tmp;
        QXL_INFO(qdev, "release %d, type %d\n", release->id,
                 release->type);

        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
                QXL_INFO(qdev, "release %llx\n",
                        drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
                                                - DRM_FILE_OFFSET);
                qxl_fence_remove_release(&bo->fence, release->id);
                qxl_bo_unref(&bo);
                kfree(entry);
        }
        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);
        kfree(release);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
{
        int ret;
        /* pin release BOs - they are too messy to evict */
        ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
                            QXL_GEM_DOMAIN_VRAM, NULL,
                            bo);
        return ret;
}

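/*
 * Track a BO on the release's list.  Duplicates are detected and
 * silently accepted; otherwise a reference is taken on the BO, which
 * is dropped again in qxl_release_free().
 */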
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
        struct qxl_bo_list *entry;

        list_for_each_entry(entry, &release->bos, tv.head) {
                if (entry->tv.bo == &bo->tbo)
                        return 0;
        }

        entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
}

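/*
 * Make a reserved BO ready for the hardware: validate it into its
 * placement unless it is pinned, then make sure it has a surface id
 * allocated via qxl_bo_check_id().
 */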
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
        int ret;

        if (!bo->pin_count) {
                qxl_ttm_placement_from_domain(bo, bo->type, false);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement,
                                      true, false);
                if (ret)
                        return ret;
        }

        /* allocate a surface for reserved + validated buffers */
        ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
        if (ret)
                return ret;
        return 0;
}

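/*
 * Reserve every BO on the release with a single ww acquire ticket and
 * validate each one.  If validation of any BO fails, the whole
 * reservation is backed off and the error returned.
 */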
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
        int ret;
        struct qxl_bo_list *entry;

        /* if the release has only one object, it is the release itself;
           these objects are pinned, so there is no need to reserve */
        if (list_is_singular(&release->bos))
                return 0;

        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
        if (ret)
                return ret;

        list_for_each_entry(entry, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

                ret = qxl_release_validate_bo(bo);
                if (ret) {
                        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
                        return ret;
                }
        }
        return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
        /* if the release has only one object, it is the release itself;
           these objects are pinned, so there is nothing to back off */
        if (list_is_singular(&release->bos))
                return;

        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

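/*
 * Surface commands come in create/destroy pairs.  For a destroy
 * command the release is stashed in the same BO as the create command,
 * 64 bytes after the create release (the surface release slot is 128
 * bytes, see SURFACE_RELEASE_SIZE above), so both commands share one
 * suballocation.  For a create command this falls through to the
 * normal reserved-release allocator.
 */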
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
{
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
                struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
                struct qxl_bo *bo;
                union qxl_release_info *info;

                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
                bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

                (*release)->release_offset = create_rel->release_offset + 64;

                qxl_release_list_add(*release, bo);

                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);

                qxl_bo_unref(&bo);
                return 0;
        }

        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
                                         QXL_RELEASE_SURFACE_CMD, release, NULL);
}

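/*
 * Allocate a release and suballocate room for its command out of the
 * current release BO of the given type: index 0 is used for drawables,
 * 1 for surface commands and 2 for cursor commands (see the
 * release_size_per_bo/releases_per_bo tables above).  Once the current
 * BO is full, a fresh page-sized BO is allocated for that type.
 */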
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                                       int type, struct qxl_release **release,
                                       struct qxl_bo **rbo)
{
        struct qxl_bo *bo;
        int idr_ret;
        int ret = 0;
        union qxl_release_info *info;
        int cur_idx;

        if (type == QXL_RELEASE_DRAWABLE)
                cur_idx = 0;
        else if (type == QXL_RELEASE_SURFACE_CMD)
                cur_idx = 1;
        else if (type == QXL_RELEASE_CURSOR_CMD)
                cur_idx = 2;
        else {
                DRM_ERROR("got illegal type: %d\n", type);
                return -EINVAL;
        }

        idr_ret = qxl_release_alloc(qdev, type, release);
        if (idr_ret < 0)
                return idr_ret;

        mutex_lock(&qdev->release_mutex);
        if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        return ret;
                }
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        if (rbo)
                *rbo = bo;

        mutex_unlock(&qdev->release_mutex);

        qxl_release_list_add(*release, bo);

        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);

        qxl_bo_unref(&bo);
        return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                                   uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);
        if (!release) {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }

        return release;
}

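/*
 * Map the release's info structure.  The release lives in the first BO
 * on its list; the containing page is mapped with an atomic kmap, so
 * the pointer returned here should be handed back to
 * qxl_release_unmap() promptly, without sleeping in between.
 */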
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release)
{
        void *ptr;
        union qxl_release_info *info;
        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
        if (!ptr)
                return NULL;
        info = ptr + (release->release_offset & ~PAGE_MASK);
        return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
{
        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
        void *ptr;

        ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

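/*
 * Attach the release's fence to every BO on its list and unreserve
 * them: each BO gets the release id added to its qxl_fence, is put
 * back on the LRU and has its reservation ww_mutex dropped, after
 * which the acquire ticket from qxl_release_reserve_list() is
 * finished.
 */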
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;
        struct qxl_bo *qbo;

        /* if the release has only one object, it is the release itself;
           these objects are pinned, so there is nothing to fence */
        if (list_is_singular(&release->bos))
                return;

        bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);
        spin_lock(&bdev->fence_lock);

        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;
                qbo = to_qxl_bo(bo);

                if (!entry->bo->sync_obj)
                        entry->bo->sync_obj = &qbo->fence;

                qxl_fence_add_release_locked(&qbo->fence, release->id);

                ttm_bo_add_to_lru(bo);
                ww_mutex_unlock(&bo->resv->lock);
                entry->reserved = false;
        }
        spin_unlock(&bdev->fence_lock);
        spin_unlock(&glob->lru_lock);
        ww_acquire_fini(&release->ticket);
}