/* drivers/gpu/drm/ttm/ttm_bo_util.c */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
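
/*
 * Illustrative sketch (not part of this file): moves that involve system
 * memory need no copy engine, so a driver's move() callback can hand them
 * straight to ttm_bo_move_ttm(). The surrounding driver code is hypothetical.
 *
 *	if (old_mem->mem_type == TTM_PL_TT &&
 *	    new_mem->mem_type == TTM_PL_SYSTEM)
 *		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 */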

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
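
/*
 * Usage note: outside the fastpath, callers are expected to bracket
 * ttm_mem_io_reserve()/ttm_mem_io_free() with the io_reserve lock, as
 * this file itself does in ttm_mem_reg_ioremap() and ttm_bo_kmap():
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */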

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset,
                                          mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
                                               mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}
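
/*
 * Illustrative sketch (hypothetical caller): ttm_mem_reg_ioremap() leaves
 * *virtual NULL for non-iomem regions, and ttm_mem_reg_iounmap() copes
 * with a NULL mapping, so the pair can be called unconditionally, as
 * ttm_bo_move_memcpy() below does:
 *
 *	void *virt;
 *	int ret = ttm_mem_reg_ioremap(bdev, mem, &virt);
 *	if (ret)
 *		return ret;
 *	if (virt)
 *		memset_io(virt, 0, mem->num_pages * PAGE_SIZE);
 *	ttm_mem_reg_iounmap(bdev, mem, virt);	(also drops the io reservation)
 */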

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || ttm->state == tt_unpopulated)) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
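
/*
 * Illustrative sketch (hypothetical driver code): ttm_bo_move_memcpy() is
 * the universal fallback when an accelerated blit is unavailable or fails.
 * mydrv_copy_blit() is a made-up name standing in for a driver's copy path.
 *
 *	ret = mydrv_copy_blit(bo, evict, no_wait_gpu, new_mem);
 *	if (ret)
 *		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	return ret;
 */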

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, negative error code on failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        *new_obj = fbo;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
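
/*
 * Usage note: this file itself uses ttm_io_prot() to derive a kernel
 * mapping protection from a placement, e.g. in ttm_bo_kmap_ttm() below:
 *
 *	prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
 *		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
 */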

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base +
                                                  bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base +
                                                       bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
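
/*
 * Illustrative sketch (hypothetical caller): a reserved, idle bo can be
 * mapped into kernel space; every successful ttm_bo_kmap() must be
 * balanced by ttm_bo_kunmap(), and is_iomem tells the caller whether the
 * mapping must be accessed with the io accessors:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... access virt, using memcpy_toio()/memcpy_fromio() if is_iomem ...
 *	ttm_bo_kunmap(&map);
 */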

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
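
/*
 * Illustrative sketch (hypothetical driver code): after queuing a GPU blit
 * for the move and obtaining a sync object for it, the driver hands the
 * remaining bookkeeping to ttm_bo_move_accel_cleanup(). mydrv_emit_copy()
 * is a made-up name for the driver's copy submission path.
 *
 *	sync_obj = mydrv_emit_copy(bo, &bo->mem, new_mem);
 *	if (IS_ERR(sync_obj))
 *		return PTR_ERR(sync_obj);
 *	return ttm_bo_move_accel_cleanup(bo, sync_obj, evict,
 *					 no_wait_gpu, new_mem);
 */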