[dragonfly.git] sys/dev/drm/ttm/ttm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  *
30  * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo.c 248060 2013-03-08 18:11:02Z dumbbell $
31  */
32
33 #define pr_fmt(fmt) "[TTM] " fmt
34
35 #include <drm/ttm/ttm_module.h>
36 #include <drm/ttm/ttm_bo_driver.h>
37 #include <drm/ttm/ttm_placement.h>
38 #include <linux/atomic.h>
39 #include <linux/export.h>
40 #include <linux/wait.h>
41
42 #define TTM_ASSERT_LOCKED(param)
43 #define TTM_DEBUG(fmt, arg...)
44 #define TTM_BO_HASH_ORDER 13
45
46 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
47 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
49
50 MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
51
52 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
53 {
54         int i;
55
56         for (i = 0; i <= TTM_PL_PRIV5; i++)
57                 if (flags & (1 << i)) {
58                         *mem_type = i;
59                         return 0;
60                 }
61         return -EINVAL;
62 }
63
64 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
65 {
66         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
67
68         kprintf("    has_type: %d\n", man->has_type);
69         kprintf("    use_type: %d\n", man->use_type);
70         kprintf("    flags: 0x%08X\n", man->flags);
71         kprintf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
72         kprintf("    size: %ju\n", (uintmax_t)man->size);
73         kprintf("    available_caching: 0x%08X\n", man->available_caching);
74         kprintf("    default_caching: 0x%08X\n", man->default_caching);
75         if (mem_type != TTM_PL_SYSTEM)
76                 (*man->func->debug)(man, TTM_PFX);
77 }
78
79 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
80                                         struct ttm_placement *placement)
81 {
82         int i, ret, mem_type;
83
84         kprintf("No space for %p (%lu pages, %luK, %luM)\n",
85                bo, bo->mem.num_pages, bo->mem.size >> 10,
86                bo->mem.size >> 20);
87         for (i = 0; i < placement->num_placement; i++) {
88                 ret = ttm_mem_type_from_flags(placement->placement[i],
89                                                 &mem_type);
90                 if (ret)
91                         return;
92                 kprintf("  placement[%d]=0x%08X (%d)\n",
93                        i, placement->placement[i], mem_type);
94                 ttm_mem_type_debug(bo->bdev, mem_type);
95         }
96 }
97
98 #if 0
99 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
100     char *buffer)
101 {
102
103         return snprintf(buffer, PAGE_SIZE, "%lu\n",
104                         (unsigned long) atomic_read(&glob->bo_count));
105 }
106 #endif
107
108 static inline uint32_t ttm_bo_type_flags(unsigned type)
109 {
110         return 1 << (type);
111 }
112
113 static void ttm_bo_release_list(struct kref *list_kref)
114 {
115         struct ttm_buffer_object *bo =
116             container_of(list_kref, struct ttm_buffer_object, list_kref);
117         struct ttm_bo_device *bdev = bo->bdev;
118         size_t acc_size = bo->acc_size;
119
120         BUG_ON(atomic_read(&bo->list_kref.refcount));
121         BUG_ON(atomic_read(&bo->kref.refcount));
122         BUG_ON(atomic_read(&bo->cpu_writers));
123         BUG_ON(bo->sync_obj != NULL);
124         BUG_ON(bo->mem.mm_node != NULL);
125         BUG_ON(!list_empty(&bo->lru));
126         BUG_ON(!list_empty(&bo->ddestroy));
127
128         if (bo->ttm)
129                 ttm_tt_destroy(bo->ttm);
130         atomic_dec(&bo->glob->bo_count);
131         if (bo->destroy)
132                 bo->destroy(bo);
133         else {
134                 kfree(bo, M_TTM_BO);
135         }
136         ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
137 }
138
139 static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
140                                   bool interruptible)
141 {
142         const char *wmsg;
143         int flags, ret;
144
145         ret = 0;
146         if (interruptible) {
147                 flags = PCATCH;
148                 wmsg = "ttbowi";
149         } else {
150                 flags = 0;
151                 wmsg = "ttbowu";
152         }
153         while (ttm_bo_is_reserved(bo)) {
154                 ret = -lksleep(bo, &bo->glob->lru_lock, 0, wmsg, 0);
155                 if (ret != 0)
156                         break;
157         }
158         return (ret);
159 }
160
161 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
162 {
163         struct ttm_bo_device *bdev = bo->bdev;
164         struct ttm_mem_type_manager *man;
165
166         BUG_ON(!ttm_bo_is_reserved(bo));
167
168         if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
169
170                 BUG_ON(!list_empty(&bo->lru));
171
172                 man = &bdev->man[bo->mem.mem_type];
173                 list_add_tail(&bo->lru, &man->lru);
174                 kref_get(&bo->list_kref);
175
176                 if (bo->ttm != NULL) {
177                         list_add_tail(&bo->swap, &bo->glob->swap_lru);
178                         kref_get(&bo->list_kref);
179                 }
180         }
181 }
182
183 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
184 {
185         int put_count = 0;
186
187         if (!list_empty(&bo->swap)) {
188                 list_del_init(&bo->swap);
189                 ++put_count;
190         }
191         if (!list_empty(&bo->lru)) {
192                 list_del_init(&bo->lru);
193                 ++put_count;
194         }
195
196         /*
197          * TODO: Add a driver hook to delete from
198          * driver-specific LRU's here.
199          */
200
201         return put_count;
202 }
203
204 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
205                           bool interruptible,
206                           bool no_wait, bool use_sequence, uint32_t sequence)
207 {
208         int ret;
209
210         while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
211                 /**
212                  * Deadlock avoidance for multi-bo reserving.
213                  */
214                 if (use_sequence && bo->seq_valid) {
215                         /**
216                          * We've already reserved this one.
217                          */
218                         if (unlikely(sequence == bo->val_seq))
219                                 return -EDEADLK;
220                         /**
221                          * Already reserved by a thread that will not back
222                          * off for us. We need to back off.
223                          */
224                         if (unlikely(sequence - bo->val_seq < (1U << 31)))
225                                 return -EAGAIN;
226                 }
227
228                 if (no_wait)
229                         return -EBUSY;
230
231                 ret = ttm_bo_wait_unreserved(bo, interruptible);
232
233                 if (unlikely(ret))
234                         return ret;
235         }
236
237         if (use_sequence) {
238                 bool wake_up = false;
239                 /**
240                  * Wake up waiters that may need to recheck for deadlock,
241                  * if we decreased the sequence number.
242                  */
243                 if (unlikely((bo->val_seq - sequence < (1U << 31))
244                              || !bo->seq_valid))
245                         wake_up = true;
246
247                 /*
248                  * In the worst case with memory ordering these values can be
249                  * seen in the wrong order. However since we call wake_up_all
250                  * in that case, this will hopefully not pose a problem,
251                  * and the worst case would only cause someone to accidentally
252                  * hit -EAGAIN in ttm_bo_reserve when they see old value of
253                  * val_seq. However this would only happen if seq_valid was
254                  * written before val_seq was, and just means some slightly
255                  * increased CPU usage.
256                  */
257                 bo->val_seq = sequence;
258                 bo->seq_valid = true;
259                 if (wake_up)
260                         wake_up_all(&bo->event_queue);
261         } else {
262                 bo->seq_valid = false;
263         }
264
265         return 0;
266 }
267 EXPORT_SYMBOL(ttm_bo_reserve);
268
269 static void ttm_bo_ref_bug(struct kref *list_kref)
270 {
271         BUG();
272 }
273
274 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
275                          bool never_free)
276 {
277         kref_sub(&bo->list_kref, count,
278                  (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
279 }
280
281 int ttm_bo_reserve(struct ttm_buffer_object *bo,
282                    bool interruptible,
283                    bool no_wait, bool use_sequence, uint32_t sequence)
284 {
285         struct ttm_bo_global *glob = bo->glob;
286         int put_count = 0;
287         int ret;
288
289         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
290         ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
291                                    sequence);
292         if (likely(ret == 0)) {
293                 put_count = ttm_bo_del_from_lru(bo);
294                 lockmgr(&glob->lru_lock, LK_RELEASE);
295                 ttm_bo_list_ref_sub(bo, put_count, true);
296         } else {
297                 lockmgr(&glob->lru_lock, LK_RELEASE);
298         }
299
300
301         return ret;
302 }
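
#if 0
/*
 * Illustrative sketch, not compiled: the usual pattern for a single-buffer
 * caller is reserve -> touch -> unreserve.  "example_touch_bo" and "my_bo"
 * are hypothetical names; my_bo is assumed to be an already-initialized
 * buffer object.  Multi-buffer reservers instead pass use_sequence = true
 * with a per-transaction sequence number so the deadlock avoidance above
 * can decide which thread backs off with -EAGAIN.
 */
static int example_touch_bo(struct ttm_buffer_object *my_bo)
{
        int ret;

        /* interruptible, wait if already reserved, no sequence */
        ret = ttm_bo_reserve(my_bo, true, false, false, 0);
        if (ret != 0)
                return ret;     /* e.g. interrupted while sleeping */

        /* my_bo is now reserved and off the LRU lists */

        ttm_bo_unreserve(my_bo);
        return 0;
}
#endif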
303
304 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
305                                   bool interruptible, uint32_t sequence)
306 {
307         bool wake_up = false;
308         int ret;
309
310         while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
311                 WARN_ON(bo->seq_valid && sequence == bo->val_seq);
312
313                 ret = ttm_bo_wait_unreserved(bo, interruptible);
314
315                 if (unlikely(ret))
316                         return ret;
317         }
318
319         if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid)
320                 wake_up = true;
321
322         /**
323          * Wake up waiters that may need to recheck for deadlock,
324          * if we decreased the sequence number.
325          */
326         bo->val_seq = sequence;
327         bo->seq_valid = true;
328         if (wake_up)
329                 wake_up_all(&bo->event_queue);
330
331         return 0;
332 }
333
334 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
335                             bool interruptible, uint32_t sequence)
336 {
337         struct ttm_bo_global *glob = bo->glob;
338         int put_count, ret;
339
340         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
341         ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
342         if (likely(!ret)) {
343                 put_count = ttm_bo_del_from_lru(bo);
344                 lockmgr(&glob->lru_lock, LK_RELEASE);
345                 ttm_bo_list_ref_sub(bo, put_count, true);
346         } else {
347                 lockmgr(&glob->lru_lock, LK_RELEASE);
348         }
349         return ret;
350 }
351 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
352
353 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
354 {
355         ttm_bo_add_to_lru(bo);
356         atomic_set(&bo->reserved, 0);
357         wake_up_all(&bo->event_queue);
358 }
359
360 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
361 {
362         struct ttm_bo_global *glob = bo->glob;
363
364         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
365         ttm_bo_unreserve_locked(bo);
366         lockmgr(&glob->lru_lock, LK_RELEASE);
367 }
368 EXPORT_SYMBOL(ttm_bo_unreserve);
369
370 /*
371  * Call with bo->mutex held.
372  */
373 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
374 {
375         struct ttm_bo_device *bdev = bo->bdev;
376         struct ttm_bo_global *glob = bo->glob;
377         int ret = 0;
378         uint32_t page_flags = 0;
379
380         TTM_ASSERT_LOCKED(&bo->mutex);
381         bo->ttm = NULL;
382
383         if (bdev->need_dma32)
384                 page_flags |= TTM_PAGE_FLAG_DMA32;
385
386         switch (bo->type) {
387         case ttm_bo_type_device:
388                 if (zero_alloc)
389                         page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
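                /* fall through - device BOs get the same ttm_tt as kernel BOs */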
390         case ttm_bo_type_kernel:
391                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
392                                                       page_flags, glob->dummy_read_page);
393                 if (unlikely(bo->ttm == NULL))
394                         ret = -ENOMEM;
395                 break;
396         case ttm_bo_type_sg:
397                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
398                                                       page_flags | TTM_PAGE_FLAG_SG,
399                                                       glob->dummy_read_page);
400                 if (unlikely(bo->ttm == NULL)) {
401                         ret = -ENOMEM;
402                         break;
403                 }
404                 bo->ttm->sg = bo->sg;
405                 break;
406         default:
407                 kprintf("[TTM] Illegal buffer object type\n");
408                 ret = -EINVAL;
409                 break;
410         }
411
412         return ret;
413 }
414
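/*
 * Move the buffer's backing store into the memory region described by @mem:
 * tear down CPU mappings when the aperture or caching attributes change,
 * create and bind a ttm_tt if the new memory type is not FIXED, and then
 * hand the transfer to ttm_bo_move_ttm(), the driver's move() hook, or the
 * memcpy fallback.
 */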
415 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
416                                   struct ttm_mem_reg *mem,
417                                   bool evict, bool interruptible,
418                                   bool no_wait_gpu)
419 {
420         struct ttm_bo_device *bdev = bo->bdev;
421         bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
422         bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
423         struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
424         struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
425         int ret = 0;
426
427         if (old_is_pci || new_is_pci ||
428             ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
429                 ret = ttm_mem_io_lock(old_man, true);
430                 if (unlikely(ret != 0))
431                         goto out_err;
432                 ttm_bo_unmap_virtual_locked(bo);
433                 ttm_mem_io_unlock(old_man);
434         }
435
436         /*
437          * Create and bind a ttm if required.
438          */
439
440         if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
441                 if (bo->ttm == NULL) {
442                         bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
443                         ret = ttm_bo_add_ttm(bo, zero);
444                         if (ret)
445                                 goto out_err;
446                 }
447
448                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
449                 if (ret)
450                         goto out_err;
451
452                 if (mem->mem_type != TTM_PL_SYSTEM) {
453                         ret = ttm_tt_bind(bo->ttm, mem);
454                         if (ret)
455                                 goto out_err;
456                 }
457
458                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
459                         if (bdev->driver->move_notify)
460                                 bdev->driver->move_notify(bo, mem);
461                         bo->mem = *mem;
462                         mem->mm_node = NULL;
463                         goto moved;
464                 }
465         }
466
467         if (bdev->driver->move_notify)
468                 bdev->driver->move_notify(bo, mem);
469
470         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
471             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
472                 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
473         else if (bdev->driver->move)
474                 ret = bdev->driver->move(bo, evict, interruptible,
475                                          no_wait_gpu, mem);
476         else
477                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
478
479         if (ret) {
480                 if (bdev->driver->move_notify) {
481                         struct ttm_mem_reg tmp_mem = *mem;
482                         *mem = bo->mem;
483                         bo->mem = tmp_mem;
484                         bdev->driver->move_notify(bo, mem);
485                         bo->mem = *mem;
486                         *mem = tmp_mem;
487                 }
488
489                 goto out_err;
490         }
491
492 moved:
493         if (bo->evicted) {
494                 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
495                 if (ret)
496                         kprintf("[TTM] Can not flush read caches\n");
497                 bo->evicted = false;
498         }
499
500         if (bo->mem.mm_node) {
501                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
502                     bdev->man[bo->mem.mem_type].gpu_offset;
503                 bo->cur_placement = bo->mem.placement;
504         } else
505                 bo->offset = 0;
506
507         return 0;
508
509 out_err:
510         new_man = &bdev->man[bo->mem.mem_type];
511         if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
512                 ttm_tt_unbind(bo->ttm);
513                 ttm_tt_destroy(bo->ttm);
514                 bo->ttm = NULL;
515         }
516
517         return ret;
518 }
519
520 /**
521  * Call with bo::reserved held.
522  * Will release GPU memory type usage on destruction.
523  * This is the place to put in driver-specific hooks to release
524  * driver private resources.
525  * Will release the bo::reserved lock.
526  */
527
528 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
529 {
530         if (bo->bdev->driver->move_notify)
531                 bo->bdev->driver->move_notify(bo, NULL);
532
533         if (bo->ttm) {
534                 ttm_tt_unbind(bo->ttm);
535                 ttm_tt_destroy(bo->ttm);
536                 bo->ttm = NULL;
537         }
538         ttm_bo_mem_put(bo, &bo->mem);
539
540         atomic_set(&bo->reserved, 0);
541         wake_up_all(&bo->event_queue);
542
543         /*
544          * Since the final reference to this bo may not be dropped by
545          * the current task we have to put a memory barrier here to make
546          * sure the changes done in this function are always visible.
547          *
548          * This function only needs protection against the final kref_put.
549          */
550         cpu_mfence();
551 }
552
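/*
 * Called on the final unref of a buffer object.  If the BO is idle and can
 * be reserved without blocking, its memory type usage is released right
 * away; otherwise a reference to its fence is taken and the BO is queued on
 * bdev->ddestroy for the delayed-destroy workqueue to finish later.
 */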
553 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
554 {
555         struct ttm_bo_device *bdev = bo->bdev;
556         struct ttm_bo_global *glob = bo->glob;
557         struct ttm_bo_driver *driver = bdev->driver;
558         void *sync_obj = NULL;
559         int put_count;
560         int ret;
561
562         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
563         ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
564
565         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
566         (void) ttm_bo_wait(bo, false, false, true);
567         if (!ret && !bo->sync_obj) {
568                 lockmgr(&bdev->fence_lock, LK_RELEASE);
569                 put_count = ttm_bo_del_from_lru(bo);
570
571                 lockmgr(&glob->lru_lock, LK_RELEASE);
572                 ttm_bo_cleanup_memtype_use(bo);
573
574                 ttm_bo_list_ref_sub(bo, put_count, true);
575
576                 return;
577         }
578         if (bo->sync_obj)
579                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
580         lockmgr(&bdev->fence_lock, LK_RELEASE);
581
582         if (!ret) {
583                 atomic_set(&bo->reserved, 0);
584                 wake_up_all(&bo->event_queue);
585         }
586
587         kref_get(&bo->list_kref);
588         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
589         lockmgr(&glob->lru_lock, LK_RELEASE);
590
591         if (sync_obj) {
592                 driver->sync_obj_flush(sync_obj);
593                 driver->sync_obj_unref(&sync_obj);
594         }
595         schedule_delayed_work(&bdev->wq,
596                               ((hz / 100) < 1) ? 1 : hz / 100);
597 }
598
599 /**
600  * function ttm_bo_cleanup_refs_and_unlock
601  * If bo idle, remove from delayed- and lru lists, and unref.
602  * If not idle, do nothing.
603  *
604  * Must be called with lru_lock and reservation held, this function
605  * will drop both before returning.
606  *
607  * @interruptible         Any sleeps should occur interruptibly.
608  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
609  */
610
611 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
612                                           bool interruptible,
613                                           bool no_wait_gpu)
614 {
615         struct ttm_bo_device *bdev = bo->bdev;
616         struct ttm_bo_driver *driver = bdev->driver;
617         struct ttm_bo_global *glob = bo->glob;
618         int put_count;
619         int ret;
620
621         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
622         ret = ttm_bo_wait(bo, false, false, true);
623
624         if (ret && !no_wait_gpu) {
625                 void *sync_obj;
626
627                 /*
628                  * Take a reference to the fence and unreserve;
629                  * at this point the buffer should be dead, so
630                  * no new sync objects can be attached.
631                  */
632                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
633                 lockmgr(&bdev->fence_lock, LK_RELEASE);
634
635                 atomic_set(&bo->reserved, 0);
636                 wake_up_all(&bo->event_queue);
637                 lockmgr(&glob->lru_lock, LK_RELEASE);
638
639                 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
640                 driver->sync_obj_unref(&sync_obj);
641                 if (ret)
642                         return ret;
643
644                 /*
645                  * Remove sync_obj with ttm_bo_wait; the wait should be
646                  * finished, and no new wait object should have been added.
647                  */
648                 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
649                 ret = ttm_bo_wait(bo, false, false, true);
650                 WARN_ON(ret);
651                 lockmgr(&bdev->fence_lock, LK_RELEASE);
652                 if (ret)
653                         return ret;
654
655                 lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
656                 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
657
658                 /*
659                  * We raced and lost: someone else holds the reservation now
660                  * and is probably busy in ttm_bo_cleanup_memtype_use.
661                  *
662                  * Even if that is not the case, any delayed destruction would
663                  * succeed now that we have finished waiting, so just return
664                  * success here.
665                  */
666                 if (ret) {
667                         lockmgr(&glob->lru_lock, LK_RELEASE);
668                         return 0;
669                 }
670         } else
671                 lockmgr(&bdev->fence_lock, LK_RELEASE);
672
673         if (ret || unlikely(list_empty(&bo->ddestroy))) {
674                 atomic_set(&bo->reserved, 0);
675                 wake_up_all(&bo->event_queue);
676                 lockmgr(&glob->lru_lock, LK_RELEASE);
677                 return ret;
678         }
679
680         put_count = ttm_bo_del_from_lru(bo);
681         list_del_init(&bo->ddestroy);
682         ++put_count;
683
684         lockmgr(&glob->lru_lock, LK_RELEASE);
685         ttm_bo_cleanup_memtype_use(bo);
686
687         ttm_bo_list_ref_sub(bo, put_count, true);
688
689         return 0;
690 }
691
692 /**
693  * Traverse the delayed list, and call ttm_bo_cleanup_refs_and_unlock on all
694  * encountered buffers.
695  */
696
697 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
698 {
699         struct ttm_bo_global *glob = bdev->glob;
700         struct ttm_buffer_object *entry = NULL;
701         int ret = 0;
702
703         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
704         if (list_empty(&bdev->ddestroy))
705                 goto out_unlock;
706
707         entry = list_first_entry(&bdev->ddestroy,
708                 struct ttm_buffer_object, ddestroy);
709         kref_get(&entry->list_kref);
710
711         for (;;) {
712                 struct ttm_buffer_object *nentry = NULL;
713
714                 if (entry->ddestroy.next != &bdev->ddestroy) {
715                         nentry = list_first_entry(&entry->ddestroy,
716                                 struct ttm_buffer_object, ddestroy);
717                         kref_get(&nentry->list_kref);
718                 }
719
720                 ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
721                 if (remove_all && ret) {
722                         ret = ttm_bo_reserve_nolru(entry, false, false,
723                                                    false, 0);
724                 }
725
726                 if (!ret)
727                         ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
728                                                              !remove_all);
729                 else
730                         lockmgr(&glob->lru_lock, LK_RELEASE);
731
732                 kref_put(&entry->list_kref, ttm_bo_release_list);
733                 entry = nentry;
734
735                 if (ret || !entry)
736                         goto out;
737
738                 lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
739                 if (list_empty(&entry->ddestroy))
740                         break;
741         }
742
743 out_unlock:
744         lockmgr(&glob->lru_lock, LK_RELEASE);
745 out:
746         if (entry)
747                 kref_put(&entry->list_kref, ttm_bo_release_list);
748         return ret;
749 }
750
751 static void ttm_bo_delayed_workqueue(struct work_struct *work)
752 {
753         struct ttm_bo_device *bdev =
754             container_of(work, struct ttm_bo_device, wq.work);
755
756         if (ttm_bo_delayed_delete(bdev, false)) {
757                 schedule_delayed_work(&bdev->wq,
758                                       ((hz / 100) < 1) ? 1 : hz / 100);
759         }
760 }
761
762 /*
763  * NOTE: bdev->vm_lock already held on call; this function releases it.
764  */
765 static void ttm_bo_release(struct kref *kref)
766 {
767         struct ttm_buffer_object *bo =
768             container_of(kref, struct ttm_buffer_object, kref);
769         struct ttm_bo_device *bdev = bo->bdev;
770         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
771         int release_active;
772
773         if (atomic_read(&bo->kref.refcount) > 0) {
774                 lockmgr(&bdev->vm_lock, LK_RELEASE);
775                 return;
776         }
777         if (likely(bo->vm_node != NULL)) {
778                 RB_REMOVE(ttm_bo_device_buffer_objects,
779                                 &bdev->addr_space_rb, bo);
780                 drm_mm_put_block(bo->vm_node);
781                 bo->vm_node = NULL;
782         }
783
784         /*
785          * Should we clean up our implied list_kref?  ttm_bo_release() can be
786          * called reentrantly due to races (this may no longer be true with
787          * the lock management changes in the deref path), so it is possible
788          * to get here twice.  There is only one list_kref ref to drop, and on
789          * the other path 'bo' can be kfree()d by another thread the instant
790          * we release our lock.
791          */
792         release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
793         if (release_active) {
794                 clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
795                 lockmgr(&bdev->vm_lock, LK_RELEASE);
796                 ttm_mem_io_lock(man, false);
797                 ttm_mem_io_free_vm(bo);
798                 ttm_mem_io_unlock(man);
799                 ttm_bo_cleanup_refs_or_queue(bo);
800                 kref_put(&bo->list_kref, ttm_bo_release_list);
801         } else {
802                 lockmgr(&bdev->vm_lock, LK_RELEASE);
803         }
804 }
805
806 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
807 {
808         struct ttm_buffer_object *bo = *p_bo;
809         struct ttm_bo_device *bdev = bo->bdev;
810
811         *p_bo = NULL;
812         lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
813         if (kref_put(&bo->kref, ttm_bo_release) == 0)
814                 lockmgr(&bdev->vm_lock, LK_RELEASE);
815 }
816 EXPORT_SYMBOL(ttm_bo_unref);
817
818 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
819 {
820         return cancel_delayed_work_sync(&bdev->wq);
821 }
822 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
823
824 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
825 {
826         if (resched)
827                 schedule_delayed_work(&bdev->wq,
828                                       ((hz / 100) < 1) ? 1 : hz / 100);
829 }
830 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
831
832 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
833                         bool no_wait_gpu)
834 {
835         struct ttm_bo_device *bdev = bo->bdev;
836         struct ttm_mem_reg evict_mem;
837         struct ttm_placement placement;
838         int ret = 0;
839
840         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
841         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
842         lockmgr(&bdev->fence_lock, LK_RELEASE);
843
844         if (unlikely(ret != 0)) {
845                 if (ret != -ERESTART) {
846                         kprintf("[TTM] Failed to expire sync object before buffer eviction\n");
847                 }
848                 goto out;
849         }
850
851         BUG_ON(!ttm_bo_is_reserved(bo));
852
853         evict_mem = bo->mem;
854         evict_mem.mm_node = NULL;
855         evict_mem.bus.io_reserved_vm = false;
856         evict_mem.bus.io_reserved_count = 0;
857
858         placement.fpfn = 0;
859         placement.lpfn = 0;
860         placement.num_placement = 0;
861         placement.num_busy_placement = 0;
862         bdev->driver->evict_flags(bo, &placement);
863         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
864                                 no_wait_gpu);
865         if (ret) {
866                 if (ret != -ERESTART) {
867                         kprintf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
868                                bo);
869                         ttm_bo_mem_space_debug(bo, &placement);
870                 }
871                 goto out;
872         }
873
874         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
875                                      no_wait_gpu);
876         if (ret) {
877                 if (ret != -ERESTART)
878                         kprintf("[TTM] Buffer eviction failed\n");
879                 ttm_bo_mem_put(bo, &evict_mem);
880                 goto out;
881         }
882         bo->evicted = true;
883 out:
884         return ret;
885 }
886
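/*
 * Evict the first buffer on @mem_type's LRU list that can be reserved
 * without blocking.  Buffers that are already on the delayed-destroy list
 * are cleaned up instead of being evicted.
 */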
887 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
888                                 uint32_t mem_type,
889                                 bool interruptible,
890                                 bool no_wait_gpu)
891 {
892         struct ttm_bo_global *glob = bdev->glob;
893         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
894         struct ttm_buffer_object *bo;
895         int ret = -EBUSY, put_count;
896
897         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
898         list_for_each_entry(bo, &man->lru, lru) {
899                 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
900                 if (!ret)
901                         break;
902         }
903
904         if (ret) {
905                 lockmgr(&glob->lru_lock, LK_RELEASE);
906                 return ret;
907         }
908
909         kref_get(&bo->list_kref);
910
911         if (!list_empty(&bo->ddestroy)) {
912                 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
913                                                      no_wait_gpu);
914                 kref_put(&bo->list_kref, ttm_bo_release_list);
915                 return ret;
916         }
917
918         put_count = ttm_bo_del_from_lru(bo);
919         lockmgr(&glob->lru_lock, LK_RELEASE);
920
921         BUG_ON(ret != 0);
922
923         ttm_bo_list_ref_sub(bo, put_count, true);
924
925         ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
926         ttm_bo_unreserve(bo);
927
928         kref_put(&bo->list_kref, ttm_bo_release_list);
929         return ret;
930 }
931
932 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
933 {
934         struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
935
936         if (mem->mm_node)
937                 (*man->func->put_node)(man, mem);
938 }
939 EXPORT_SYMBOL(ttm_bo_mem_put);
940
941 /**
942  * Repeatedly evict memory from the LRU for @mem_type until we create enough
943  * space, or we've evicted everything and there isn't enough space.
944  */
945 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
946                                         uint32_t mem_type,
947                                         struct ttm_placement *placement,
948                                         struct ttm_mem_reg *mem,
949                                         bool interruptible,
950                                         bool no_wait_gpu)
951 {
952         struct ttm_bo_device *bdev = bo->bdev;
953         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
954         int ret;
955
956         do {
957                 ret = (*man->func->get_node)(man, bo, placement, mem);
958                 if (unlikely(ret != 0))
959                         return ret;
960                 if (mem->mm_node)
961                         break;
962                 ret = ttm_mem_evict_first(bdev, mem_type,
963                                           interruptible, no_wait_gpu);
964                 if (unlikely(ret != 0))
965                         return ret;
966         } while (1);
967         if (mem->mm_node == NULL)
968                 return -ENOMEM;
969         mem->mem_type = mem_type;
970         return 0;
971 }
972
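/*
 * Choose caching flags for a new placement, preferring in order: the
 * caching the buffer currently uses, the manager's default caching, then
 * cached, write-combined and finally uncached.
 */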
973 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
974                                       uint32_t cur_placement,
975                                       uint32_t proposed_placement)
976 {
977         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
978         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
979
980         /**
981          * Keep current caching if possible.
982          */
983
984         if ((cur_placement & caching) != 0)
985                 result |= (cur_placement & caching);
986         else if ((man->default_caching & caching) != 0)
987                 result |= man->default_caching;
988         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
989                 result |= TTM_PL_FLAG_CACHED;
990         else if ((TTM_PL_FLAG_WC & caching) != 0)
991                 result |= TTM_PL_FLAG_WC;
992         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
993                 result |= TTM_PL_FLAG_UNCACHED;
994
995         return result;
996 }
997
998 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
999                                  uint32_t mem_type,
1000                                  uint32_t proposed_placement,
1001                                  uint32_t *masked_placement)
1002 {
1003         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
1004
1005         if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
1006                 return false;
1007
1008         if ((proposed_placement & man->available_caching) == 0)
1009                 return false;
1010
1011         cur_flags |= (proposed_placement & man->available_caching);
1012
1013         *masked_placement = cur_flags;
1014         return true;
1015 }
1016
1017 /**
1018  * Creates space for memory region @mem according to its type.
1019  *
1020  * This function first searches for free space in compatible memory types in
1021  * the priority order defined by the driver.  If free space isn't found, then
1022  * ttm_bo_mem_force_space is attempted in priority order to evict and find
1023  * space.
1024  */
1025 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1026                         struct ttm_placement *placement,
1027                         struct ttm_mem_reg *mem,
1028                         bool interruptible,
1029                         bool no_wait_gpu)
1030 {
1031         struct ttm_bo_device *bdev = bo->bdev;
1032         struct ttm_mem_type_manager *man;
1033         uint32_t mem_type = TTM_PL_SYSTEM;
1034         uint32_t cur_flags = 0;
1035         bool type_found = false;
1036         bool type_ok = false;
1037         bool has_erestartsys = false;
1038         int i, ret;
1039
1040         mem->mm_node = NULL;
1041         for (i = 0; i < placement->num_placement; ++i) {
1042                 ret = ttm_mem_type_from_flags(placement->placement[i],
1043                                                 &mem_type);
1044                 if (ret)
1045                         return ret;
1046                 man = &bdev->man[mem_type];
1047
1048                 type_ok = ttm_bo_mt_compatible(man,
1049                                                 mem_type,
1050                                                 placement->placement[i],
1051                                                 &cur_flags);
1052
1053                 if (!type_ok)
1054                         continue;
1055
1056                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1057                                                   cur_flags);
1058                 /*
1059                  * Copy the access and other non-mapping-related flag bits from
1060                  * the memory placement flags into the current flags.
1061                  */
1062                 ttm_flag_masked(&cur_flags, placement->placement[i],
1063                                 ~TTM_PL_MASK_MEMTYPE);
1064
1065                 if (mem_type == TTM_PL_SYSTEM)
1066                         break;
1067
1068                 if (man->has_type && man->use_type) {
1069                         type_found = true;
1070                         ret = (*man->func->get_node)(man, bo, placement, mem);
1071                         if (unlikely(ret))
1072                                 return ret;
1073                 }
1074                 if (mem->mm_node)
1075                         break;
1076         }
1077
1078         if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
1079                 mem->mem_type = mem_type;
1080                 mem->placement = cur_flags;
1081                 return 0;
1082         }
1083
1084         if (!type_found)
1085                 return -EINVAL;
1086
1087         for (i = 0; i < placement->num_busy_placement; ++i) {
1088                 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
1089                                                 &mem_type);
1090                 if (ret)
1091                         return ret;
1092                 man = &bdev->man[mem_type];
1093                 if (!man->has_type)
1094                         continue;
1095                 if (!ttm_bo_mt_compatible(man,
1096                                                 mem_type,
1097                                                 placement->busy_placement[i],
1098                                                 &cur_flags))
1099                         continue;
1100
1101                 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1102                                                   cur_flags);
1103                 /*
1104                  * Copy the access and other non-mapping-related flag bits from
1105                  * the memory placement flags into the current flags.
1106                  */
1107                 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
1108                                 ~TTM_PL_MASK_MEMTYPE);
1109
1110
1111                 if (mem_type == TTM_PL_SYSTEM) {
1112                         mem->mem_type = mem_type;
1113                         mem->placement = cur_flags;
1114                         mem->mm_node = NULL;
1115                         return 0;
1116                 }
1117
1118                 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1119                                                 interruptible, no_wait_gpu);
1120                 if (ret == 0 && mem->mm_node) {
1121                         mem->placement = cur_flags;
1122                         return 0;
1123                 }
1124                 if (ret == -ERESTART)
1125                         has_erestartsys = true;
1126         }
1127         ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
1128         return ret;
1129 }
1130 EXPORT_SYMBOL(ttm_bo_mem_space);
1131
1132 static
1133 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1134                         struct ttm_placement *placement,
1135                         bool interruptible,
1136                         bool no_wait_gpu)
1137 {
1138         int ret = 0;
1139         struct ttm_mem_reg mem;
1140         struct ttm_bo_device *bdev = bo->bdev;
1141
1142         BUG_ON(!ttm_bo_is_reserved(bo));
1143
1144         /*
1145          * FIXME: It's possible to pipeline buffer moves.
1146          * Have the driver move function wait for idle when necessary,
1147          * instead of doing it here.
1148          */
1149         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1150         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1151         lockmgr(&bdev->fence_lock, LK_RELEASE);
1152         if (ret)
1153                 return ret;
1154         mem.num_pages = bo->num_pages;
1155         mem.size = mem.num_pages << PAGE_SHIFT;
1156         mem.page_alignment = bo->mem.page_alignment;
1157         mem.bus.io_reserved_vm = false;
1158         mem.bus.io_reserved_count = 0;
1159         /*
1160          * Determine where to move the buffer.
1161          */
1162         ret = ttm_bo_mem_space(bo, placement, &mem,
1163                                interruptible, no_wait_gpu);
1164         if (ret)
1165                 goto out_unlock;
1166         ret = ttm_bo_handle_move_mem(bo, &mem, false,
1167                                      interruptible, no_wait_gpu);
1168 out_unlock:
1169         if (ret && mem.mm_node)
1170                 ttm_bo_mem_put(bo, &mem);
1171         return ret;
1172 }
1173
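/*
 * Return the index of the first entry in @placement that is compatible
 * with the memory region @mem (same memory type, compatible caching, and
 * within the fpfn/lpfn range when one is given), or -1 if none matches.
 */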
1174 static int ttm_bo_mem_compat(struct ttm_placement *placement,
1175                              struct ttm_mem_reg *mem)
1176 {
1177         int i;
1178
1179         if (mem->mm_node && placement->lpfn != 0 &&
1180             (mem->start < placement->fpfn ||
1181              mem->start + mem->num_pages > placement->lpfn))
1182                 return -1;
1183
1184         for (i = 0; i < placement->num_placement; i++) {
1185                 if ((placement->placement[i] & mem->placement &
1186                         TTM_PL_MASK_CACHING) &&
1187                         (placement->placement[i] & mem->placement &
1188                         TTM_PL_MASK_MEM))
1189                         return i;
1190         }
1191         return -1;
1192 }
1193
1194 int ttm_bo_validate(struct ttm_buffer_object *bo,
1195                         struct ttm_placement *placement,
1196                         bool interruptible,
1197                         bool no_wait_gpu)
1198 {
1199         int ret;
1200
1201         BUG_ON(!ttm_bo_is_reserved(bo));
1202         /* Check that range is valid */
1203         if (placement->lpfn || placement->fpfn)
1204                 if (placement->fpfn > placement->lpfn ||
1205                         (placement->lpfn - placement->fpfn) < bo->num_pages)
1206                         return -EINVAL;
1207         /*
1208          * Check whether we need to move buffer.
1209          */
1210         ret = ttm_bo_mem_compat(placement, &bo->mem);
1211         if (ret < 0) {
1212                 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1213                                          no_wait_gpu);
1214                 if (ret)
1215                         return ret;
1216         } else {
1217                 /*
1218                  * Copy the access and other non-mapping-related flag bits from
1219                  * the compatible memory placement flags into the active flags.
1220                  */
1221                 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1222                                 ~TTM_PL_MASK_MEMTYPE);
1223         }
1224         /*
1225          * We might need to add a TTM.
1226          */
1227         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1228                 ret = ttm_bo_add_ttm(bo, true);
1229                 if (ret)
1230                         return ret;
1231         }
1232         return 0;
1233 }
1234 EXPORT_SYMBOL(ttm_bo_validate);
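
#if 0
/*
 * Illustrative sketch, not compiled: moving a reserved BO back into cached
 * system memory with ttm_bo_validate().  "example_validate_system" and
 * "my_bo" are hypothetical names; my_bo is assumed to already be reserved
 * by the caller (ttm_bo_validate() asserts this).  The single-entry
 * placement array mirrors the layout ttm_bo_mem_space() iterates over.
 */
static int example_validate_system(struct ttm_buffer_object *my_bo)
{
        uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        struct ttm_placement placement;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &flags;
        placement.num_busy_placement = 1;
        placement.busy_placement = &flags;

        /* interruptible, allow waiting for the GPU */
        return ttm_bo_validate(my_bo, &placement, true, false);
}
#endif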
1235
1236 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1237                                 struct ttm_placement *placement)
1238 {
1239         BUG_ON((placement->fpfn || placement->lpfn) &&
1240                (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1241
1242         return 0;
1243 }
1244
1245 int ttm_bo_init(struct ttm_bo_device *bdev,
1246                 struct ttm_buffer_object *bo,
1247                 unsigned long size,
1248                 enum ttm_bo_type type,
1249                 struct ttm_placement *placement,
1250                 uint32_t page_alignment,
1251                 bool interruptible,
1252                 struct vm_object *persistent_swap_storage,
1253                 size_t acc_size,
1254                 struct sg_table *sg,
1255                 void (*destroy) (struct ttm_buffer_object *))
1256 {
1257         int ret = 0;
1258         unsigned long num_pages;
1259         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1260
1261         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1262         if (ret) {
1263                 kprintf("[TTM] Out of kernel memory\n");
1264                 if (destroy)
1265                         (*destroy)(bo);
1266                 else
1267                         kfree(bo, M_TTM_BO);
1268                 return -ENOMEM;
1269         }
1270
1271         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1272         if (num_pages == 0) {
1273                 kprintf("[TTM] Illegal buffer object size\n");
1274                 if (destroy)
1275                         (*destroy)(bo);
1276                 else
1277                         kfree(bo, M_TTM_BO);
1278                 ttm_mem_global_free(mem_glob, acc_size);
1279                 return -EINVAL;
1280         }
1281         bo->destroy = destroy;
1282
1283         kref_init(&bo->kref);
1284         kref_init(&bo->list_kref);
1285         atomic_set(&bo->cpu_writers, 0);
1286         atomic_set(&bo->reserved, 1);
1287         init_waitqueue_head(&bo->event_queue);
1288         INIT_LIST_HEAD(&bo->lru);
1289         INIT_LIST_HEAD(&bo->ddestroy);
1290         INIT_LIST_HEAD(&bo->swap);
1291         INIT_LIST_HEAD(&bo->io_reserve_lru);
1292         /*bzero(&bo->vm_rb, sizeof(bo->vm_rb));*/
1293         bo->bdev = bdev;
1294         bo->glob = bdev->glob;
1295         bo->type = type;
1296         bo->num_pages = num_pages;
1297         bo->mem.size = num_pages << PAGE_SHIFT;
1298         bo->mem.mem_type = TTM_PL_SYSTEM;
1299         bo->mem.num_pages = bo->num_pages;
1300         bo->mem.mm_node = NULL;
1301         bo->mem.page_alignment = page_alignment;
1302         bo->mem.bus.io_reserved_vm = false;
1303         bo->mem.bus.io_reserved_count = 0;
1304         bo->priv_flags = 0;
1305         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1306         bo->seq_valid = false;
1307         bo->persistent_swap_storage = persistent_swap_storage;
1308         bo->acc_size = acc_size;
1309         bo->sg = sg;
1310         atomic_inc(&bo->glob->bo_count);
1311
1312         /*
1313          * Mirror ref from kref_init() for list_kref.
1314          */
1315         set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
1316
1317         ret = ttm_bo_check_placement(bo, placement);
1318         if (unlikely(ret != 0))
1319                 goto out_err;
1320
1321         /*
1322          * For ttm_bo_type_device buffers, allocate
1323          * address space from the device.
1324          */
1325         if (bo->type == ttm_bo_type_device ||
1326             bo->type == ttm_bo_type_sg) {
1327                 ret = ttm_bo_setup_vm(bo);
1328                 if (ret)
1329                         goto out_err;
1330         }
1331
1332         ret = ttm_bo_validate(bo, placement, interruptible, false);
1333         if (ret)
1334                 goto out_err;
1335
1336         ttm_bo_unreserve(bo);
1337         return 0;
1338
1339 out_err:
1340         ttm_bo_unreserve(bo);
1341         ttm_bo_unref(&bo);
1342
1343         return ret;
1344 }
1345 EXPORT_SYMBOL(ttm_bo_init);
1346
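/*
 * Estimate the kernel memory that will be charged against the memory global
 * for one buffer object: the (driver-sized) object struct, the page pointer
 * array and a struct ttm_tt.  Drivers pass the result as @acc_size to
 * ttm_bo_init(), as ttm_bo_create() below does.
 */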
1347 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1348                        unsigned long bo_size,
1349                        unsigned struct_size)
1350 {
1351         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1352         size_t size = 0;
1353
1354         size += ttm_round_pot(struct_size);
1355         size += PAGE_ALIGN(npages * sizeof(void *));
1356         size += ttm_round_pot(sizeof(struct ttm_tt));
1357         return size;
1358 }
1359 EXPORT_SYMBOL(ttm_bo_acc_size);
1360
1361 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1362                            unsigned long bo_size,
1363                            unsigned struct_size)
1364 {
1365         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1366         size_t size = 0;
1367
1368         size += ttm_round_pot(struct_size);
1369         size += PAGE_ALIGN(npages * sizeof(void *));
1370         size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1371         size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1372         return size;
1373 }
1374 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1375
1376 int ttm_bo_create(struct ttm_bo_device *bdev,
1377                         unsigned long size,
1378                         enum ttm_bo_type type,
1379                         struct ttm_placement *placement,
1380                         uint32_t page_alignment,
1381                         bool interruptible,
1382                         struct vm_object *persistent_swap_storage,
1383                         struct ttm_buffer_object **p_bo)
1384 {
1385         struct ttm_buffer_object *bo;
1386         size_t acc_size;
1387         int ret;
1388
1389         *p_bo = NULL;
1390         bo = kmalloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
1391         if (unlikely(bo == NULL))
1392                 return -ENOMEM;
1393
1394         acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1395         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1396                           interruptible, persistent_swap_storage, acc_size,
1397                           NULL, NULL);
1398         if (likely(ret == 0))
1399                 *p_bo = bo;
1400
1401         return ret;
1402 }
1403 EXPORT_SYMBOL(ttm_bo_create);
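
#if 0
/*
 * Illustrative sketch, not compiled: allocating a page-sized, CPU-cached
 * system-memory BO with ttm_bo_create() and dropping it again with
 * ttm_bo_unref().  "example_create_bo" and "my_bdev" are hypothetical
 * names; my_bdev is assumed to be an initialized ttm_bo_device.  Real
 * drivers usually target their own memory types rather than TTM_PL_SYSTEM.
 */
static int example_create_bo(struct ttm_bo_device *my_bdev)
{
        uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo;
        int ret;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &flags;
        placement.num_busy_placement = 1;
        placement.busy_placement = &flags;

        ret = ttm_bo_create(my_bdev, PAGE_SIZE, ttm_bo_type_kernel,
                            &placement, 0 /* page_alignment */, false,
                            NULL, &bo);
        if (ret == 0)
                ttm_bo_unref(&bo);
        return ret;
}
#endif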
1404
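/*
 * Evict every buffer currently on @mem_type's LRU list, retrying until the
 * list is empty.  With @allow_errors an eviction failure is returned to the
 * caller; otherwise it is only logged and the loop continues.
 */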
1405 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1406                                         unsigned mem_type, bool allow_errors)
1407 {
1408         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1409         struct ttm_bo_global *glob = bdev->glob;
1410         int ret;
1411
1412         /*
1413          * Can't use standard list traversal since we're unlocking.
1414          */
1415
1416         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1417         while (!list_empty(&man->lru)) {
1418                 lockmgr(&glob->lru_lock, LK_RELEASE);
1419                 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1420                 if (ret) {
1421                         if (allow_errors) {
1422                                 return ret;
1423                         } else {
1424                                 kprintf("[TTM] Cleanup eviction failed\n");
1425                         }
1426                 }
1427                 lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1428         }
1429         lockmgr(&glob->lru_lock, LK_RELEASE);
1430         return 0;
1431 }
1432
1433 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1434 {
1435         struct ttm_mem_type_manager *man;
1436         int ret = -EINVAL;
1437
1438         if (mem_type >= TTM_NUM_MEM_TYPES) {
1439                 kprintf("[TTM] Illegal memory type %d\n", mem_type);
1440                 return ret;
1441         }
1442         man = &bdev->man[mem_type];
1443
1444         if (!man->has_type) {
1445                 kprintf("[TTM] Trying to take down uninitialized memory manager type %u\n",
1446                        mem_type);
1447                 return ret;
1448         }
1449
1450         man->use_type = false;
1451         man->has_type = false;
1452
1453         ret = 0;
1454         if (mem_type > 0) {
1455                 ttm_bo_force_list_clean(bdev, mem_type, false);
1456
1457                 ret = (*man->func->takedown)(man);
1458         }
1459
1460         return ret;
1461 }
1462 EXPORT_SYMBOL(ttm_bo_clean_mm);
1463
1464 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1465 {
1466         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1467
1468         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1469                 kprintf("[TTM] Illegal memory manager memory type %u\n", mem_type);
1470                 return -EINVAL;
1471         }
1472
1473         if (!man->has_type) {
1474                 kprintf("[TTM] Memory type %u has not been initialized\n", mem_type);
1475                 return 0;
1476         }
1477
1478         return ttm_bo_force_list_clean(bdev, mem_type, true);
1479 }
1480 EXPORT_SYMBOL(ttm_bo_evict_mm);
1481
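/*
 * Initialize memory type @type on @bdev: set up the io-reserve bookkeeping,
 * call the driver's init_mem_type() hook and, for anything other than
 * TTM_PL_SYSTEM, the memory manager's init() with @p_size (the managed
 * size, typically in pages).
 */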
1482 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1483                         unsigned long p_size)
1484 {
1485         int ret = -EINVAL;
1486         struct ttm_mem_type_manager *man;
1487
1488         BUG_ON(type >= TTM_NUM_MEM_TYPES);
1489         man = &bdev->man[type];
1490         BUG_ON(man->has_type);
1491         man->io_reserve_fastpath = true;
1492         man->use_io_reserve_lru = false;
1493         lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
1494         INIT_LIST_HEAD(&man->io_reserve_lru);
1495
1496         ret = bdev->driver->init_mem_type(bdev, type, man);
1497         if (ret)
1498                 return ret;
1499         man->bdev = bdev;
1500
1501         ret = 0;
1502         if (type != TTM_PL_SYSTEM) {
1503                 ret = (*man->func->init)(man, p_size);
1504                 if (ret)
1505                         return ret;
1506         }
1507         man->has_type = true;
1508         man->use_type = true;
1509         man->size = p_size;
1510
1511         INIT_LIST_HEAD(&man->lru);
1512
1513         return 0;
1514 }
1515 EXPORT_SYMBOL(ttm_bo_init_mm);
1516
1517 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
1518 {
1519         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1520         vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
1521         glob->dummy_read_page = NULL;
1525 }
1526
1527 void ttm_bo_global_release(struct drm_global_reference *ref)
1528 {
1529         struct ttm_bo_global *glob = ref->object;
1530
1531         if (refcount_release(&glob->kobj_ref))
1532                 ttm_bo_global_kobj_release(glob);
1533 }
1534 EXPORT_SYMBOL(ttm_bo_global_release);
1535
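/*
 * ttm_bo_global_init
 *
 * One-time setup of the state shared by all TTM devices: the global swap
 * LRU and device lists, the swapout shrink callback registered with the
 * memory glob, and a single uncacheable page used as the shared dummy
 * read page.
 */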
1536 int ttm_bo_global_init(struct drm_global_reference *ref)
1537 {
1538         struct ttm_bo_global_ref *bo_ref =
1539                 container_of(ref, struct ttm_bo_global_ref, ref);
1540         struct ttm_bo_global *glob = ref->object;
1541         int ret;
1542
1543         lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
1544         lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
1545         glob->mem_glob = bo_ref->mem_glob;
1546         glob->dummy_read_page = vm_page_alloc_contig(
1547             0, VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);
1548
1549         if (unlikely(glob->dummy_read_page == NULL)) {
1550                 ret = -ENOMEM;
1551                 goto out_no_drp;
1552         }
1553
1554         INIT_LIST_HEAD(&glob->swap_lru);
1555         INIT_LIST_HEAD(&glob->device_list);
1556
1557         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1558         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1559         if (unlikely(ret != 0)) {
1560                 kprintf("[TTM] Could not register buffer object swapout\n");
1561                 goto out_no_shrink;
1562         }
1563
1564         atomic_set(&glob->bo_count, 0);
1565
1566         refcount_init(&glob->kobj_ref, 1);
1567         return 0;
1568
1569 out_no_shrink:
1570         vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
1571         glob->dummy_read_page = NULL;
1575 out_no_drp:
1576         kfree(glob, M_DRM_GLOBAL);
1577         return ret;
1578 }
1579 EXPORT_SYMBOL(ttm_bo_global_init);
1580
1581
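/*
 * ttm_bo_device_release
 *
 * Tear down a ttm_bo_device: shut down every initialized memory manager
 * (reporting -EBUSY when a non-system manager cannot be cleaned), unlink
 * the device from the global device list, flush the delayed-destroy work
 * and list, and finally take down the address-space allocator used for
 * mmap offsets.
 */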
1582 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1583 {
1584         int ret = 0;
1585         unsigned i = TTM_NUM_MEM_TYPES;
1586         struct ttm_mem_type_manager *man;
1587         struct ttm_bo_global *glob = bdev->glob;
1588
1589         while (i--) {
1590                 man = &bdev->man[i];
1591                 if (man->has_type) {
1592                         man->use_type = false;
1593                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1594                                 ret = -EBUSY;
1595                                 kprintf("[TTM] DRM memory manager type %u is not clean\n",
1596                                        i);
1597                         }
1598                         man->has_type = false;
1599                 }
1600         }
1601
1602         lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
1603         list_del(&bdev->device_list);
1604         lockmgr(&glob->device_list_mutex, LK_RELEASE);
1605
1606         cancel_delayed_work_sync(&bdev->wq);
1607
1608         while (ttm_bo_delayed_delete(bdev, true))
1609                 ;
1610
1611         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1612         if (list_empty(&bdev->ddestroy))
1613                 TTM_DEBUG("Delayed destroy list was clean\n");
1614
1615         if (list_empty(&bdev->man[0].lru))
1616                 TTM_DEBUG("Swap list was clean\n");
1617         lockmgr(&glob->lru_lock, LK_RELEASE);
1618
1619         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1620         lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
1621         drm_mm_takedown(&bdev->addr_space_mm);
1622         lockmgr(&bdev->vm_lock, LK_RELEASE);
1623
1624         return ret;
1625 }
1626 EXPORT_SYMBOL(ttm_bo_device_release);
1627
1628 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1629                        struct ttm_bo_global *glob,
1630                        struct ttm_bo_driver *driver,
1631                        uint64_t file_page_offset,
1632                        bool need_dma32)
1633 {
1634         int ret = -EINVAL;
1635
1636         lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
1637         bdev->driver = driver;
1638
1639         memset(bdev->man, 0, sizeof(bdev->man));
1640
1641         /*
1642          * Initialize the system memory buffer type.
1643          * Other types must be initialized by the driver (load or ioctl path) via ttm_bo_init_mm().
1644          */
1645         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1646         if (unlikely(ret != 0))
1647                 goto out_no_sys;
1648
1649         RB_INIT(&bdev->addr_space_rb);
1650         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1651         if (unlikely(ret != 0))
1652                 goto out_no_addr_mm;
1653
1654         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1655         INIT_LIST_HEAD(&bdev->ddestroy);
1656         bdev->dev_mapping = NULL;
1657         bdev->glob = glob;
1658         bdev->need_dma32 = need_dma32;
1659         bdev->val_seq = 0;
1660         lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
1661         lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
1662         list_add_tail(&bdev->device_list, &glob->device_list);
1663         lockmgr(&glob->device_list_mutex, LK_RELEASE);
1664
1665         return 0;
1666 out_no_addr_mm:
1667         ttm_bo_clean_mm(bdev, 0);
1668 out_no_sys:
1669         return ret;
1670 }
1671 EXPORT_SYMBOL(ttm_bo_device_init);
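/*
 * Illustrative bring-up order (a sketch of assumed driver code, not part
 * of this file): a driver obtains the global state through the drm_global
 * machinery, initializes its device and then adds its device-specific
 * memory types:
 *
 *	ret = ttm_bo_device_init(&priv->bdev, glob, &my_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET, need_dma32);
 *	if (ret == 0)
 *		ret = ttm_bo_init_mm(&priv->bdev, TTM_PL_VRAM,
 *				     vram_size >> PAGE_SHIFT);
 *
 * Teardown runs the other way: ttm_bo_clean_mm() for each driver type,
 * then ttm_bo_device_release().  priv, my_bo_driver, DRM_FILE_PAGE_OFFSET
 * and vram_size above are hypothetical driver-side names.
 */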
1672
1673 /*
1674  * buffer object vm functions.
1675  */
1676
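/*
 * ttm_mem_reg_is_pci
 *
 * Report whether a memory region has to be accessed through a PCI
 * aperture mapping.  Non-fixed memory that is system memory, CMA-backed
 * or cached is directly reachable and therefore not treated as PCI
 * memory.
 */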
1677 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1678 {
1679         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1680
1681         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1682                 if (mem->mem_type == TTM_PL_SYSTEM)
1683                         return false;
1684
1685                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1686                         return false;
1687
1688                 if (mem->placement & TTM_PL_FLAG_CACHED)
1689                         return false;
1690         }
1691         return true;
1692 }
1693
1694 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1695 {
1697         ttm_bo_release_mmap(bo);
1698         ttm_mem_io_free_vm(bo);
1699 }
1700
1701 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1702 {
1703         struct ttm_bo_device *bdev = bo->bdev;
1704         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1705
1706         ttm_mem_io_lock(man, false);
1707         ttm_bo_unmap_virtual_locked(bo);
1708         ttm_mem_io_unlock(man);
1709 }
1712 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1713
1714 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1715 {
1716         struct ttm_bo_device *bdev = bo->bdev;
1717
1718         /* The caller acquired bdev->vm_lock. */
1719         RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
1720 }
1721
1722 /**
1723  * ttm_bo_setup_vm:
1724  *
1725  * @bo: the buffer to allocate address space for
1726  *
1727  * Allocate address space in the drm device so that applications
1728  * can mmap the buffer and access the contents. This only
1729  * applies to ttm_bo_type_device objects as others are not
1730  * placed in the drm device address space.
1731  */
1732
1733 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1734 {
1735         struct ttm_bo_device *bdev = bo->bdev;
1736         int ret;
1737
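        /*
         * drm_mm_pre_get() preallocates free-list nodes so that the search
         * and the atomic block allocation below can run under vm_lock
         * without sleeping.  If the atomic allocation still comes up
         * empty, drop the lock and start over with a fresh preallocation.
         */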
1738 retry_pre_get:
1739         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1740         if (unlikely(ret != 0))
1741                 return ret;
1742
1743         lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
1744         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1745                                          bo->mem.num_pages, 0, 0);
1746
1747         if (unlikely(bo->vm_node == NULL)) {
1748                 ret = -ENOMEM;
1749                 goto out_unlock;
1750         }
1751
1752         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1753                                               bo->mem.num_pages, 0);
1754
1755         if (unlikely(bo->vm_node == NULL)) {
1756                 lockmgr(&bdev->vm_lock, LK_RELEASE);
1757                 goto retry_pre_get;
1758         }
1759
1760         ttm_bo_vm_insert_rb(bo);
1761         lockmgr(&bdev->vm_lock, LK_RELEASE);
1762         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1763
1764         return 0;
1765 out_unlock:
1766         lockmgr(&bdev->vm_lock, LK_RELEASE);
1767         return ret;
1768 }
1769
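/*
 * ttm_bo_wait
 *
 * Wait for the buffer's sync object (fence) to signal, or return -EBUSY
 * when no_wait is set.  Called with bdev->fence_lock held; the lock is
 * temporarily dropped while waiting on or unreferencing a sync object and
 * re-acquired before returning.  Returns 0 once the buffer is idle.
 */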
1770 int ttm_bo_wait(struct ttm_buffer_object *bo,
1771                 bool lazy, bool interruptible, bool no_wait)
1772 {
1773         struct ttm_bo_driver *driver = bo->bdev->driver;
1774         struct ttm_bo_device *bdev = bo->bdev;
1775         void *sync_obj;
1776         int ret = 0;
1777
1778         if (likely(bo->sync_obj == NULL))
1779                 return 0;
1780
1781         while (bo->sync_obj) {
1782
1783                 if (driver->sync_obj_signaled(bo->sync_obj)) {
1784                         void *tmp_obj = bo->sync_obj;
1785                         bo->sync_obj = NULL;
1786                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1787                         lockmgr(&bdev->fence_lock, LK_RELEASE);
1788                         driver->sync_obj_unref(&tmp_obj);
1789                         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1790                         continue;
1791                 }
1792
1793                 if (no_wait)
1794                         return -EBUSY;
1795
1796                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1797                 lockmgr(&bdev->fence_lock, LK_RELEASE);
1798                 ret = driver->sync_obj_wait(sync_obj,
1799                                             lazy, interruptible);
1800                 if (unlikely(ret != 0)) {
1801                         driver->sync_obj_unref(&sync_obj);
1802                         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1803                         return ret;
1804                 }
1805                 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1806                 if (likely(bo->sync_obj == sync_obj)) {
1807                         void *tmp_obj = bo->sync_obj;
1808                         bo->sync_obj = NULL;
1809                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1810                                   &bo->priv_flags);
1811                         lockmgr(&bdev->fence_lock, LK_RELEASE);
1812                         driver->sync_obj_unref(&sync_obj);
1813                         driver->sync_obj_unref(&tmp_obj);
1814                         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1815                 } else {
1816                         lockmgr(&bdev->fence_lock, LK_RELEASE);
1817                         driver->sync_obj_unref(&sync_obj);
1818                         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1819                 }
1820         }
1821         return 0;
1822 }
1823 EXPORT_SYMBOL(ttm_bo_wait);
1824
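/*
 * ttm_bo_synccpu_write_grab / ttm_bo_synccpu_write_release
 *
 * Bracket CPU writes to a buffer: grab reserves the buffer, waits for the
 * GPU to finish with it (or returns -EBUSY when no_wait is set) and
 * increments bo->cpu_writers; release drops that count again, letting
 * other TTM paths detect in-flight CPU writes.
 *
 * Illustrative pairing (assumed caller code, not from this file):
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret == 0) {
 *		... CPU writes through the buffer's mapping ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */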
1825 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1826 {
1827         struct ttm_bo_device *bdev = bo->bdev;
1828         int ret = 0;
1829
1830         /*
1831          * Using ttm_bo_reserve makes sure the lru lists are updated.
1832          */
1833
1834         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1835         if (unlikely(ret != 0))
1836                 return ret;
1837         lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1838         ret = ttm_bo_wait(bo, false, true, no_wait);
1839         lockmgr(&bdev->fence_lock, LK_RELEASE);
1840         if (likely(ret == 0))
1841                 atomic_inc(&bo->cpu_writers);
1842         ttm_bo_unreserve(bo);
1843         return ret;
1844 }
1845 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1846
1847 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1848 {
1849         atomic_dec(&bo->cpu_writers);
1850 }
1851 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1852
1853 /**
1854  * A buffer object shrink method that tries to swap out the first
1855  * buffer object on the bo_global::swap_lru list.
1856  */
1857
1858 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1859 {
1860         struct ttm_bo_global *glob =
1861             container_of(shrink, struct ttm_bo_global, shrink);
1862         struct ttm_buffer_object *bo;
1863         int ret = -EBUSY;
1864         int put_count;
1865         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1866
1867         lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1868         list_for_each_entry(bo, &glob->swap_lru, swap) {
1869                 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
1870                 if (!ret)
1871                         break;
1872         }
1873
1874         if (ret) {
1875                 lockmgr(&glob->lru_lock, LK_RELEASE);
1876                 return ret;
1877         }
1878
1879         kref_get(&bo->list_kref);
1880
1881         if (!list_empty(&bo->ddestroy)) {
1882                 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1883                 kref_put(&bo->list_kref, ttm_bo_release_list);
1884                 return ret;
1885         }
1886
1887         put_count = ttm_bo_del_from_lru(bo);
1888         lockmgr(&glob->lru_lock, LK_RELEASE);
1889
1890         ttm_bo_list_ref_sub(bo, put_count, true);
1891
1892         /**
1893          * Wait for GPU, then move to system cached.
1894          */
1895
1896         lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
1897         ret = ttm_bo_wait(bo, false, false, false);
1898         lockmgr(&bo->bdev->fence_lock, LK_RELEASE);
1899
1900         if (unlikely(ret != 0))
1901                 goto out;
1902
1903         if ((bo->mem.placement & swap_placement) != swap_placement) {
1904                 struct ttm_mem_reg evict_mem;
1905
1906                 evict_mem = bo->mem;
1907                 evict_mem.mm_node = NULL;
1908                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1909                 evict_mem.mem_type = TTM_PL_SYSTEM;
1910
1911                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1912                                              false, false);
1913                 if (unlikely(ret != 0))
1914                         goto out;
1915         }
1916
1917         ttm_bo_unmap_virtual(bo);
1918
1919         /**
1920          * Swap out. Buffer will be swapped in again as soon as
1921          * anyone tries to access a ttm page.
1922          */
1923
1924         if (bo->bdev->driver->swap_notify)
1925                 bo->bdev->driver->swap_notify(bo);
1926
1927         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1928 out:
1929
1930         /**
1931          *
1932          * Unreserve without putting on LRU to avoid swapping out an
1933          * already swapped buffer.
1934          */
1935
1936         atomic_set(&bo->reserved, 0);
1937         wake_up_all(&bo->event_queue);
1938         kref_put(&bo->list_kref, ttm_bo_release_list);
1939         return ret;
1940 }
1941
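/*
 * ttm_bo_swapout_all
 *
 * Keep invoking the swapout shrink callback until it reports that nothing
 * more on the device's global swap LRU can be swapped out.
 */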
1942 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1943 {
1944         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1945                 ;
1946 }
1947 EXPORT_SYMBOL(ttm_bo_swapout_all);