/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
 */

#include "opt_vm.h"

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);


#define TTM_BO_VM_NUM_PREFAULT 16

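/*
 * Comparator for the per-device RB tree of buffer objects
 * (bdev->addr_space_rb), ordered by the start page of each object's
 * address-space node.
 */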
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{
    if (a->vm_node->start < b->vm_node->start) {
        return (-1);
    } else if (a->vm_node->start > b->vm_node->start) {
        return (1);
    } else {
        return (0);
    }
}


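/*
 * Look up the buffer object whose vm_node range fully covers
 * [page_start, page_start + num_pages) in the device's address-space
 * RB tree.  Returns NULL if no object covers the requested range.
 */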
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        bo = RB_ROOT(&bdev->addr_space_rb);
        while (bo != NULL) {
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                        bo = RB_RIGHT(bo, vm_rb);
                } else
                        bo = RB_LEFT(bo, vm_rb);
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

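/*
 * Page fault handler for the cdev pager: reserve the buffer object,
 * wait out any pipelined move, reserve its I/O space, then translate
 * the faulting offset into a vm_page_t and install it in the pager's
 * VM object.
 */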
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
        struct ttm_buffer_object *bo = vm_obj->handle;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_tt *ttm = NULL;
        vm_page_t m, m1, oldm;
        int ret;
        int retval = VM_PAGER_OK;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        vm_object_pip_add(vm_obj, 1);
        oldm = *mres;
        if (oldm != NULL) {
                vm_page_remove(oldm);
                *mres = NULL;
        } else
                oldm = NULL;
retry:
        VM_OBJECT_WUNLOCK(vm_obj);
        m = NULL;

reserve:
        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY) {
                        lwkt_yield();
                        goto reserve;
                }
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                case -EINTR:
                        lwkt_yield();
                        goto reserve;
                default:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                /*
                 * Here, the behavior differs between Linux and FreeBSD.
                 *
                 * On Linux, the wait is interruptible (3rd argument to
                 * ttm_bo_wait). There must be some mechanism to resume
                 * page fault handling, once the signal is processed.
                 * On FreeBSD, the wait is uninterruptible. This is not a
                 * problem as we can't end up with an unkillable process
                 * here, because the wait will eventually time out.
                 *
                 * An example of this situation is the Xorg process
                 * which uses SIGALRM internally. The signal could
                 * interrupt the wait, causing the page fault to fail
                 * and the process to receive SIGSEGV.
                 */
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (unlikely(ret != 0)) {
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        } else
                lockmgr(&bdev->fence_lock, LK_RELEASE);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers.  There shouldn't really be any readers
         * of these bits except within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (!bo->mem.bus.is_iomem) {
                /* Allocate all pages at once, most common usage */
                ttm = bo->ttm;
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
        }

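        /*
         * Translate the faulting offset into a page: a fictitious page
         * for iomem placements, otherwise the already populated ttm
         * page, with memory attributes matching the bo's placement.
         */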
        if (bo->mem.bus.is_iomem) {
                m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
                    bo->mem.bus.offset + offset);
                pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
        } else {
                ttm = bo->ttm;
                m = ttm->pages[OFF_TO_IDX(offset)];
                if (unlikely(!m)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
                pmap_page_set_memattr(m,
                    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
        }

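        /*
         * With the object lock held, hand the page over to the VM
         * object: retry the fault if the page is still busy, otherwise
         * mark it fully valid and insert it at the faulting index.
         */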
        VM_OBJECT_WLOCK(vm_obj);
        if ((m->flags & PG_BUSY) != 0) {
#if 0
                vm_page_sleep(m, "ttmpbs");
#endif
                ttm_mem_io_unlock(man);
                ttm_bo_unreserve(bo);
                goto retry;
        }
        m->valid = VM_PAGE_BITS_ALL;
        *mres = m;
        m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
        if (m1 == NULL) {
                vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
        } else {
                KASSERT(m == m1,
                    ("inconsistent insert bo %p m %p m1 %p offset %jx",
                    bo, m, m1, (uintmax_t)offset));
        }
        vm_page_busy_try(m, FALSE);

        if (oldm != NULL) {
                vm_page_free(oldm);
        }

out_io_unlock1:
        ttm_mem_io_unlock(man);
out_unlock1:
        ttm_bo_unreserve(bo);
        vm_object_pip_wakeup(vm_obj);
        return (retval);

out_io_unlock:
        VM_OBJECT_WLOCK(vm_obj);
        goto out_io_unlock1;

out_unlock:
        VM_OBJECT_WLOCK(vm_obj);
        goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

        /*
         * On Linux, a reference to the buffer object is acquired here.
         * The reason is that this function is not called when the
         * mmap() is initialized, but only when a process forks for
         * instance. Therefore on Linux, the reference on the bo is
         * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
         * then released in ttm_bo_vm_close().
         *
         * Here, this function is called during mmap() initialization.
         * Thus, the reference acquired in ttm_bo_mmap_single() is
         * sufficient.
         */
        *color = 0;
        return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
        struct ttm_buffer_object *bo = handle;

        ttm_bo_unref(&bo);
}

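/*
 * Pager operations installed on the OBJT_MGTDEVICE object that backs a
 * mapped buffer object (see ttm_bo_mmap_single() below).
 */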
static struct cdev_pager_ops ttm_pager_ops = {
        .cdev_pg_fault = ttm_bo_vm_fault,
        .cdev_pg_ctor = ttm_bo_vm_ctor,
        .cdev_pg_dtor = ttm_bo_vm_dtor
};

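/*
 * Back an mmap() request with a buffer object.  *offset selects the bo
 * by its address-space offset; on success a managed-device pager object
 * owning the bo reference is returned in *obj_res and *offset is rebased
 * to 0.  A driver would typically call this from its mmap_single cdev
 * method, along these lines (sketch only; "mydrv_mmap_single" and the
 * bdev lookup are illustrative, not part of this file):
 *
 *	static int
 *	mydrv_mmap_single(struct dev_mmap_single_args *ap)
 *	{
 *		struct ttm_bo_device *bdev = mydrv_get_ttm_bdev(ap);
 *
 *		return (ttm_bo_mmap_single(bdev, ap->a_offset, ap->a_size,
 *		    ap->a_object, ap->a_nprot));
 *	}
 */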
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        struct vm_object *vm_obj;
        int ret;

        *obj_res = NULL;

        lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
        bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
        if (likely(bo != NULL))
                kref_get(&bo->kref);
        lockmgr(&bdev->vm_lock, LK_RELEASE);

        if (unlikely(bo == NULL)) {
                kprintf("[TTM] Could not find buffer object to map\n");
                return (EINVAL);
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = EPERM;
                goto out_unref;
        }
        ret = -driver->verify_access(bo);
        if (unlikely(ret != 0))
                goto out_unref;

        vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
            size, nprot, 0, curthread->td_ucred);

        if (vm_obj == NULL) {
                ret = EINVAL;
                goto out_unref;
        }
        /*
         * Note: We're transferring the bo reference to vm_obj->handle here.
         */
        *offset = 0;
        *obj_res = vm_obj;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

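/*
 * Tear down the mmap state of a buffer object: look up its pager
 * object, free every page still installed in it, and drop the object
 * reference obtained through cdev_pager_allocate().
 */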
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
        vm_object_t vm_obj;
        vm_page_t m;
        int i;

        vm_obj = cdev_pager_lookup(bo);
        if (vm_obj == NULL)
                return;

        VM_OBJECT_WLOCK(vm_obj);
        for (i = 0; i < bo->num_pages; i++) {
                m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
                if (m == NULL)
                        continue;
                cdev_pager_free_page(vm_obj, m);
        }
        VM_OBJECT_WUNLOCK(vm_obj);

        vm_object_deallocate(vm_obj);
}

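/*
 * The remainder of this file is unported Linux TTM code, kept under
 * "#if 0" for reference.
 */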
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);


ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        lockmgr(&bdev->vm_lock, LK_RELEASE);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return ret;

        *f_pos += io_size;

        return io_size;
}
#endif