drm/ttm: Sync with Linux 3.16
author     François Tigeot <ftigeot@wolfpond.org>
           Fri, 29 Mar 2019 21:26:33 +0000 (22:26 +0100)
committer  François Tigeot <ftigeot@wolfpond.org>
           Fri, 29 Mar 2019 21:26:33 +0000 (22:26 +0100)
20 files changed:
sys/dev/drm/drm_mm.c
sys/dev/drm/drm_vma_manager.c
sys/dev/drm/include/drm/drmP.h
sys/dev/drm/include/drm/drm_mm.h
sys/dev/drm/include/drm/drm_vma_manager.h
sys/dev/drm/include/drm/ttm/ttm_bo_api.h
sys/dev/drm/include/drm/ttm/ttm_bo_driver.h
sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h
sys/dev/drm/include/drm/ttm/ttm_page_alloc.h
sys/dev/drm/include/drm/ttm/ttm_placement.h
sys/dev/drm/include/linux/mm.h
sys/dev/drm/radeon/radeon_ttm.c
sys/dev/drm/ttm/ttm_agp_backend.c
sys/dev/drm/ttm/ttm_bo.c
sys/dev/drm/ttm/ttm_bo_manager.c
sys/dev/drm/ttm/ttm_bo_util.c
sys/dev/drm/ttm/ttm_bo_vm.c
sys/dev/drm/ttm/ttm_execbuf_util.c
sys/dev/drm/ttm/ttm_page_alloc.c
sys/dev/drm/ttm/ttm_tt.c

sys/dev/drm/drm_mm.c
index 776ade8..bfbb107 100644
@@ -395,27 +395,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
-       struct drm_mm *mm = node->mm;
-
-       drm_mm_remove_node(node);
-
-       spin_lock(&mm->unused_lock);
-       if (mm->num_unused < MM_UNUSED_TARGET) {
-               list_add(&node->node_list, &mm->unused_nodes);
-               ++mm->num_unused;
-       } else
-               kfree(node);
-       spin_unlock(&mm->unused_lock);
-}
-
 static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
 {
        if (end - start < size)
@@ -765,8 +744,6 @@ EXPORT_SYMBOL(drm_mm_clean);
 void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 {
        INIT_LIST_HEAD(&mm->hole_stack);
-       INIT_LIST_HEAD(&mm->unused_nodes);
-       mm->num_unused = 0;
        mm->scanned_blocks = 0;
 
        /* Clever trick to avoid a special case in the free hole tracking. */
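
With drm_mm_put_block() and the unused-node cache gone, callers pair drm_mm_remove_node() with an explicit kfree() of the node they allocated themselves. A minimal sketch of the replacement pattern (locking omitted; ttm_bo_man_put_node() in ttm_bo_manager.c is the in-tree user of this pattern):

	/* old: drm_mm_put_block(node); */
	drm_mm_remove_node(node);	/* detach the range from the drm_mm */
	kfree(node);			/* free the node ourselves, no cache */
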
sys/dev/drm/drm_vma_manager.c
index a0252e3..3c7ab30 100644
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_vma_manager.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
sys/dev/drm/include/drm/drmP.h
index 3925eb3..30270c2 100644
@@ -1258,7 +1258,6 @@ struct ttm_bo_device;
 int ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
     vm_size_t size, struct vm_object **obj_res, int nprot);
 struct ttm_buffer_object;
-void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
 
 /* simplified version of kvasnprintf() for drm needs. */
 char *drm_vasprintf(int flags, const char *format, __va_list ap) __printflike(2, 0);
sys/dev/drm/include/drm/drm_mm.h
index b15670f..fc65118 100644
@@ -79,9 +79,6 @@ struct drm_mm {
        /* head_node.node_list is the list of all memory nodes, ordered
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
-       struct list_head unused_nodes;
-       int num_unused;
-       struct spinlock unused_lock;
        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
@@ -290,8 +287,6 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                                   DRM_MM_CREATE_DEFAULT);
 }
 
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-
 void drm_mm_remove_node(struct drm_mm_node *node);
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 void drm_mm_init(struct drm_mm *mm,
sys/dev/drm/include/drm/drm_vma_manager.h
index 25ab4f8..9e6d075 100644
@@ -24,6 +24,7 @@
  */
 
 #include <drm/drm_mm.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
@@ -220,7 +221,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
                                      struct address_space *file_mapping)
 {
-       if (drm_vma_node_has_offset(node))
+       if (file_mapping && drm_vma_node_has_offset(node))
                unmap_mapping_range(file_mapping,
                                    drm_vma_node_offset_addr(node),
                                    drm_vma_node_size(node) << PAGE_SHIFT, 1);
sys/dev/drm/include/drm/ttm/ttm_bo_api.h
index a5a1000..c3f605a 100644
@@ -184,6 +184,7 @@ struct ttm_tt;
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
+ * @wu_mutex: Wait unreserved mutex.
  *
  * Base class for TTM buffer object, that deals with data placement and CPU
  * mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -266,6 +267,7 @@ struct ttm_buffer_object {
 
        struct reservation_object *resv;
        struct reservation_object ttm_resv;
+       struct lock wu_mutex;
 };
 
 /**
@@ -499,13 +501,12 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
                        void (*destroy) (struct ttm_buffer_object *));
 
 /**
- * ttm_bo_synccpu_object_init
+ * ttm_bo_create
  *
  * @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
  * @size: Requested size of buffer object.
  * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
+ * @placement: Initial placement.
  * @page_alignment: Data alignment in pages.
  * @interruptible: If needing to sleep while waiting for GPU resources,
  * sleep interruptible.
@@ -717,11 +718,6 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                         const char __user *wbuf, char __user *rbuf,
                         size_t count, loff_t *f_pos, bool write);
 
-extern ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo,
-                        const char __user *wbuf,
-                       char __user *rbuf, size_t count, loff_t *f_pos,
-                       bool write);
-
 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
 #endif
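
The new ttm_bo_wait_unreserved() pairs with a trylock-style reserve: when ttm_bo_reserve() with no_wait set returns -EBUSY, the caller can drop its own locks, wait for the current holder on bo->wu_mutex, and retry, which is how the ttm_bo_vm.c fault handler below uses it. A minimal sketch, assuming a sleepable caller:

	ret = ttm_bo_reserve(bo, true, true, false, NULL);	/* interruptible, no_wait */
	if (ret == -EBUSY) {
		ret = ttm_bo_wait_unreserved(bo);	/* -ERESTARTSYS on signal */
		if (ret)
			return ret;
		/* ... retry ... */
	}
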
sys/dev/drm/include/drm/ttm/ttm_bo_driver.h
index d19ced8..a5abd10 100644
@@ -682,6 +682,15 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
 extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct vm_object *persistent_swap_storage);
 
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
 /*
  * ttm_bo.c
  */
@@ -739,6 +748,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
  * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @mapping: The address space to use for this bo.
  * @file_page_offset: Offset into the device address space that is available
  * for buffer data. This ensures compatibility with other users of the
  * address space.
@@ -750,6 +760,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
 extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
+                             struct address_space *mapping,
                              uint64_t file_page_offset, bool need_dma32);
 
 /**
@@ -777,7 +788,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
 
 /**
- * ttm_bo_reserve_nolru:
+ * __ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -798,10 +809,10 @@ extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
  * -EALREADY: Bo already reserved using @ticket. This error code will only
  * be returned if @use_ticket is set to true.
  */
-static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
-                                      bool interruptible,
-                                      bool no_wait, bool use_ticket,
-                                      struct ww_acquire_ctx *ticket)
+static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
+                                  bool interruptible,
+                                  bool no_wait, bool use_ticket,
+                                  struct ww_acquire_ctx *ticket)
 {
        int ret = 0;
 
@@ -877,8 +888,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
        WARN_ON(!atomic_read(&bo->kref.refcount));
 
-       ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
-                                   ticket);
+       ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);
 
@@ -918,20 +928,14 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 
 /**
- * ttm_bo_unreserve_ticket
+ * __ttm_bo_unreserve
  * @bo: A pointer to a struct ttm_buffer_object.
- * @ticket: ww_acquire_ctx used for reserving
  *
- * Unreserve a previous reservation of @bo made with @ticket.
+ * Unreserve a previous reservation of @bo where the buffer object is
+ * already on lru lists.
  */
-static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
-                                          struct ww_acquire_ctx *t)
+static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-       if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-               lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
-               ttm_bo_add_to_lru(bo);
-               lockmgr(&bo->glob->lru_lock, LK_RELEASE);
-       }
        ww_mutex_unlock(&bo->resv->lock);
 }
 
@@ -944,7 +948,25 @@ static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
  */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-       ttm_bo_unreserve_ticket(bo, NULL);
+       if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+               lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
+               ttm_bo_add_to_lru(bo);
+               lockmgr(&bo->glob->lru_lock, LK_RELEASE);
+       }
+       __ttm_bo_unreserve(bo);
+}
+
+/**
+ * ttm_bo_unreserve_ticket
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ticket: ww_acquire_ctx used for reserving
+ *
+ * Unreserve a previous reservation of @bo made with @ticket.
+ */
+static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+                                          struct ww_acquire_ctx *t)
+{
+       ttm_bo_unreserve(bo);
 }
 
 /*
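
In short, __ttm_bo_reserve()/__ttm_bo_unreserve() are the bare lock operations with no LRU bookkeeping, while ttm_bo_reserve()/ttm_bo_unreserve() also take the object off and put it back on the LRU lists. A rough sketch of the intended pairing (error handling trimmed):

	/* LRU walkers that already hold glob->lru_lock use the bare variants: */
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
	/* ... */
	__ttm_bo_unreserve(bo);		/* leaves the LRU lists untouched */

	/* Regular callers keep using the full helpers: */
	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	/* ... */
	ttm_bo_unreserve(bo);		/* re-adds to the LRU (unless NO_EVICT), then unlocks */
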
sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h
index ec8a1d3..16db7d0 100644
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 /**
  * function ttm_eu_reserve_buffers
  *
- * @ticket:  [out] ww_acquire_ctx returned by call.
+ * @ticket:  [out] ww_acquire_ctx filled in by call, or NULL if only
+ *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
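
A NULL ticket now selects trylock-only behaviour throughout the execbuf helpers (see the ttm_execbuf_util.c changes below). A sketch of both call styles, assuming a caller-built validate_list of ttm_validate_buffer entries:

	struct ww_acquire_ctx ticket;

	/* Blocking, deadlock-avoiding reserve of every bo on the list: */
	ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	/* ... validate, submit ... */
	ttm_eu_backoff_reservation(&ticket, &validate_list);

	/* Non-blocking variant: a contended bo fails the call instead of sleeping. */
	ret = ttm_eu_reserve_buffers(NULL, &validate_list);
	if (ret == 0)
		ttm_eu_backoff_reservation(NULL, &validate_list);
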
sys/dev/drm/include/drm/ttm/ttm_page_alloc.h
index 3a14e80..49a8284 100644
  * Authors: Dave Airlie <airlied@redhat.com>
  *          Jerome Glisse <jglisse@redhat.com>
  */
-/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.h 247835 2013-03-05 09:49:34Z kib $ */
 #ifndef TTM_PAGE_ALLOC
 #define TTM_PAGE_ALLOC
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_memory.h>
 
+struct device;
+
 /**
  * Initialize pool allocator.
  */
@@ -60,11 +61,10 @@ extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
 /**
  * Output the state of pools to debugfs file
  */
-/* XXXKIB
 extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-*/
 
-#ifdef CONFIG_SWIOTLB
+
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
  */
@@ -92,12 +92,19 @@ static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
 
 static inline void ttm_dma_page_alloc_fini(void) { return; }
 
-/* XXXKIB
 static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 {
        return 0;
 }
-*/
+static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+                                  struct device *dev)
+{
+       return -ENOMEM;
+}
+static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+                                     struct device *dev)
+{
+}
 #endif
 
 #endif
sys/dev/drm/include/drm/ttm/ttm_placement.h
index f09e283..8ed44f9 100644
@@ -27,7 +27,6 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_placement.h 247835 2013-03-05 09:49:34Z kib $ */
 
 #ifndef _TTM_PLACEMENT_H_
 #define _TTM_PLACEMENT_H_
sys/dev/drm/include/linux/mm.h
index d343e60..cd5e71a 100644
@@ -187,4 +187,8 @@ unmap_mapping_range(struct address_space *mapping,
 {
 }
 
+#define VM_SHARED      0x00000008
+
+#define VM_PFNMAP      0x00000400
+
 #endif /* _LINUX_MM_H_ */
sys/dev/drm/radeon/radeon_ttm.c
index 5d6c8be..d8605ee 100644
@@ -745,7 +745,13 @@ int radeon_ttm_init(struct radeon_device *rdev)
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               rdev->mman.bo_global_ref.ref.object,
-                              &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+                              &radeon_bo_driver,
+#ifdef __DragonFly__
+                              NULL,
+#else
+                              rdev->ddev->anon_inode->i_mapping,
+#endif
+                              DRM_FILE_PAGE_OFFSET,
                               rdev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
sys/dev/drm/ttm/ttm_agp_backend.c
index 752fca9..258cbfe 100644
@@ -27,8 +27,6 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  *          Keith Packard.
- *
- * $FreeBSD: head/sys/dev/drm2/ttm/ttm_agp_backend.c 247835 2013-03-05 09:49:34Z kib $
  */
 
 #define pr_fmt(fmt) "[TTM] " fmt
 #include <drm/ttm/ttm_page_alloc.h>
 #ifdef TTM_HAS_AGP
 #include <drm/ttm/ttm_placement.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/agp.h>
 
 struct ttm_agp_backend {
        struct ttm_tt ttm;
@@ -59,7 +62,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 
        mem->page_count = 0;
        for (i = 0; i < ttm->num_pages; i++) {
-               vm_page_t page = ttm->pages[i];
+               struct page *page = ttm->pages[i];
 
                if (!page)
                        page = ttm->dummy_read_page;
@@ -110,11 +113,11 @@ static struct ttm_backend_func ttm_agp_func = {
 struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
                                 device_t bridge,
                                 unsigned long size, uint32_t page_flags,
-                                vm_page_t dummy_read_page)
+                                struct page *dummy_read_page)
 {
        struct ttm_agp_backend *agp_be;
 
-       agp_be = kmalloc(sizeof(*agp_be), M_DRM, M_WAITOK | M_ZERO);
+       agp_be = kmalloc(sizeof(*agp_be), M_DRM, M_WAITOK);
        if (!agp_be)
                return NULL;
 
sys/dev/drm/ttm/ttm_bo.c
index a2166c7..fc90b7c 100644
@@ -45,7 +45,6 @@
 #define TTM_DEBUG(fmt, arg...)
 #define TTM_BO_HASH_ORDER 13
 
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -154,7 +153,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
        atomic_dec(&bo->glob->bo_count);
        if (bo->resv == &bo->ttm_resv)
                reservation_object_fini(&bo->ttm_resv);
-
+       mutex_destroy(&bo->wu_mutex);
        if (bo->destroy)
                bo->destroy(bo);
        else {
@@ -354,9 +353,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 moved:
        if (bo->evicted) {
-               ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-               if (ret)
-                       pr_err("Can not flush read caches\n");
+               if (bdev->driver->invalidate_caches) {
+                       ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+                       if (ret)
+                               pr_err("Can not flush read caches\n");
+               }
                bo->evicted = false;
        }
 
@@ -413,7 +414,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
        int ret;
 
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
-       ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+       ret = __ttm_bo_reserve(bo, false, true, false, 0);
 
        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        (void) ttm_bo_wait(bo, false, false, true);
@@ -432,8 +433,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
        lockmgr(&bdev->fence_lock, LK_RELEASE);
 
-       if (!ret)
-               ww_mutex_unlock(&bo->resv->lock);
+       if (!ret) {
+
+               /*
+                * Make NO_EVICT bos immediately available to
+                * shrinkers, now that they are queued for
+                * destruction.
+                */
+               if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+                       bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+                       ttm_bo_add_to_lru(bo);
+               }
+
+               __ttm_bo_unreserve(bo);
+       }
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -483,7 +496,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
 
-               ww_mutex_unlock(&bo->resv->lock);
+               __ttm_bo_unreserve(bo);
                lockmgr(&glob->lru_lock, LK_RELEASE);
 
                ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -503,7 +516,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                        return ret;
 
                lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
-               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+               ret = __ttm_bo_reserve(bo, false, true, false, 0);
 
                /*
                 * We raced, and lost, someone else holds the reservation now,
@@ -521,7 +534,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                lockmgr(&bdev->fence_lock, LK_RELEASE);
 
        if (ret || unlikely(list_empty(&bo->ddestroy))) {
-               ww_mutex_unlock(&bo->resv->lock);
+               __ttm_bo_unreserve(bo);
                lockmgr(&glob->lru_lock, LK_RELEASE);
                return ret;
        }
@@ -566,11 +579,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        kref_get(&nentry->list_kref);
                }
 
-               ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+               ret = __ttm_bo_reserve(entry, false, true, false, 0);
                if (remove_all && ret) {
                        lockmgr(&glob->lru_lock, LK_RELEASE);
-                       ret = ttm_bo_reserve_nolru(entry, false, false,
-                                                  false, 0);
+                       ret = __ttm_bo_reserve(entry, false, false,
+                                              false, 0);
                        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
                }
 
@@ -715,7 +728,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        list_for_each_entry(bo, &man->lru, lru) {
-               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+               ret = __ttm_bo_reserve(bo, false, true, false, 0);
                if (!ret)
                        break;
        }
@@ -1107,6 +1120,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        INIT_LIST_HEAD(&bo->io_reserve_lru);
+       lockinit(&bo->wu_mutex, "ttmbwm", 0, LK_CANRECURSE);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
@@ -1137,7 +1151,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        if (likely(!ret) &&
            (bo->type == ttm_bo_type_device ||
             bo->type == ttm_bo_type_sg))
-               ret = ttm_bo_setup_vm(bo);
+               ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+                                        bo->mem.num_pages);
 
        locked = ww_mutex_trylock(&bo->resv->lock);
        WARN_ON(!locked);
@@ -1405,9 +1420,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                }
        }
 
-       lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
+       mutex_lock(&glob->device_list_mutex);
        list_del(&bdev->device_list);
-       lockmgr(&glob->device_list_mutex, LK_RELEASE);
+       mutex_unlock(&glob->device_list_mutex);
 
        cancel_delayed_work_sync(&bdev->wq);
 
@@ -1431,6 +1446,7 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
+                      struct address_space *mapping,
                       uint64_t file_page_offset,
                       bool need_dma32)
 {
@@ -1456,14 +1472,14 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         * XXX DRAGONFLY - dev_mapping NULL atm, find other XXX DRAGONFLY
         * lines and fix when it no longer is in later API change.
         */
-       bdev->dev_mapping = NULL;
+       bdev->dev_mapping = mapping;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
        bdev->val_seq = 0;
        lockinit(&bdev->fence_lock, "ttmfnc", 0, 0);
-       lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
+       mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
-       lockmgr(&glob->device_list_mutex, LK_RELEASE);
+       mutex_unlock(&glob->device_list_mutex);
 
        return 0;
 out_no_sys:
@@ -1492,29 +1508,45 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
        return true;
 }
 
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+#ifdef __DragonFly__
+
+/*
+ * XXX DRAGONFLY - device_mapping not yet implemented so
+ * file_mapping is basically always NULL.  We have to properly
+ * release the mmap, etc.
+*/
+void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+                                     struct address_space *file_mapping)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
-       loff_t offset, holelen;
+       struct ttm_buffer_object *bo = container_of(node, struct ttm_buffer_object, vma_node);
 
-       if (!bdev->dev_mapping) {
-               /*
-                * XXX DRAGONFLY - device_mapping not yet implemented so
-                * dev_mapping is basically always NULL.  We have to properly
-                * release the mmap, etc.
-                */
-               ttm_bo_release_mmap(bo);
-               ttm_mem_io_free_vm(bo);
-               return;
-       }
+       if (drm_vma_node_has_offset(node))
+               unmap_mapping_range(file_mapping,
+                                   drm_vma_node_offset_addr(node),
+                                   drm_vma_node_size(node) << PAGE_SHIFT, 1);
+       ttm_bo_release_mmap(bo);
+}
+#endif
 
-       if (drm_vma_node_has_offset(&bo->vma_node)) {
-               offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node);
-               holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
 
-               unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-       }
-       ttm_bo_release_mmap(bo);        /* for DragonFly VM interface */
+       drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
        ttm_mem_io_free_vm(bo);
 }
 
@@ -1531,24 +1563,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-
-       return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
-                                 bo->mem.num_pages);
-}
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
@@ -1649,7 +1663,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        list_for_each_entry(bo, &glob->swap_lru, swap) {
-               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+               ret = __ttm_bo_reserve(bo, false, true, false, 0);
                if (!ret)
                        break;
        }
@@ -1716,7 +1730,7 @@ out:
         * already swapped buffer.
         */
 
-       ww_mutex_unlock(&bo->resv->lock);
+       __ttm_bo_unreserve(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
 }
@@ -1727,3 +1741,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
                ;
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+       int ret;
+
+       /*
+        * In the absence of a wait_unlocked API,
+        * Use the bo::wu_mutex to avoid triggering livelocks due to
+        * concurrent use of this function. Note that this use of
+        * bo::wu_mutex can go away if we change locking order to
+        * mmap_sem -> bo::reserve.
+        */
+       ret = mutex_lock_interruptible(&bo->wu_mutex);
+       if (unlikely(ret != 0))
+               return -ERESTARTSYS;
+       if (!ww_mutex_is_locked(&bo->resv->lock))
+               goto out_unlock;
+       ret = __ttm_bo_reserve(bo, true, false, false, NULL);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+       __ttm_bo_unreserve(bo);
+
+out_unlock:
+       mutex_unlock(&bo->wu_mutex);
+       return ret;
+}
sys/dev/drm/ttm/ttm_bo_manager.c
index c24ed8f..15ff1a9 100644
@@ -66,17 +66,16 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
-       /* not in yet ?
-       if (placement->flags & TTM_PL_FLAG_TOPDOWN)
+
+       if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
                aflags = DRM_MM_CREATE_TOP;
-       */
 
        lockmgr(&rman->lock, LK_EXCLUSIVE);
        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
-                                       mem->page_alignment, 0,
-                                       placement->fpfn, lpfn,
-                                       DRM_MM_SEARCH_BEST,
-                                       aflags);
+                                         mem->page_alignment, 0,
+                                         placement->fpfn, lpfn,
+                                         DRM_MM_SEARCH_BEST,
+                                         aflags);
        lockmgr(&rman->lock, LK_RELEASE);
 
        if (unlikely(ret)) {
@@ -85,6 +84,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                mem->mm_node = node;
                mem->start = node->start;
        }
+
        return 0;
 }
 
sys/dev/drm/ttm/ttm_bo_util.c
index 0dd82e1..6278586 100644
@@ -81,15 +81,10 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
        if (likely(man->io_reserve_fastpath))
                return 0;
 
-       if (interruptible) {
-               if (lockmgr(&man->io_reserve_mutex,
-                           LK_EXCLUSIVE | LK_SLEEPFAIL))
-                       return (-EINTR);
-               else
-                       return (0);
-       }
+       if (interruptible)
+               return mutex_lock_interruptible(&man->io_reserve_mutex);
 
-       lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
+       mutex_lock(&man->io_reserve_mutex);
        return 0;
 }
 EXPORT_SYMBOL(ttm_mem_io_lock);
@@ -99,7 +94,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
        if (likely(man->io_reserve_fastpath))
                return;
 
-       lockmgr(&man->io_reserve_mutex, LK_RELEASE);
+       mutex_unlock(&man->io_reserve_mutex);
 }
 EXPORT_SYMBOL(ttm_mem_io_unlock);
 
@@ -209,9 +204,10 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
-               addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
-                   mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
-                   VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
+               if (mem->placement & TTM_PL_FLAG_WC)
+                       addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+               else
+                       addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
@@ -231,7 +227,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *
        man = &bdev->man[mem->mem_type];
 
        if (virtual && mem->bus.addr == NULL)
-               pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
+               iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
@@ -246,8 +242,7 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 
        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
-               /* iowrite32(ioread32(srcP++), dstP++); */
-               *dstP++ = *srcP++;
+               iowrite32(ioread32(srcP++), dstP++);
        return 0;
 }
 
@@ -396,13 +391,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-               if (ret) {
-                       /* failing here, means keep old copy as-is */
-                       old_copy.mm_node = NULL;
+               if (ret)
                        goto out1;
-               }
        }
-       cpu_mfence();
+       mb();
 out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
@@ -489,7 +481,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);
 
-        /*
+       /*
         * Mirror ref from kref_init() for list_kref.
         */
        set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);
sys/dev/drm/ttm/ttm_bo_vm.c
index 8d2d7bf..c91dd6a 100644
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
+#if 0
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+                               struct vm_area_struct *vma,
+                               struct vm_fault *vmf)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       int ret = 0;
+
+       lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
+       if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+               goto out_unlock;
+
+       /*
+        * Quick non-stalling check for idle.
+        */
+       ret = ttm_bo_wait(bo, false, false, true);
+       if (likely(ret == 0))
+               goto out_unlock;
+
+       /*
+        * If possible, avoid waiting for GPU with mmap_sem
+        * held.
+        */
+       if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+               ret = VM_FAULT_RETRY;
+               if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+                       goto out_unlock;
+
+               up_read(&vma->vm_mm->mmap_sem);
+               (void) ttm_bo_wait(bo, false, true, false);
+               goto out_unlock;
+       }
+
+       /*
+        * Ordinary wait.
+        */
+       ret = ttm_bo_wait(bo, false, true, false);
+       if (unlikely(ret != 0))
+               ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+                       VM_FAULT_NOPAGE;
+
+out_unlock:
+       lockmgr(&bdev->fence_lock, LK_RELEASE);
+       return ret;
+}
+#endif
+
 /*
  * Always unstall on unexpected vm_page alias, fatal bus fault.
  * Set to 0 to stall, set to positive count to unstall N times,
@@ -75,27 +122,51 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
+       struct vm_area_struct cvma;
 
        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
-        * for reserve, and if it fails, retry the fault after scheduling.
+        * for reserve, and if it fails, retry the fault after waiting
+        * for the buffer to become unreserved.
         */
-
-       ret = ttm_bo_reserve(bo, true, true, false, 0);
+       ret = ttm_bo_reserve(bo, true, true, false, NULL);
        if (unlikely(ret != 0)) {
-               if (ret == -EBUSY)
-                       set_need_resched();
+               if (ret != -EBUSY)
+                       return VM_FAULT_NOPAGE;
+
+               if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+                       if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               up_read(&vma->vm_mm->mmap_sem);
+                               (void) ttm_bo_wait_unreserved(bo);
+                       }
+
+                       return VM_FAULT_RETRY;
+               }
+
+               /*
+                * If we'd want to change locking order to
+                * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+                * instead of retrying the fault...
+                */
                return VM_FAULT_NOPAGE;
        }
 
+       /*
+        * Refuse to fault imported pages. This should be handled
+        * (if at all) by redirecting mmap to the exporter.
+        */
+       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+               retval = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
-                       set_need_resched();
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
@@ -110,17 +181,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * move.
         */
 
-       lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
-       if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-               ret = ttm_bo_wait(bo, false, true, false);
-               lockmgr(&bdev->fence_lock, LK_RELEASE);
-               if (unlikely(ret != 0)) {
-                       retval = (ret != -ERESTARTSYS) ?
-                           VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-                       goto out_unlock;
-               }
-       } else
-               lockmgr(&bdev->fence_lock, LK_RELEASE);
+       ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+       if (unlikely(ret != 0)) {
+               retval = ret;
+               goto out_unlock;
+       }
 
        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
@@ -134,9 +199,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-       page_last = vma_pages(vma) +
-           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+               vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+       page_last = vma_pages(vma) + vma->vm_pgoff -
+               drm_vma_node_start(&bo->vma_node);
 
        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
@@ -144,26 +209,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /*
-        * Strictly, we're not allowed to modify vma->vm_page_prot here,
-        * since the mmap_sem is only held in read mode. However, we
-        * modify only the caching bits of vma->vm_page_prot and
-        * consider those bits protected by
-        * the bo->mutex, as we should be the only writers.
-        * There shouldn't really be any readers of these bits except
-        * within vm_insert_mixed()? fork?
-        *
-        * TODO: Add a list of vmas to the bo, and change the
-        * vma->vm_page_prot when the object changes caching policy, with
-        * the correct locks held.
+        * Make a local vma copy to modify the page_prot member
+        * and vm_flags if necessary. The vma parameter is protected
+        * by mmap_sem in write mode.
         */
+       cvma = *vma;
+       cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
        if (bo->mem.bus.is_iomem) {
-               vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-                                               vma->vm_page_prot);
+               cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                               cvma.vm_page_prot);
        } else {
                ttm = bo->ttm;
-               vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-                   vm_get_page_prot(vma->vm_flags) :
-                   ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+               if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+                       cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                       cvma.vm_page_prot);
 
                /* Allocate all page at once, most common usage */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -187,10 +247,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        } else if (unlikely(!page)) {
                                break;
                        }
+                       page->mapping = vma->vm_file->f_mapping;
+                       page->index = drm_vma_node_start(&bo->vma_node) +
+                               page_offset;
                        pfn = page_to_pfn(page);
                }
 
-               ret = vm_insert_mixed(vma, address, pfn);
+               if (vma->vm_flags & VM_MIXEDMAP)
+                       ret = vm_insert_mixed(&cvma, address, pfn);
+               else
+                       ret = vm_insert_pfn(&cvma, address, pfn);
+
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
@@ -222,6 +289,10 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;
 
+#if 0
+       WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+#endif
+
        (void)ttm_bo_reference(bo);
 }
 
@@ -292,7 +363,16 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
         */
 
        vma->vm_private_data = bo;
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+       /*
+        * We'd like to use VM_PFNMAP on shared mappings, where
+        * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+        * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+        * bad for performance. Until that has been sorted out, use
+        * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+        */
+       vma->vm_flags |= VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 out_unref:
        ttm_bo_unref(&bo);
@@ -307,165 +387,12 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
 
-
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
-                 const char __user *wbuf, char __user *rbuf, size_t count,
-                 loff_t *f_pos, bool write)
-{
-       struct ttm_buffer_object *bo;
-       struct ttm_bo_driver *driver;
-       struct ttm_bo_kmap_obj map;
-       unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
-       unsigned long kmap_offset;
-       unsigned long kmap_end;
-       unsigned long kmap_num;
-       size_t io_size;
-       unsigned int page_offset;
-       char *virtual;
-       int ret;
-       bool no_wait = false;
-       bool dummy;
-
-       bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
-       if (unlikely(bo == NULL))
-               return -EFAULT;
-
-       driver = bo->bdev->driver;
-       if (unlikely(!driver->verify_access)) {
-               ret = -EPERM;
-               goto out_unref;
-       }
-
-       ret = driver->verify_access(bo, filp);
-       if (unlikely(ret != 0))
-               goto out_unref;
-
-       kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
-       if (unlikely(kmap_offset >= bo->num_pages)) {
-               ret = -EFBIG;
-               goto out_unref;
-       }
-
-       page_offset = *f_pos & ~PAGE_MASK;
-       io_size = bo->num_pages - kmap_offset;
-       io_size = (io_size << PAGE_SHIFT) - page_offset;
-       if (count < io_size)
-               io_size = count;
-
-       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-       kmap_num = kmap_end - kmap_offset + 1;
-
-       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
-       switch (ret) {
-       case 0:
-               break;
-       case -EBUSY:
-               ret = -EAGAIN;
-               goto out_unref;
-       default:
-               goto out_unref;
-       }
-
-       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-       if (unlikely(ret != 0)) {
-               ttm_bo_unreserve(bo);
-               goto out_unref;
-       }
-
-       virtual = ttm_kmap_obj_virtual(&map, &dummy);
-       virtual += page_offset;
-
-       if (write)
-               ret = copy_from_user(virtual, wbuf, io_size);
-       else
-               ret = copy_to_user(rbuf, virtual, io_size);
-
-       ttm_bo_kunmap(&map);
-       ttm_bo_unreserve(bo);
-       ttm_bo_unref(&bo);
-
-       if (unlikely(ret != 0))
-               return -EFBIG;
-
-       *f_pos += io_size;
-
-       return io_size;
-out_unref:
-       ttm_bo_unref(&bo);
-       return ret;
-}
-
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
-                       char __user *rbuf, size_t count, loff_t *f_pos,
-                       bool write)
-{
-       struct ttm_bo_kmap_obj map;
-       unsigned long kmap_offset;
-       unsigned long kmap_end;
-       unsigned long kmap_num;
-       size_t io_size;
-       unsigned int page_offset;
-       char *virtual;
-       int ret;
-       bool no_wait = false;
-       bool dummy;
-
-       kmap_offset = (*f_pos >> PAGE_SHIFT);
-       if (unlikely(kmap_offset >= bo->num_pages))
-               return -EFBIG;
-
-       page_offset = *f_pos & ~PAGE_MASK;
-       io_size = bo->num_pages - kmap_offset;
-       io_size = (io_size << PAGE_SHIFT) - page_offset;
-       if (count < io_size)
-               io_size = count;
-
-       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-       kmap_num = kmap_end - kmap_offset + 1;
-
-       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
-       switch (ret) {
-       case 0:
-               break;
-       case -EBUSY:
-               return -EAGAIN;
-       default:
-               return ret;
-       }
-
-       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-       if (unlikely(ret != 0)) {
-               ttm_bo_unreserve(bo);
-               return ret;
-       }
-
-       virtual = ttm_kmap_obj_virtual(&map, &dummy);
-       virtual += page_offset;
-
-       if (write)
-               ret = copy_from_user(virtual, wbuf, io_size);
-       else
-               ret = copy_to_user(rbuf, virtual, io_size);
-
-       ttm_bo_kunmap(&map);
-       ttm_bo_unreserve(bo);
-       ttm_bo_unref(&bo);
-
-       if (unlikely(ret != 0))
-               return ret;
-
-       *f_pos += io_size;
-
-       return io_size;
-}
-
 /*
  * DragonFlyBSD Interface
  */
@@ -773,6 +700,9 @@ ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
 }
 EXPORT_SYMBOL(ttm_bo_mmap_single);
 
+#ifdef __DragonFly__
+void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
+
 void
 ttm_bo_release_mmap(struct ttm_buffer_object *bo)
 {
@@ -795,6 +725,7 @@ ttm_bo_release_mmap(struct ttm_buffer_object *bo)
 
        vm_object_deallocate(vm_obj);
 }
+#endif
 
 #if 0
 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
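
Note the corrected page_offset arithmetic above: the buffer-relative page is now the fault offset within the vma plus vma->vm_pgoff minus drm_vma_node_start(&bo->vma_node). For example, if the bo's offset node starts at page 0x100 and userspace maps the object starting two pages in (vm_pgoff == 0x102), a fault three pages past vm_start must hit bo page 5: the new expression gives 3 + 0x102 - 0x100 = 5, while the old one gave 3 + 0x100 - 0x102 = 1.
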
sys/dev/drm/ttm/ttm_execbuf_util.c
index 1ec137d..c956c54 100644
@@ -32,8 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
-                                             struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
        struct ttm_validate_buffer *entry;
 
@@ -47,7 +46,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list,
                        ttm_bo_add_to_lru(bo);
                        entry->removed = false;
                }
-               ww_mutex_unlock(&bo->resv->lock);
+               __ttm_bo_unreserve(bo);
        }
 }
 
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
-       ttm_eu_backoff_reservation_locked(list, ticket);
-       ww_acquire_fini(ticket);
+       ttm_eu_backoff_reservation_locked(list);
+       if (ticket)
+               ww_acquire_fini(ticket);
        lockmgr(&glob->lru_lock, LK_RELEASE);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,8 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
 
-       ww_acquire_init(ticket, &reservation_ww_class);
-
+       if (ticket)
+               ww_acquire_init(ticket, &reservation_ww_class);
 retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
@@ -140,16 +140,17 @@ retry:
                if (entry->reserved)
                        continue;
 
-               ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
-
+               ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+                                      ticket);
 
                if (ret == -EDEADLK) {
                        /* uh oh, we lost out, drop every reservation and try
                         * to only reserve this buffer, then start over if
                         * this succeeds.
                         */
+                       BUG_ON(ticket == NULL);
                        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
-                       ttm_eu_backoff_reservation_locked(list, ticket);
+                       ttm_eu_backoff_reservation_locked(list);
                        lockmgr(&glob->lru_lock, LK_RELEASE);
                        ttm_eu_list_ref_sub(list);
                        ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -176,7 +177,8 @@ retry:
                }
        }
 
-       ww_acquire_done(ticket);
+       if (ticket)
+               ww_acquire_done(ticket);
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        ttm_eu_del_from_lru_locked(list);
        lockmgr(&glob->lru_lock, LK_RELEASE);
@@ -185,12 +187,14 @@ retry:
 
 err:
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
-       ttm_eu_backoff_reservation_locked(list, ticket);
+       ttm_eu_backoff_reservation_locked(list);
        lockmgr(&glob->lru_lock, LK_RELEASE);
        ttm_eu_list_ref_sub(list);
 err_fini:
-       ww_acquire_done(ticket);
-       ww_acquire_fini(ticket);
+       if (ticket) {
+               ww_acquire_done(ticket);
+               ww_acquire_fini(ticket);
+       }
        return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -220,12 +224,13 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_add_to_lru(bo);
-               ww_mutex_unlock(&bo->resv->lock);
+               __ttm_bo_unreserve(bo);
                entry->reserved = false;
        }
        lockmgr(&bdev->fence_lock, LK_RELEASE);
        lockmgr(&glob->lru_lock, LK_RELEASE);
-       ww_acquire_fini(ticket);
+       if (ticket)
+               ww_acquire_fini(ticket);
 
        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
sys/dev/drm/ttm/ttm_page_alloc.c
index bdf74a3..200559d 100644
@@ -117,6 +117,7 @@ struct ttm_pool_opts {
  **/
 struct ttm_pool_manager {
        struct kobject          kobj;
+       struct shrinker         mm_shrink;
        eventhandler_tag lowmem_handler;
        struct ttm_pool_opts    options;
 
@@ -388,27 +389,26 @@ out:
        return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-       unsigned i;
-       int total = 0;
-       for (i = 0; i < NUM_POOLS; ++i)
-               total += _manager->pools[i].npages;
-
-       return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  That means
+ * this can deadlock when called with a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(void *arg)
+static unsigned long
+ttm_pool_shrink_scan(void *arg)
 {
-       static unsigned int start_pool = 0;
+       static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
-       unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
+       unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;
        int shrink_pages = 100; /* XXXKIB */
+       unsigned long freed = 0;
 
        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
@@ -418,15 +418,29 @@ static int ttm_pool_mm_shrink(void *arg)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
+               freed += nr_free - shrink_pages;
        }
-       /* return estimated number of unused pages in pool */
-       return ttm_pool_get_num_unused_pages();
+       return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       unsigned i;
+       unsigned long count = 0;
+
+       for (i = 0; i < NUM_POOLS; ++i)
+               count += _manager->pools[i].npages;
+
+       return count;
 }
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
+       manager->mm_shrink.count_objects = ttm_pool_shrink_count;
        manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
-           ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
+           ttm_pool_shrink_scan, manager, EVENTHANDLER_PRI_ANY);
 }
 
 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
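
For reference, upstream Linux 3.16 wires both new callbacks into the pool manager's struct shrinker instead of the vm_lowmem eventhandler kept here (there ttm_pool_shrink_scan() takes (struct shrinker *, struct shrink_control *) rather than (void *)):

	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects  = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
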
sys/dev/drm/ttm/ttm_tt.c
index aefc968..a4a8a07 100644
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/*
- * Copyright (c) 2013 The FreeBSD Foundation
- * All rights reserved.
- *
- * Portions of this software were developed by Konstantin Belousov
- * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
- *
- * $FreeBSD: head/sys/dev/drm2/ttm/ttm_tt.c 251452 2013-06-06 06:17:20Z alc $
- */
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/drmP.h>
-
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
 #include <linux/export.h>
 #include <drm/drm_mem_util.h>
 #include <drm/ttm/ttm_module.h>
@@ -48,8 +44,6 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>
 
-#include <vm/vm_page2.h>
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  */
@@ -65,19 +59,26 @@ static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
                                            sizeof(*ttm->dma_address));
 }
 
+#ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
 {
+       int ret = 0;
 
-       /* XXXKIB our VM does not need this. */
 #if 0
+       if (PageHighMem(p))
+               return 0;
+#endif
+
        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
-               pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
+
+               ret = set_pages_wb(p, 1);
+               if (ret)
+                       return ret;
        }
-#endif
 
        if (c_new == tt_wc)
                pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
@@ -86,6 +87,14 @@ static inline int ttm_tt_set_page_caching(struct page *p,
 
        return (0);
 }
+#else /* CONFIG_X86 */
+static inline int ttm_tt_set_page_caching(struct page *p,
+                                         enum ttm_caching_state c_old,
+                                         enum ttm_caching_state c_new)
+{
+       return 0;
+}
+#endif /* CONFIG_X86 */
 
 /*
  * Change caching policy for the linear kernel map
@@ -162,9 +171,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
                ttm_tt_unbind(ttm);
        }
 
-       if (likely(ttm->pages != NULL)) {
-               ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-       }
+       if (ttm->state == tt_unbound)
+               ttm_tt_unpopulate(ttm);
 
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
@@ -279,37 +287,38 @@ EXPORT_SYMBOL(ttm_tt_bind);
 int ttm_tt_swapin(struct ttm_tt *ttm)
 {
        vm_object_t obj;
-       vm_page_t from_page, to_page;
-       int i, ret, rv;
+       struct page *from_page;
+       struct page *to_page;
+       int i;
+       int ret = -ENOMEM;
 
        obj = ttm->swap_storage;
 
        VM_OBJECT_LOCK(obj);
        vm_object_pip_add(obj, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
-               from_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
+               from_page = (struct page *)vm_page_grab(obj, i, VM_ALLOC_NORMAL |
                                                 VM_ALLOC_RETRY);
-               if (from_page->valid != VM_PAGE_BITS_ALL) {
+               if (((struct vm_page *)from_page)->valid != VM_PAGE_BITS_ALL) {
                        if (vm_pager_has_page(obj, i)) {
-                               rv = vm_pager_get_page(obj, &from_page, 1);
-                               if (rv != VM_PAGER_OK) {
-                                       vm_page_free(from_page);
+                               if (vm_pager_get_page(obj, (struct vm_page **)&from_page, 1) != VM_PAGER_OK) {
+                                       vm_page_free((struct vm_page *)from_page);
                                        ret = -EIO;
-                                       goto err_ret;
+                                       goto out_err;
                                }
                        } else {
-                               vm_page_zero_invalid(from_page, TRUE);
+                               vm_page_zero_invalid((struct vm_page *)from_page, TRUE);
                        }
                }
-               to_page = (struct vm_page *)ttm->pages[i];
+               to_page = ttm->pages[i];
                if (unlikely(to_page == NULL)) {
-                       ret = -ENOMEM;
-                       vm_page_wakeup(from_page);
-                       goto err_ret;
+                       vm_page_wakeup((struct vm_page *)from_page);
+                       goto out_err;
                }
-               pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
-                              VM_PAGE_TO_PHYS(to_page));
-               vm_page_wakeup(from_page);
+
+               pmap_copy_page(VM_PAGE_TO_PHYS((struct vm_page *)from_page),
+                              VM_PAGE_TO_PHYS((struct vm_page *)to_page));
+               vm_page_wakeup((struct vm_page *)from_page);
        }
        vm_object_pip_wakeup(obj);
        VM_OBJECT_UNLOCK(obj);
@@ -318,12 +327,12 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
                vm_object_deallocate(obj);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
-       return (0);
 
-err_ret:
+       return 0;
+out_err:
        vm_object_pip_wakeup(obj);
        VM_OBJECT_UNLOCK(obj);
-       return (ret);
+       return ret;
 }
 
 int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
@@ -362,7 +371,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
        vm_object_pip_wakeup(obj);
        VM_OBJECT_UNLOCK(obj);
 
-       ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+       ttm_tt_unpopulate(ttm);
        ttm->swap_storage = obj;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
@@ -370,3 +379,28 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
 
        return 0;
 }
+
+static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
+{
+#if 0
+       pgoff_t i;
+       struct page **page = ttm->pages;
+
+       if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+               return;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               (*page)->mapping = NULL;
+               (*page++)->index = 0;
+       }
+#endif
+}
+
+void ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       if (ttm->state == tt_unpopulated)
+               return;
+
+       ttm_tt_clear_mapping(ttm);
+       ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+}
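
ttm_tt_unpopulate() is now the single entry point used by ttm_tt_destroy() and ttm_tt_swapout(): it clears the (currently #if 0'd) page->mapping bookkeeping and then calls the driver's unpopulate hook. A driver backed by the generic page pool can keep that hook trivial; a minimal sketch:

	static void example_driver_tt_unpopulate(struct ttm_tt *ttm)
	{
		/* release the pages handed out by ttm_pool_populate() */
		ttm_pool_unpopulate(ttm);
	}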