drm/ttm: convert to unified vma offset manager
author    Matthew Dillon <dillon@apollo.backplane.com>
Sat, 9 Mar 2019 19:09:14 +0000 (11:09 -0800)
committer François Tigeot <ftigeot@wolfpond.org>
Tue, 12 Mar 2019 18:45:20 +0000 (19:45 +0100)
* Add TTM_BO_PRIV_FLAG_ACTIVE.

* Changes so we can use more of the linux ttm_bo_vm.c code.  Fake
  struct vm_fault, fake struct vm_operations_struct, and adjust
  struct vm_area_struct.  Also add related flags.

  The VM interface to DragonFlyBSD is now (mostly) a wrapper around
  the linux code.
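
  As a rough illustration (a minimal sketch, not the actual ttm_bo_vm.c
  code; the helper name and the errno mapping are hypothetical), the
  DragonFly side can now build a fake struct vm_fault and dispatch into
  the Linux-style handler through vma->vm_ops:

	static int
	dfly_fault_wrapper(struct vm_area_struct *vma, vm_offset_t va)
	{
		struct vm_fault vmf;
		int ret;

		vmf.virtual_address = va;
		ret = vma->vm_ops->fault(vma, &vmf); /* linux fault handler */
		if (ret & VM_FAULT_NOPAGE)	/* page was installed */
			return (0);
		return ((ret & VM_FAULT_OOM) ? ENOMEM : EFAULT);
	}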

* The linux red/black tree code uses some of the DFly RB macros.
  This means that struct rb_root must be compatible.  Add missing
  fields.  This fixes RB_REMOVE breakage due to it trying to iterate
  rbh_inprog.

* Add set_need_resched() (empty)

* radeon_bo_is_reserved() is no longer applicable and has been removed,
  allowing us to avoid implementing ttm_bo_is_reserved().  Note that
  linux-current does not have these functions.

* Refactor radeon_do_test_moves() to reflect the linux code a bit
  better.  This fixes a few error paths.

* radeon_verify_access() remains empty.  We need the struct file
  (which we don't have) to implement it.

* Make some adjustments to ttm_mem_type_from_place() and other
  API functions which use struct ttm_place instead of uint32_t.
  This better reflects the linux code and fixes compile-time breakage
  due to the partial API patch.

  Make other struct ttm_place adjustments.
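
  For reference, a minimal sketch (flag names from ttm_placement.h,
  values purely illustrative) of how a driver fills the ttm_place /
  ttm_placement pair under the adjusted API:

	static const struct ttm_place vram_place = {
		.fpfn = 0,
		.lpfn = 0,	/* 0 means no upper page-frame limit */
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
	};

	static const struct ttm_placement vram_placement = {
		.fpfn = 0,
		.lpfn = 0,
		.num_placement = 1,
		.placement = &vram_place,
		.num_busy_placement = 1,
		.busy_placement = &vram_place,
	};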

* bdev->dev_mapping is broken (the partial API update is still WIP) and is
  still always NULL because ttm_bo_device_init() does not yet initialize
  it (needs an argument passed from the chipset code).

* Refactor ttm_bo_man_get_node() to fix compile issues.

* Completely rewrite the DragonFly API interfacing code in ttm_bo_vm.c.
  Hopefully the fault code is now far, far more robust than it was before.

  Add debug code to check for duplicate vm_page insertions, in case
  we hit that panic again (it had problems due to bdev->dev_mapping being
  NULL before that issue was tracked down).
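
  A minimal sketch of the kind of duplicate-insertion check meant here
  (the helper name and its exact placement in the fault path are
  illustrative, not the actual code):

	static void
	ttm_debug_check_dup_insert(vm_object_t obj, vm_pindex_t pindex,
	    vm_page_t m)
	{
		vm_page_t old;

		old = vm_page_lookup(obj, pindex);	/* already inserted? */
		if (old != NULL && old != m)
			panic("ttm_bo_vm: duplicate vm_page at index %ju",
			      (uintmax_t)pindex);
	}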

17 files changed:
sys/dev/drm/drm_drv.c
sys/dev/drm/drm_mm.c
sys/dev/drm/include/drm/drmP.h
sys/dev/drm/include/drm/drm_mm.h
sys/dev/drm/include/drm/ttm/ttm_bo_api.h
sys/dev/drm/include/drm/ttm/ttm_bo_driver.h
sys/dev/drm/include/linux/mm.h
sys/dev/drm/include/linux/rbtree.h
sys/dev/drm/include/linux/sched.h
sys/dev/drm/radeon/radeon_object.c
sys/dev/drm/radeon/radeon_object.h
sys/dev/drm/radeon/radeon_test.c
sys/dev/drm/radeon/radeon_ttm.c
sys/dev/drm/ttm/ttm_bo.c
sys/dev/drm/ttm/ttm_bo_manager.c
sys/dev/drm/ttm/ttm_bo_util.c
sys/dev/drm/ttm/ttm_bo_vm.c

index a81b1c1..91fed02 100644
@@ -1251,8 +1251,7 @@ drm_mmap_single(struct dev_mmap_single_args *ap)
 
        dev = drm_get_device_from_kdev(kdev);
        if (dev->drm_ttm_bdev != NULL) {
-               return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
-                   obj_res, nprot));
+               return (ttm_bo_mmap_single(dev, offset, size, obj_res, nprot));
        } else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
                return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
        } else {
index 451dc86..776ade8 100644
 
 #define MM_UNUSED_TARGET 4
 
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
-       struct drm_mm_node *child;
-
-       if (atomic)
-               child = kzalloc(sizeof(*child), GFP_ATOMIC);
-       else
-               child = kzalloc(sizeof(*child), GFP_KERNEL);
-
-       if (unlikely(child == NULL)) {
-               spin_lock(&mm->unused_lock);
-               if (list_empty(&mm->unused_nodes))
-                       child = NULL;
-               else {
-                       child =
-                           list_entry(mm->unused_nodes.next,
-                                      struct drm_mm_node, node_list);
-                       list_del(&child->node_list);
-                       --mm->num_unused;
-               }
-               spin_unlock(&mm->unused_lock);
-       }
-       return child;
-}
-
-int drm_mm_pre_get(struct drm_mm *mm)
-{
-       struct drm_mm_node *node;
-
-       spin_lock(&mm->unused_lock);
-       while (mm->num_unused < MM_UNUSED_TARGET) {
-               spin_unlock(&mm->unused_lock);
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
-               spin_lock(&mm->unused_lock);
-
-               if (unlikely(node == NULL)) {
-                       int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
-                       spin_unlock(&mm->unused_lock);
-                       return ret;
-               }
-               ++mm->num_unused;
-               list_add_tail(&node->node_list, &mm->unused_nodes);
-       }
-       spin_unlock(&mm->unused_lock);
-       return 0;
-}
-
 /**
  * DOC: Overview
  *
@@ -139,6 +92,11 @@ int drm_mm_pre_get(struct drm_mm *mm)
  * some basic allocator dumpers for debugging.
  */
 
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+                                               u64 size,
+                                               unsigned alignment,
+                                               unsigned long color,
+                                               enum drm_mm_search_flags flags);
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                u64 size,
                                                unsigned alignment,
@@ -258,23 +216,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
-                                            unsigned long size,
-                                            unsigned alignment,
-                                            unsigned long color,
-                                            int atomic)
-{
-       struct drm_mm_node *node;
-
-       node = drm_mm_kmalloc(hole_node->mm, atomic);
-       if (unlikely(node == NULL))
-               return NULL;
-
-       drm_mm_insert_helper(hole_node, node, size, alignment, color, DRM_MM_CREATE_DEFAULT);
-
-       return node;
-}
-
 /**
  * drm_mm_insert_node_generic - search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -492,7 +433,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
        return end >= start + size;
 }
 
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                      u64 size,
                                                      unsigned alignment,
                                                      unsigned long color,
index a5a005b..3925eb3 100644
@@ -1255,7 +1255,7 @@ extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 
 struct ttm_bo_device;
-int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
+int ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
     vm_size_t size, struct vm_object **obj_res, int nprot);
 struct ttm_buffer_object;
 void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
index b2576d6..b15670f 100644
@@ -217,19 +217,6 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
-                                                   unsigned long size,
-                                                   unsigned alignment,
-                                                   unsigned long color,
-                                                   int atomic);
-
-static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-                                                         unsigned long size,
-                                                         unsigned alignment)
-{
-       return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
-}
-
 int drm_mm_insert_node_generic(struct drm_mm *mm,
                               struct drm_mm_node *node,
                               u64 size,
@@ -305,22 +292,6 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                               u64 size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               enum drm_mm_search_flags flags);
-
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-                                                    unsigned long size,
-                                                    unsigned alignment,
-                                                    enum drm_mm_search_flags flags)
-{
-       return drm_mm_search_free_generic(mm,size, alignment, 0, flags);
-}
-
-extern int drm_mm_pre_get(struct drm_mm *mm);
-
 void drm_mm_remove_node(struct drm_mm_node *node);
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 void drm_mm_init(struct drm_mm *mm,
index 0576ca2..a5a1000 100644
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_api.h 247835 2013-03-05 09:49:34Z kib $ */
 
 #ifndef _TTM_BO_API_H_
 #define _TTM_BO_API_H_
 
 #include <drm/drmP.h>
 #include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
 #include <linux/reservation.h>
 
 struct ttm_bo_device;
@@ -47,9 +49,9 @@ struct drm_mm_node;
 /**
  * struct ttm_place
  *
- * @fpfn:      first valid page frame number to put the object
- * @lpfn:      last valid page frame number to put the object
- * @flags:     memory domain and caching flags for the object
+ * @fpfn:       first valid page frame number to put the object
+ * @lpfn:       last valid page frame number to put the object
+ * @flags:      memory domain and caching flags for the object
  *
  * Structure indicating a possible place to put an object.
  */
@@ -62,6 +64,8 @@ struct ttm_place {
 /**
  * struct ttm_placement
  *
+ * @fpfn:              first valid page frame number to put the object
+ * @lpfn:              last valid page frame number to put the object
  * @num_placement:     number of preferred placements
  * @placement:         preferred placements
  * @num_busy_placement:        number of preferred placements when need to evict buffer
@@ -70,9 +74,11 @@ struct ttm_place {
  * Structure indicating the placement you request for an object.
  */
 struct ttm_placement {
-       unsigned                num_placement;
-       const struct ttm_place  *placement;
-       unsigned                num_busy_placement;
+       unsigned        fpfn;
+       unsigned        lpfn;
+       unsigned        num_placement;
+       const struct ttm_place *placement;
+       unsigned        num_busy_placement;
        const struct ttm_place  *busy_placement;
 };
 
@@ -154,7 +160,6 @@ struct ttm_tt;
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
- * @addr_space_offset: Address space offset.
  * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is put on the delayed delete list.
@@ -163,7 +168,6 @@ struct ttm_tt;
  * Lru lists may keep one refcount, the delayed delete list, and kref != 0
  * keeps one refcount. When this refcount reaches zero,
  * the object is destroyed.
- * @event_queue: Queue for processes waiting on buffer object status change.
  * @mem: structure describing current placement.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -174,16 +178,9 @@ struct ttm_tt;
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @val_seq: Sequence of the validation holding the @reserved lock.
- * Used to avoid starvation when many processes compete to validate the
- * buffer. This member is protected by the bo_device::lru_lock.
- * @seq_valid: The value of @val_seq is valid. This value is protected by
- * the bo_device::lru_lock.
- * @reserved: Deadlock-free lock used for synchronization state transitions.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
- * @vm_rb: Rb node for the vm rb tree.
- * @vm_node: Address space manager node.
+ * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -210,7 +207,6 @@ struct ttm_buffer_object {
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
        unsigned long num_pages;
-       uint64_t addr_space_offset;
        size_t acc_size;
 
        /**
@@ -221,7 +217,7 @@ struct ttm_buffer_object {
        struct kref list_kref;
 
        /**
-        * Members protected by the bo::reserved lock.
+        * Members protected by the bo::resv::reserved lock.
         */
 
        struct ttm_mem_reg mem;
@@ -254,13 +250,8 @@ struct ttm_buffer_object {
        void *sync_obj;
        unsigned long priv_flags;
 
-       /**
-        * Members protected by the bdev::vm_lock
-        */
-
-       RB_ENTRY(ttm_buffer_object) vm_rb;
-       struct drm_mm_node *vm_node;
-
+       RB_ENTRY(ttm_buffer_object) vm_rb;      /* DragonFly */
+       struct drm_vma_offset_node vma_node;
 
        /**
         * Special members that are protected by the reserve lock
@@ -508,12 +499,13 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
                        void (*destroy) (struct ttm_buffer_object *));
 
 /**
- * ttm_bo_create
+ * ttm_bo_synccpu_object_init
  *
  * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
  * @size: Requested size of buffer object.
  * @type: Requested type of buffer object.
- * @placement: Initial placement.
+ * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
  * @interruptible: If needing to sleep while waiting for GPU resources,
  * sleep interruptible.
@@ -541,6 +533,20 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
                                struct vm_object *persistent_swap_storage,
                                struct ttm_buffer_object **p_bo);
 
+/**
+ * ttm_bo_check_placement
+ *
+ * @bo:                the buffer object.
+ * @placement: placements
+ *
+ * Performs minimal validity checking on an intended change of
+ * placement flags.
+ * Returns
+ * -EINVAL: Intended change is invalid or not allowed.
+ */
+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+                                       struct ttm_placement *placement);
+
 /**
  * ttm_bo_init_mm
  *
@@ -669,10 +675,9 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
  * if the fbdev address space is to be backed by a bo.
  */
 
-/* XXXKIB
 extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
                          struct ttm_buffer_object *bo);
-*/
+
 /**
  * ttm_bo_mmap - mmap out of the ttm device address space.
  *
@@ -683,10 +688,10 @@ extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
  * This function is intended to be called by the device mmap method.
  * if the device address space is to be backed by the bo manager.
  */
-/* XXXKIB
+
 extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                       struct ttm_bo_device *bdev);
-*/
+
 /**
  * ttm_bo_io
  *
@@ -709,23 +714,14 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
  */
 
 extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
-                        const char *wbuf, char *rbuf,
-                        size_t count, off_t *f_pos, bool write);
+                        const char __user *wbuf, char __user *rbuf,
+                        size_t count, loff_t *f_pos, bool write);
 
-extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+extern ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo,
+                        const char __user *wbuf,
+                       char __user *rbuf, size_t count, loff_t *f_pos,
+                       bool write);
 
-/**
- * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
- *
- * @bo:     The buffer object to check.
- *
- * This function returns an indication if a bo is reserved or not, and should
- * only be used to print an error when it is not from incorrect api usage, since
- * there's no guarantee that it is the caller that is holding the reservation.
- */
-static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
-{
-       return ww_mutex_is_locked(&bo->resv->lock);
-}
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
 #endif
index 89b3489..d19ced8 100644
 #ifndef _TTM_BO_DRIVER_H_
 #define _TTM_BO_DRIVER_H_
 
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_memory.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_placement.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_global.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/spinlock.h>
 #include <linux/reservation.h>
 
-#include <sys/tree.h>
-
-/* XXX nasty hack, but does the job */
-#undef RB_ROOT
-#define        RB_ROOT(head)   (head)->rbh_root
-
 struct ttm_backend_func {
        /**
         * struct ttm_backend_func member bind
@@ -188,7 +182,6 @@ struct ttm_mem_type_manager_func {
         * @man: Pointer to a memory type manager.
         * @bo: Pointer to the buffer object we're allocating space for.
         * @placement: Placement details.
-        * @flags: Additional placement flags.
         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
         *
         * This function should allocate space in the memory type managed
@@ -212,7 +205,7 @@ struct ttm_mem_type_manager_func {
         */
        int  (*get_node)(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
-                        const struct ttm_place *place,
+                        struct ttm_placement *placement,
                         struct ttm_mem_reg *mem);
 
        /**
@@ -412,7 +405,6 @@ struct ttm_bo_driver {
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
-        * FreeBSD: use devfs_get_cdevpriv etc.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
@@ -420,7 +412,8 @@ struct ttm_bo_driver {
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
-       int (*verify_access) (struct ttm_buffer_object *bo);
+       int (*verify_access) (struct ttm_buffer_object *bo,
+                             struct file *filp);
 
        /**
         * In case a driver writer dislikes the TTM fence objects,
@@ -517,10 +510,10 @@ struct ttm_bo_global {
 
 #define TTM_NUM_MEM_TYPES 8
 
-#define TTM_BO_PRIV_FLAG_MOVING        0       /* Buffer object is moving and needs
+#define TTM_BO_PRIV_FLAG_MOVING  0     /* Buffer object is moving and needs
                                           idling before CPU mapping */
-#define TTM_BO_PRIV_FLAG_MAX   1
-#define TTM_BO_PRIV_FLAG_ACTIVE        2       /* Used for release sequencing */
+#define TTM_BO_PRIV_FLAG_ACTIVE         1
+#define TTM_BO_PRIV_FLAG_MAX 2
 /**
  * struct ttm_bo_device - Buffer object driver device-specific data.
  *
@@ -528,7 +521,7 @@ struct ttm_bo_global {
  * @man: An array of mem_type_managers.
  * @fence_lock: Protects the synchronizing members on *all* bos belonging
  * to this device.
- * @addr_space_mm: Range manager for the device address space.
+ * @vma_manager: Address space manager
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
@@ -546,15 +539,13 @@ struct ttm_bo_device {
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
-       struct lock vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        struct lock fence_lock;
+
        /*
-        * Protected by the vm lock.
+        * Protected by internal locks.
         */
-
-       RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
-       struct drm_mm addr_space_mm;
+       struct drm_vma_offset_manager vma_manager;
 
        /*
         * Protected by the global:lru lock.
@@ -662,6 +653,18 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
  */
 extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
+/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct page:s to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
+
 /**
  * ttm_tt_set_placement_caching:
  *
@@ -1039,7 +1042,7 @@ extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
 
-#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) && 0
+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <linux/agp_backend.h>
 
@@ -1065,11 +1068,11 @@ int ttm_agp_tt_populate(struct ttm_tt *ttm);
 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif
 
-
+/* required for DragonFly VM, see ttm/ttm_bo_vm.c */
+struct ttm_bo_device_buffer_objects;
 int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
         struct ttm_buffer_object *b);
 RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
-    ttm_bo_cmp_rb_tree_items);
-
+       ttm_bo_cmp_rb_tree_items);
 
 #endif
index 22e5c63..132ac03 100644
@@ -47,6 +47,8 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 
+struct vm_operations_struct;
+
 static inline struct page *
 nth_page(struct page *page, int n)
 {
@@ -55,12 +57,34 @@ nth_page(struct page *page, int n)
 
 #define PAGE_ALIGN(addr) round_page(addr)
 
+struct vm_fault {
+       uintptr_t       virtual_address;
+};
+
+#define VM_FAULT_NOPAGE                0x0001
+#define VM_FAULT_SIGBUS                0x0002
+#define VM_FAULT_OOM           0x0004
+
 struct vm_area_struct {
        vm_offset_t     vm_start;
        vm_offset_t     vm_end;
        vm_offset_t     vm_pgoff;
        vm_paddr_t      vm_pfn;         /* PFN For mmap. */
        vm_memattr_t    vm_page_prot;
+       void            *vm_private_data;
+       int             vm_flags;
+       const struct vm_operations_struct *vm_ops;
+};
+
+#define VM_DONTDUMP    0x0001
+#define VM_DONTEXPAND  0x0002
+#define VM_IO          0x0004
+#define VM_MIXEDMAP    0x0008
+
+struct vm_operations_struct {
+       int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+       void (*open)(struct vm_area_struct *vma);
+       void (*close)(struct vm_area_struct *vma);
 };
 
 /*
index a20a2b5..2075dad 100644
@@ -34,6 +34,7 @@
 #include <linux/rcupdate.h>
 
 #include <sys/tree.h>
+#include <sys/spinlock.h>
 
 struct rb_node {
        RB_ENTRY(rb_node)       __entry;
@@ -42,11 +43,12 @@ struct rb_node {
 #define        rb_right        __entry.rbe_right
 
 /*
- * We provide a false structure that has the same bit pattern as tree.h
- * presents so it matches the member names expected by linux.
+ * This must match enough of sys/tree.h so the macros still work.
  */
 struct rb_root {
-       struct  rb_node *rb_node;
+       struct  rb_node *rb_node;       /* only member under linux */
+       void    *rbh_inprog;            /* so we can use sys/tree macros */
+       struct spinlock rbh_spin;       /* so we can use sys/tree macros */
 };
 
 /*
index 75b96b5..3d60837 100644
@@ -145,4 +145,11 @@ send_sig(int sig, struct proc *p, int priv)
        return 0;
 }
 
+static inline void
+set_need_resched(void)
+{
+       /* do nothing for now */
+       /* used on ttm_bo_reserve failures */
+}
+
 #endif /* _LINUX_SCHED_H_ */
index b776613..e2ddd97 100644
@@ -556,9 +556,6 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
        int steal;
        int i;
 
-       KASSERT(radeon_bo_is_reserved(bo),
-           ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));
-
        if (!bo->tiling_flags)
                return 0;
 
@@ -683,8 +680,6 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
 {
-       KASSERT(radeon_bo_is_reserved(bo),
-           ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
@@ -694,8 +689,6 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
 {
-       KASSERT((radeon_bo_is_reserved(bo) || force_drop),
-           ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));
 
        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;
index 4cc094f..2b607e4 100644
@@ -79,11 +79,6 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
        return bo->tbo.num_pages << PAGE_SHIFT;
 }
 
-static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
-{
-       return ttm_bo_is_reserved(&bo->tbo);
-}
-
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 {
        return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
@@ -99,13 +94,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  * @bo:        radeon object for which we query the offset
  *
  * Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
index f09200c..8f9e1a7 100644
@@ -75,11 +75,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
        }
        r = radeon_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
-               goto out_cleanup;
+               goto out_unref;
        r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
-               goto out_cleanup;
+               goto out_unres;
        }
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
@@ -90,22 +90,22 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                                     RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean;
                }
 
                r = radeon_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
-                       goto out_cleanup;
+                       goto out_lclean_unref;
                r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unres;
                }
 
                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size);
@@ -121,13 +121,13 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                        r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                radeon_fence_unref(&fence);
@@ -135,7 +135,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                r = radeon_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
@@ -154,7 +154,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                                          ((uintptr_t)vram_addr - (uintptr_t)rdev->mc.vram_start +
                                           (uintptr_t)gtt_start - (uintptr_t)gtt_map));
                                radeon_bo_kunmap(vram_obj);
-                               goto out_cleanup;
+                               goto out_lclean_unpin;
                        }
                        *vram_start = vram_start;
                }
@@ -167,13 +167,13 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                        r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                radeon_fence_unref(&fence);
@@ -181,7 +181,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
-                       goto out_cleanup;
+                       goto out_lclean_unpin;
                }
 
                for (gtt_start = gtt_map, gtt_end = (void *)((uintptr_t)gtt_map + size),
@@ -200,7 +200,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                                          ((uintptr_t)gtt_addr - (uintptr_t)rdev->mc.gtt_start +
                                           (uintptr_t)vram_start - (uintptr_t)vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
-                               goto out_cleanup;
+                               goto out_lclean_unpin;
                        }
                }
 
@@ -208,28 +208,33 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 
                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         (uintmax_t)gtt_addr - rdev->mc.gtt_start);
+               continue;
+
+out_lclean_unpin:
+               radeon_bo_unpin(gtt_obj[i]);
+out_lclean_unres:
+               radeon_bo_unreserve(gtt_obj[i]);
+out_lclean_unref:
+               radeon_bo_unref(&gtt_obj[i]);
+out_lclean:
+               for (--i; i >= 0; --i) {
+                       radeon_bo_unpin(gtt_obj[i]);
+                       radeon_bo_unreserve(gtt_obj[i]);
+                       radeon_bo_unref(&gtt_obj[i]);
+               }
+               if (fence && !IS_ERR(fence))
+                       radeon_fence_unref(&fence);
+               break;
        }
 
+       radeon_bo_unpin(vram_obj);
+out_unres:
+       radeon_bo_unreserve(vram_obj);
+out_unref:
+       radeon_bo_unref(&vram_obj);
 out_cleanup:
-       if (vram_obj) {
-               if (radeon_bo_is_reserved(vram_obj)) {
-                       radeon_bo_unpin(vram_obj);
-                       radeon_bo_unreserve(vram_obj);
-               }
-               radeon_bo_unref(&vram_obj);
-       }
-       if (gtt_obj) {
-               for (i = 0; i < n; i++) {
-                       if (gtt_obj[i]) {
-                               if (radeon_bo_is_reserved(gtt_obj[i])) {
-                                       radeon_bo_unpin(gtt_obj[i]);
-                                       radeon_bo_unreserve(gtt_obj[i]);
-                               }
-                               radeon_bo_unref(&gtt_obj[i]);
-                       }
-               }
+       if (gtt_obj)
                kfree(gtt_obj);
-       }
        if (fence) {
                radeon_fence_unref(&fence);
        }
index 4ec5c91..5d6c8be 100644
@@ -207,9 +207,21 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
        *placement = rbo->placement;
 }
 
-static int radeon_verify_access(struct ttm_buffer_object *bo)
+static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *fp)
 {
+#if 0
+       struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+#endif
+
        return 0;
+
+#if 0
+       /* XXX needs radeon_gem_userptr_ioctl() and related infrastructure */
+       if (radeon_ttm_tt_has_userptr(bo->ttm))
+               return -EPERM;
+       return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+                                         fp->private_data);
+#endif
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
index aa06702..a2166c7 100644
@@ -41,8 +41,8 @@
 #include <linux/module.h>
 #include <linux/atomic.h>
 
-#define TTM_ASSERT_LOCKED(param)       do { } while (0)
-#define TTM_DEBUG(fmt, arg...)         do { } while (0)
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
 #define TTM_BO_HASH_ORDER 13
 
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
@@ -59,11 +59,12 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
 {
        int i;
 
-       for (i = 0; i <= TTM_PL_PRIV5; i++)
+       for (i = 0; i <= TTM_PL_PRIV5; i++) {
                if (place->flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
+       }
        return -EINVAL;
 }
 
@@ -75,7 +76,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
        pr_err("    use_type: %d\n", man->use_type);
        pr_err("    flags: 0x%08X\n", man->flags);
        pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
-       pr_err("    size: %ju\n", (uintmax_t)man->size);
+       pr_err("    size: %ju\n", man->size);
        pr_err("    available_caching: 0x%08X\n", man->available_caching);
        pr_err("    default_caching: 0x%08X\n", man->default_caching);
        if (mem_type != TTM_PL_SYSTEM)
@@ -108,7 +109,7 @@ static ssize_t ttm_bo_global_show(struct kobject *kobj,
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);
 
-       return ksnprintf(buffer, PAGE_SIZE, "%lu\n",
+       return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
 }
 
@@ -127,6 +128,7 @@ static struct kobj_type ttm_bo_glob_kobj_type  = {
        .default_attrs = ttm_bo_global_attrs
 };
 
+
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
        return 1 << (type);
@@ -222,9 +224,9 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
        int put_count;
 
        lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
-        put_count = ttm_bo_del_from_lru(bo);
-        lockmgr(&bo->glob->lru_lock, LK_RELEASE);
-        ttm_bo_list_ref_sub(bo, put_count, true);
+       put_count = ttm_bo_del_from_lru(bo);
+       lockmgr(&bo->glob->lru_lock, LK_RELEASE);
+       ttm_bo_list_ref_sub(bo, put_count, true);
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
@@ -430,21 +432,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
        lockmgr(&bdev->fence_lock, LK_RELEASE);
 
-       if (!ret) {
-
-               /*
-                * Make NO_EVICT bos immediately available to
-                * shrinkers, now that they are queued for
-                * destruction.
-                */
-               if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-                       bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
-                       ttm_bo_add_to_lru(bo);
-               }
-
-               if (!ret)
-                       ww_mutex_unlock(&bo->resv->lock);
-       }
+       if (!ret)
+               ww_mutex_unlock(&bo->resv->lock);
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -455,7 +444,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                driver->sync_obj_unref(&sync_obj);
        }
        schedule_delayed_work(&bdev->wq,
-                             ((hz / 100) < 1) ? 1 : hz / 100);
+                             ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
 
 /**
@@ -617,63 +606,31 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
 
        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
-                                     ((hz / 100) < 1) ? 1 : hz / 100);
+                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
 }
 
-/*
- * NOTE: bdev->vm_lock already held on call, this function release it.
- */
 static void ttm_bo_release(struct kref *kref)
 {
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
-       int release_active;
 
-       if (atomic_read(&bo->kref.refcount) > 0) {
-               lockmgr(&bdev->vm_lock, LK_RELEASE);
-               return;
-       }
-       if (likely(bo->vm_node != NULL)) {
-               RB_REMOVE(ttm_bo_device_buffer_objects,
-                               &bdev->addr_space_rb, bo);
-               drm_mm_put_block(bo->vm_node);
-               bo->vm_node = NULL;
-       }
-
-       /*
-        * Should we clean up our implied list_kref?  Because ttm_bo_release()
-        * can be called reentrantly due to races (this may not be true any
-        * more with the lock management changes in the deref), it is possible
-        * to get here twice, but there's only one list_kref ref to drop and
-        * in the other path 'bo' can be kfree()d by another thread the
-        * instant we release our lock.
-        */
-       release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
-       if (release_active) {
-               clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
-               lockmgr(&bdev->vm_lock, LK_RELEASE);
-               ttm_mem_io_lock(man, false);
-               ttm_mem_io_free_vm(bo);
-               ttm_mem_io_unlock(man);
-               ttm_bo_cleanup_refs_or_queue(bo);
-               kref_put(&bo->list_kref, ttm_bo_release_list);
-       } else {
-               lockmgr(&bdev->vm_lock, LK_RELEASE);
-       }
+       drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
+       ttm_mem_io_lock(man, false);
+       ttm_mem_io_free_vm(bo);
+       ttm_mem_io_unlock(man);
+       ttm_bo_cleanup_refs_or_queue(bo);
+       kref_put(&bo->list_kref, ttm_bo_release_list);
 }
 
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo = *p_bo;
-       struct ttm_bo_device *bdev = bo->bdev;
 
        *p_bo = NULL;
-       lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
-       if (kref_put(&bo->kref, ttm_bo_release) == 0)
-               lockmgr(&bdev->vm_lock, LK_RELEASE);
+       kref_put(&bo->kref, ttm_bo_release);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -687,7 +644,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 {
        if (resched)
                schedule_delayed_work(&bdev->wq,
-                                     ((hz / 100) < 1) ? 1 : hz / 100);
+                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
@@ -717,6 +674,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;
 
+       placement.fpfn = 0;
+       placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
@@ -804,7 +763,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
-                                       const struct ttm_place *place,
+                                       struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
                                        bool no_wait_gpu)
@@ -814,7 +773,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
        int ret;
 
        do {
-               ret = (*man->func->get_node)(man, bo, place, mem);
+               ret = (*man->func->get_node)(man, bo, placement, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
@@ -899,14 +858,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
-               const struct ttm_place *place = &placement->placement[i];
-
-               ret = ttm_mem_type_from_place(place, &mem_type);
+               ret = ttm_mem_type_from_place(&placement->placement[i],
+                                               &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
 
-               type_ok = ttm_bo_mt_compatible(man, mem_type, place,
+               type_ok = ttm_bo_mt_compatible(man,
+                                               mem_type,
+                                               &placement->placement[i],
                                                &cur_flags);
 
                if (!type_ok)
@@ -918,7 +878,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 * Use the access and other non-mapping-related flag bits from
                 * the memory placement flags to the current flags
                 */
-               ttm_flag_masked(&cur_flags, place->flags,
+               ttm_flag_masked(&cur_flags, placement->placement[i].flags,
                                ~TTM_PL_MASK_MEMTYPE);
 
                if (mem_type == TTM_PL_SYSTEM)
@@ -926,7 +886,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
                if (man->has_type && man->use_type) {
                        type_found = true;
-                       ret = (*man->func->get_node)(man, bo, place, mem);
+                       ret = (*man->func->get_node)(man, bo, placement, mem);
                        if (unlikely(ret))
                                return ret;
                }
@@ -944,15 +904,17 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                return -EINVAL;
 
        for (i = 0; i < placement->num_busy_placement; ++i) {
-               const struct ttm_place *place = &placement->busy_placement[i];
-
-               ret = ttm_mem_type_from_place(place, &mem_type);
+               ret = ttm_mem_type_from_place(&placement->busy_placement[i],
+                                               &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
-               if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+               if (!ttm_bo_mt_compatible(man,
+                                               mem_type,
+                                               &placement->busy_placement[i],
+                                               &cur_flags))
                        continue;
 
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -961,9 +923,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 * Use the access and other non-mapping-related flag bits from
                 * the memory placement flags to the current flags
                 */
-               ttm_flag_masked(&cur_flags, place->flags,
+               ttm_flag_masked(&cur_flags, placement->busy_placement[i].flags,
                                ~TTM_PL_MASK_MEMTYPE);
 
+
                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
@@ -971,7 +934,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        return 0;
                }
 
-               ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
+               ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                interruptible, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
@@ -1026,39 +989,24 @@ out_unlock:
        return ret;
 }
 
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
-                             struct ttm_mem_reg *mem,
-                             uint32_t *new_flags)
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
+                            struct ttm_mem_reg *mem)
 {
        int i;
 
-       for (i = 0; i < placement->num_placement; i++) {
-               const struct ttm_place *heap = &placement->placement[i];
-               if (mem->mm_node &&
-                   (mem->start < heap->fpfn ||
-                    (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
-                       continue;
-
-               *new_flags = heap->flags;
-               if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
-                   (*new_flags & mem->placement & TTM_PL_MASK_MEM))
-                       return true;
-       }
+       if (mem->mm_node && placement->lpfn != 0 &&
+           (mem->start < placement->fpfn ||
+            mem->start + mem->num_pages > placement->lpfn))
+               return -1;
 
-       for (i = 0; i < placement->num_busy_placement; i++) {
-               const struct ttm_place *heap = &placement->busy_placement[i];
-               if (mem->mm_node &&
-                   (mem->start < heap->fpfn ||
-                    (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
-                       continue;
-
-               *new_flags = heap->flags;
-               if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
-                   (*new_flags & mem->placement & TTM_PL_MASK_MEM))
-                       return true;
+       for (i = 0; i < placement->num_placement; i++) {
+               if ((placement->placement[i].flags & mem->placement &
+                       TTM_PL_MASK_CACHING) &&
+                       (placement->placement[i].flags & mem->placement &
+                       TTM_PL_MASK_MEM))
+                       return i;
        }
-
-       return false;
+       return -1;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1067,13 +1015,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
                        bool no_wait_gpu)
 {
        int ret;
-       uint32_t new_flags;
 
        lockdep_assert_held(&bo->resv->lock.base);
+       /* Check that range is valid */
+       if (placement->lpfn || placement->fpfn)
+               if (placement->fpfn > placement->lpfn ||
+                       (placement->lpfn - placement->fpfn) < bo->num_pages)
+                       return -EINVAL;
        /*
         * Check whether we need to move buffer.
         */
-       if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+       ret = ttm_bo_mem_compat(placement, &bo->mem);
+       if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible,
                                         no_wait_gpu);
                if (ret)
@@ -1083,7 +1036,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
                 * Use the access and other non-mapping-related flag bits from
                 * the compatible memory placement flags to the active flags
                 */
-               ttm_flag_masked(&bo->mem.placement, new_flags,
+               ttm_flag_masked(&bo->mem.placement,
+                               placement->placement[ret].flags,
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
@@ -1098,6 +1052,15 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_validate);
 
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement)
+{
+       BUG_ON((placement->fpfn || placement->lpfn) &&
+              (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
+
+       return 0;
+}
+
 int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
@@ -1163,22 +1126,18 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->resv = &bo->ttm_resv;
        reservation_object_init(bo->resv);
        atomic_inc(&bo->glob->bo_count);
+       drm_vma_node_reset(&bo->vma_node);
 
-       /*
-        * Mirror ref from kref_init() for list_kref.
-        */
-       set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
+       ret = ttm_bo_check_placement(bo, placement);
 
        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
-       if (bo->type == ttm_bo_type_device ||
-           bo->type == ttm_bo_type_sg) {
+       if (likely(!ret) &&
+           (bo->type == ttm_bo_type_device ||
+            bo->type == ttm_bo_type_sg))
                ret = ttm_bo_setup_vm(bo);
-               if (ret)
-                       goto out_err;
-       }
 
        locked = ww_mutex_trylock(&bo->resv->lock);
        WARN_ON(!locked);
@@ -1186,7 +1145,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        if (likely(!ret))
                ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-out_err:
        ttm_bo_unreserve(bo);
 
        if (unlikely(ret))
@@ -1238,7 +1196,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
        size_t acc_size;
        int ret;
 
-       *p_bo = NULL;
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(bo == NULL))
                return -ENOMEM;
@@ -1342,7 +1299,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        BUG_ON(man->has_type);
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
-       lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
+       lockinit(&man->io_reserve_mutex, "ttmior", 0, 0);
        INIT_LIST_HEAD(&man->io_reserve_lru);
 
        ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1392,8 +1349,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
        struct ttm_bo_global *glob = ref->object;
        int ret;
 
-       lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
-       lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
+       lockinit(&glob->device_list_mutex, "ttmdlm", 0, 0);
+       lockinit(&glob->lru_lock, "ttmlru", 0, 0);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
@@ -1448,9 +1405,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                }
        }
 
-       mutex_lock(&glob->device_list_mutex);
+       lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
        list_del(&bdev->device_list);
-       mutex_unlock(&glob->device_list_mutex);
+       lockmgr(&glob->device_list_mutex, LK_RELEASE);
 
        cancel_delayed_work_sync(&bdev->wq);
 
@@ -1465,10 +1422,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                TTM_DEBUG("Swap list was clean\n");
        lockmgr(&glob->lru_lock, LK_RELEASE);
 
-       BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-       lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
-       drm_mm_takedown(&bdev->addr_space_mm);
-       lockmgr(&bdev->vm_lock, LK_RELEASE);
+       drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
        return ret;
 }
@@ -1482,7 +1436,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 {
        int ret = -EINVAL;
 
-       lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
        bdev->driver = driver;
 
        memset(bdev->man, 0, sizeof(bdev->man));
@@ -1495,19 +1448,22 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        if (unlikely(ret != 0))
                goto out_no_sys;
 
-       RB_INIT(&bdev->addr_space_rb);
-       drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+       drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+                                   0x10000000);
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
+       /*
+        * XXX DRAGONFLY - dev_mapping NULL atm, find other XXX DRAGONFLY
+        * lines and fix when it no longer is in later API change.
+        */
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
        bdev->val_seq = 0;
-       lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
-       mutex_lock(&glob->device_list_mutex);
+       lockinit(&bdev->fence_lock, "ttmfnc", 0, 0);
+       lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
        list_add_tail(&bdev->device_list, &glob->device_list);
-       mutex_unlock(&glob->device_list_mutex);
+       lockmgr(&glob->device_list_mutex, LK_RELEASE);
 
        return 0;
 out_no_sys:
@@ -1538,8 +1494,27 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
+       struct ttm_bo_device *bdev = bo->bdev;
+       loff_t offset, holelen;
+
+       if (!bdev->dev_mapping) {
+               /*
+                * XXX DRAGONFLY - device_mapping not yet implemented so
+                * dev_mapping is basically always NULL.  We have to properly
+                * release the mmap, etc.
+                */
+               ttm_bo_release_mmap(bo);
+               ttm_mem_io_free_vm(bo);
+               return;
+       }
+
+       if (drm_vma_node_has_offset(&bo->vma_node)) {
+               offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node);
+               holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
 
-       ttm_bo_release_mmap(bo);
+               unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       }
+       ttm_bo_release_mmap(bo);        /* for DragonFly VM interface */
        ttm_mem_io_free_vm(bo);
 }
 
@@ -1556,14 +1531,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-
-       /* The caller acquired bdev->vm_lock. */
-       RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
-}
-
 /**
  * ttm_bo_setup_vm:
  *
@@ -1578,38 +1545,9 @@ static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       int ret;
-
-retry_pre_get:
-       ret = drm_mm_pre_get(&bdev->addr_space_mm);
-       if (unlikely(ret != 0))
-               return ret;
-
-       lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
-       bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-                                        bo->mem.num_pages, 0, 0);
 
-       if (unlikely(bo->vm_node == NULL)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-                                             bo->mem.num_pages, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               lockmgr(&bdev->vm_lock, LK_RELEASE);
-               goto retry_pre_get;
-       }
-
-       ttm_bo_vm_insert_rb(bo);
-       lockmgr(&bdev->vm_lock, LK_RELEASE);
-       bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-       return 0;
-out_unlock:
-       lockmgr(&bdev->vm_lock, LK_RELEASE);
-       return ret;
+       return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+                                 bo->mem.num_pages);
 }
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
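An aside on the conversion above: ttm_bo_setup_vm() now simply asks the per-device vma offset manager for a node instead of juggling its own RB tree and drm_mm.  The sketch below is an illustration only, not part of the patch; the manager init/destroy and drm_vma_offset_add() calls mirror the ones visible in this diff, while the drm_vma_offset_remove() pairing is an assumption here because the bo release path is not shown in these hunks.

    /*
     * Sketch (illustration only): lifecycle of a drm_vma_offset_node under
     * the unified vma offset manager.  drm_vma_offset_remove() is assumed
     * to be the matching teardown in the bo release path (not shown here).
     */
    #include <drm/drm_vma_manager.h>

    static int example_vma_offset_lifecycle(struct drm_vma_offset_manager *mgr,
                                            struct drm_vma_offset_node *node,
                                            unsigned long num_pages)
    {
            int ret;

            /* per-device: manage a window of mmap offsets, sized in pages */
            drm_vma_offset_manager_init(mgr, 0 /* file_page_offset */, 0x10000000);

            /* per-bo: allocate an offset range covering num_pages pages */
            ret = drm_vma_offset_add(mgr, node, num_pages);
            if (ret)
                    return ret;

            /* byte offset userspace would pass to mmap() for this bo */
            (void)drm_vma_node_offset_addr(node);

            /* per-bo teardown, assumed to live in the release path */
            drm_vma_offset_remove(mgr, node);

            /* per-device teardown */
            drm_vma_offset_manager_destroy(mgr);
            return 0;
    }

The point of the change is that the offset bookkeeping now lives in one shared manager (the same drm_vma_manager code GEM drivers use), so the lookup in ttm_bo_vm_lookup() further down can be a plain drm_vma_offset_lookup_locked() instead of a hand-rolled RB walk.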
index 5184570..c0753b0 100644 (file)
  **************************************************************************/
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- *
- * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_manager.c 247835 2013-03-05 09:49:34Z kib $
  */
 
 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_mm.h>
-#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
 
 /**
  * Currently we use a spinlock for the lock, but a mutex *may* be
@@ -49,7 +49,7 @@ struct ttm_range_manager {
 
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                               struct ttm_buffer_object *bo,
-                              const struct ttm_place *place,
+                              struct ttm_placement *placement,
                               struct ttm_mem_reg *mem)
 {
        struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -59,23 +59,24 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
        unsigned long lpfn;
        int ret;
 
-       lpfn = place->lpfn;
+       lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
 
        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
-
-       if (place->flags & TTM_PL_FLAG_TOPDOWN)
+       /* TTM_PL_FLAG_TOPDOWN not pulled in yet:
+       if (placement->flags & TTM_PL_FLAG_TOPDOWN)
                aflags = DRM_MM_CREATE_TOP;
+       */
 
        lockmgr(&rman->lock, LK_EXCLUSIVE);
        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
-                                         mem->page_alignment, 0,
-                                         place->fpfn, lpfn,
-                                         DRM_MM_SEARCH_BEST,
-                                         aflags);
+                                       mem->page_alignment, 0,
+                                       placement->fpfn, lpfn,
+                                       DRM_MM_SEARCH_BEST,
+                                       aflags);
        lockmgr(&rman->lock, LK_RELEASE);
 
        if (unlikely(ret)) {
@@ -84,7 +85,6 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                mem->mm_node = node;
                mem->start = node->start;
        }
-
        return 0;
 }
 
@@ -95,10 +95,8 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 
        if (mem->mm_node) {
                lockmgr(&rman->lock, LK_EXCLUSIVE);
-               drm_mm_remove_node(mem->mm_node);
+               drm_mm_put_block(mem->mm_node);
                lockmgr(&rman->lock, LK_RELEASE);
-
-               kfree(mem->mm_node);
                mem->mm_node = NULL;
        }
 }
index 12db784..0dd82e1 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -88,7 +89,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
                        return (0);
        }
 
-       mutex_lock(&man->io_reserve_mutex);
+       lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
        return 0;
 }
 EXPORT_SYMBOL(ttm_mem_io_lock);
@@ -98,7 +99,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
        if (likely(man->io_reserve_fastpath))
                return;
 
-       mutex_unlock(&man->io_reserve_mutex);
+       lockmgr(&man->io_reserve_mutex, LK_RELEASE);
 }
 EXPORT_SYMBOL(ttm_mem_io_unlock);
 
@@ -470,7 +471,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
-       fbo->vm_node = NULL;
+       drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);
 
        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
index 9c9b21b..8d2d7bf 100644 (file)
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/*
- * Copyright (c) 2013 The FreeBSD Foundation
- * All rights reserved.
- *
- * Portions of this software were developed by Konstantin Belousov
- * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
- *
- *$FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
- */
-
-#include "opt_vm.h"
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <sys/sysctl.h>
 #include <vm/vm.h>
 #include <vm/vm_page.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-
 #include <vm/vm_page2.h>
 
-RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
-    ttm_bo_cmp_rb_tree_items);
+#define TTM_BO_VM_NUM_PREFAULT 16
 
+/*
+ * Default (-1): never stall on an unexpected vm_page alias; the fault
+ * then fails and the process takes a fatal bus error.  Set to 0 to
+ * stall for debugging, or to a positive count to unstall N times and
+ * then stall again.
+ */
+static int drm_unstall = -1;
+SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
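(Usage note, an assumption rather than something stated in the patch: since the knob is registered under the _debug parent it should appear as the debug.unstall sysctl, so setting it to e.g. 4 from userland would let four aliased faults proceed before the handler stalls again, while the default of -1 never stalls.)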
 
-#define TTM_BO_VM_NUM_PREFAULT 16
+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       /* see ttm_bo_mmap_single() at end of this file */
+       /* ttm_bo_vm_ops not currently used, no entry should occur */
+       panic("ttm_bo_vm_fault");
+#if 0
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+           vma->vm_private_data;
+       struct ttm_bo_device *bdev = bo->bdev;
+       unsigned long page_offset;
+       unsigned long page_last;
+       unsigned long pfn;
+       struct ttm_tt *ttm = NULL;
+       struct page *page;
+       int ret;
+       int i;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       int retval = VM_FAULT_NOPAGE;
+       struct ttm_mem_type_manager *man =
+               &bdev->man[bo->mem.mem_type];
 
-int
-ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
-    struct ttm_buffer_object *b)
+       /*
+        * Work around locking order reversal in fault / nopfn
+        * between mmap_sem and bo_reserve: Perform a trylock operation
+        * for reserve, and if it fails, retry the fault after scheduling.
+        */
+
+       ret = ttm_bo_reserve(bo, true, true, false, 0);
+       if (unlikely(ret != 0)) {
+               if (ret == -EBUSY)
+                       set_need_resched();
+               return VM_FAULT_NOPAGE;
+       }
+
+       if (bdev->driver->fault_reserve_notify) {
+               ret = bdev->driver->fault_reserve_notify(bo);
+               switch (ret) {
+               case 0:
+                       break;
+               case -EBUSY:
+                       set_need_resched();
+               case -ERESTARTSYS:
+                       retval = VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               default:
+                       retval = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+       }
+
+       /*
+        * Wait for buffer data in transit, due to a pipelined
+        * move.
+        */
+
+       lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
+       if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+               ret = ttm_bo_wait(bo, false, true, false);
+               lockmgr(&bdev->fence_lock, LK_RELEASE);
+               if (unlikely(ret != 0)) {
+                       retval = (ret != -ERESTARTSYS) ?
+                           VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               }
+       } else
+               lockmgr(&bdev->fence_lock, LK_RELEASE);
+
+       ret = ttm_mem_io_lock(man, true);
+       if (unlikely(ret != 0)) {
+               retval = VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+       ret = ttm_mem_io_reserve_vm(bo);
+       if (unlikely(ret != 0)) {
+               retval = VM_FAULT_SIGBUS;
+               goto out_io_unlock;
+       }
+
+       page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+       page_last = vma_pages(vma) +
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+
+       if (unlikely(page_offset >= bo->num_pages)) {
+               retval = VM_FAULT_SIGBUS;
+               goto out_io_unlock;
+       }
+
+       /*
+        * Strictly, we're not allowed to modify vma->vm_page_prot here,
+        * since the mmap_sem is only held in read mode. However, we
+        * modify only the caching bits of vma->vm_page_prot and
+        * consider those bits protected by
+        * the bo->mutex, as we should be the only writers.
+        * There shouldn't really be any readers of these bits except
+        * within vm_insert_mixed()? fork?
+        *
+        * TODO: Add a list of vmas to the bo, and change the
+        * vma->vm_page_prot when the object changes caching policy, with
+        * the correct locks held.
+        */
+       if (bo->mem.bus.is_iomem) {
+               vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                               vma->vm_page_prot);
+       } else {
+               ttm = bo->ttm;
+               vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+                   vm_get_page_prot(vma->vm_flags) :
+                   ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+               /* Allocate all page at once, most common usage */
+               if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+                       retval = VM_FAULT_OOM;
+                       goto out_io_unlock;
+               }
+       }
+
+       /*
+        * Speculatively prefault a number of pages. Only error on
+        * first page.
+        */
+       for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+               if (bo->mem.bus.is_iomem)
+                       pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+               else {
+                       page = ttm->pages[page_offset];
+                       if (unlikely(!page && i == 0)) {
+                               retval = VM_FAULT_OOM;
+                               goto out_io_unlock;
+                       } else if (unlikely(!page)) {
+                               break;
+                       }
+                       pfn = page_to_pfn(page);
+               }
+
+               ret = vm_insert_mixed(vma, address, pfn);
+               /*
+                * Somebody beat us to this PTE or prefaulting to
+                * an already populated PTE, or prefaulting error.
+                */
+
+               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+                       break;
+               else if (unlikely(ret != 0)) {
+                       retval =
+                           (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                       goto out_io_unlock;
+               }
+
+               address += PAGE_SIZE;
+               if (unlikely(++page_offset >= page_last))
+                       break;
+       }
+out_io_unlock:
+       ttm_mem_io_unlock(man);
+out_unlock:
+       ttm_bo_unreserve(bo);
+       return retval;
+#endif
+}
+
+/* ttm_bo_vm_ops not currently used, no entry should occur */
+static void ttm_bo_vm_open(struct vm_area_struct *vma)
 {
-    if (a->vm_node->start < b->vm_node->start) {
-        return (-1);
-    } else if (a->vm_node->start > b->vm_node->start) {
-        return (1);
-    } else {
-        return (0);
-    }
+       struct ttm_buffer_object *bo =
+           (struct ttm_buffer_object *)vma->vm_private_data;
+
+       (void)ttm_bo_reference(bo);
+}
+
+/* ttm_bo_vm_ops not currently used, no entry should occur */
+static void ttm_bo_vm_close(struct vm_area_struct *vma)
+{
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+
+       ttm_bo_unref(&bo);
+       vma->vm_private_data = NULL;
 }
 
+static const struct vm_operations_struct ttm_bo_vm_ops = {
+       .fault = ttm_bo_vm_fault,
+       .open = ttm_bo_vm_open,
+       .close = ttm_bo_vm_close
+};
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+                                                 unsigned long offset,
+                                                 unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+       struct ttm_buffer_object *bo = NULL;
+
+       drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+       node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+       if (likely(node)) {
+               bo = container_of(node, struct ttm_buffer_object, vma_node);
+               if (!kref_get_unless_zero(&bo->kref))
+                       bo = NULL;
+       }
+
+       drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+       if (!bo)
+               pr_err("Could not find buffer object to map\n");
+
+       return bo;
+}
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-                                                    unsigned long page_start,
-                                                    unsigned long num_pages)
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+               struct ttm_bo_device *bdev)
 {
-       unsigned long cur_offset;
+       struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
-       struct ttm_buffer_object *best_bo = NULL;
-
-       bo = RB_ROOT(&bdev->addr_space_rb);
-       while (bo != NULL) {
-               cur_offset = bo->vm_node->start;
-               if (page_start >= cur_offset) {
-                       best_bo = bo;
-                       if (page_start == cur_offset)
-                               break;
-                       bo = RB_RIGHT(bo, vm_rb);
-               } else
-                       bo = RB_LEFT(bo, vm_rb);
+       int ret;
+
+       bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+       if (unlikely(!bo))
+               return -EINVAL;
+
+       driver = bo->bdev->driver;
+       if (unlikely(!driver->verify_access)) {
+               ret = -EPERM;
+               goto out_unref;
+       }
+       ret = driver->verify_access(bo, filp);
+       if (unlikely(ret != 0))
+               goto out_unref;
+
+       vma->vm_ops = &ttm_bo_vm_ops;
+
+       /*
+        * Note: We're transferring the bo reference to
+        * vma->vm_private_data here.
+        */
+
+       vma->vm_private_data = bo;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       return 0;
+out_unref:
+       ttm_bo_unref(&bo);
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mmap);
+
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+       if (vma->vm_pgoff != 0)
+               return -EACCES;
+
+       vma->vm_ops = &ttm_bo_vm_ops;
+       vma->vm_private_data = ttm_bo_reference(bo);
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       return 0;
+}
+EXPORT_SYMBOL(ttm_fbdev_mmap);
+
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+                 const char __user *wbuf, char __user *rbuf, size_t count,
+                 loff_t *f_pos, bool write)
+{
+       struct ttm_buffer_object *bo;
+       struct ttm_bo_driver *driver;
+       struct ttm_bo_kmap_obj map;
+       unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+       unsigned long kmap_offset;
+       unsigned long kmap_end;
+       unsigned long kmap_num;
+       size_t io_size;
+       unsigned int page_offset;
+       char *virtual;
+       int ret;
+       bool no_wait = false;
+       bool dummy;
+
+       bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
+       if (unlikely(bo == NULL))
+               return -EFAULT;
+
+       driver = bo->bdev->driver;
+       if (unlikely(!driver->verify_access)) {
+               ret = -EPERM;
+               goto out_unref;
+       }
+
+       ret = driver->verify_access(bo, filp);
+       if (unlikely(ret != 0))
+               goto out_unref;
+
+       kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
+       if (unlikely(kmap_offset >= bo->num_pages)) {
+               ret = -EFBIG;
+               goto out_unref;
+       }
+
+       page_offset = *f_pos & ~PAGE_MASK;
+       io_size = bo->num_pages - kmap_offset;
+       io_size = (io_size << PAGE_SHIFT) - page_offset;
+       if (count < io_size)
+               io_size = count;
+
+       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+       kmap_num = kmap_end - kmap_offset + 1;
+
+       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+       switch (ret) {
+       case 0:
+               break;
+       case -EBUSY:
+               ret = -EAGAIN;
+               goto out_unref;
+       default:
+               goto out_unref;
+       }
+
+       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unreserve(bo);
+               goto out_unref;
+       }
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       virtual += page_offset;
+
+       if (write)
+               ret = copy_from_user(virtual, wbuf, io_size);
+       else
+               ret = copy_to_user(rbuf, virtual, io_size);
+
+       ttm_bo_kunmap(&map);
+       ttm_bo_unreserve(bo);
+       ttm_bo_unref(&bo);
+
+       if (unlikely(ret != 0))
+               return -EFBIG;
+
+       *f_pos += io_size;
+
+       return io_size;
+out_unref:
+       ttm_bo_unref(&bo);
+       return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+                       char __user *rbuf, size_t count, loff_t *f_pos,
+                       bool write)
+{
+       struct ttm_bo_kmap_obj map;
+       unsigned long kmap_offset;
+       unsigned long kmap_end;
+       unsigned long kmap_num;
+       size_t io_size;
+       unsigned int page_offset;
+       char *virtual;
+       int ret;
+       bool no_wait = false;
+       bool dummy;
+
+       kmap_offset = (*f_pos >> PAGE_SHIFT);
+       if (unlikely(kmap_offset >= bo->num_pages))
+               return -EFBIG;
+
+       page_offset = *f_pos & ~PAGE_MASK;
+       io_size = bo->num_pages - kmap_offset;
+       io_size = (io_size << PAGE_SHIFT) - page_offset;
+       if (count < io_size)
+               io_size = count;
+
+       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+       kmap_num = kmap_end - kmap_offset + 1;
+
+       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+       switch (ret) {
+       case 0:
+               break;
+       case -EBUSY:
+               return -EAGAIN;
+       default:
+               return ret;
        }
 
-       if (unlikely(best_bo == NULL))
-               return NULL;
+       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unreserve(bo);
+               return ret;
+       }
 
-       if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-                    (page_start + num_pages)))
-               return NULL;
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       virtual += page_offset;
 
-       return best_bo;
+       if (write)
+               ret = copy_from_user(virtual, wbuf, io_size);
+       else
+               ret = copy_to_user(rbuf, virtual, io_size);
+
+       ttm_bo_kunmap(&map);
+       ttm_bo_unreserve(bo);
+       ttm_bo_unref(&bo);
+
+       if (unlikely(ret != 0))
+               return ret;
+
+       *f_pos += io_size;
+
+       return io_size;
 }
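The offset arithmetic shared by ttm_bo_io() and ttm_bo_fbdev_io() above is easy to misread, so here is a small worked example (illustration only, assuming PAGE_SIZE = 4096 so that ~PAGE_MASK == PAGE_SIZE - 1, a 4-page buffer object, *f_pos = 0x1800 and count = 0x2000; for ttm_bo_io() the same math applies after subtracting drm_vma_node_start()).

    /*
     * Worked example of the kmap arithmetic, assuming PAGE_SIZE = 4096
     * (PAGE_SHIFT = 12), bo->num_pages = 4, *f_pos = 0x1800, count = 0x2000.
     * Illustration only, not part of the patch.
     */
    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
            const unsigned long page_shift = 12;
            const unsigned long page_size = 1UL << page_shift;
            unsigned long f_pos = 0x1800, count = 0x2000, num_pages = 4;

            unsigned long kmap_offset = f_pos >> page_shift;       /* page 1 */
            unsigned long page_offset = f_pos & (page_size - 1);   /* 0x800  */
            size_t io_size = num_pages - kmap_offset;              /* 3 pages */

            io_size = (io_size << page_shift) - page_offset;       /* 0x2800 */
            if (count < io_size)
                    io_size = count;                               /* 0x2000 */

            unsigned long kmap_end = (f_pos + count - 1) >> page_shift; /* page 3 */
            unsigned long kmap_num = kmap_end - kmap_offset + 1;        /* 3 pages */

            /* pages 1..3 get kmapped, 0x2000 bytes copied from offset 0x800 */
            assert(kmap_offset == 1 && page_offset == 0x800);
            assert(io_size == 0x2000 && kmap_end == 3 && kmap_num == 3);
            return 0;
    }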
 
+/*
+ * DragonFlyBSD Interface
+ */
+
+#include "opt_vm.h"
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+
+#include <vm/vm_page2.h>
+
 static int
-ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
-    int prot, vm_page_t *mres)
+ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
+                    int prot, vm_page_t *mres)
 {
        struct ttm_buffer_object *bo = vm_obj->handle;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_tt *ttm = NULL;
-       vm_page_t m, m1, oldm;
+       vm_page_t m, oldm;
        int ret;
        int retval = VM_PAGER_OK;
-       struct ttm_mem_type_manager *man =
-               &bdev->man[bo->mem.mem_type];
+       struct ttm_mem_type_manager *man;
+
+       man = &bdev->man[bo->mem.mem_type];
+
+       /*kprintf("FAULT %p %p/%ld\n", vm_obj, bo, offset);*/
 
        vm_object_pip_add(vm_obj, 1);
        oldm = *mres;
-       if (oldm != NULL) {
-               vm_page_remove(oldm);
-               *mres = NULL;
-       } else
-               oldm = NULL;
+       *mres = NULL;
+
 retry:
        VM_OBJECT_UNLOCK(vm_obj);
        m = NULL;
 
-reserve:
-       ret = ttm_bo_reserve(bo, false, false, false, 0);
+       /*
+        * NOTE: set no_wait to false; we don't have ttm_bo_wait_unreserved()
+        *       for the -EBUSY case yet.
+        */
+       ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0)) {
-               if (ret == -EBUSY) {
-                       lwkt_yield();
-                       goto reserve;
-               }
+               retval = VM_PAGER_ERROR;
+               VM_OBJECT_LOCK(vm_obj);
+               goto out_unlock2;
        }
 
        if (bdev->driver->fault_reserve_notify) {
@@ -140,10 +520,12 @@ reserve:
                case 0:
                        break;
                case -EBUSY:
+                       lwkt_yield();
+                       /* fall through */
                case -ERESTARTSYS:
                case -EINTR:
-                       lwkt_yield();
-                       goto reserve;
+                       retval = VM_PAGER_ERROR;
+                       goto out_unlock;
                default:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
@@ -154,7 +536,6 @@ reserve:
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
-
        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                /*
@@ -179,8 +560,9 @@ reserve:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
-       } else
+       } else {
                lockmgr(&bdev->fence_lock, LK_RELEASE);
+       }
 
        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
@@ -192,6 +574,10 @@ reserve:
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }
+       if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
+               retval = VM_PAGER_ERROR;
+               goto out_io_unlock;
+       }
 
        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
@@ -206,21 +592,20 @@ reserve:
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
-       if (!bo->mem.bus.is_iomem) {
+
+       if (bo->mem.bus.is_iomem) {
+               m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
+                                                 bo->mem.bus.offset + offset);
+               pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
+       } else {
                /* Allocate all page at once, most common usage */
                ttm = bo->ttm;
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
-       }
-
-       if (bo->mem.bus.is_iomem) {
-               m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
-                   bo->mem.bus.offset + offset);
-               pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
-       } else {
                ttm = bo->ttm;
+
                m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
                if (unlikely(!m)) {
                        retval = VM_PAGER_ERROR;
@@ -232,34 +617,61 @@ reserve:
        }
 
        VM_OBJECT_LOCK(vm_obj);
-       if ((m->busy_count & PBUSY_LOCKED) != 0) {
-#if 0
-               vm_page_sleep(m, "ttmpbs");
-#endif
+
+       if (vm_page_busy_try(m, FALSE)) {
+               kprintf("r");
+               vm_page_sleep_busy(m, FALSE, "ttmvmf");
                ttm_mem_io_unlock(man);
                ttm_bo_unreserve(bo);
                goto retry;
        }
+
+       /*
+        * We want our fake page in the VM object, not the page the OS
+        * allocated for us as a placeholder.
+        */
        m->valid = VM_PAGE_BITS_ALL;
        *mres = m;
-       m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
-       if (m1 == NULL) {
-               vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
-       } else {
-               KASSERT(m == m1,
-                   ("inconsistent insert bo %p m %p m1 %p offset %jx",
-                   bo, m, m1, (uintmax_t)offset));
-       }
-       vm_page_busy_try(m, FALSE);
-
        if (oldm != NULL) {
+               vm_page_remove(oldm);
+               if (m->object) {
+                       retval = VM_PAGER_ERROR;
+                       kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
+                               "in obj %p, attempt obj %p\n",
+                               m, m->object, vm_obj);
+                       while (drm_unstall == 0) {
+                               tsleep(&retval, 0, "DEBUG", hz/10);
+                       }
+                       if (drm_unstall > 0)
+                               --drm_unstall;
+               } else {
+                       vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
+               }
                vm_page_free(oldm);
+               oldm = NULL;
+       } else {
+               vm_page_t mtmp;
+
+               kprintf("oldm NULL\n");
+
+               mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
+               KASSERT(mtmp == NULL || mtmp == m,
+                   ("inconsistent insert bo %p m %p mtmp %p offset %jx",
+                   bo, m, mtmp, (uintmax_t)offset));
+               if (mtmp == NULL)
+                       vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
        }
+       /*vm_page_busy_try(m, FALSE);*/
 
 out_io_unlock1:
        ttm_mem_io_unlock(man);
 out_unlock1:
        ttm_bo_unreserve(bo);
+out_unlock2:
+       if (oldm) {
+               vm_page_remove(oldm);
+               vm_page_free(oldm);
+       }
        vm_object_pip_wakeup(vm_obj);
        return (retval);
 
@@ -274,7 +686,7 @@ out_unlock:
 
 static int
 ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
-    vm_ooffset_t foff, struct ucred *cred, u_short *color)
+              vm_ooffset_t foff, struct ucred *cred, u_short *color)
 {
 
        /*
@@ -302,60 +714,64 @@ ttm_bo_vm_dtor(void *handle)
 }
 
 static struct cdev_pager_ops ttm_pager_ops = {
-       .cdev_pg_fault = ttm_bo_vm_fault,
+       .cdev_pg_fault = ttm_bo_vm_fault_dfly,
        .cdev_pg_ctor = ttm_bo_vm_ctor,
        .cdev_pg_dtor = ttm_bo_vm_dtor
 };
 
+/*
+ * Called from drm_drv.c
+ *
+ * *offset - object offset in bytes
+ * size           - map size in bytes
+ *
+ * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
+ * our own VM object and dfly ops.  Note that the ops supplied by
+ * ttm_bo_mmap() are not currently used.
+ */
 int
-ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
-    struct vm_object **obj_res, int nprot)
+ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
+                  vm_size_t size, struct vm_object **obj_res, int nprot)
 {
-       struct ttm_bo_driver *driver;
+       struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
        struct ttm_buffer_object *bo;
        struct vm_object *vm_obj;
+       struct vm_area_struct vma;
        int ret;
 
        *obj_res = NULL;
 
-       lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
-       bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
-       if (likely(bo != NULL))
-               kref_get(&bo->kref);
-       lockmgr(&bdev->vm_lock, LK_RELEASE);
+       bzero(&vma, sizeof(vma));
+       vma.vm_start = *offset;         /* bdev-relative offset */
+       vma.vm_end = vma.vm_start + size;
+       vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
+       /* vma.vm_page_prot */
+       /* vma.vm_flags */
 
-       if (unlikely(bo == NULL)) {
-               pr_err("Could not find buffer object to map\n");
-               return (EINVAL);
-       }
-
-       driver = bo->bdev->driver;
-       if (unlikely(!driver->verify_access)) {
-               ret = EPERM;
-               goto out_unref;
-       }
-       ret = -driver->verify_access(bo);
-       if (unlikely(ret != 0))
-               goto out_unref;
-
-       vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
-           size, nprot, 0, curthread->td_ucred);
-
-       if (vm_obj == NULL) {
-               ret = EINVAL;
-               goto out_unref;
-       }
        /*
-        * Note: We're transferring the bo reference to vm_obj->handle here.
+        * Call the linux-ported code to do the work, and on success just
+        * set up our own VM object and ignore what the linux code did other
+        * than supplying us the 'bo'.
         */
-       *offset = 0;
-       *obj_res = vm_obj;
-       return 0;
-out_unref:
-       ttm_bo_unref(&bo);
+       ret = ttm_bo_mmap(NULL, &vma, bdev);
+
+       if (ret == 0) {
+               bo = vma.vm_private_data;
+               vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
+                                            &ttm_pager_ops,
+                                            size, nprot, 0,
+                                            curthread->td_ucred);
+               if (vm_obj) {
+                       *obj_res = vm_obj;
+                       *offset = 0;            /* object-relative offset */
+               } else {
+                       ttm_bo_unref(&bo);
+                       ret = EINVAL;
+               }
+       }
        return ret;
 }
-EXPORT_SYMBOL(ttm_bo_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_single);
 
 void
 ttm_bo_release_mmap(struct ttm_buffer_object *bo)