drm: Define struct page and use it everywhere
author		François Tigeot <ftigeot@wolfpond.org>
		Sat, 9 Jun 2018 16:18:33 +0000 (18:18 +0200)
committer	François Tigeot <ftigeot@wolfpond.org>
		Sat, 9 Jun 2018 16:18:33 +0000 (18:18 +0200)
* Removes countless differences from Linux

* struct page is essentially struct vm_page under a different name.
  A pointer to one can be cast to a pointer to the other without
  further thought.
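
* A minimal sketch of the compatibility scheme. The struct definition is
  the one this commit adds to linux/mm_types.h; the two cast helpers are
  purely illustrative and are not added anywhere by this commit:

	struct page {
		struct vm_page pa_vmpage;	/* first and only member */
	};

	/* Hypothetical helpers, for illustration only. A pointer to a
	 * struct also points to its first member, so the conversion is
	 * a plain pointer cast in both directions.
	 */
	static inline struct vm_page *to_vm_page(struct page *p)
	{
		return (struct vm_page *)p;
	}

	static inline struct page *to_linux_page(struct vm_page *p)
	{
		return (struct page *)p;
	}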

41 files changed:
sys/dev/drm/drm_cache.c
sys/dev/drm/i915/i915_cmd_parser.c
sys/dev/drm/i915/i915_drv.h
sys/dev/drm/i915/i915_gem.c
sys/dev/drm/i915/i915_gem_fence.c
sys/dev/drm/i915/i915_gem_gtt.h
sys/dev/drm/i915/i915_gem_render_state.c
sys/dev/drm/i915/i915_guc_submission.c
sys/dev/drm/i915/intel_lrc.c
sys/dev/drm/i915/intel_ringbuffer.c
sys/dev/drm/include/asm/cacheflush.h
sys/dev/drm/include/asm/memory_model.h
sys/dev/drm/include/asm/page.h
sys/dev/drm/include/asm/pgtable.h
sys/dev/drm/include/drm/drmP.h
sys/dev/drm/include/drm/ttm/ttm_bo_api.h
sys/dev/drm/include/drm/ttm/ttm_bo_driver.h
sys/dev/drm/include/drm/ttm/ttm_memory.h
sys/dev/drm/include/linux/dma-mapping.h
sys/dev/drm/include/linux/gfp.h
sys/dev/drm/include/linux/highmem.h
sys/dev/drm/include/linux/mm.h
sys/dev/drm/include/linux/mm_types.h
sys/dev/drm/include/linux/mmdebug.h [copied from sys/dev/drm/include/asm/page.h with 86% similarity]
sys/dev/drm/include/linux/mmzone.h [copied from sys/dev/drm/include/linux/mm_types.h with 84% similarity]
sys/dev/drm/include/linux/scatterlist.h
sys/dev/drm/include/linux/shmem_fs.h
sys/dev/drm/include/linux/swap.h
sys/dev/drm/include/linux/vmalloc.h
sys/dev/drm/linux_scatterlist.c
sys/dev/drm/linux_shmem.c
sys/dev/drm/linux_vmalloc.c
sys/dev/drm/radeon/radeon.h
sys/dev/drm/radeon/radeon_gart.c
sys/dev/drm/radeon/radeon_ttm.c
sys/dev/drm/ttm/ttm_bo.c
sys/dev/drm/ttm/ttm_bo_util.c
sys/dev/drm/ttm/ttm_bo_vm.c
sys/dev/drm/ttm/ttm_memory.c
sys/dev/drm/ttm/ttm_page_alloc.c
sys/dev/drm/ttm/ttm_tt.c

index a8440aa..6dea7c2 100644 (file)
@@ -38,7 +38,7 @@
  * in the caller.
  */
 static void
-drm_clflush_page(struct vm_page *page)
+drm_clflush_page(struct page *page)
 {
        uint8_t *page_virtual;
        unsigned int i;
@@ -53,7 +53,7 @@ drm_clflush_page(struct vm_page *page)
        kunmap_atomic(page_virtual);
 }
 
-static void drm_cache_flush_clflush(struct vm_page *pages[],
+static void drm_cache_flush_clflush(struct page *pages[],
                                    unsigned long num_pages)
 {
        unsigned long i;
@@ -65,9 +65,9 @@ static void drm_cache_flush_clflush(struct vm_page *pages[],
 }
 
 void
-drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
-       pmap_invalidate_cache_pages(pages, num_pages);
+       pmap_invalidate_cache_pages((struct vm_page **)pages, num_pages);
 
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                drm_cache_flush_clflush(pages, num_pages);
index f7d27fd..a656fb8 100644 (file)
@@ -944,7 +944,7 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj,
        int first_page = start >> PAGE_SHIFT;
        int last_page = (len + start + 4095) >> PAGE_SHIFT;
        int npages = last_page - first_page;
-       struct vm_page **pages;
+       struct page **pages;
 
        pages = drm_malloc_ab(npages, sizeof(*pages));
        if (pages == NULL) {
index edf16de..2848ba5 100644 (file)
@@ -3003,10 +3003,10 @@ static inline int __sg_page_count(struct scatterlist *sg)
        return sg->length >> PAGE_SHIFT;
 }
 
-struct vm_page *
+struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
 
-static inline struct vm_page *
+static inline struct page *
 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
        if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
index 8a0c34c..1e088d8 100644 (file)
@@ -518,7 +518,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
 static int
-shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
 {
@@ -565,7 +565,7 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
 {
@@ -619,7 +619,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
-               struct vm_page *page = sg_page_iter_page(&sg_iter);
+               struct page *page = sg_page_iter_page(&sg_iter);
 
                if (remain <= 0)
                        break;
@@ -831,7 +831,7 @@ out:
  * needs_clflush_before is set and flushes out any written cachelines after
  * writing if needs_clflush is set. */
 static int
-shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
@@ -860,7 +860,7 @@ shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
@@ -944,7 +944,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
-               struct vm_page *page = sg_page_iter_page(&sg_iter);
+               struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;
 
                if (remain <= 0)
@@ -2325,7 +2325,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                obj->dirty = 0;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct vm_page *page = sg_page_iter_page(&sg_iter);
+               struct page *page = sg_page_iter_page(&sg_iter);
 
                if (obj->dirty)
                        set_page_dirty(page);
@@ -2333,9 +2333,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                if (obj->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);
 
-               vm_page_busy_wait(page, FALSE, "i915gem");
-               vm_page_unwire(page, 1);
-               vm_page_wakeup(page);
+               vm_page_busy_wait((struct vm_page *)page, FALSE, "i915gem");
+               vm_page_unwire((struct vm_page *)page, 1);
+               vm_page_wakeup((struct vm_page *)page);
        }
        obj->dirty = 0;
 
@@ -2386,7 +2386,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct sg_table *st;
        struct scatterlist *sg;
        struct sg_page_iter sg_iter;
-       struct vm_page *page;
+       struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
 
@@ -2481,10 +2481,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
        sg_mark_end(sg);
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-               page = sg_page_iter_page(&sg_iter);
-               vm_page_busy_wait(page, FALSE, "i915gem");
-               vm_page_unwire(page, 0);
-               vm_page_wakeup(page);
+               struct vm_page *vmp = (struct vm_page *)sg_page_iter_page(&sg_iter);
+               vm_page_busy_wait(vmp, FALSE, "i915gem");
+               vm_page_unwire(vmp, 0);
+               vm_page_wakeup(vmp);
        }
        VM_OBJECT_UNLOCK(vm_obj);
        sg_free_table(st);
@@ -2553,7 +2553,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
        i915_gem_object_pin_pages(obj);
 
        if (obj->mapping == NULL) {
-               struct vm_page **pages;
+               struct page **pages;
 
                pages = NULL;
                if (obj->base.size == PAGE_SIZE)
@@ -5509,10 +5509,10 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 }
 
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct vm_page *
+struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
 {
-       struct vm_page *page;
+       struct page *page;
 
        /* Only default objects have per-page dirty tracking */
        if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
index 2d42d38..a2b938e 100644 (file)
@@ -713,7 +713,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
  * by the GPU.
  */
 static void
-i915_gem_swizzle_page(struct vm_page *page)
+i915_gem_swizzle_page(struct page *page)
 {
        char temp[64];
        char *vaddr;
@@ -753,7 +753,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct vm_page *page = sg_page_iter_page(&sg_iter);
+               struct page *page = sg_page_iter_page(&sg_iter);
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj->bit_17) != 0)) {
index 53bed2c..40d784a 100644 (file)
@@ -220,7 +220,7 @@ struct i915_vma {
 };
 
 struct i915_page_dma {
-       struct vm_page *page;
+       struct page *page;
        union {
                dma_addr_t daddr;
 
index 458f68a..71611bf 100644 (file)
@@ -95,7 +95,7 @@ static int render_state_setup(struct render_state *so)
 {
        const struct intel_renderstate_rodata *rodata = so->rodata;
        unsigned int i = 0, reloc_index = 0;
-       struct vm_page *page;
+       struct page *page;
        u32 *d;
        int ret;
 
index f7dc61f..ee9feea 100644 (file)
@@ -821,7 +821,7 @@ static void guc_create_ads(struct intel_guc *guc)
        struct guc_policies *policies;
        struct guc_mmio_reg_state *reg_state;
        struct intel_engine_cs *engine;
-       struct vm_page *page;
+       struct page *page;
        u32 size;
 
        /* The ads obj includes the struct itself and buffers passed to GuC */
index 7388653..6ad92bf 100644 (file)
@@ -1395,7 +1395,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        int ret;
        uint32_t *batch;
        uint32_t offset;
-       struct vm_page *page;
+       struct page *page;
        struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
 
        WARN_ON(engine->id != RCS);
index eb5ae77..c6f2cbf 100644 (file)
@@ -2608,7 +2608,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
        }
        if (dev_priv->semaphore_obj) {
                struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
-               struct vm_page *page = i915_gem_object_get_dirty_page(obj, 0);
+               struct page *page = i915_gem_object_get_dirty_page(obj, 0);
                char *semaphores = kmap(page);
                memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
index 51cc95e..ad45df5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017 François Tigeot
+ * Copyright (c) 2015-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,17 +44,21 @@ static inline int set_memory_wb(unsigned long vaddr, int numpages)
        return 0;
 }
 
-static inline int set_pages_uc(struct vm_page *page, int num_pages)
+static inline int set_pages_uc(struct page *page, int num_pages)
 {
-       pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)),
+       struct vm_page *p = (struct vm_page *)page;
+
+       pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(p)),
                         num_pages, PAT_UNCACHED);
 
        return 0;
 }
 
-static inline int set_pages_wb(struct vm_page *page, int num_pages)
+static inline int set_pages_wb(struct page *page, int num_pages)
 {
-       pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)),
+       struct vm_page *p = (struct vm_page *)page;
+
+       pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(p)),
                         num_pages, PAT_WRITE_BACK);
 
        return 0;
index a67e961..e71cb43 100644 (file)
 #ifndef _ASM_MEMORY_MODEL_H_
 #define _ASM_MEMORY_MODEL_H_
 
-#define page_to_pfn(page)      OFF_TO_IDX(VM_PAGE_TO_PHYS(page))
+#include <vm/vm_object.h>
+
+static inline unsigned long
+page_to_pfn(struct page *page)
+{
+       struct vm_page *p = (struct vm_page *)page;
+
+       return OFF_TO_IDX(VM_PAGE_TO_PHYS(p));
+}
+
 #define pfn_to_page(pfn)       (PHYS_TO_VM_PAGE((pfn) << PAGE_SHIFT))
 
 #endif /* _ASM_MEMORY_MODEL_H_ */
index 629379a..730816f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 François Tigeot
+ * Copyright (c) 2015-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #ifndef _ASM_PAGE_H_
 #define _ASM_PAGE_H_
 
-#define page_to_phys(page)     VM_PAGE_TO_PHYS(page)
+struct page;
+
+static inline vm_paddr_t
+page_to_phys(struct page *page)
+{
+       struct vm_page *p = (struct vm_page *)page;
+
+       return VM_PAGE_TO_PHYS(p);
+}
 
 #include <asm/memory_model.h>
 
index a411986..3ef03f8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 François Tigeot
+ * Copyright (c) 2015-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,6 +27,8 @@
 #ifndef _ASM_PGTABLE_H_
 #define _ASM_PGTABLE_H_
 
+#include <asm/page.h>
+
 #include <asm/pgtable_types.h>
 
 #endif /* _ASM_PGTABLE_H_ */
index 990f5ce..1336b0e 100644 (file)
@@ -1098,7 +1098,7 @@ int drm_invalid_op(struct drm_device *dev, void *data,
                   struct drm_file *file_priv);
 
 /* Cache management (drm_cache.c) */
-void drm_clflush_pages(struct vm_page *pages[], unsigned long num_pages);
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 void drm_clflush_sg(struct sg_table *st);
 void drm_clflush_virt_range(void *addr, unsigned long length);
 
index 87625da..40accbd 100644 (file)
@@ -301,7 +301,7 @@ struct ttm_buffer_object {
 #define TTM_BO_MAP_IOMEM_MASK 0x80
 struct ttm_bo_kmap_obj {
        void *virtual;
-       struct vm_page *page;
+       struct page *page;
        struct sf_buf *sf;
        int num_pages;
        unsigned long size;
index 6a621e2..255bda1 100644 (file)
@@ -116,8 +116,8 @@ enum ttm_caching_state {
 struct ttm_tt {
        struct ttm_bo_device *bdev;
        struct ttm_backend_func *func;
-       struct vm_page *dummy_read_page;
-       struct vm_page **pages;
+       struct page *dummy_read_page;
+       struct page **pages;
        uint32_t page_flags;
        unsigned long num_pages;
        struct sg_table *sg; /* for SG objects via dma-buf */
@@ -336,7 +336,7 @@ struct ttm_bo_driver {
        struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
                                        unsigned long size,
                                        uint32_t page_flags,
-                                       struct vm_page *dummy_read_page);
+                                       struct page *dummy_read_page);
 
        /**
         * ttm_tt_populate
@@ -489,7 +489,7 @@ struct ttm_bo_global {
         */
 
        struct ttm_mem_global *mem_glob;
-       struct vm_page *dummy_read_page;
+       struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
        struct lock device_list_mutex;
        struct lock lru_lock;
@@ -606,10 +606,10 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
  */
 extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                        unsigned long size, uint32_t page_flags,
-                       struct vm_page *dummy_read_page);
+                       struct page *dummy_read_page);
 extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                           unsigned long size, uint32_t page_flags,
-                          struct vm_page *dummy_read_page);
+                          struct page *dummy_read_page);
 
 /**
  * ttm_tt_fini
@@ -1023,7 +1023,7 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
 extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
                                        struct agp_bridge_data *bridge,
                                        unsigned long size, uint32_t page_flags,
-                                       struct vm_page *dummy_read_page);
+                                       struct page *dummy_read_page);
 int ttm_agp_tt_populate(struct ttm_tt *ttm);
 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif
index 54e53d9..18df50a 100644 (file)
@@ -132,8 +132,6 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
        spin_unlock(&glob->spin);
 }
 
-struct vm_page;
-
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
 extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
@@ -141,9 +139,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
                                uint64_t amount);
 extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-                                    struct vm_page *page,
+                                    struct page *page,
                                     bool no_wait, bool interruptible);
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-                                    struct vm_page *page);
+                                    struct page *page);
 extern size_t ttm_round_pot(size_t size);
 #endif
index 4bb3c7f..5fe09aa 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 François Tigeot
+ * Copyright (c) 2015-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include <linux/err.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
+#include <linux/bug.h>
 
 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
 
 static inline dma_addr_t
-dma_map_page(struct device *dev, struct vm_page *page,
+dma_map_page(struct device *dev, struct page *page,
     unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-       return VM_PAGE_TO_PHYS(page) + offset;
+       return VM_PAGE_TO_PHYS((struct vm_page *)page) + offset;
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
index f5118b4..fcf575a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 François Tigeot
+ * Copyright (c) 2015-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #ifndef _LINUX_GFP_H_
 #define _LINUX_GFP_H_
 
+#include <linux/mmdebug.h>
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+
 #include <vm/vm_page.h>
 #include <machine/bus_dma.h>
 
 
 #define GFP_DMA32      0x10000 /* XXX: MUST NOT collide with the M_XXX definitions */
 
-static inline void __free_page(struct vm_page *page)
+static inline void __free_page(struct page *page)
 {
-       vm_page_free_contig(page, PAGE_SIZE);
+       vm_page_free_contig((struct vm_page *)page, PAGE_SIZE);
 }
 
-static inline struct vm_page * alloc_page(int flags)
+static inline struct page * alloc_page(int flags)
 {
        vm_paddr_t high = ~0LLU;
 
        if (flags & GFP_DMA32)
                high = BUS_SPACE_MAXADDR_32BIT;
 
-       return vm_page_alloc_contig(0LLU, ~0LLU,
+       return (struct page *)vm_page_alloc_contig(0LLU, ~0LLU,
                        PAGE_SIZE, PAGE_SIZE, PAGE_SIZE,
                        VM_MEMATTR_DEFAULT);
 }
index d3835ad..f87dca6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 François Tigeot
+ * Copyright (c) 2014-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #include <asm/cacheflush.h>
 
-static inline struct vm_page *
+static inline struct page *
 kmap_to_page(void *addr)
 {
-       return PHYS_TO_VM_PAGE(vtophys(addr));
+       return (struct page *)PHYS_TO_VM_PAGE(vtophys(addr));
 }
 
-static inline void *kmap(struct vm_page *pg)
+static inline void *kmap(struct page *pg)
 {
-       return (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg));
+       return (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS( (struct vm_page *)pg ));
 }
 
-static inline void kunmap(struct vm_page *pg)
+static inline void kunmap(struct page *pg)
 {
        /* Nothing to do on systems with a direct memory map */
 }
 
-static inline void *kmap_atomic(struct vm_page *pg)
+static inline void *kmap_atomic(struct page *pg)
 {
-       return (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg));
+       return (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS( (struct vm_page *)pg ));
 }
 
 static inline void kunmap_atomic(void *vaddr)
index 3992800..540e7e9 100644 (file)
 
 #include <linux/errno.h>
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
+#include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/atomic.h>
 #include <linux/mm_types.h>
@@ -44,8 +46,8 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-static inline struct vm_page *
-nth_page(struct vm_page *page, int n)
+static inline struct page *
+nth_page(struct page *page, int n)
 {
        return page + n;
 }
@@ -105,9 +107,9 @@ vma_pages(struct vm_area_struct *vma)
 #define offset_in_page(off)    ((off) & PAGE_MASK)
 
 static inline void
-set_page_dirty(struct vm_page *page)
+set_page_dirty(struct page *page)
 {
-       vm_page_dirty(page);
+       vm_page_dirty((struct vm_page *)page);
 }
 
 /*
index 28c4e88..3235b04 100644 (file)
@@ -37,6 +37,8 @@
 #include <linux/workqueue.h>
 #include <asm/page.h>
 
-#define page   vm_page         /* for struct page */
+struct page {
+       struct vm_page pa_vmpage;
+};
 
 #endif /* _LINUX_MM_TYPES_H_ */
similarity index 86%
copy from sys/dev/drm/include/asm/page.h
copy to sys/dev/drm/include/linux/mmdebug.h
index 629379a..1489aa0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 François Tigeot
+ * Copyright (c) 2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _ASM_PAGE_H_
-#define _ASM_PAGE_H_
+#ifndef _LINUX_MMDEBUG_H_
+#define _LINUX_MMDEBUG_H_
 
-#define page_to_phys(page)     VM_PAGE_TO_PHYS(page)
+#include <linux/bug.h>
+#include <linux/stringify.h>
 
-#include <asm/memory_model.h>
+struct page;
 
-#endif /* _ASM_PAGE_H_ */
+#endif /* _LINUX_MMDEBUG_H_ */
similarity index 84%
copy from sys/dev/drm/include/linux/mm_types.h
copy to sys/dev/drm/include/linux/mmzone.h
index 28c4e88..02c60d2 100644 (file)
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _LINUX_MM_TYPES_H_
-#define _LINUX_MM_TYPES_H_
+#ifndef _LINUX_MMZONE_H_
+#define _LINUX_MMZONE_H_
 
-#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/rbtree.h>
-#include <linux/completion.h>
-#include <linux/cpumask.h>
-#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/init.h>
+#include <linux/seqlock.h>
+#include <linux/atomic.h>
 #include <asm/page.h>
 
-#define page   vm_page         /* for struct page */
+struct page;
 
-#endif /* _LINUX_MM_TYPES_H_ */
+#endif /* _LINUX_MMZONE_H_ */
index 213a278..95e3db2 100644 (file)
@@ -55,7 +55,7 @@
 
 struct scatterlist {
        union {
-               struct vm_page          *page;
+               struct page             *page;
                struct scatterlist      *sg;
        } sl_un;
        unsigned long   offset;
@@ -94,7 +94,7 @@ struct sg_page_iter {
 #define        SG_CHAIN        0x02
 
 static inline void
-sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
+sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
     unsigned int offset)
 {
        sg_page(sg) = page;
@@ -125,7 +125,7 @@ sg_next(struct scatterlist *sg)
 static inline vm_paddr_t
 sg_phys(struct scatterlist *sg)
 {
-       return sg_page(sg)->phys_addr + sg->offset;
+       return ((struct vm_page *)sg_page(sg))->phys_addr + sg->offset;
 }
 
 /**
@@ -322,7 +322,7 @@ _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
        }
 }
 
-static inline struct vm_page *
+static inline struct page *
 sg_page_iter_page(struct sg_page_iter *piter)
 {
        return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
index 99fae27..60a28c6 100644 (file)
 #ifndef _LINUX_SHMEM_FS_H_
 #define _LINUX_SHMEM_FS_H_
 
+#include <linux/swap.h>
 #include <linux/pagemap.h>
 
 #define        VM_OBJECT_LOCK_ASSERT_OWNED(object)
 
-vm_page_t shmem_read_mapping_page(vm_object_t, vm_pindex_t);
+struct page * shmem_read_mapping_page(vm_object_t, vm_pindex_t);
 
 #endif /* _LINUX_SHMEM_FS_H_ */
index 2d27c2a..5b1de48 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 François Tigeot
+ * Copyright (c) 2015-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #ifndef _LINUX_SWAP_H_
 #define _LINUX_SWAP_H_
 
-#include <vm/vm_page2.h>
-
+#include <linux/spinlock.h>
+#include <linux/mmzone.h>
 #include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/atomic.h>
 #include <asm/page.h>
 
-static inline void mark_page_accessed(struct vm_page *m)
+#include <vm/vm_page2.h>
+
+static inline void mark_page_accessed(struct page *m)
 {
-       vm_page_flag_set(m, PG_REFERENCED);
+       vm_page_flag_set((struct vm_page *)m, PG_REFERENCED);
 }
 
 /* from vm/swap_pager.h */
index 832f30e..75e813e 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/page.h>          /* pgprot_t */
 #include <linux/rbtree.h>
 
-void *vmap(struct vm_page **pages, unsigned int count,
+void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, unsigned long prot);
 
 void vunmap(const void *addr);
index 715de17..9260346 100644 (file)
@@ -158,7 +158,7 @@ sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
        int len, curlen, curoff;
        struct sg_page_iter iter;
        struct scatterlist *sg;
-       struct vm_page *page;
+       struct page *page;
        char *vaddr;
 
        off = 0;
@@ -194,7 +194,7 @@ sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
        int len, curlen, curoff;
        struct sg_page_iter iter;
        struct scatterlist *sg;
-       struct vm_page *page;
+       struct page *page;
        char *vaddr;
 
        off = 0;
index 5ee410d..4c20a20 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/err.h>
 #include <linux/shmem_fs.h>
 
-vm_page_t
+struct page *
 shmem_read_mapping_page(vm_object_t object, vm_pindex_t pindex)
 {
        vm_page_t m;
@@ -63,5 +63,5 @@ shmem_read_mapping_page(vm_object_t object, vm_pindex_t pindex)
        }
        vm_page_wire(m);
        vm_page_wakeup(m);
-       return (m);
+       return (struct page *)m;
 }
index e6bb13e..2bf6c12 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 François Tigeot <ftigeot@wolfpond.org>
+ * Copyright (c) 2017-2018 François Tigeot <ftigeot@wolfpond.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,7 @@ SLIST_HEAD(vmap_list_head, vmap) vmap_list = SLIST_HEAD_INITIALIZER(vmap_list);
 
 /* vmap: map an array of pages into virtually contiguous space */
 void *
-vmap(struct vm_page **pages, unsigned int count,
+vmap(struct page **pages, unsigned int count,
        unsigned long flags, unsigned long prot)
 {
        struct vmap *vmp;
@@ -59,7 +59,7 @@ vmap(struct vm_page **pages, unsigned int count,
 
        vmp->addr = (void *)off;
        vmp->npages = count;
-       pmap_qenter(off, pages, count);
+       pmap_qenter(off, (struct vm_page **)pages, count);
        SLIST_INSERT_HEAD(&vmap_list, vmp, vm_vmaps);
 
        return (void *)off;
index 47bd7ab..0ab14ec 100644 (file)
@@ -626,7 +626,7 @@ struct radeon_gart {
        unsigned                        num_gpu_pages;
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
-       vm_page_t                       *pages;
+       struct page                     **pages;
        dma_addr_t                      *pages_addr;
        bool                            ready;
 };
@@ -642,7 +642,7 @@ void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-                    int pages, vm_page_t *pagelist,
+                    int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint32_t flags);
 
 
index ea98105..79a1b00 100644 (file)
@@ -274,7 +274,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
  * Returns 0 for success, -EINVAL for failure.
  */
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-                    int pages, vm_page_t *pagelist, dma_addr_t *dma_addr,
+                    int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint32_t flags)
 {
        unsigned t;
index 82e23b1..a5c0394 100644 (file)
@@ -568,7 +568,7 @@ static struct ttm_backend_func radeon_backend_func = {
 
 static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size, uint32_t page_flags,
-                                   vm_page_t dummy_read_page)
+                                   struct page *dummy_read_page)
 {
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt;
@@ -643,7 +643,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
        }
 
        for (i = 0; i < ttm->num_pages; i++) {
-               gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS(ttm->pages[i]);
+               gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS((struct vm_page *)ttm->pages[i]);
 #ifdef DUMBBELL_WIP
                gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
                                                       0, PAGE_SIZE,
index 0914a37..f072748 100644 (file)
@@ -1525,7 +1525,7 @@ EXPORT_SYMBOL(ttm_bo_init_mm);
 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
 {
        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
-       vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
+       vm_page_free_contig((struct vm_page *)glob->dummy_read_page, PAGE_SIZE);
        glob->dummy_read_page = NULL;
        /*
        vm_page_free(glob->dummy_read_page);
@@ -1551,7 +1551,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
        lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
        lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
        glob->mem_glob = bo_ref->mem_glob;
-       glob->dummy_read_page = vm_page_alloc_contig(
+       glob->dummy_read_page = (struct page *)vm_page_alloc_contig(
            0, VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);
 
        if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1575,7 +1575,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
        return (0);
 
 out_no_shrink:
-       vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
+       vm_page_free_contig((struct vm_page *)glob->dummy_read_page, PAGE_SIZE);
        glob->dummy_read_page = NULL;
        /*
        vm_page_free(glob->dummy_read_page);
index 8075d24..2911e14 100644 (file)
@@ -252,7 +252,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                vm_memattr_t prot)
 {
-       vm_page_t d = ttm->pages[page];
+       struct page *d = ttm->pages[page];
        void *dst;
 
        if (!d)
@@ -261,7 +261,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
        /* XXXKIB can't sleep ? */
-       dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
+       dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS((struct vm_page *)d), PAGE_SIZE, prot);
        if (!dst)
                return -ENOMEM;
 
@@ -276,14 +276,14 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                vm_memattr_t prot)
 {
-       vm_page_t s = ttm->pages[page];
+       struct page *s = ttm->pages[page];
        void *src;
 
        if (!s)
                return -ENOMEM;
 
        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-       src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
+       src = pmap_mapdev_attr(VM_PAGE_TO_PHYS((struct vm_page *)s), PAGE_SIZE, prot);
        if (!src)
                return -ENOMEM;
 
@@ -524,7 +524,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 
                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
-               map->sf = sf_buf_alloc(map->page);
+               map->sf = sf_buf_alloc((struct vm_page *)map->page);
                map->virtual = (void *)sf_buf_kva(map->sf);
        } else {
                /*
@@ -544,11 +544,11 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                if (map->virtual != NULL) {
                        for (i = 0; i < num_pages; i++) {
                                /* XXXKIB hack */
-                               pmap_page_set_memattr(ttm->pages[start_page +
+                               pmap_page_set_memattr((struct vm_page *)ttm->pages[start_page +
                                    i], prot);
                        }
                        pmap_qenter((vm_offset_t)map->virtual,
-                           &ttm->pages[start_page], num_pages);
+                           (struct vm_page **)&ttm->pages[start_page], num_pages);
                }
        }
        return (!map->virtual) ? -ENOMEM : 0;
index 685c09a..f490d19 100644 (file)
@@ -219,7 +219,7 @@ reserve:
                pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
        } else {
                ttm = bo->ttm;
-               m = ttm->pages[OFF_TO_IDX(offset)];
+               m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
                if (unlikely(!m)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
index d342486..9cc01b0 100644 (file)
@@ -433,7 +433,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 EXPORT_SYMBOL(ttm_mem_global_alloc);
 
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-                             struct vm_page *page,
+                             struct page *page,
                              bool no_wait, bool interruptible)
 {
 
@@ -450,7 +450,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                                         interruptible);
 }
 
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
 {
        struct ttm_mem_zone *zone = NULL;
 
index 12a2834..1ea4913 100644 (file)
@@ -52,7 +52,7 @@
 #include <asm/agp.h>
 #endif
 
-#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(vm_page_t))
+#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               16
 #define FREE_ALL_PAGES                 (~0U)
 /* times are in msecs */
@@ -131,8 +131,9 @@ struct ttm_pool_manager {
 #define        uc_pool_dma32 _u._ut.u_uc_pool_dma32
 
 static void
-ttm_vm_page_free(vm_page_t m)
+ttm_vm_page_free(struct page *p)
 {
+       struct vm_page *m = (struct vm_page *)p;
 
        KASSERT(m->object == NULL, ("ttm page %p is owned", m));
        KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
@@ -225,28 +226,28 @@ static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
 
 static struct ttm_pool_manager *_manager;
 
-static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
+static int set_pages_array_wb(struct page **pages, int addrinarray)
 {
        vm_page_t m;
        int i;
 
        for (i = 0; i < addrinarray; i++) {
-               m = pages[i];
+               m = (struct vm_page *)pages[i];
 #ifdef TTM_HAS_AGP
-               unmap_page_from_agp(m);
+               unmap_page_from_agp(pages[i]);
 #endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
        }
        return 0;
 }
 
-static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
+static int set_pages_array_wc(struct page **pages, int addrinarray)
 {
        vm_page_t m;
        int i;
 
        for (i = 0; i < addrinarray; i++) {
-               m = pages[i];
+               m = (struct vm_page *)pages[i];
 #ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
 #endif
@@ -255,13 +256,13 @@ static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
        return 0;
 }
 
-static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
+static int set_pages_array_uc(struct page **pages, int addrinarray)
 {
        vm_page_t m;
        int i;
 
        for (i = 0; i < addrinarray; i++) {
-               m = pages[i];
+               m = (struct vm_page *)pages[i];
 #ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
 #endif
@@ -292,7 +293,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
 }
 
 /* set memory back to wb and free the pages. */
-static void ttm_pages_put(vm_page_t *pages, unsigned npages)
+static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
        unsigned i;
 
@@ -322,7 +323,7 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 {
        vm_page_t p, p1;
-       vm_page_t *pages_to_free;
+       struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;
        unsigned i;
@@ -330,7 +331,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;
 
-       pages_to_free = kmalloc(npages_to_free * sizeof(vm_page_t),
+       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
            M_TEMP, M_WAITOK | M_ZERO);
 
 restart:
@@ -340,12 +341,12 @@ restart:
                if (freed_pages >= npages_to_free)
                        break;
 
-               pages_to_free[freed_pages++] = p;
+               pages_to_free[freed_pages++] = (struct page *)p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        for (i = 0; i < freed_pages; i++)
-                               TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);
+                               TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
 
                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
@@ -381,7 +382,7 @@ restart:
        /* remove range of pages from the pool */
        if (freed_pages) {
                for (i = 0; i < freed_pages; i++)
-                       TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);
+                       TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
 
                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
@@ -444,7 +445,7 @@ static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
        EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
 }
 
-static int ttm_set_pages_caching(vm_page_t *pages,
+static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
 {
        int r = 0;
@@ -473,12 +474,12 @@ static int ttm_set_pages_caching(vm_page_t *pages,
  */
 static void ttm_handle_caching_state_failure(struct pglist *pages,
                int ttm_flags, enum ttm_caching_state cstate,
-               vm_page_t *failed_pages, unsigned cpages)
+               struct page **failed_pages, unsigned cpages)
 {
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
-               TAILQ_REMOVE(pages, failed_pages[i], pageq);
+               TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
                ttm_vm_page_free(failed_pages[i]);
        }
 }
@@ -492,8 +493,8 @@ static void ttm_handle_caching_state_failure(struct pglist *pages,
 static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
 {
-       vm_page_t *caching_array;
-       vm_page_t p;
+       struct page **caching_array;
+       struct vm_page *p;
        int r = 0;
        unsigned i, cpages, aflags;
        unsigned max_cpages = min(count,
@@ -540,7 +541,7 @@ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
                if (!PageHighMem(p))
 #endif
                {
-                       caching_array[cpages++] = p;
+                       caching_array[cpages++] = (struct page *)p;
                        if (cpages == max_cpages) {
 
                                r = ttm_set_pages_caching(caching_array,
@@ -664,11 +665,12 @@ out:
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;
+       struct vm_page *page;
 
        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
@@ -684,7 +686,8 @@ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
        lockmgr(&pool->lock, LK_EXCLUSIVE);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
-                       TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
+                       page = (struct vm_page *)pages[i];
+                       TAILQ_INSERT_TAIL(&pool->list, page, pageq);
                        pages[i] = NULL;
                        pool->npages++;
                }
@@ -707,12 +710,12 @@ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct pglist plist;
-       vm_page_t p = NULL;
+       struct vm_page *p = NULL;
        int gfp_flags, aflags;
        unsigned count;
        int r;
@@ -735,7 +738,7 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
                        p->oflags &= ~VPO_UNMANAGED;
 #endif
                        p->flags |= PG_FICTITIOUS;
-                       pages[r] = p;
+                       pages[r] = (struct page *)p;
                }
                return 0;
        }
@@ -748,7 +751,7 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        TAILQ_FOREACH(p, &plist, pageq) {
-               pages[count++] = p;
+               pages[count++] = (struct page *)p;
        }
 
        /* clear the pages coming from the pool if requested */
@@ -767,7 +770,7 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
                    npages);
                TAILQ_FOREACH(p, &plist, pageq) {
-                       pages[count++] = p;
+                       pages[count++] = (struct page *)p;
                }
                if (r) {
                        /* If there is any pages in the list put them back to
index 05574cd..95cc0d7 100644 (file)
@@ -63,7 +63,7 @@ static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
                                            sizeof(*ttm->dma_address));
 }
 
-static inline int ttm_tt_set_page_caching(vm_page_t p,
+static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
 {
@@ -78,9 +78,9 @@ static inline int ttm_tt_set_page_caching(vm_page_t p,
 #endif
 
        if (c_new == tt_wc)
-               pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
+               pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
        else if (c_new == tt_uncached)
-               pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);
+               pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_UNCACHEABLE);
 
        return (0);
 }
@@ -94,7 +94,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
 {
        int i, j;
-       vm_page_t cur_page;
+       struct page *cur_page;
        int ret;
 
        if (ttm->caching_state == c_state)
@@ -174,7 +174,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
-               vm_page_t dummy_read_page)
+               struct page *dummy_read_page)
 {
        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(ttm_tt_fini);
 
 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
-               vm_page_t dummy_read_page)
+               struct page *dummy_read_page)
 {
        struct ttm_tt *ttm = &ttm_dma->ttm;
 
@@ -299,7 +299,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
                                vm_page_zero_invalid(from_page, TRUE);
                        }
                }
-               to_page = ttm->pages[i];
+               to_page = (struct vm_page *)ttm->pages[i];
                if (unlikely(to_page == NULL)) {
                        ret = -ENOMEM;
                        vm_page_wakeup(from_page);
@@ -346,7 +346,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
        VM_OBJECT_LOCK(obj);
        vm_object_pip_add(obj, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
-               from_page = ttm->pages[i];
+               from_page = (struct vm_page *)ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |