drm/i915: Use gtt.mappable_base
authorFrançois Tigeot <ftigeot@wolfpond.org>
Sun, 27 Sep 2015 10:16:22 +0000 (12:16 +0200)
committerFrançois Tigeot <ftigeot@wolfpond.org>
Sun, 27 Sep 2015 10:16:55 +0000 (12:16 +0200)
Greatly reduce dependencies on the agp driver by using the GTT's own mappable aperture base (dev_priv->gtt.mappable_base) instead of dev->agp->base.

sys/dev/drm/i915/i915_gem.c
sys/dev/drm/i915/i915_gem_execbuffer.c
sys/dev/drm/i915/i915_gem_gtt.c
sys/dev/drm/i915/intel_fbdev.c
sys/dev/drm/i915/intel_overlay.c
sys/dev/drm/i915/intel_ringbuffer.c

index 717d889..244d455 100644 (file)
@@ -724,7 +724,6 @@ unlock:
  * page faults in the source data
  */
 
-#if 0  /* XXX: buggy on core2 machines */
 static inline int
 fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
@@ -743,27 +742,6 @@ fast_user_write(struct io_mapping *mapping,
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
 }
-#endif
-
-static int
-i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
-    uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
-{
-       vm_offset_t mkva;
-       int ret;
-
-       /*
-        * Pass the unaligned physical address and size to pmap_mapdev_attr()
-        * so it can properly calculate whether an extra page needs to be
-        * mapped or not to cover the requested range.  The function will
-        * add the page offset into the returned mkva for us.
-        */
-       mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base +
-           i915_gem_obj_ggtt_offset(obj) + offset, size, PAT_WRITE_COMBINING);
-       ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
-       pmap_unmapdev(mkva, size);
-       return ret;
-}
 
 /**
  * This is the fast pwrite path, where we copy the data directly from the
@@ -775,6 +753,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -814,12 +793,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
-#if 0
                if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
-#else
-               if (i915_gem_gtt_write(dev, obj, args->data_ptr, args->size, args->offset, file)) {
-#endif
                        ret = -EFAULT;
                        goto out_unpin;
                }
@@ -1700,7 +1675,7 @@ retry:
         * Relock object for insertion, leave locked for return.
         */
        VM_OBJECT_LOCK(vm_obj);
-       m = vm_phys_fictitious_to_vm_page(dev->agp->base +
+       m = vm_phys_fictitious_to_vm_page(dev_priv->gtt.mappable_base +
                                          i915_gem_obj_ggtt_offset(obj) +
                                          offset);
        if (m == NULL) {
index a25c862..65e9aaf 100644 (file)
@@ -305,6 +305,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   uint64_t target_offset)
 {
        struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        uint64_t delta = reloc->delta + target_offset;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;
@@ -320,8 +321,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 
        /* Map the page containing the relocation we're going to perform.  */
        reloc->offset += i915_gem_obj_ggtt_offset(obj);
-       reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
-                   ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
+       reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+                       reloc->offset & ~PAGE_MASK);
        reloc_entry = (uint32_t __iomem *)
                ((char *)reloc_page + offset_in_page(reloc->offset));
        iowrite32(lower_32_bits(delta), reloc_entry);
@@ -330,18 +331,17 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
                reloc_entry += 1;
 
                if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
-                       pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
-                       reloc_page = pmap_mapdev_attr(
-                                       dev->agp->base +
-                                       reloc->offset + sizeof(uint32_t),
-                                       PAGE_SIZE, PAT_WRITE_COMBINING);
+                       io_mapping_unmap_atomic(reloc_page);
+                       reloc_page = io_mapping_map_atomic_wc(
+                                       dev_priv->gtt.mappable,
+                                       reloc->offset + sizeof(uint32_t));
                        reloc_entry = reloc_page;
                }
 
                iowrite32(upper_32_bits(delta), reloc_entry);
        }
 
-       pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
+       io_mapping_unmap_atomic(reloc_page);
 
        return 0;
 }
index 02166ec..b824fa4 100644 (file)
@@ -1753,13 +1753,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
                ggtt_vm->clear_range(ggtt_vm, hole_start,
                                     hole_end - hole_start, true);
        }
-       /* ... but ensure that we clear the entire range. */
+
+       /* XXX: DragonFly-specific */
        intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
        device_printf(dev->dev,
            "taking over the fictitious range 0x%lx-0x%lx\n",
-           dev->agp->base + start, dev->agp->base + start + mappable);
-       error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
-           dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
+           dev_priv->gtt.mappable_base + start, dev_priv->gtt.mappable_base + start + mappable);
+       error = -vm_phys_fictitious_reg_range(dev_priv->gtt.mappable_base + start,
+           dev_priv->gtt.mappable_base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
 
        /* And finally clear the reserved guard page */
        ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
index d7c5695..b6a2320 100644 (file)
@@ -148,9 +148,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
                container_of(helper, struct intel_fbdev, helper);
        struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
-#if 0
        struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct drm_i915_gem_object *obj;
@@ -207,7 +205,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        info->stride =
            ALIGN(sizes->surface_width * ((sizes->surface_bpp + 7) / 8), 64);
        info->depth = sizes->surface_bpp;
-       info->paddr = dev->agp->base + i915_gem_obj_ggtt_offset(obj);
+       info->paddr = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
        info->is_vga_boot_display = vga_pci_is_boot_display(vga_dev);
        info->vaddr =
            (vm_offset_t)pmap_mapdev_attr(info->paddr,
index d610fce..ab8b460 100644 (file)
@@ -189,14 +189,14 @@ struct intel_overlay {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
+       struct drm_i915_private *dev_priv = overlay->dev->dev_private;
        struct overlay_registers __iomem *regs;
 
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = pmap_mapdev_attr(overlay->dev->agp->base +
-                   i915_gem_obj_ggtt_offset(overlay->reg_bo), PAGE_SIZE,
-                   PAT_WRITE_COMBINING);
+               regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+                                        i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
 }
index 9a4e672..3ce4c64 100644 (file)
@@ -1530,6 +1530,7 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                                      struct intel_ringbuffer *ringbuf)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        int ret;
 
@@ -1556,7 +1557,7 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                goto err_unpin;
 
        ringbuf->virtual_start =
-               ioremap_wc(dev->agp->base + i915_gem_obj_ggtt_offset(obj),
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                                ringbuf->size);
        if (ringbuf->virtual_start == NULL) {
                ret = -EINVAL;