[dragonfly.git] / sys / dev / drm / i915 / i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  *
53  */
54
55 #include <machine/md_var.h>
56
57 #include <drm/drmP.h>
58 #include <drm/drm_vma_manager.h>
59 #include <drm/i915_drm.h>
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_drv.h"
63 #include <linux/shmem_fs.h>
64 #include <linux/slab.h>
65 #include <linux/swap.h>
66 #include <linux/pci.h>
67
68 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
69 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
70                                                    bool force);
71 static __must_check int
72 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
73                                bool readonly);
74 static __must_check int
75 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
76                            struct i915_address_space *vm,
77                            unsigned alignment,
78                            bool map_and_fenceable,
79                            bool nonblocking);
80 static int i915_gem_phys_pwrite(struct drm_device *dev,
81                                 struct drm_i915_gem_object *obj,
82                                 struct drm_i915_gem_pwrite *args,
83                                 struct drm_file *file);
84
85 static void i915_gem_write_fence(struct drm_device *dev, int reg,
86                                  struct drm_i915_gem_object *obj);
87 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
88                                          struct drm_i915_fence_reg *fence,
89                                          bool enable);
90
91 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
92 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
93 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
94
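/* Coherency helper: CPU access is coherent with the GPU when the platform
 * shares an LLC or the object uses a snooped cache level; only uncached
 * (I915_CACHE_NONE) objects on non-LLC parts need explicit clflushes. */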
95 static bool cpu_cache_is_coherent(struct drm_device *dev,
96                                   enum i915_cache_level level)
97 {
98         return HAS_LLC(dev) || level != I915_CACHE_NONE;
99 }
100
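/* CPU writes must be clflushed when the cache is not coherent with the GPU,
 * and also for scanout (pin_display) objects, since the display engine does
 * not snoop the CPU caches. */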
101 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
102 {
103         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
104                 return true;
105
106         return obj->pin_display;
107 }
108
109 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
110 {
111         if (obj->tiling_mode)
112                 i915_gem_release_mmap(obj);
113
114         /* As we do not have an associated fence register, we will force
115          * a tiling change if we ever need to acquire one.
116          */
117         obj->fence_dirty = false;
118         obj->fence_reg = I915_FENCE_REG_NONE;
119 }
120
121 /* some bookkeeping */
122 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
123                                   size_t size)
124 {
125         spin_lock(&dev_priv->mm.object_stat_lock);
126         dev_priv->mm.object_count++;
127         dev_priv->mm.object_memory += size;
128         spin_unlock(&dev_priv->mm.object_stat_lock);
129 }
130
131 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
132                                      size_t size)
133 {
134         spin_lock(&dev_priv->mm.object_stat_lock);
135         dev_priv->mm.object_count--;
136         dev_priv->mm.object_memory -= size;
137         spin_unlock(&dev_priv->mm.object_stat_lock);
138 }
139
140 static int
141 i915_gem_wait_for_error(struct i915_gpu_error *error)
142 {
143         int ret;
144
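        /* Done waiting as soon as no reset is pending or the GPU is
         * terminally wedged, in which case waiting longer is pointless. */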
145 #define EXIT_COND (!i915_reset_in_progress(error) || \
146                    i915_terminally_wedged(error))
147         if (EXIT_COND)
148                 return 0;
149
150         /*
151          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
152          * userspace. If it takes that long something really bad is going on and
153          * we should simply try to bail out and fail as gracefully as possible.
154          */
155         ret = wait_event_interruptible_timeout(error->reset_queue,
156                                                EXIT_COND,
157                                                10*HZ);
158         if (ret == 0) {
159                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
160                 return -EIO;
161         } else if (ret < 0) {
162                 return ret;
163         }
164 #undef EXIT_COND
165
166         return 0;
167 }
168
169 int i915_mutex_lock_interruptible(struct drm_device *dev)
170 {
171         struct drm_i915_private *dev_priv = dev->dev_private;
172         int ret;
173
174         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
175         if (ret)
176                 return ret;
177
178         ret = lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_SLEEPFAIL);
179         if (ret)
180                 return -EINTR;
181
182         WARN_ON(i915_verify_lists(dev));
183         return 0;
184 }
185
186 static inline bool
187 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
188 {
189         return i915_gem_obj_bound_any(obj) && !obj->active;
190 }
191
192 int
193 i915_gem_init_ioctl(struct drm_device *dev, void *data,
194                     struct drm_file *file)
195 {
196         struct drm_i915_private *dev_priv = dev->dev_private;
197         struct drm_i915_gem_init *args = data;
198
199         if (drm_core_check_feature(dev, DRIVER_MODESET))
200                 return -ENODEV;
201
202         if (args->gtt_start >= args->gtt_end ||
203             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
204                 return -EINVAL;
205
206         /* GEM with user mode setting was never supported on ilk and later. */
207         if (INTEL_INFO(dev)->gen >= 5)
208                 return -ENODEV;
209
210         mutex_lock(&dev->struct_mutex);
211         dev_priv->gtt.mappable_end = args->gtt_end;
212         kprintf("INITGLOBALGTT GTT_START %016jx\n", (uintmax_t)args->gtt_start);
213         i915_gem_init_global_gtt(dev);
214 #if 0
215         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
216                                   args->gtt_end);
217 #endif
218         mutex_unlock(&dev->struct_mutex);
219
220         return 0;
221 }
222
223 int
224 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
225                             struct drm_file *file)
226 {
227         struct drm_i915_private *dev_priv = dev->dev_private;
228         struct drm_i915_gem_get_aperture *args = data;
229         struct drm_i915_gem_object *obj;
230         size_t pinned;
231
232         pinned = 0;
233         mutex_lock(&dev->struct_mutex);
234         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
235                 if (obj->pin_count)
236                         pinned += i915_gem_obj_ggtt_size(obj);
237         mutex_unlock(&dev->struct_mutex);
238
239         args->aper_size = dev_priv->gtt.base.total;
240         args->aper_available_size = args->aper_size - pinned;
241
242         return 0;
243 }
244
245 void *i915_gem_object_alloc(struct drm_device *dev)
246 {
247         return kmalloc(sizeof(struct drm_i915_gem_object),
248             M_DRM, M_WAITOK | M_ZERO);
249 }
250
251 void i915_gem_object_free(struct drm_i915_gem_object *obj)
252 {
253         kfree(obj);
254 }
255
256 static int
257 i915_gem_create(struct drm_file *file,
258                 struct drm_device *dev,
259                 uint64_t size,
260                 uint32_t *handle_p)
261 {
262         struct drm_i915_gem_object *obj;
263         int ret;
264         u32 handle;
265
266         size = roundup(size, PAGE_SIZE);
267         if (size == 0)
268                 return -EINVAL;
269
270         /* Allocate the new object */
271         obj = i915_gem_alloc_object(dev, size);
272         if (obj == NULL)
273                 return -ENOMEM;
274
275         ret = drm_gem_handle_create(file, &obj->base, &handle);
276         /* drop reference from allocate - handle holds it now */
277         drm_gem_object_unreference_unlocked(&obj->base);
278         if (ret)
279                 return ret;
280
281         *handle_p = handle;
282         return 0;
283 }
284
285 int
286 i915_gem_dumb_create(struct drm_file *file,
287                      struct drm_device *dev,
288                      struct drm_mode_create_dumb *args)
289 {
290         /* have to work out size/pitch and return them */
291         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
292         args->size = args->pitch * args->height;
293         return i915_gem_create(file, dev,
294                                args->size, &args->handle);
295 }
296
297 /**
298  * Creates a new mm object and returns a handle to it.
299  */
300 int
301 i915_gem_create_ioctl(struct drm_device *dev, void *data,
302                       struct drm_file *file)
303 {
304         struct drm_i915_gem_create *args = data;
305
306         return i915_gem_create(file, dev,
307                                args->size, &args->handle);
308 }
309
310 static inline int
311 __copy_to_user_swizzled(char __user *cpu_vaddr,
312                         const char *gpu_vaddr, int gpu_offset,
313                         int length)
314 {
315         int ret, cpu_offset = 0;
316
317         while (length > 0) {
318                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
319                 int this_length = min(cacheline_end - gpu_offset, length);
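                /* Bit-17 swizzling swaps the two 64-byte halves of each
                 * 128-byte span, so the data for this cacheline lives at the
                 * offset with bit 6 flipped (gpu_offset ^ 64). */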
320                 int swizzled_gpu_offset = gpu_offset ^ 64;
321
322                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
323                                      gpu_vaddr + swizzled_gpu_offset,
324                                      this_length);
325                 if (ret)
326                         return ret + length;
327
328                 cpu_offset += this_length;
329                 gpu_offset += this_length;
330                 length -= this_length;
331         }
332
333         return 0;
334 }
335
336 static inline int
337 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
338                           const char __user *cpu_vaddr,
339                           int length)
340 {
341         int ret, cpu_offset = 0;
342
343         while (length > 0) {
344                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
345                 int this_length = min(cacheline_end - gpu_offset, length);
346                 int swizzled_gpu_offset = gpu_offset ^ 64;
347
348                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
349                                        cpu_vaddr + cpu_offset,
350                                        this_length);
351                 if (ret)
352                         return ret + length;
353
354                 cpu_offset += this_length;
355                 gpu_offset += this_length;
356                 length -= this_length;
357         }
358
359         return 0;
360 }
361
362 /* Per-page copy function for the shmem pread fastpath.
363  * Flushes invalid cachelines before reading the target if
364  * needs_clflush is set. */
365 static int
366 shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
367                  char __user *user_data,
368                  bool page_do_bit17_swizzling, bool needs_clflush)
369 {
370         char *vaddr;
371         int ret;
372
373         if (unlikely(page_do_bit17_swizzling))
374                 return -EINVAL;
375
376         vaddr = kmap_atomic(page);
377         if (needs_clflush)
378                 drm_clflush_virt_range(vaddr + shmem_page_offset,
379                                        page_length);
380         ret = __copy_to_user_inatomic(user_data,
381                                       vaddr + shmem_page_offset,
382                                       page_length);
383         kunmap_atomic(vaddr);
384
385         return ret ? -EFAULT : 0;
386 }
387
388 static void
389 shmem_clflush_swizzled_range(char *addr, unsigned long length,
390                              bool swizzled)
391 {
392         if (unlikely(swizzled)) {
393                 unsigned long start = (unsigned long) addr;
394                 unsigned long end = (unsigned long) addr + length;
395
396                 /* For swizzling simply ensure that we always flush both
397                  * channels. Lame, but simple and it works. Swizzled
398                  * pwrite/pread is far from a hotpath - current userspace
399                  * doesn't use it at all. */
400                 start = round_down(start, 128);
401                 end = round_up(end, 128);
402
403                 drm_clflush_virt_range((void *)start, end - start);
404         } else {
405                 drm_clflush_virt_range(addr, length);
406         }
407
408 }
409
410 /* The only difference from the fast-path function is that this can handle bit17
411  * and uses non-atomic copy and kmap functions. */
412 static int
413 shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
414                  char __user *user_data,
415                  bool page_do_bit17_swizzling, bool needs_clflush)
416 {
417         char *vaddr;
418         int ret;
419
420         vaddr = kmap(page);
421         if (needs_clflush)
422                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
423                                              page_length,
424                                              page_do_bit17_swizzling);
425
426         if (page_do_bit17_swizzling)
427                 ret = __copy_to_user_swizzled(user_data,
428                                               vaddr, shmem_page_offset,
429                                               page_length);
430         else
431                 ret = __copy_to_user(user_data,
432                                      vaddr + shmem_page_offset,
433                                      page_length);
434         kunmap(page);
435
436         return ret ? -EFAULT : 0;
437 }
438
439 static int
440 i915_gem_shmem_pread(struct drm_device *dev,
441                      struct drm_i915_gem_object *obj,
442                      struct drm_i915_gem_pread *args,
443                      struct drm_file *file)
444 {
445         char __user *user_data;
446         ssize_t remain;
447         loff_t offset;
448         int shmem_page_offset, page_length, ret = 0;
449         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
450         int prefaulted = 0;
451         int needs_clflush = 0;
452         int i;
453
454         user_data = to_user_ptr(args->data_ptr);
455         remain = args->size;
456
457         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
458
459         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
460                 /* If we're not in the cpu read domain, set ourselves into the gtt
461                  * read domain and manually flush cachelines (if required). This
462                  * optimizes for the case when the gpu will dirty the data
463                  * anyway again before the next pread happens. */
464                 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
465                 ret = i915_gem_object_wait_rendering(obj, true);
466                 if (ret)
467                         return ret;
468         }
469
470         ret = i915_gem_object_get_pages(obj);
471         if (ret)
472                 return ret;
473
474         i915_gem_object_pin_pages(obj);
475
476         offset = args->offset;
477
478         for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
479                 struct vm_page *page = obj->pages[i];
480
481                 if (remain <= 0)
482                         break;
483
484                 /* Operation in this page
485                  *
486                  * shmem_page_offset = offset within page in shmem file
487                  * page_length = bytes to copy for this page
488                  */
489                 shmem_page_offset = offset_in_page(offset);
490                 page_length = remain;
491                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
492                         page_length = PAGE_SIZE - shmem_page_offset;
493
494                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
495                         (page_to_phys(page) & (1 << 17)) != 0;
496
497                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
498                                        user_data, page_do_bit17_swizzling,
499                                        needs_clflush);
500                 if (ret == 0)
501                         goto next_page;
502
503                 mutex_unlock(&dev->struct_mutex);
504
505                 if (likely(!i915_prefault_disable) && !prefaulted) {
506                         ret = fault_in_multipages_writeable(user_data, remain);
507                         /* Userspace is tricking us, but we've already clobbered
508                          * its pages with the prefault and promised to write the
509                          * data up to the first fault. Hence ignore any errors
510                          * and just continue. */
511                         (void)ret;
512                         prefaulted = 1;
513                 }
514
515                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
516                                        user_data, page_do_bit17_swizzling,
517                                        needs_clflush);
518
519                 mutex_lock(&dev->struct_mutex);
520
521 next_page:
522                 mark_page_accessed(page);
523
524                 if (ret)
525                         goto out;
526
527                 remain -= page_length;
528                 user_data += page_length;
529                 offset += page_length;
530         }
531
532 out:
533         i915_gem_object_unpin_pages(obj);
534
535         return ret;
536 }
537
538 /**
539  * Reads data from the object referenced by handle.
540  *
541  * On error, the contents of *data are undefined.
542  */
543 int
544 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
545                      struct drm_file *file)
546 {
547         struct drm_i915_gem_pread *args = data;
548         struct drm_i915_gem_object *obj;
549         int ret = 0;
550
551         if (args->size == 0)
552                 return 0;
553
554         ret = i915_mutex_lock_interruptible(dev);
555         if (ret)
556                 return ret;
557
558         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
559         if (&obj->base == NULL) {
560                 ret = -ENOENT;
561                 goto unlock;
562         }
563
564         /* Bounds check source.  */
565         if (args->offset > obj->base.size ||
566             args->size > obj->base.size - args->offset) {
567                 ret = -EINVAL;
568                 goto out;
569         }
570
571         trace_i915_gem_object_pread(obj, args->offset, args->size);
572
573         ret = i915_gem_shmem_pread(dev, obj, args, file);
574
575 out:
576         drm_gem_object_unreference(&obj->base);
577 unlock:
578         mutex_unlock(&dev->struct_mutex);
579         return ret;
580 }
581
582 /* This is the fast write path which cannot handle
583  * page faults in the source data
584  */
585
586 #if 0   /* XXX: buggy on core2 machines */
587 static inline int
588 fast_user_write(struct io_mapping *mapping,
589                 loff_t page_base, int page_offset,
590                 char __user *user_data,
591                 int length)
592 {
593         void __iomem *vaddr_atomic;
594         void *vaddr;
595         unsigned long unwritten;
596
597         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
598         /* We can use the cpu mem copy function because this is X86. */
599         vaddr = (char __force*)vaddr_atomic + page_offset;
600         unwritten = __copy_from_user_inatomic_nocache(vaddr,
601                                                       user_data, length);
602         io_mapping_unmap_atomic(vaddr_atomic);
603         return unwritten;
604 }
605 #endif
606
607 static int
608 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
609     uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
610 {
611         vm_offset_t mkva;
612         int ret;
613
614         /*
615          * Pass the unaligned physical address and size to pmap_mapdev_attr()
616          * so it can properly calculate whether an extra page needs to be
617          * mapped or not to cover the requested range.  The function will
618          * add the page offset into the returned mkva for us.
619          */
620         mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base +
621             i915_gem_obj_ggtt_offset(obj) + offset, size, PAT_WRITE_COMBINING);
622         ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
623         pmap_unmapdev(mkva, size);
624         return ret;
625 }
626
627 /**
628  * This is the fast pwrite path, where we copy the data directly from the
629  * user into the GTT, uncached.
630  */
631 static int
632 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
633                          struct drm_i915_gem_object *obj,
634                          struct drm_i915_gem_pwrite *args,
635                          struct drm_file *file)
636 {
637         ssize_t remain;
638         loff_t offset, page_base;
639         char __user *user_data;
640         int page_offset, page_length, ret;
641
642         ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
643         if (ret)
644                 goto out;
645
646         ret = i915_gem_object_set_to_gtt_domain(obj, true);
647         if (ret)
648                 goto out_unpin;
649
650         ret = i915_gem_object_put_fence(obj);
651         if (ret)
652                 goto out_unpin;
653
654         user_data = to_user_ptr(args->data_ptr);
655         remain = args->size;
656
657         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
658
659         while (remain > 0) {
660                 /* Operation in this page
661                  *
662                  * page_base = page offset within aperture
663                  * page_offset = offset within page
664                  * page_length = bytes to copy for this page
665                  */
666                 page_base = offset & ~PAGE_MASK;
667                 page_offset = offset_in_page(offset);
668                 page_length = remain;
669                 if ((page_offset + remain) > PAGE_SIZE)
670                         page_length = PAGE_SIZE - page_offset;
671
672                 /* If we get a fault while copying data, then (presumably) our
673                  * source page isn't available.  Return the error and we'll
674                  * retry in the slow path.
675                  */
676 #if 0
677                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
678                                     page_offset, user_data, page_length)) {
679 #else
680                 if (i915_gem_gtt_write(dev, obj, args->data_ptr, args->size, args->offset, file)) {
681 #endif
682                         ret = -EFAULT;
683                         goto out_unpin;
684                 }
685
686                 remain -= page_length;
687                 user_data += page_length;
688                 offset += page_length;
689         }
690
691 out_unpin:
692         i915_gem_object_unpin(obj);
693 out:
694         return ret;
695 }
696
697 /* Per-page copy function for the shmem pwrite fastpath.
698  * Flushes invalid cachelines before writing to the target if
699  * needs_clflush_before is set and flushes out any written cachelines after
700  * writing if needs_clflush_after is set. */
701 static int
702 shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
703                   char __user *user_data,
704                   bool page_do_bit17_swizzling,
705                   bool needs_clflush_before,
706                   bool needs_clflush_after)
707 {
708         char *vaddr;
709         int ret;
710
711         if (unlikely(page_do_bit17_swizzling))
712                 return -EINVAL;
713
714         vaddr = kmap_atomic(page);
715         if (needs_clflush_before)
716                 drm_clflush_virt_range(vaddr + shmem_page_offset,
717                                        page_length);
718         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
719                                                 user_data,
720                                                 page_length);
721         if (needs_clflush_after)
722                 drm_clflush_virt_range(vaddr + shmem_page_offset,
723                                        page_length);
724         kunmap_atomic(vaddr);
725
726         return ret ? -EFAULT : 0;
727 }
728
729 /* The only difference from the fast-path function is that this can handle bit17
730  * and uses non-atomic copy and kmap functions. */
731 static int
732 shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
733                   char __user *user_data,
734                   bool page_do_bit17_swizzling,
735                   bool needs_clflush_before,
736                   bool needs_clflush_after)
737 {
738         char *vaddr;
739         int ret;
740
741         vaddr = kmap(page);
742         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
743                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
744                                              page_length,
745                                              page_do_bit17_swizzling);
746         if (page_do_bit17_swizzling)
747                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
748                                                 user_data,
749                                                 page_length);
750         else
751                 ret = __copy_from_user(vaddr + shmem_page_offset,
752                                        user_data,
753                                        page_length);
754         if (needs_clflush_after)
755                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
756                                              page_length,
757                                              page_do_bit17_swizzling);
758         kunmap(page);
759
760         return ret ? -EFAULT : 0;
761 }
762
763 static int
764 i915_gem_shmem_pwrite(struct drm_device *dev,
765                       struct drm_i915_gem_object *obj,
766                       struct drm_i915_gem_pwrite *args,
767                       struct drm_file *file)
768 {
769         ssize_t remain;
770         loff_t offset;
771         char __user *user_data;
772         int shmem_page_offset, page_length, ret = 0;
773         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
774         int hit_slowpath = 0;
775         int needs_clflush_after = 0;
776         int needs_clflush_before = 0;
777         int i;
778
779         user_data = to_user_ptr(args->data_ptr);
780         remain = args->size;
781
782         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
783
784         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
785                 /* If we're not in the cpu write domain, set ourselves into the gtt
786                  * write domain and manually flush cachelines (if required). This
787                  * optimizes for the case when the gpu will use the data
788                  * right away and we therefore have to clflush anyway. */
789                 needs_clflush_after = cpu_write_needs_clflush(obj);
790                 ret = i915_gem_object_wait_rendering(obj, false);
791                 if (ret)
792                         return ret;
793         }
794         /* Same trick applies to invalidate partially written cachelines read
795          * before writing. */
796         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
797                 needs_clflush_before =
798                         !cpu_cache_is_coherent(dev, obj->cache_level);
799
800         ret = i915_gem_object_get_pages(obj);
801         if (ret)
802                 return ret;
803
804         i915_gem_object_pin_pages(obj);
805
806         offset = args->offset;
807         obj->dirty = 1;
808
809         VM_OBJECT_LOCK(obj->base.vm_obj);
810         vm_object_pip_add(obj->base.vm_obj, 1);
811         for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
812                 struct vm_page *page = obj->pages[i];
813                 int partial_cacheline_write;
814
815                 if (i < offset >> PAGE_SHIFT)
816                         continue;
817
818                 if (remain <= 0)
819                         break;
820
821                 /* Operation in this page
822                  *
823                  * shmem_page_offset = offset within page in shmem file
824                  * page_length = bytes to copy for this page
825                  */
826                 shmem_page_offset = offset_in_page(offset);
827
828                 page_length = remain;
829                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
830                         page_length = PAGE_SIZE - shmem_page_offset;
831
832                 /* If we don't overwrite a cacheline completely we need to be
833                  * careful to have up-to-date data by first clflushing. Don't
834          * overcomplicate things and flush the entire page. */
835                 partial_cacheline_write = needs_clflush_before &&
836                         ((shmem_page_offset | page_length)
837                                 & (cpu_clflush_line_size - 1));
838
839                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
840                         (page_to_phys(page) & (1 << 17)) != 0;
841
842                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
843                                         user_data, page_do_bit17_swizzling,
844                                         partial_cacheline_write,
845                                         needs_clflush_after);
846                 if (ret == 0)
847                         goto next_page;
848
849                 hit_slowpath = 1;
850                 mutex_unlock(&dev->struct_mutex);
851                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
852                                         user_data, page_do_bit17_swizzling,
853                                         partial_cacheline_write,
854                                         needs_clflush_after);
855
856                 mutex_lock(&dev->struct_mutex);
857
858 next_page:
859                 set_page_dirty(page);
860                 mark_page_accessed(page);
861
862                 if (ret)
863                         goto out;
864
865                 remain -= page_length;
866                 user_data += page_length;
867                 offset += page_length;
868         }
869         vm_object_pip_wakeup(obj->base.vm_obj);
870         VM_OBJECT_UNLOCK(obj->base.vm_obj);
871
872 out:
873         i915_gem_object_unpin_pages(obj);
874
875         if (hit_slowpath) {
876                 /*
877                  * Fixup: Flush cpu caches in case we didn't flush the dirty
878                  * cachelines in-line while writing and the object moved
879                  * out of the cpu write domain while we've dropped the lock.
880                  */
881                 if (!needs_clflush_after &&
882                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
883                         if (i915_gem_clflush_object(obj, obj->pin_display))
884                                 i915_gem_chipset_flush(dev);
885                 }
886         }
887
888         if (needs_clflush_after)
889                 i915_gem_chipset_flush(dev);
890
891         return ret;
892 }
893
894 /**
895  * Writes data to the object referenced by handle.
896  *
897  * On error, the contents of the buffer that were to be modified are undefined.
898  */
899 int
900 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
901                       struct drm_file *file)
902 {
903         struct drm_i915_gem_pwrite *args = data;
904         struct drm_i915_gem_object *obj;
905         int ret;
906
907         if (args->size == 0)
908                 return 0;
909
910         if (likely(!i915_prefault_disable)) {
911                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
912                                                    args->size);
913                 if (ret)
914                         return -EFAULT;
915         }
916
917         ret = i915_mutex_lock_interruptible(dev);
918         if (ret)
919                 return ret;
920
921         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
922         if (&obj->base == NULL) {
923                 ret = -ENOENT;
924                 goto unlock;
925         }
926
927         /* Bounds check destination. */
928         if (args->offset > obj->base.size ||
929             args->size > obj->base.size - args->offset) {
930                 ret = -EINVAL;
931                 goto out;
932         }
933
934         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
935
936         ret = -EFAULT;
937         /* We can only do the GTT pwrite on untiled buffers, as otherwise
938          * it would end up going through the fenced access, and we'll get
939          * different detiling behavior between reading and writing.
940          * pread/pwrite currently are reading and writing from the CPU
941          * perspective, requiring manual detiling by the client.
942          */
943         if (obj->phys_obj) {
944                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
945                 goto out;
946         }
947
948         if (obj->tiling_mode == I915_TILING_NONE &&
949             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
950             cpu_write_needs_clflush(obj)) {
951                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
952                 /* Note that the gtt paths might fail with non-page-backed user
953                  * pointers (e.g. gtt mappings when moving data between
954                  * textures). Fallback to the shmem path in that case. */
955         }
956
957         if (ret == -EFAULT || ret == -ENOSPC)
958                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
959
960 out:
961         drm_gem_object_unreference(&obj->base);
962 unlock:
963         mutex_unlock(&dev->struct_mutex);
964         return ret;
965 }
966
967 int
968 i915_gem_check_wedge(struct i915_gpu_error *error,
969                      bool interruptible)
970 {
971         if (i915_reset_in_progress(error)) {
972                 /* Non-interruptible callers can't handle -EAGAIN, hence return
973                  * -EIO unconditionally for these. */
974                 if (!interruptible)
975                         return -EIO;
976
977                 /* Recovery complete, but the reset failed ... */
978                 if (i915_terminally_wedged(error))
979                         return -EIO;
980
981                 return -EAGAIN;
982         }
983
984         return 0;
985 }
986
987 /*
988  * Compare seqno against outstanding lazy request. Emit a request if they are
989  * equal.
990  */
991 static int
992 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
993 {
994         int ret;
995
996         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
997
998         ret = 0;
999         if (seqno == ring->outstanding_lazy_seqno)
1000                 ret = i915_add_request(ring, NULL);
1001
1002         return ret;
1003 }
1004
1005 #if 0
1006 static void fake_irq(unsigned long data)
1007 {
1008         wake_up_process((struct task_struct *)data);
1009 }
1010
1011 static bool missed_irq(struct drm_i915_private *dev_priv,
1012                        struct intel_ring_buffer *ring)
1013 {
1014         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1015 }
1016
1017 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1018 {
1019         if (file_priv == NULL)
1020                 return true;
1021
1022         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1023 }
1024 #endif
1025
1026 /**
1027  * __wait_seqno - wait until execution of seqno has finished
1028  * @ring: the ring expected to report seqno
1029  * @seqno: duh!
1030  * @reset_counter: reset sequence associated with the given seqno
1031  * @interruptible: do an interruptible wait (normally yes)
1032  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1033  *
1034  * Note: It is of utmost importance that the passed in seqno and reset_counter
1035  * values have been read by the caller in an smp safe manner. Where read-side
1036  * locks are involved, it is sufficient to read the reset_counter before
1037  * unlocking the lock that protects the seqno. For lockless tricks, the
1038  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1039  * inserted.
1040  *
1041  * Returns 0 if the seqno was found within the allotted time. Else returns the
1042  * errno with remaining time filled in timeout argument.
1043  */
1044 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1045                         unsigned reset_counter,
1046                         bool interruptible, struct timespec *timeout)
1047 {
1048         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1049         struct timespec before, now, wait_time={1,0};
1050         unsigned long timeout_jiffies;
1051         long end;
1052         bool wait_forever = true;
1053         int ret;
1054
1055         WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1056
1057         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1058                 return 0;
1059
1060         trace_i915_gem_request_wait_begin(ring, seqno);
1061
1062         if (timeout != NULL) {
1063                 wait_time = *timeout;
1064                 wait_forever = false;
1065         }
1066
1067         timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1068
1069         if (WARN_ON(!ring->irq_get(ring)))
1070                 return -ENODEV;
1071
1072         /* Record current time in case interrupted by signal, or wedged */
1073         getrawmonotonic(&before);
1074
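        /* Wake conditions: the seqno has been signalled, a GPU reset has
         * begun, or a reset completed behind our back (reset_counter
         * changed). */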
1075 #define EXIT_COND \
1076         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1077          i915_reset_in_progress(&dev_priv->gpu_error) || \
1078          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1079         do {
1080                 if (interruptible)
1081                         end = wait_event_interruptible_timeout(ring->irq_queue,
1082                                                                EXIT_COND,
1083                                                                timeout_jiffies);
1084                 else
1085                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1086                                                  timeout_jiffies);
1087
1088                 /* We need to check whether any gpu reset happened in between
1089                  * the caller grabbing the seqno and now ... */
1090                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1091                         end = -EAGAIN;
1092
1093                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1094                  * gone. */
1095                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1096                 if (ret)
1097                         end = ret;
1098         } while (end == 0 && wait_forever);
1099
1100         getrawmonotonic(&now);
1101
1102         ring->irq_put(ring);
1103         trace_i915_gem_request_wait_end(ring, seqno);
1104 #undef EXIT_COND
1105
1106         if (timeout) {
1107                 struct timespec sleep_time = timespec_sub(now, before);
1108                 *timeout = timespec_sub(*timeout, sleep_time);
1109                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1110                         set_normalized_timespec(timeout, 0, 0);
1111         }
1112
1113         switch (end) {
1114         case -EIO:
1115         case -EAGAIN: /* Wedged */
1116         case -ERESTARTSYS: /* Signal */
1117                 return (int)end;
1118         case 0: /* Timeout */
1119                 return -ETIMEDOUT;      /* -ETIME on Linux */
1120         default: /* Completed */
1121                 WARN_ON(end < 0); /* We're not aware of other errors */
1122                 return 0;
1123         }
1124 }
1125
1126 /**
1127  * Waits for a sequence number to be signaled, and cleans up the
1128  * request and object lists appropriately for that event.
1129  */
1130 int
1131 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1132 {
1133         struct drm_device *dev = ring->dev;
1134         struct drm_i915_private *dev_priv = dev->dev_private;
1135         bool interruptible = dev_priv->mm.interruptible;
1136         int ret;
1137
1138         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1139         BUG_ON(seqno == 0);
1140
1141         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1142         if (ret)
1143                 return ret;
1144
1145         ret = i915_gem_check_olr(ring, seqno);
1146         if (ret)
1147                 return ret;
1148
1149         return __wait_seqno(ring, seqno,
1150                             atomic_read(&dev_priv->gpu_error.reset_counter),
1151                             interruptible, NULL);
1152 }
1153
1154 static int
1155 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1156                                      struct intel_ring_buffer *ring)
1157 {
1158         i915_gem_retire_requests_ring(ring);
1159
1160         /* Manually manage the write flush as we may have not yet
1161          * retired the buffer.
1162          *
1163          * Note that the last_write_seqno is always the earlier of
1164          * the two (read/write) seqno, so if we have successfully waited,
1165          * we know we have passed the last write.
1166          */
1167         obj->last_write_seqno = 0;
1168         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1169
1170         return 0;
1171 }
1172
1173 /**
1174  * Ensures that all rendering to the object has completed and the object is
1175  * safe to unbind from the GTT or access from the CPU.
1176  */
1177 static __must_check int
1178 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1179                                bool readonly)
1180 {
1181         struct intel_ring_buffer *ring = obj->ring;
1182         u32 seqno;
1183         int ret;
1184
1185         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1186         if (seqno == 0)
1187                 return 0;
1188
1189         ret = i915_wait_seqno(ring, seqno);
1190         if (ret)
1191                 return ret;
1192
1193         return i915_gem_object_wait_rendering__tail(obj, ring);
1194 }
1195
1196 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1197  * as the object state may change during this call.
1198  */
1199 static __must_check int
1200 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1201                                             bool readonly)
1202 {
1203         struct drm_device *dev = obj->base.dev;
1204         struct drm_i915_private *dev_priv = dev->dev_private;
1205         struct intel_ring_buffer *ring = obj->ring;
1206         unsigned reset_counter;
1207         u32 seqno;
1208         int ret;
1209
1210         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1211         BUG_ON(!dev_priv->mm.interruptible);
1212
1213         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1214         if (seqno == 0)
1215                 return 0;
1216
1217         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1218         if (ret)
1219                 return ret;
1220
1221         ret = i915_gem_check_olr(ring, seqno);
1222         if (ret)
1223                 return ret;
1224
1225         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1226         mutex_unlock(&dev->struct_mutex);
1227         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1228         mutex_lock(&dev->struct_mutex);
1229         if (ret)
1230                 return ret;
1231
1232         return i915_gem_object_wait_rendering__tail(obj, ring);
1233 }
1234
1235 /**
1236  * Called when user space prepares to use an object with the CPU, either
1237  * through the mmap ioctl's mapping or a GTT mapping.
1238  */
1239 int
1240 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1241                           struct drm_file *file)
1242 {
1243         struct drm_i915_gem_set_domain *args = data;
1244         struct drm_i915_gem_object *obj;
1245         uint32_t read_domains = args->read_domains;
1246         uint32_t write_domain = args->write_domain;
1247         int ret;
1248
1249         /* Only handle setting domains to types used by the CPU. */
1250         if (write_domain & I915_GEM_GPU_DOMAINS)
1251                 return -EINVAL;
1252
1253         if (read_domains & I915_GEM_GPU_DOMAINS)
1254                 return -EINVAL;
1255
1256         /* Having something in the write domain implies it's in the read
1257          * domain, and only that read domain.  Enforce that in the request.
1258          */
1259         if (write_domain != 0 && read_domains != write_domain)
1260                 return -EINVAL;
1261
1262         ret = i915_mutex_lock_interruptible(dev);
1263         if (ret)
1264                 return ret;
1265
1266         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1267         if (&obj->base == NULL) {
1268                 ret = -ENOENT;
1269                 goto unlock;
1270         }
1271
1272         /* Try to flush the object off the GPU without holding the lock.
1273          * We will repeat the flush holding the lock in the normal manner
1274          * to catch cases where we are gazumped.
1275          */
1276         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1277         if (ret)
1278                 goto unref;
1279
1280         if (read_domains & I915_GEM_DOMAIN_GTT) {
1281                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1282
1283                 /* Silently promote "you're not bound, there was nothing to do"
1284                  * to success, since the client was just asking us to
1285                  * make sure everything was done.
1286                  */
1287                 if (ret == -EINVAL)
1288                         ret = 0;
1289         } else {
1290                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1291         }
1292
1293 unref:
1294         drm_gem_object_unreference(&obj->base);
1295 unlock:
1296         mutex_unlock(&dev->struct_mutex);
1297         return ret;
1298 }
1299
1300 /**
1301  * Called when user space has done writes to this buffer
1302  */
1303 int
1304 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1305                          struct drm_file *file)
1306 {
1307         struct drm_i915_gem_sw_finish *args = data;
1308         struct drm_i915_gem_object *obj;
1309         int ret = 0;
1310
1311         ret = i915_mutex_lock_interruptible(dev);
1312         if (ret)
1313                 return ret;
1314
1315         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1316         if (&obj->base == NULL) {
1317                 ret = -ENOENT;
1318                 goto unlock;
1319         }
1320
1321         /* Pinned buffers may be scanout, so flush the cache */
1322         if (obj->pin_display)
1323                 i915_gem_object_flush_cpu_write_domain(obj, true);
1324
1325         drm_gem_object_unreference(&obj->base);
1326 unlock:
1327         mutex_unlock(&dev->struct_mutex);
1328         return ret;
1329 }
1330
1331 /**
1332  * Maps the contents of an object, returning the address it is mapped
1333  * into.
1334  *
1335  * While the mapping holds a reference on the contents of the object, it doesn't
1336  * imply a ref on the object itself.
1337  */
1338 int
1339 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1340                     struct drm_file *file)
1341 {
1342         struct drm_i915_gem_mmap *args = data;
1343         struct drm_gem_object *obj;
1344         unsigned long addr;
1345         struct proc *p = curproc;
1346         vm_map_t map = &p->p_vmspace->vm_map;
1347         vm_size_t size;
1348         int error = 0, rv;
1349
1350         obj = drm_gem_object_lookup(dev, file, args->handle);
1351         if (obj == NULL)
1352                 return -ENOENT;
1353
1354         if (args->size == 0)
1355                 goto out;
1356
1357         size = round_page(args->size);
1358         if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
1359                 error = -ENOMEM;
1360                 goto out;
1361         }
1362
1363         /*
1364          * Call hint to ensure that NULL is not returned as a valid address
1365          * and to reduce vm_map traversals.
1366          */
1367         addr = vm_map_hint(p, 0, PROT_READ|PROT_WRITE);
1368         vm_object_hold(obj->vm_obj);
1369         vm_object_reference_locked(obj->vm_obj);
1370         vm_object_drop(obj->vm_obj);
1371         rv = vm_map_find(map, obj->vm_obj, NULL,
1372                          args->offset, &addr, args->size,
1373                          PAGE_SIZE, /* align */
1374                          TRUE, /* fitit */
1375                          VM_MAPTYPE_NORMAL, /* maptype */
1376                          VM_PROT_READ | VM_PROT_WRITE, /* prot */
1377                          VM_PROT_READ | VM_PROT_WRITE, /* max */
1378                          MAP_SHARED /* cow */);
1379         if (rv != KERN_SUCCESS) {
1380                 vm_object_deallocate(obj->vm_obj);
1381                 error = -vm_mmap_to_errno(rv);
1382         } else {
1383                 args->addr_ptr = (uint64_t)addr;
1384         }
1385 out:
1386         drm_gem_object_unreference(obj);
1387         return (error);
1388 }
1389
1390 /**
1391  * i915_gem_fault - fault a page into the GTT
1392  * vma: VMA in question
1393  * vmf: fault info
1394  *
1395  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1396  * from userspace.  The fault handler takes care of binding the object to
1397  * the GTT (if needed), allocating and programming a fence register (again,
1398  * only if needed based on whether the old reg is still valid or the object
1399  * is tiled) and inserting a new PTE into the faulting process.
1400  *
1401  * Note that the faulting process may involve evicting existing objects
1402  * from the GTT and/or fence registers to make room.  So performance may
1403  * suffer if the GTT working set is large or there are few fence registers
1404  * left.
1405  */
1406 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres)
1407 {
1408         struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle);
1409         struct drm_device *dev = obj->base.dev;
1410         drm_i915_private_t *dev_priv = dev->dev_private;
1411         unsigned long page_offset;
1412         vm_page_t m, oldm = NULL;
1413         int ret = 0;
1414         bool write = !!(prot & VM_PROT_WRITE);
1415
1416         intel_runtime_pm_get(dev_priv);
1417
1418         /* We don't use vmf->pgoff since that has the fake offset */
1419         page_offset = (unsigned long)offset;
1420
1421 /* Magic FreeBSD VM stuff */
1422         vm_object_pip_add(vm_obj, 1);
1423
1424         /*
1425          * Remove the placeholder page inserted by vm_fault() from the
1426          * object before dropping the object lock. If
1427          * i915_gem_release_mmap() is active in parallel on this gem
1428          * object, then it owns the drm device sx and might find the
1429          * placeholder already. Then, since the page is busy,
1430          * i915_gem_release_mmap() sleeps waiting for the busy state
1431          * i915_gem_release_mmap() sleeps waiting for the busy state
1432          * of the page to be cleared. We will not be able to acquire the
1433          * drm device lock until i915_gem_release_mmap() is able to make
1434          * progress.
1435         if (*mres != NULL) {
1436                 oldm = *mres;
1437                 vm_page_remove(oldm);
1438                 *mres = NULL;
1439         } else
1440                 oldm = NULL;
1441 retry:
1442         VM_OBJECT_UNLOCK(vm_obj);
1443 unlocked_vmobj:
1444         ret = 0;
1445         m = NULL;
1446
1447         mutex_lock(&dev->struct_mutex);
1448
1449         /*
1450          * Since the object lock was dropped, another thread might have
1451          * faulted on the same GTT address and instantiated the
1452          * mapping for the page.  Recheck.
1453          */
1454         VM_OBJECT_LOCK(vm_obj);
1455         m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1456         if (m != NULL) {
1457                 if ((m->flags & PG_BUSY) != 0) {
1458                         mutex_unlock(&dev->struct_mutex);
1459                         goto retry;
1460                 }
1461                 goto have_page;
1462         } else
1463                 VM_OBJECT_UNLOCK(vm_obj);
1464 /* End magic VM stuff */
1465
1466         trace_i915_gem_object_fault(obj, page_offset, true, write);
1467
1468         /* Access to snoopable pages through the GTT is incoherent. */
1469         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1470                 ret = -EINVAL;
1471                 goto unlock;
1472         }
1473
1474         /* Now bind it into the GTT if needed */
1475         ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1476         if (ret)
1477                 goto unlock;
1478
1479         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1480         if (ret)
1481                 goto unpin;
1482
1483         ret = i915_gem_object_get_fence(obj);
1484         if (ret)
1485                 goto unpin;
1486
1487         obj->fault_mappable = true;
1488
1489         VM_OBJECT_LOCK(vm_obj);
1490         m = vm_phys_fictitious_to_vm_page(dev->agp->base +
1491             i915_gem_obj_ggtt_offset(obj) + offset);
1492         if (m == NULL) {
1493                 ret = -EFAULT;
1494                 goto unpin;
1495         }
1496         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1497             ("not fictitious %p", m));
1498         KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1499
1500         if ((m->flags & PG_BUSY) != 0) {
1501                 i915_gem_object_unpin(obj);
1502                 mutex_unlock(&dev->struct_mutex);
1503                 goto retry;
1504         }
1505         m->valid = VM_PAGE_BITS_ALL;
1506
1507         /* Finally, remap it using the new GTT offset */
1508         vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
1509 have_page:
1510         *mres = m;
1511         vm_page_busy_try(m, false);
1512
1513         i915_gem_object_unpin(obj);
1514         mutex_unlock(&dev->struct_mutex);
1515         if (oldm != NULL) {
1516                 vm_page_free(oldm);
1517         }
1518         vm_object_pip_wakeup(vm_obj);
             /* drop the runtime PM reference taken on entry */
             intel_runtime_pm_put(dev_priv);
1519         return (VM_PAGER_OK);
1520
1521 unpin:
1522         i915_gem_object_unpin(obj);
1523 unlock:
1524         mutex_unlock(&dev->struct_mutex);
1525
1526         KASSERT(ret != 0, ("i915_gem_fault: wrong return"));
1527         switch (ret) {
1528         case -EIO:
1529         case -EAGAIN:
1530         case -EINTR:
1531                 goto unlocked_vmobj;
1532         default:
1533                 VM_OBJECT_LOCK(vm_obj);
1534                 vm_object_pip_wakeup(vm_obj);
1535                 ret = VM_PAGER_ERROR;
1536         }
1537
1538         intel_runtime_pm_put(dev_priv);
1539         return ret;
1540 }
1541
1542 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1543 {
1544         struct i915_vma *vma;
1545
1546         /*
1547          * Only the global gtt is relevant for gtt memory mappings, so restrict
1548          * list traversal to objects bound into the global address space. Note
1549          * that the active list should be empty, but better safe than sorry.
1550          */
1551         WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1552         list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1553                 i915_gem_release_mmap(vma->obj);
1554         list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1555                 i915_gem_release_mmap(vma->obj);
1556 }
1557
1558 /**
1559  * i915_gem_release_mmap - remove physical page mappings
1560  * @obj: obj in question
1561  *
1562  * Preserve the reservation of the mmapping with the DRM core code, but
1563  * relinquish ownership of the pages back to the system.
1564  *
1565  * It is vital that we remove the page mapping if we have mapped a tiled
1566  * object through the GTT and then lose the fence register due to
1567  * resource pressure. Similarly if the object has been moved out of the
1568  * aperture, then pages mapped into userspace must be revoked. Removing the
1569  * mapping will then trigger a page fault on the next user access, allowing
1570  * fixup by i915_gem_fault().
1571  */
1572 void
1573 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1574 {
1575         vm_object_t devobj;
1576         vm_page_t m;
1577         int i, page_count;
1578
1579         if (!obj->fault_mappable)
1580                 return;
1581
1582         devobj = cdev_pager_lookup(obj);
1583         if (devobj != NULL) {
1584                 page_count = OFF_TO_IDX(obj->base.size);
1585
1586                 VM_OBJECT_LOCK(devobj);
1587                 for (i = 0; i < page_count; i++) {
1588                         m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1589                         if (m == NULL)
1590                                 continue;
1591                         cdev_pager_free_page(devobj, m);
1592                 }
1593                 VM_OBJECT_UNLOCK(devobj);
1594                 vm_object_deallocate(devobj);
1595         }
1596
1597         obj->fault_mappable = false;
1598 }
1599
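/*
 * Size of the GTT range a fence register must cover for this object.  Gen4+
 * and untiled objects just use the object size; older chips need a
 * power-of-two region: e.g. a 1300 KiB tiled object on gen3 starts from the
 * 1 MiB minimum and is doubled once to a 2 MiB fence region, while gen2
 * starts from 512 KiB.
 */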
1600 uint32_t
1601 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1602 {
1603         uint32_t gtt_size;
1604
1605         if (INTEL_INFO(dev)->gen >= 4 ||
1606             tiling_mode == I915_TILING_NONE)
1607                 return size;
1608
1609         /* Previous chips need a power-of-two fence region when tiling */
1610         if (INTEL_INFO(dev)->gen == 3)
1611                 gtt_size = 1024*1024;
1612         else
1613                 gtt_size = 512*1024;
1614
1615         while (gtt_size < size)
1616                 gtt_size <<= 1;
1617
1618         return gtt_size;
1619 }
1620
1621 /**
1622  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1623  * @dev: drm device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 * @fenced: whether the object will be accessed through a fence register
1624  *
1625  * Return the required GTT alignment for an object, taking into account
1626  * potential fence register mapping.
1627  */
1628 uint32_t
1629 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1630                            int tiling_mode, bool fenced)
1631 {
1632         /*
1633          * Minimum alignment is 4k (GTT page size), but might be greater
1634          * if a fence register is needed for the object.
1635          */
1636         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1637             tiling_mode == I915_TILING_NONE)
1638                 return 4096;
1639
1640         /*
1641          * Previous chips need to be aligned to the size of the smallest
1642          * fence register that can contain the object.
1643          */
1644         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1645 }
1646
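/*
 * Allocate the fake mmap offset used for GTT mapping of the object.  If the
 * offset space is too fragmented (-ENOSPC), free space progressively: first
 * purge enough purgeable objects to cover this object, then as a last resort
 * drop the pages of everything unpinned via i915_gem_shrink_all().
 */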
1647 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1648 {
1649         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1650         int ret;
1651
1652 #if 0
1653         if (drm_vma_node_has_offset(&obj->base.vma_node))
1654                 return 0;
1655 #endif
1656
1657         dev_priv->mm.shrinker_no_lock_stealing = true;
1658
1659         ret = drm_gem_create_mmap_offset(&obj->base);
1660         if (ret != -ENOSPC)
1661                 goto out;
1662
1663         /* Badly fragmented mmap space? The only way we can recover
1664          * space is by destroying unwanted objects. We can't randomly release
1665          * mmap_offsets as userspace expects them to be persistent for the
1666          * lifetime of the objects. The closest we can get is to release the
1667          * offsets on purgeable objects by truncating them and marking them
1668          * purged, which prevents userspace from ever using those objects again.
1669          */
1670         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1671         ret = drm_gem_create_mmap_offset(&obj->base);
1672         if (ret != -ENOSPC)
1673                 goto out;
1674
1675         i915_gem_shrink_all(dev_priv);
1676         ret = drm_gem_create_mmap_offset(&obj->base);
1677 out:
1678         dev_priv->mm.shrinker_no_lock_stealing = false;
1679
1680         return ret;
1681 }
1682
1683 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1684 {
1685         drm_gem_free_mmap_offset(&obj->base);
1686 }
1687
1688 int
1689 i915_gem_mmap_gtt(struct drm_file *file,
1690                   struct drm_device *dev,
1691                   uint32_t handle,
1692                   uint64_t *offset)
1693 {
1694         struct drm_i915_private *dev_priv = dev->dev_private;
1695         struct drm_i915_gem_object *obj;
1696         int ret;
1697
1698         ret = i915_mutex_lock_interruptible(dev);
1699         if (ret)
1700                 return ret;
1701
1702         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1703         if (&obj->base == NULL) {
1704                 ret = -ENOENT;
1705                 goto unlock;
1706         }
1707
1708         if (obj->base.size > dev_priv->gtt.mappable_end) {
1709                 ret = -E2BIG;
1710                 goto out;
1711         }
1712
1713         if (obj->madv != I915_MADV_WILLNEED) {
1714                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1715                 ret = -EINVAL;
1716                 goto out;
1717         }
1718
1719         ret = i915_gem_object_create_mmap_offset(obj);
1720         if (ret)
1721                 goto out;
1722
1723         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1724             DRM_GEM_MAPPING_KEY;
1725
1726 out:
1727         drm_gem_object_unreference(&obj->base);
1728 unlock:
1729         mutex_unlock(&dev->struct_mutex);
1730         return ret;
1731 }
1732
1733 /**
1734  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1735  * @dev: DRM device
1736  * @data: GTT mapping ioctl data
1737  * @file: GEM object info
1738  *
1739  * Simply returns the fake offset to userspace so it can mmap it.
1740  * The mmap call will end up in drm_gem_mmap(), which will set things
1741  * up so we can get faults in the handler above.
1742  *
1743  * The fault handler will take care of binding the object into the GTT
1744  * (since it may have been evicted to make room for something), allocating
1745  * a fence register, and mapping the appropriate aperture address into
1746  * userspace.
1747  */
1748 int
1749 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1750                         struct drm_file *file)
1751 {
1752         struct drm_i915_gem_mmap_gtt *args = data;
1753
1754         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1755 }
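/*
 * Illustrative userspace flow for the ioctl above (sketch, error handling
 * omitted; bo_handle and bo_size stand for the caller's GEM handle and size):
 *
 *   struct drm_i915_gem_mmap_gtt arg = { .handle = bo_handle };
 *   drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *   ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *              fd, arg.offset);
 *
 * Subsequent faults on ptr end up in i915_gem_fault() above.
 */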
1756
1757 /* Immediately discard the backing storage */
1758 static void
1759 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1760 {
1761         vm_object_t vm_obj;
1762
1763         vm_obj = obj->base.vm_obj;
1764         VM_OBJECT_LOCK(vm_obj);
1765         vm_object_page_remove(vm_obj, 0, 0, false);
1766         VM_OBJECT_UNLOCK(vm_obj);
1767
1768         obj->madv = __I915_MADV_PURGED;
1769 }
1770
1771 static inline int
1772 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1773 {
1774         return obj->madv == I915_MADV_DONTNEED;
1775 }
1776
1777 static void
1778 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1779 {
1780         int page_count = obj->base.size / PAGE_SIZE;
1781         int i, ret;
1782
1783         if (!obj->pages)
1784                 return;
1785
1786         BUG_ON(obj->madv == __I915_MADV_PURGED);
1787
1788         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1789         if (ret) {
1790                 /* In the event of a disaster, abandon all caches and
1791                  * hope for the best.
1792                  */
1793                 WARN_ON(ret != -EIO);
1794                 i915_gem_clflush_object(obj, true);
1795                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1796         }
1797
1798         if (i915_gem_object_needs_bit17_swizzle(obj))
1799                 i915_gem_object_save_bit_17_swizzle(obj);
1800
1801         if (obj->madv == I915_MADV_DONTNEED)
1802                 obj->dirty = 0;
1803
1804         for (i = 0; i < page_count; i++) {
1805                 struct vm_page *page = obj->pages[i];
1806
1807                 if (obj->dirty)
1808                         set_page_dirty(page);
1809
1810                 if (obj->madv == I915_MADV_WILLNEED)
1811                         mark_page_accessed(page);
1812
1813                 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1814                 vm_page_unwire(obj->pages[i], 1);
1815                 vm_page_wakeup(obj->pages[i]);
1816         }
1817         obj->dirty = 0;
1818
1819         kfree(obj->pages);
1820         obj->pages = NULL;
1821 }
1822
1823 int
1824 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1825 {
1826         const struct drm_i915_gem_object_ops *ops = obj->ops;
1827
1828         if (obj->pages == NULL)
1829                 return 0;
1830
1831         if (obj->pages_pin_count)
1832                 return -EBUSY;
1833
1834         BUG_ON(i915_gem_obj_bound_any(obj));
1835
1836         /* ->put_pages might need to allocate memory for the bit17 swizzle
1837          * array, hence protect them from being reaped by removing them from gtt
1838          * lists early. */
1839         list_del(&obj->global_list);
1840
1841         ops->put_pages(obj);
1842         obj->pages = NULL;
1843
1844         if (i915_gem_object_is_purgeable(obj))
1845                 i915_gem_object_truncate(obj);
1846
1847         return 0;
1848 }
1849
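/*
 * Release backing pages until roughly `target' pages have been freed, in two
 * passes: first unbound objects, whose pages can simply be dropped, then
 * bound objects, whose VMAs must be unbound before their pages can go.  With
 * purgeable_only set, both passes only touch I915_MADV_DONTNEED objects.
 */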
1850 static unsigned long
1851 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1852                   bool purgeable_only)
1853 {
1854         struct drm_i915_gem_object *obj, *next;
1855         unsigned long count = 0;
1856
1857         list_for_each_entry_safe(obj, next,
1858                                  &dev_priv->mm.unbound_list,
1859                                  global_list) {
1860                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1861                     i915_gem_object_put_pages(obj) == 0) {
1862                         count += obj->base.size >> PAGE_SHIFT;
1863                         if (count >= target)
1864                                 return count;
1865                 }
1866         }
1867
1868         list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1869                                  global_list) {
1870                 struct i915_vma *vma, *v;
1871
1872                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1873                         continue;
1874
1875                 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1876                         if (i915_vma_unbind(vma))
1877                                 break;
1878
1879                 if (!i915_gem_object_put_pages(obj)) {
1880                         count += obj->base.size >> PAGE_SHIFT;
1881                         if (count >= target)
1882                                 return count;
1883                 }
1884         }
1885
1886         return count;
1887 }
1888
1889 static unsigned long
1890 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1891 {
1892         return __i915_gem_shrink(dev_priv, target, true);
1893 }
1894
1895 static unsigned long
1896 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1897 {
1898         struct drm_i915_gem_object *obj, *next;
1899         long freed = 0;
1900
1901         i915_gem_evict_everything(dev_priv->dev);
1902
1903         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1904                                  global_list) {
1905                 if (i915_gem_object_put_pages(obj) == 0)
1906                         freed += obj->base.size >> PAGE_SHIFT;
1907         }
1908         return freed;
1909 }
1910
1911 static int
1912 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1913 {
1914         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1915         int page_count, i, j;
1916         vm_object_t vm_obj;
1917         struct vm_page *page;
1918
1919         /* Assert that the object is not currently in any GPU domain. As it
1920          * wasn't in the GTT, there shouldn't be any way it could have been in
1921          * a GPU cache
1922          */
1923         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1924         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1925
1926         page_count = obj->base.size / PAGE_SIZE;
1927         obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1928             M_WAITOK);
1929
1930         /* Get the list of pages out of our struct file.  They'll be pinned
1931          * at this point until we release them.
1932          *
1933          * Fail silently without starting the shrinker
1934          */
1935         vm_obj = obj->base.vm_obj;
1936         VM_OBJECT_LOCK(vm_obj);
1937         for (i = 0; i < page_count; i++) {
1938                 page = shmem_read_mapping_page(vm_obj, i);
1939                 if (IS_ERR(page)) {
1940                         i915_gem_purge(dev_priv, page_count);
1941                         page = shmem_read_mapping_page(vm_obj, i);
1942                 }
1943                 if (IS_ERR(page)) {
1944                         /* We've tried hard to allocate the memory by reaping
1945                          * our own buffer, now let the real VM do its job and
1946                          * go down in flames if truly OOM.
1947                          */
1948
1949                         i915_gem_shrink_all(dev_priv);
1950                         page = shmem_read_mapping_page(vm_obj, i);
1951                         if (IS_ERR(page))
1952                                 goto err_pages;
1953                 }
1962                 obj->pages[i] = page;
1963         }
1967         VM_OBJECT_UNLOCK(vm_obj);
1968
1969         if (i915_gem_object_needs_bit17_swizzle(obj))
1970                 i915_gem_object_do_bit_17_swizzle(obj);
1971
1972         return 0;
1973
1974 err_pages:
1975         for (j = 0; j < i; j++) {
1976                 page = obj->pages[j];
1977                 vm_page_busy_wait(page, FALSE, "i915gem");
1978                 vm_page_unwire(page, 0);
1979                 vm_page_wakeup(page);
1980         }
1981         VM_OBJECT_UNLOCK(vm_obj);
1982         kfree(obj->pages);
1983         obj->pages = NULL;
1984         return (-EIO);
1985 }
1986
1987 /* Ensure that the associated pages are gathered from the backing storage
1988  * and pinned into our object. i915_gem_object_get_pages() may be called
1989  * multiple times before they are released by a single call to
1990  * i915_gem_object_put_pages() - once the pages are no longer referenced
1991  * either as a result of memory pressure (reaping pages under the shrinker)
1992  * or as the object is itself released.
1993  */
1994 int
1995 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1996 {
1997         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1998         const struct drm_i915_gem_object_ops *ops = obj->ops;
1999         int ret;
2000
2001         if (obj->pages)
2002                 return 0;
2003
2004         if (obj->madv != I915_MADV_WILLNEED) {
2005                 DRM_ERROR("Attempting to obtain a purgeable object\n");
2006                 return -EINVAL;
2007         }
2008
2009         BUG_ON(obj->pages_pin_count);
2010
2011         ret = ops->get_pages(obj);
2012         if (ret)
2013                 return ret;
2014
2015         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2016         return 0;
2017 }
2018
2019 static void
2020 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2021                                struct intel_ring_buffer *ring)
2022 {
2023         struct drm_device *dev = obj->base.dev;
2024         struct drm_i915_private *dev_priv = dev->dev_private;
2025         u32 seqno = intel_ring_get_seqno(ring);
2026
2027         BUG_ON(ring == NULL);
2028         if (obj->ring != ring && obj->last_write_seqno) {
2029                 /* Keep the seqno relative to the current ring */
2030                 obj->last_write_seqno = seqno;
2031         }
2032         obj->ring = ring;
2033
2034         /* Add a reference if we're newly entering the active list. */
2035         if (!obj->active) {
2036                 drm_gem_object_reference(&obj->base);
2037                 obj->active = 1;
2038         }
2039
2040         list_move_tail(&obj->ring_list, &ring->active_list);
2041
2042         obj->last_read_seqno = seqno;
2043
2044         if (obj->fenced_gpu_access) {
2045                 obj->last_fenced_seqno = seqno;
2046
2047                 /* Bump MRU to take account of the delayed flush */
2048                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2049                         struct drm_i915_fence_reg *reg;
2050
2051                         reg = &dev_priv->fence_regs[obj->fence_reg];
2052                         list_move_tail(&reg->lru_list,
2053                                        &dev_priv->mm.fence_list);
2054                 }
2055         }
2056 }
2057
2058 void i915_vma_move_to_active(struct i915_vma *vma,
2059                              struct intel_ring_buffer *ring)
2060 {
2061         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2062         return i915_gem_object_move_to_active(vma->obj, ring);
2063 }
2064
2065 static void
2066 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2067 {
2068         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2069         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
2070         struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2071
2072         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2073         BUG_ON(!obj->active);
2074
2075         list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2076
2077         list_del_init(&obj->ring_list);
2078         obj->ring = NULL;
2079
2080         obj->last_read_seqno = 0;
2081         obj->last_write_seqno = 0;
2082         obj->base.write_domain = 0;
2083
2084         obj->last_fenced_seqno = 0;
2085         obj->fenced_gpu_access = false;
2086
2087         obj->active = 0;
2088         drm_gem_object_unreference(&obj->base);
2089
2090         WARN_ON(i915_verify_lists(dev));
2091 }
2092
2093 static int
2094 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2095 {
2096         struct drm_i915_private *dev_priv = dev->dev_private;
2097         struct intel_ring_buffer *ring;
2098         int ret, i, j;
2099
2100         /* Carefully retire all requests without writing to the rings */
2101         for_each_ring(ring, dev_priv, i) {
2102                 ret = intel_ring_idle(ring);
2103                 if (ret)
2104                         return ret;
2105         }
2106         i915_gem_retire_requests(dev);
2107
2108         /* Finally reset hw state */
2109         for_each_ring(ring, dev_priv, i) {
2110                 intel_ring_init_seqno(ring, seqno);
2111
2112                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2113                         ring->sync_seqno[j] = 0;
2114         }
2115
2116         return 0;
2117 }
2118
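/*
 * Force the driver's seqno bookkeeping to a caller-chosen value.  Roughly:
 * the rings are idled and programmed with seqno - 1 so that the value we are
 * about to emit always appears newer than what the hardware status page
 * reports.  For example, i915_gem_set_seqno(dev, 1) programs the rings with
 * 0 and sets last_seqno to 0xffffffff so seqno-wrap detection keeps working.
 */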
2119 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2120 {
2121         struct drm_i915_private *dev_priv = dev->dev_private;
2122         int ret;
2123
2124         if (seqno == 0)
2125                 return -EINVAL;
2126
2127         /* The HWS page needs to be set to a value lower than what we
2128          * will inject into the ring.
2129          */
2130         ret = i915_gem_init_seqno(dev, seqno - 1);
2131         if (ret)
2132                 return ret;
2133
2134         /* Carefully set the last_seqno value so that wrap
2135          * detection still works
2136          */
2137         dev_priv->next_seqno = seqno;
2138         dev_priv->last_seqno = seqno - 1;
2139         if (dev_priv->last_seqno == 0)
2140                 dev_priv->last_seqno--;
2141
2142         return 0;
2143 }
2144
2145 int
2146 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2147 {
2148         struct drm_i915_private *dev_priv = dev->dev_private;
2149
2150         /* reserve 0 for non-seqno */
2151         if (dev_priv->next_seqno == 0) {
2152                 int ret = i915_gem_init_seqno(dev, 0);
2153                 if (ret)
2154                         return ret;
2155
2156                 dev_priv->next_seqno = 1;
2157         }
2158
2159         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2160         return 0;
2161 }
2162
2163 int __i915_add_request(struct intel_ring_buffer *ring,
2164                        struct drm_file *file,
2165                        struct drm_i915_gem_object *obj,
2166                        u32 *out_seqno)
2167 {
2168         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2169         struct drm_i915_gem_request *request;
2170         u32 request_ring_position, request_start;
2171         int was_empty;
2172         int ret;
2173
2174         request_start = intel_ring_get_tail(ring);
2175         /*
2176          * Emit any outstanding flushes - execbuf can fail to emit the flush
2177          * after having emitted the batchbuffer command. Hence we need to fix
2178          * things up similar to emitting the lazy request. The difference here
2179          * is that the flush _must_ happen before the next request, no matter
2180          * what.
2181          */
2182         ret = intel_ring_flush_all_caches(ring);
2183         if (ret)
2184                 return ret;
2185
2186         request = ring->preallocated_lazy_request;
2187         if (WARN_ON(request == NULL))
2188                 return -ENOMEM;
2189
2190         /* Record the position of the start of the request so that
2191          * should we detect the updated seqno part-way through the
2192          * GPU processing the request, we never over-estimate the
2193          * position of the head.
2194          */
2195         request_ring_position = intel_ring_get_tail(ring);
2196
2197         ret = ring->add_request(ring);
2198         if (ret)
2199                 return ret;
2200
2201         request->seqno = intel_ring_get_seqno(ring);
2202         request->ring = ring;
2203         request->head = request_start;
2204         request->tail = request_ring_position;
2205
2206         /* Whilst this request exists, batch_obj will be on the
2207          * active_list, and so will hold the active reference. Only when this
2208          * request is retired will the batch_obj be moved onto the
2209          * inactive_list and lose its active reference. Hence we do not need
2210          * to explicitly hold another reference here.
2211          */
2212         request->batch_obj = obj;
2213
2214         /* Hold a reference to the current context so that we can inspect
2215          * it later in case a hangcheck error event fires.
2216          */
2217         request->ctx = ring->last_context;
2218         if (request->ctx)
2219                 i915_gem_context_reference(request->ctx);
2220
2221         request->emitted_jiffies = jiffies;
2222         was_empty = list_empty(&ring->request_list);
2223         list_add_tail(&request->list, &ring->request_list);
2224         request->file_priv = NULL;
2225
2226         if (file) {
2227                 struct drm_i915_file_private *file_priv = file->driver_priv;
2228
2229                 spin_lock(&file_priv->mm.lock);
2230                 request->file_priv = file_priv;
2231                 list_add_tail(&request->client_list,
2232                               &file_priv->mm.request_list);
2233                 spin_unlock(&file_priv->mm.lock);
2234         }
2235
2236         trace_i915_gem_request_add(ring, request->seqno);
2237         ring->outstanding_lazy_seqno = 0;
2238         ring->preallocated_lazy_request = NULL;
2239
2240         if (!dev_priv->ums.mm_suspended) {
2241                 i915_queue_hangcheck(ring->dev);
2242
2243                 if (was_empty) {
2244                         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2245                         queue_delayed_work(dev_priv->wq,
2246                                            &dev_priv->mm.retire_work,
2247                                            round_jiffies_up_relative(HZ));
2248                         intel_mark_busy(dev_priv->dev);
2249                 }
2250         }
2251
2252         if (out_seqno)
2253                 *out_seqno = request->seqno;
2254         return 0;
2255 }
2256
2257 static inline void
2258 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2259 {
2260         struct drm_i915_file_private *file_priv = request->file_priv;
2261
2262         if (!file_priv)
2263                 return;
2264
2265         spin_lock(&file_priv->mm.lock);
2266         if (request->file_priv) {
2267                 list_del(&request->client_list);
2268                 request->file_priv = NULL;
2269         }
2270         spin_unlock(&file_priv->mm.lock);
2271 }
2272
2273 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2274                                     struct i915_address_space *vm)
2275 {
2276         if (acthd >= i915_gem_obj_offset(obj, vm) &&
2277             acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2278                 return true;
2279
2280         return false;
2281 }
2282
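/*
 * Check whether the (masked) active head pointer falls within a request,
 * taking ring wrap-around into account: if the request wrapped past the end
 * of the ring (request_start > request_end), e.g. start 0xf000 and end
 * 0x0100, then both acthd 0xf800 and acthd 0x0040 count as inside.
 */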
2283 static bool i915_head_inside_request(const u32 acthd_unmasked,
2284                                      const u32 request_start,
2285                                      const u32 request_end)
2286 {
2287         const u32 acthd = acthd_unmasked & HEAD_ADDR;
2288
2289         if (request_start < request_end) {
2290                 if (acthd >= request_start && acthd < request_end)
2291                         return true;
2292         } else if (request_start > request_end) {
2293                 if (acthd >= request_start || acthd < request_end)
2294                         return true;
2295         }
2296
2297         return false;
2298 }
2299
2300 static struct i915_address_space *
2301 request_to_vm(struct drm_i915_gem_request *request)
2302 {
2303         struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2304         struct i915_address_space *vm;
2305
2306         vm = &dev_priv->gtt.base;
2307
2308         return vm;
2309 }
2310
2311 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2312                                 const u32 acthd, bool *inside)
2313 {
2314         /* There is a possibility that the unmasked head address, while
2315          * pointing inside the ring, matches the batch_obj address range.
2316          * However, this is extremely unlikely.
2317          */
2318         if (request->batch_obj) {
2319                 if (i915_head_inside_object(acthd, request->batch_obj,
2320                                             request_to_vm(request))) {
2321                         *inside = true;
2322                         return true;
2323                 }
2324         }
2325
2326         if (i915_head_inside_request(acthd, request->head, request->tail)) {
2327                 *inside = false;
2328                 return true;
2329         }
2330
2331         return false;
2332 }
2333
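/*
 * A context is banned if it was already flagged as such, or if it is found
 * guilty of another hang within DRM_I915_CTX_BAN_PERIOD seconds of its
 * previous guilty timestamp.
 */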
2334 static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
2335 {
2336         const unsigned long elapsed = get_seconds() - hs->guilty_ts;
2337
2338         if (hs->banned)
2339                 return true;
2340
2341         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2342                 DRM_ERROR("context hanging too fast, declaring banned!\n");
2343                 return true;
2344         }
2345
2346         return false;
2347 }
2348
2349 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2350                                   struct drm_i915_gem_request *request,
2351                                   u32 acthd)
2352 {
2353         struct i915_ctx_hang_stats *hs = NULL;
2354         bool inside, guilty;
2355         unsigned long offset = 0;
2356
2357         /* Innocent until proven guilty */
2358         guilty = false;
2359
2360         if (request->batch_obj)
2361                 offset = i915_gem_obj_offset(request->batch_obj,
2362                                              request_to_vm(request));
2363
2364         if (ring->hangcheck.action != HANGCHECK_WAIT &&
2365             i915_request_guilty(request, acthd, &inside)) {
2366                 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2367                           ring->name,
2368                           inside ? "inside" : "flushing",
2369                           offset,
2370                           request->ctx ? request->ctx->id : 0,
2371                           acthd);
2372
2373                 guilty = true;
2374         }
2375
2376         /* If contexts are disabled or this is the default context, use
2377          * file_priv->reset_state
2378          */
2379         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2380                 hs = &request->ctx->hang_stats;
2381         else if (request->file_priv)
2382                 hs = &request->file_priv->hang_stats;
2383
2384         if (hs) {
2385                 if (guilty) {
2386                         hs->banned = i915_context_is_banned(hs);
2387                         hs->batch_active++;
2388                         hs->guilty_ts = get_seconds();
2389                 } else {
2390                         hs->batch_pending++;
2391                 }
2392         }
2393 }
2394
2395 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2396 {
2397         list_del(&request->list);
2398         i915_gem_request_remove_from_client(request);
2399
2400         if (request->ctx)
2401                 i915_gem_context_unreference(request->ctx);
2402
2403         kfree(request);
2404 }
2405
2406 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2407                                        struct intel_ring_buffer *ring)
2408 {
2409         u32 completed_seqno = ring->get_seqno(ring, false);
2410         u32 acthd = intel_ring_get_active_head(ring);
2411         struct drm_i915_gem_request *request;
2412
2413         list_for_each_entry(request, &ring->request_list, list) {
2414                 if (i915_seqno_passed(completed_seqno, request->seqno))
2415                         continue;
2416
2417                 i915_set_reset_status(ring, request, acthd);
2418         }
2419 }
2420
2421 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2422                                         struct intel_ring_buffer *ring)
2423 {
2424         while (!list_empty(&ring->active_list)) {
2425                 struct drm_i915_gem_object *obj;
2426
2427                 obj = list_first_entry(&ring->active_list,
2428                                        struct drm_i915_gem_object,
2429                                        ring_list);
2430
2431                 i915_gem_object_move_to_inactive(obj);
2432         }
2433
2434         /*
2435          * We must free the requests after all the corresponding objects have
2436          * been moved off the active lists, which is the same order the normal
2437          * retire_requests path uses. This is important if objects hold implicit
2438          * references on things like ppgtt address spaces through the request.
2440          */
2441         while (!list_empty(&ring->request_list)) {
2442                 struct drm_i915_gem_request *request;
2443
2444                 request = list_first_entry(&ring->request_list,
2445                                            struct drm_i915_gem_request,
2446                                            list);
2447
2448                 i915_gem_free_request(request);
2449         }
2450 }
2451
2452 void i915_gem_restore_fences(struct drm_device *dev)
2453 {
2454         struct drm_i915_private *dev_priv = dev->dev_private;
2455         int i;
2456
2457         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2458                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2459
2460                 /*
2461                  * Commit delayed tiling changes if we have an object still
2462                  * attached to the fence, otherwise just clear the fence.
2463                  */
2464                 if (reg->obj) {
2465                         i915_gem_object_update_fence(reg->obj, reg,
2466                                                      reg->obj->tiling_mode);
2467                 } else {
2468                         i915_gem_write_fence(dev, i, NULL);
2469                 }
2470         }
2471 }
2472
2473 void i915_gem_reset(struct drm_device *dev)
2474 {
2475         struct drm_i915_private *dev_priv = dev->dev_private;
2476         struct intel_ring_buffer *ring;
2477         int i;
2478
2479         /*
2480          * Before we free the objects from the requests, we need to inspect
2481          * them for finding the guilty party. As the requests only borrow
2482          * their reference to the objects, the inspection must be done first.
2483          */
2484         for_each_ring(ring, dev_priv, i)
2485                 i915_gem_reset_ring_status(dev_priv, ring);
2486
2487         for_each_ring(ring, dev_priv, i)
2488                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2489
2490         i915_gem_cleanup_ringbuffer(dev);
2491
2492         i915_gem_restore_fences(dev);
2493 }
2494
2495 /**
2496  * This function clears the request list as sequence numbers are passed.
2497  */
2498 void
2499 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2500 {
2501         uint32_t seqno;
2502
2503         if (list_empty(&ring->request_list))
2504                 return;
2505
2506         WARN_ON(i915_verify_lists(ring->dev));
2507
2508         seqno = ring->get_seqno(ring, true);
2509
2510         while (!list_empty(&ring->request_list)) {
2511                 struct drm_i915_gem_request *request;
2512
2513                 request = list_first_entry(&ring->request_list,
2514                                            struct drm_i915_gem_request,
2515                                            list);
2516
2517                 if (!i915_seqno_passed(seqno, request->seqno))
2518                         break;
2519
2520                 trace_i915_gem_request_retire(ring, request->seqno);
2521                 /* We know the GPU must have read the request to have
2522                  * sent us the seqno + interrupt, so use the position
2523                  * of tail of the request to update the last known position
2524                  * of the GPU head.
2525                  */
2526                 ring->last_retired_head = request->tail;
2527
2528                 i915_gem_free_request(request);
2529         }
2530
2531         /* Move any buffers on the active list that are no longer referenced
2532          * by the ringbuffer to the flushing/inactive lists as appropriate.
2533          */
2534         while (!list_empty(&ring->active_list)) {
2535                 struct drm_i915_gem_object *obj;
2536
2537                 obj = list_first_entry(&ring->active_list,
2538                                       struct drm_i915_gem_object,
2539                                       ring_list);
2540
2541                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2542                         break;
2543
2544                 i915_gem_object_move_to_inactive(obj);
2545         }
2546
2547         if (unlikely(ring->trace_irq_seqno &&
2548                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2549                 ring->irq_put(ring);
2550                 ring->trace_irq_seqno = 0;
2551         }
2552
2553         WARN_ON(i915_verify_lists(ring->dev));
2554 }
2555
2556 bool
2557 i915_gem_retire_requests(struct drm_device *dev)
2558 {
2559         drm_i915_private_t *dev_priv = dev->dev_private;
2560         struct intel_ring_buffer *ring;
2561         bool idle = true;
2562         int i;
2563
2564         for_each_ring(ring, dev_priv, i) {
2565                 i915_gem_retire_requests_ring(ring);
2566                 idle &= list_empty(&ring->request_list);
2567         }
2568
2569         return idle;
2570 }
2571
2572 static void
2573 i915_gem_retire_work_handler(struct work_struct *work)
2574 {
2575         drm_i915_private_t *dev_priv;
2576         struct drm_device *dev;
2577         struct intel_ring_buffer *ring;
2578         bool idle;
2579         int i;
2580
2581         dev_priv = container_of(work, drm_i915_private_t,
2582                                 mm.retire_work.work);
2583         dev = dev_priv->dev;
2584
2585         /* Come back later if the device is busy... */
2586         if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT)) {
2587                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2588                                    round_jiffies_up_relative(HZ));
2589                 return;
2590         }
2591
2592         i915_gem_retire_requests(dev);
2593
2594         /* Send a periodic flush down the ring so we don't hold onto GEM
2595          * objects indefinitely.
2596          */
2597         idle = true;
2598         for_each_ring(ring, dev_priv, i) {
2599                 if (ring->gpu_caches_dirty)
2600                         i915_add_request(ring, NULL);
2601
2602                 idle &= list_empty(&ring->request_list);
2603         }
2604
2605         if (!dev_priv->ums.mm_suspended && !idle)
2606                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2607                                    round_jiffies_up_relative(HZ));
2608         if (idle)
2609                 intel_mark_idle(dev);
2610
2611         mutex_unlock(&dev->struct_mutex);
2612 }
2613
2614 static void
2615 i915_gem_idle_work_handler(struct work_struct *work)
2616 {
2617         struct drm_i915_private *dev_priv =
2618                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2619
2620         intel_mark_idle(dev_priv->dev);
2621 }
2622
2623 /**
2624  * Ensures that an object will eventually get non-busy by flushing any required
2625  * write domains, emitting any outstanding lazy request and retiring any
2626  * completed requests.
2627  */
2628 static int
2629 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2630 {
2631         int ret;
2632
2633         if (obj->active) {
2634                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2635                 if (ret)
2636                         return ret;
2637
2638                 i915_gem_retire_requests_ring(obj->ring);
2639         }
2640
2641         return 0;
2642 }
2643
2644 /**
2645  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2646  * @DRM_IOCTL_ARGS: standard ioctl arguments
2647  *
2648  * Returns 0 if successful, else an error is returned with the remaining time in
2649  * the timeout parameter.
2650  *  -ETIME: object is still busy after timeout
2651  *  -ERESTARTSYS: signal interrupted the wait
2652  *  -ENOENT: object doesn't exist
2653  * Also possible, but rare:
2654  *  -EAGAIN: GPU wedged
2655  *  -ENOMEM: damn
2656  *  -ENODEV: Internal IRQ fail
2657  *  -E?: The add request failed
2658  *
2659  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2660  * non-zero timeout parameter the wait ioctl will wait for the given number of
2661  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2662  * without holding struct_mutex the object may become re-busied before this
2663  * function completes. A similar but shorter race condition exists in the busy
2664  * ioctl.
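 *
 * Illustrative userspace usage (sketch, error handling omitted):
 *   struct drm_i915_gem_wait wait = { .bo_handle = handle, .timeout_ns = 0 };
 *   drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);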
2665  */
2666 int
2667 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2668 {
2669         drm_i915_private_t *dev_priv = dev->dev_private;
2670         struct drm_i915_gem_wait *args = data;
2671         struct drm_i915_gem_object *obj;
2672         struct intel_ring_buffer *ring = NULL;
2673         struct timespec timeout_stack, *timeout = NULL;
2674         unsigned reset_counter;
2675         u32 seqno = 0;
2676         int ret = 0;
2677
2678         if (args->timeout_ns >= 0) {
2679                 timeout_stack = ns_to_timespec(args->timeout_ns);
2680                 timeout = &timeout_stack;
2681         }
2682
2683         ret = i915_mutex_lock_interruptible(dev);
2684         if (ret)
2685                 return ret;
2686
2687         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2688         if (&obj->base == NULL) {
2689                 mutex_unlock(&dev->struct_mutex);
2690                 return -ENOENT;
2691         }
2692
2693         /* Need to make sure the object gets inactive eventually. */
2694         ret = i915_gem_object_flush_active(obj);
2695         if (ret)
2696                 goto out;
2697
2698         if (obj->active) {
2699                 seqno = obj->last_read_seqno;
2700                 ring = obj->ring;
2701         }
2702
2703         if (seqno == 0)
2704                  goto out;
2705
2706         /* Do this after OLR check to make sure we make forward progress polling
2707          * on this IOCTL with a 0 timeout (like busy ioctl)
2708          */
2709         if (!args->timeout_ns) {
2710                 ret = -ETIMEDOUT;
2711                 goto out;
2712         }
2713
2714         drm_gem_object_unreference(&obj->base);
2715         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2716         mutex_unlock(&dev->struct_mutex);
2717
2718         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2719         if (timeout)
2720                 args->timeout_ns = timespec_to_ns(timeout);
2721         return ret;
2722
2723 out:
2724         drm_gem_object_unreference(&obj->base);
2725         mutex_unlock(&dev->struct_mutex);
2726         return ret;
2727 }
2728
2729 /**
2730  * i915_gem_object_sync - sync an object to a ring.
2731  *
2732  * @obj: object which may be in use on another ring.
2733  * @to: ring we wish to use the object on. May be NULL.
2734  *
2735  * This code is meant to abstract object synchronization with the GPU.
2736  * Calling with NULL implies synchronizing the object with the CPU
2737  * rather than a particular GPU ring.
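 *
 * When hardware semaphores are enabled this emits a ring-to-ring semaphore
 * wait through to->sync_to(), so the target ring stalls on the GPU until the
 * source ring has passed the object's last_read_seqno; otherwise we fall
 * back to a CPU-side wait via i915_gem_object_wait_rendering().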
2738  *
2739  * Returns 0 if successful, else propagates up the lower layer error.
2740  */
2741 int
2742 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2743                      struct intel_ring_buffer *to)
2744 {
2745         struct intel_ring_buffer *from = obj->ring;
2746         u32 seqno;
2747         int ret, idx;
2748
2749         if (from == NULL || to == from)
2750                 return 0;
2751
2752         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2753                 return i915_gem_object_wait_rendering(obj, false);
2754
2755         idx = intel_ring_sync_index(from, to);
2756
2757         seqno = obj->last_read_seqno;
2758         if (seqno <= from->sync_seqno[idx])
2759                 return 0;
2760
2761         ret = i915_gem_check_olr(obj->ring, seqno);
2762         if (ret)
2763                 return ret;
2764
2765         trace_i915_gem_ring_sync_to(from, to, seqno);
2766         ret = to->sync_to(to, from, seqno);
2767         if (!ret)
2768                 /* We use last_read_seqno because sync_to()
2769                  * might have just caused seqno wrap under
2770                  * the radar.
2771                  */
2772                 from->sync_seqno[idx] = obj->last_read_seqno;
2773
2774         return ret;
2775 }
2776
2777 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2778 {
2779         u32 old_write_domain, old_read_domains;
2780
2781         /* Force a pagefault for domain tracking on next user access */
2782         i915_gem_release_mmap(obj);
2783
2784         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2785                 return;
2786
2787         /* Wait for any direct GTT access to complete */
2788         mb();
2789
2790         old_read_domains = obj->base.read_domains;
2791         old_write_domain = obj->base.write_domain;
2792
2793         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2794         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2795
2796         trace_i915_gem_object_change_domain(obj,
2797                                             old_read_domains,
2798                                             old_write_domain);
2799 }
2800
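/*
 * Tear down a VMA's binding: wait for the GPU to finish with the object,
 * flush the GTT write domain and drop any fence register, unmap the pages
 * from the global GTT and/or aliasing ppgtt, remove the drm_mm node, and
 * finally release the hold on the backing pages so the shrinker may reap
 * them once no VMAs remain.
 */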
2801 int i915_vma_unbind(struct i915_vma *vma)
2802 {
2803         struct drm_i915_gem_object *obj = vma->obj;
2804         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2805         int ret;
2806
2807         /* For now we only ever use 1 vma per object */
2808 #if 0
2809         WARN_ON(!list_is_singular(&obj->vma_list));
2810 #endif
2811
2812         if (list_empty(&vma->vma_link))
2813                 return 0;
2814
2815         if (!drm_mm_node_allocated(&vma->node)) {
2816                 i915_gem_vma_destroy(vma);
2817
2818                 return 0;
2819         }
2820
2821         if (obj->pin_count)
2822                 return -EBUSY;
2823
2824         BUG_ON(obj->pages == NULL);
2825
2826         ret = i915_gem_object_finish_gpu(obj);
2827         if (ret)
2828                 return ret;
2829         /* Continue on if we fail due to EIO, the GPU is hung so we
2830          * should be safe and we need to cleanup or else we might
2831          * cause memory corruption through use-after-free.
2832          */
2833
2834         i915_gem_object_finish_gtt(obj);
2835
2836         /* release the fence reg _after_ flushing */
2837         ret = i915_gem_object_put_fence(obj);
2838         if (ret)
2839                 return ret;
2840
2841         trace_i915_vma_unbind(vma);
2842
2843         if (obj->has_global_gtt_mapping)
2844                 i915_gem_gtt_unbind_object(obj);
2845         if (obj->has_aliasing_ppgtt_mapping) {
2846                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2847                 obj->has_aliasing_ppgtt_mapping = 0;
2848         }
2849         i915_gem_gtt_finish_object(obj);
2850
2851         list_del(&vma->mm_list);
2852         /* Avoid an unnecessary call to unbind on rebind. */
2853         if (i915_is_ggtt(vma->vm))
2854                 obj->map_and_fenceable = true;
2855
2856         drm_mm_remove_node(&vma->node);
2857         i915_gem_vma_destroy(vma);
2858
2859         /* Since the unbound list is global, only move to that list if
2860          * no more VMAs exist. */
2861         if (list_empty(&obj->vma_list))
2862                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2863
2864         /* And finally now the object is completely decoupled from this vma,
2865          * we can drop its hold on the backing storage and allow it to be
2866          * reaped by the shrinker.
2867          */
2868         i915_gem_object_unpin_pages(obj);
2869
2870         return 0;
2871 }
2872
2873 /**
2874  * Unbinds an object from the global GTT aperture.
2875  */
2876 int
2877 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2878 {
2879         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2880         struct i915_address_space *ggtt = &dev_priv->gtt.base;
2881
2882         if (!i915_gem_obj_ggtt_bound(obj))
2883                 return 0;
2884
2885         if (obj->pin_count)
2886                 return -EBUSY;
2887
2888         BUG_ON(obj->pages == NULL);
2889
2890         return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2891 }
2892
2893 int i915_gpu_idle(struct drm_device *dev)
2894 {
2895         drm_i915_private_t *dev_priv = dev->dev_private;
2896         struct intel_ring_buffer *ring;
2897         int ret, i;
2898
2899         /* Flush everything onto the inactive list. */
2900         for_each_ring(ring, dev_priv, i) {
2901                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2902                 if (ret)
2903                         return ret;
2904
2905                 ret = intel_ring_idle(ring);
2906                 if (ret)
2907                         return ret;
2908         }
2909
2910         return 0;
2911 }
2912
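/*
 * Program a gen4+ fence register.  As built below, the 64-bit value packs
 * the address of the last 4 KiB page of the region into the high dword, the
 * start address into the low dword, the pitch in 128-byte units minus one at
 * fence_pitch_shift, a Y-tiling flag and the valid bit.
 */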
2913 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2914                                  struct drm_i915_gem_object *obj)
2915 {
2916         drm_i915_private_t *dev_priv = dev->dev_private;
2917         int fence_reg;
2918         int fence_pitch_shift;
2919
2920         if (INTEL_INFO(dev)->gen >= 6) {
2921                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2922                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2923         } else {
2924                 fence_reg = FENCE_REG_965_0;
2925                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2926         }
2927
2928         fence_reg += reg * 8;
2929
2930         /* To work around incoherency with non-atomic 64-bit register updates,
2931          * we split the 64-bit update into two 32-bit writes. In order
2932          * for a partial fence not to be evaluated between writes, we
2933          * precede the update with write to turn off the fence register,
2934          * and only enable the fence as the last step.
2935          *
2936          * For extra levels of paranoia, we make sure each step lands
2937          * before applying the next step.
2938          */
2939         I915_WRITE(fence_reg, 0);
2940         POSTING_READ(fence_reg);
2941
2942         if (obj) {
2943                 u32 size = i915_gem_obj_ggtt_size(obj);
2944                 uint64_t val;
2945
2946                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2947                                  0xfffff000) << 32;
2948                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2949                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2950                 if (obj->tiling_mode == I915_TILING_Y)
2951                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2952                 val |= I965_FENCE_REG_VALID;
2953
2954                 I915_WRITE(fence_reg + 4, val >> 32);
2955                 POSTING_READ(fence_reg + 4);
2956
2957                 I915_WRITE(fence_reg + 0, val);
2958                 POSTING_READ(fence_reg);
2959         } else {
2960                 I915_WRITE(fence_reg + 4, 0);
2961                 POSTING_READ(fence_reg + 4);
2962         }
2963 }
2964
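/*
 * Program a gen3 fence register.  The pitch is encoded as log2 of the stride
 * in tile widths: e.g. a 2048-byte stride with 512-byte-wide tiles gives
 * pitch_val = ffs(2048 / 512) - 1 = 2.
 */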
2965 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2966                                  struct drm_i915_gem_object *obj)
2967 {
2968         drm_i915_private_t *dev_priv = dev->dev_private;
2969         u32 val;
2970
2971         if (obj) {
2972                 u32 size = i915_gem_obj_ggtt_size(obj);
2973                 int pitch_val;
2974                 int tile_width;
2975
2976                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2977                      (size & -size) != size ||
2978                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2979                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2980                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2981
2982                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2983                         tile_width = 128;
2984                 else
2985                         tile_width = 512;
2986
2987                 /* Note: pitch better be a power of two tile widths */
2988                 pitch_val = obj->stride / tile_width;
2989                 pitch_val = ffs(pitch_val) - 1;
2990
2991                 val = i915_gem_obj_ggtt_offset(obj);
2992                 if (obj->tiling_mode == I915_TILING_Y)
2993                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2994                 val |= I915_FENCE_SIZE_BITS(size);
2995                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2996                 val |= I830_FENCE_REG_VALID;
2997         } else
2998                 val = 0;
2999
3000         if (reg < 8)
3001                 reg = FENCE_REG_830_0 + reg * 4;
3002         else
3003                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3004
3005         I915_WRITE(reg, val);
3006         POSTING_READ(reg);
3007 }
3008
3009 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3010                                 struct drm_i915_gem_object *obj)
3011 {
3012         drm_i915_private_t *dev_priv = dev->dev_private;
3013         uint32_t val;
3014
3015         if (obj) {
3016                 u32 size = i915_gem_obj_ggtt_size(obj);
3017                 uint32_t pitch_val;
3018
3019                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3020                      (size & -size) != size ||
3021                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3022                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3023                      i915_gem_obj_ggtt_offset(obj), size);
3024
3025                 pitch_val = obj->stride / 128;
3026                 pitch_val = ffs(pitch_val) - 1;
3027
3028                 val = i915_gem_obj_ggtt_offset(obj);
3029                 if (obj->tiling_mode == I915_TILING_Y)
3030                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3031                 val |= I830_FENCE_SIZE_BITS(size);
3032                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3033                 val |= I830_FENCE_REG_VALID;
3034         } else
3035                 val = 0;
3036
3037         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3038         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3039 }
3040
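/*
 * True if accesses to @obj may go through the GTT and therefore require a
 * memory barrier around fence register updates.
 */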
3041 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3042 {
3043         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3044 }
3045
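/*
 * Update fence register @reg for @obj (or clear it), dispatching to the
 * generation-specific helper with the required memory barriers.
 */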
3046 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3047                                  struct drm_i915_gem_object *obj)
3048 {
3049         struct drm_i915_private *dev_priv = dev->dev_private;
3050
3051         /* Ensure that all CPU reads are completed before installing a fence
3052          * and all writes before removing the fence.
3053          */
3054         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3055                 mb();
3056
3057         WARN(obj && (!obj->stride || !obj->tiling_mode),
3058              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3059              obj->stride, obj->tiling_mode);
3060
3061         switch (INTEL_INFO(dev)->gen) {
3062         case 8:
3063         case 7:
3064         case 6:
3065         case 5:
3066         case 4: i965_write_fence_reg(dev, reg, obj); break;
3067         case 3: i915_write_fence_reg(dev, reg, obj); break;
3068         case 2: i830_write_fence_reg(dev, reg, obj); break;
3069         default: BUG();
3070         }
3071
3072         /* And similarly be paranoid that no direct access to this region
3073          * is reordered to before the fence is installed.
3074          */
3075         if (i915_gem_object_needs_mb(obj))
3076                 mb();
3077 }
3078
3079 static inline int fence_number(struct drm_i915_private *dev_priv,
3080                                struct drm_i915_fence_reg *fence)
3081 {
3082         return fence - dev_priv->fence_regs;
3083 }
3084
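/*
 * Write the fence register and keep the obj<->fence bookkeeping and the
 * fence LRU list in sync.
 */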
3085 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3086                                          struct drm_i915_fence_reg *fence,
3087                                          bool enable)
3088 {
3089         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3090         int reg = fence_number(dev_priv, fence);
3091
3092         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3093
3094         if (enable) {
3095                 obj->fence_reg = reg;
3096                 fence->obj = obj;
3097                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3098         } else {
3099                 obj->fence_reg = I915_FENCE_REG_NONE;
3100                 fence->obj = NULL;
3101                 list_del_init(&fence->lru_list);
3102         }
3103         obj->fence_dirty = false;
3104 }
3105
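/*
 * Wait for any GPU access still relying on the object's fence before the
 * register is changed or released.
 */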
3106 static int
3107 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3108 {
3109         if (obj->last_fenced_seqno) {
3110                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3111                 if (ret)
3112                         return ret;
3113
3114                 obj->last_fenced_seqno = 0;
3115         }
3116
3117         obj->fenced_gpu_access = false;
3118         return 0;
3119 }
3120
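/*
 * Release the fence register backing @obj, waiting for any outstanding
 * fenced GPU access first.
 */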
3121 int
3122 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3123 {
3124         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3125         struct drm_i915_fence_reg *fence;
3126         int ret;
3127
3128         ret = i915_gem_object_wait_fence(obj);
3129         if (ret)
3130                 return ret;
3131
3132         if (obj->fence_reg == I915_FENCE_REG_NONE)
3133                 return 0;
3134
3135         fence = &dev_priv->fence_regs[obj->fence_reg];
3136
3137         i915_gem_object_fence_lost(obj);
3138         i915_gem_object_update_fence(obj, fence, false);
3139
3140         return 0;
3141 }
3142
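/*
 * Find a free fence register, or an unpinned one to steal from the LRU;
 * returns -EAGAIN or -EDEADLK via ERR_PTR() if none can be used right now.
 */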
3143 static struct drm_i915_fence_reg *
3144 i915_find_fence_reg(struct drm_device *dev)
3145 {
3146         struct drm_i915_private *dev_priv = dev->dev_private;
3147         struct drm_i915_fence_reg *reg, *avail;
3148         int i;
3149
3150         /* First try to find a free reg */
3151         avail = NULL;
3152         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3153                 reg = &dev_priv->fence_regs[i];
3154                 if (!reg->obj)
3155                         return reg;
3156
3157                 if (!reg->pin_count)
3158                         avail = reg;
3159         }
3160
3161         if (avail == NULL)
3162                 goto deadlock;
3163
3164         /* None available, try to steal one or wait for a user to finish */
3165         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3166                 if (reg->pin_count)
3167                         continue;
3168
3169                 return reg;
3170         }
3171
3172 deadlock:
3173         /* Wait for completion of pending flips which consume fences */
3174         if (intel_has_pending_fb_unpin(dev))
3175                 return ERR_PTR(-EAGAIN);
3176
3177         return ERR_PTR(-EDEADLK);
3178 }
3179
3180 /**
3181  * i915_gem_object_get_fence - set up fencing for an object
3182  * @obj: object to map through a fence reg
3183  *
3184  * When mapping objects through the GTT, userspace wants to be able to write
3185  * to them without having to worry about swizzling if the object is tiled.
3186  * This function walks the fence regs looking for a free one for @obj,
3187  * stealing one if it can't find any.
3188  *
3189  * It then sets up the reg based on the object's properties: address, pitch
3190  * and tiling format.
3191  *
3192  * For an untiled surface, this removes any existing fence.
3193  */
3194 int
3195 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3196 {
3197         struct drm_device *dev = obj->base.dev;
3198         struct drm_i915_private *dev_priv = dev->dev_private;
3199         bool enable = obj->tiling_mode != I915_TILING_NONE;
3200         struct drm_i915_fence_reg *reg;
3201         int ret;
3202
3203         /* Have we updated the tiling parameters upon the object and so
3204          * will need to serialise the write to the associated fence register?
3205          */
3206         if (obj->fence_dirty) {
3207                 ret = i915_gem_object_wait_fence(obj);
3208                 if (ret)
3209                         return ret;
3210         }
3211
3212         /* Just update our place in the LRU if our fence is getting reused. */
3213         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3214                 reg = &dev_priv->fence_regs[obj->fence_reg];
3215                 if (!obj->fence_dirty) {
3216                         list_move_tail(&reg->lru_list,
3217                                        &dev_priv->mm.fence_list);
3218                         return 0;
3219                 }
3220         } else if (enable) {
3221                 reg = i915_find_fence_reg(dev);
3222                 if (IS_ERR(reg))
3223                         return PTR_ERR(reg);
3224
3225                 if (reg->obj) {
3226                         struct drm_i915_gem_object *old = reg->obj;
3227
3228                         ret = i915_gem_object_wait_fence(old);
3229                         if (ret)
3230                                 return ret;
3231
3232                         i915_gem_object_fence_lost(old);
3233                 }
3234         } else
3235                 return 0;
3236
3237         i915_gem_object_update_fence(obj, reg, enable);
3238
3239         return 0;
3240 }
3241
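/*
 * On non-LLC platforms, verify that binding the object at @gtt_space will
 * not place differently-snooped neighbours directly adjacent to each other.
 */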
3242 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3243                                      struct drm_mm_node *gtt_space,
3244                                      unsigned long cache_level)
3245 {
3246         struct drm_mm_node *other;
3247
3248         /* On non-LLC machines we have to be careful when putting differing
3249          * types of snoopable memory together to avoid the prefetcher
3250          * crossing memory domains and dying.
3251          */
3252         if (HAS_LLC(dev))
3253                 return true;
3254
3255         if (!drm_mm_node_allocated(gtt_space))
3256                 return true;
3257
3258         if (list_empty(&gtt_space->node_list))
3259                 return true;
3260
3261         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3262         if (other->allocated && !other->hole_follows && other->color != cache_level)
3263                 return false;
3264
3265         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3266         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3267                 return false;
3268
3269         return true;
3270 }
3271
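/* Debug helper: sanity-check all GTT bindings; compiled out unless WATCH_GTT is set. */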
3272 static void i915_gem_verify_gtt(struct drm_device *dev)
3273 {
3274 #if WATCH_GTT
3275         struct drm_i915_private *dev_priv = dev->dev_private;
3276         struct drm_i915_gem_object *obj;
3277         int err = 0;
3278
3279         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3280                 if (obj->gtt_space == NULL) {
3281                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3282                         err++;
3283                         continue;
3284                 }
3285
3286                 if (obj->cache_level != obj->gtt_space->color) {
3287                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3288                                i915_gem_obj_ggtt_offset(obj),
3289                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3290                                obj->cache_level,
3291                                obj->gtt_space->color);
3292                         err++;
3293                         continue;
3294                 }
3295
3296                 if (!i915_gem_valid_gtt_space(dev,
3297                                               obj->gtt_space,
3298                                               obj->cache_level)) {
3299                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3300                                i915_gem_obj_ggtt_offset(obj),
3301                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3302                                obj->cache_level);
3303                         err++;
3304                         continue;
3305                 }
3306         }
3307
3308         WARN_ON(err);
3309 #endif
3310 }
3311
3312 /**
3313  * Finds free space in the GTT aperture and binds the object there.
3314  */
3315 static int
3316 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3317                            struct i915_address_space *vm,
3318                            unsigned alignment,
3319                            bool map_and_fenceable,
3320                            bool nonblocking)
3321 {
3322         struct drm_device *dev = obj->base.dev;
3323         drm_i915_private_t *dev_priv = dev->dev_private;
3324         u32 size, fence_size, fence_alignment, unfenced_alignment;
3325         size_t gtt_max =
3326                 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3327         struct i915_vma *vma;
3328         int ret;
3329
3330         fence_size = i915_gem_get_gtt_size(dev,
3331                                            obj->base.size,
3332                                            obj->tiling_mode);
3333         fence_alignment = i915_gem_get_gtt_alignment(dev,
3334                                                      obj->base.size,
3335                                                      obj->tiling_mode, true);
3336         unfenced_alignment =
3337                 i915_gem_get_gtt_alignment(dev,
3338                                                     obj->base.size,
3339                                                     obj->tiling_mode, false);
3340
3341         if (alignment == 0)
3342                 alignment = map_and_fenceable ? fence_alignment :
3343                                                 unfenced_alignment;
3344         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3345                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3346                 return -EINVAL;
3347         }
3348
3349         size = map_and_fenceable ? fence_size : obj->base.size;
3350
3351         /* If the object is bigger than the entire aperture, reject it early
3352          * before evicting everything in a vain attempt to find space.
3353          */
3354         if (obj->base.size > gtt_max) {
3355                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3356                           obj->base.size,
3357                           map_and_fenceable ? "mappable" : "total",
3358                           gtt_max);
3359                 return -E2BIG;
3360         }
3361
3362         ret = i915_gem_object_get_pages(obj);
3363         if (ret)
3364                 return ret;
3365
3366         i915_gem_object_pin_pages(obj);
3367
3368         BUG_ON(!i915_is_ggtt(vm));
3369
3370         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3371         if (IS_ERR(vma)) {
3372                 ret = PTR_ERR(vma);
3373                 goto err_unpin;
3374         }
3375
3376         /* For now we only ever use 1 vma per object */
3377 #if 0
3378         WARN_ON(!list_is_singular(&obj->vma_list));
3379 #endif
3380
3381 search_free:
3382         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3383                                                   size, alignment,
3384                                                   obj->cache_level, 0, gtt_max,
3385                                                   DRM_MM_SEARCH_DEFAULT);
3386         if (ret) {
3387                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3388                                                obj->cache_level,
3389                                                map_and_fenceable,
3390                                                nonblocking);
3391                 if (ret == 0)
3392                         goto search_free;
3393
3394                 goto err_free_vma;
3395         }
3396         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3397                                               obj->cache_level))) {
3398                 ret = -EINVAL;
3399                 goto err_remove_node;
3400         }
3401
3402         ret = i915_gem_gtt_prepare_object(obj);
3403         if (ret)
3404                 goto err_remove_node;
3405
3406         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3407         list_add_tail(&vma->mm_list, &vm->inactive_list);
3408
3409         if (i915_is_ggtt(vm)) {
3410                 bool mappable, fenceable;
3411
3412                 fenceable = (vma->node.size == fence_size &&
3413                              (vma->node.start & (fence_alignment - 1)) == 0);
3414
3415                 mappable = (vma->node.start + obj->base.size <=
3416                             dev_priv->gtt.mappable_end);
3417
3418                 obj->map_and_fenceable = mappable && fenceable;
3419         }
3420
3421         WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3422
3423         trace_i915_vma_bind(vma, map_and_fenceable);
3424         i915_gem_verify_gtt(dev);
3425         return 0;
3426
3427 err_remove_node:
3428         drm_mm_remove_node(&vma->node);
3429 err_free_vma:
3430         i915_gem_vma_destroy(vma);
3431 err_unpin:
3432         i915_gem_object_unpin_pages(obj);
3433         return ret;
3434 }
3435
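/*
 * Flush CPU cachelines for the object's backing pages; returns true if a
 * clflush was actually emitted (the caller may then need a chipset flush).
 */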
3436 bool
3437 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3438                         bool force)
3439 {
3440         /* If we don't have a page list set up, then we're not pinned
3441          * to GPU, and we can ignore the cache flush because it'll happen
3442          * again at bind time.
3443          */
3444         if (obj->pages == NULL)
3445                 return false;
3446
3447         /*
3448          * Stolen memory is always coherent with the GPU as it is explicitly
3449          * marked as wc by the system, or the system is cache-coherent.
3450          */
3451         if (obj->stolen)
3452                 return false;
3453
3454         /* If the GPU is snooping the contents of the CPU cache,
3455          * we do not need to manually clear the CPU cache lines.  However,
3456          * the caches are only snooped when the render cache is
3457          * flushed/invalidated.  As we always have to emit invalidations
3458          * and flushes when moving into and out of the RENDER domain, correct
3459          * snooping behaviour occurs naturally as the result of our domain
3460          * tracking.
3461          */
3462         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3463                 return false;
3464
3465         trace_i915_gem_object_clflush(obj);
3466         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3467
3468         return true;
3469 }
3470
3471 /** Flushes the GTT write domain for the object if it's dirty. */
3472 static void
3473 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3474 {
3475         uint32_t old_write_domain;
3476
3477         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3478                 return;
3479
3480         /* No actual flushing is required for the GTT write domain.  Writes
3481          * to it immediately go to main memory as far as we know, so there's
3482          * no chipset flush.  It also doesn't land in render cache.
3483          *
3484          * However, we do have to enforce the order so that all writes through
3485          * the GTT land before any writes to the device, such as updates to
3486          * the GATT itself.
3487          */
3488         wmb();
3489
3490         old_write_domain = obj->base.write_domain;
3491         obj->base.write_domain = 0;
3492
3493         trace_i915_gem_object_change_domain(obj,
3494                                             obj->base.read_domains,
3495                                             old_write_domain);
3496 }
3497
3498 /** Flushes the CPU write domain for the object if it's dirty. */
3499 static void
3500 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3501                                        bool force)
3502 {
3503         uint32_t old_write_domain;
3504
3505         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3506                 return;
3507
3508         if (i915_gem_clflush_object(obj, force))
3509                 i915_gem_chipset_flush(obj->base.dev);
3510
3511         old_write_domain = obj->base.write_domain;
3512         obj->base.write_domain = 0;
3513
3514         trace_i915_gem_object_change_domain(obj,
3515                                             obj->base.read_domains,
3516                                             old_write_domain);
3517 }
3518
3519 /**
3520  * Moves a single object to the GTT read, and possibly write domain.
3521  *
3522  * This function returns when the move is complete, including waiting on
3523  * flushes to occur.
3524  */
3525 int
3526 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3527 {
3528         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3529         uint32_t old_write_domain, old_read_domains;
3530         int ret;
3531
3532         /* Not valid to be called on unbound objects. */
3533         if (!i915_gem_obj_bound_any(obj))
3534                 return -EINVAL;
3535
3536         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3537                 return 0;
3538
3539         ret = i915_gem_object_wait_rendering(obj, !write);
3540         if (ret)
3541                 return ret;
3542
3543         i915_gem_object_flush_cpu_write_domain(obj, false);
3544
3545         /* Serialise direct access to this object with the barriers for
3546          * coherent writes from the GPU, by effectively invalidating the
3547          * GTT domain upon first access.
3548          */
3549         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3550                 mb();
3551
3552         old_write_domain = obj->base.write_domain;
3553         old_read_domains = obj->base.read_domains;
3554
3555         /* It should now be out of any other write domains, and we can update
3556          * the domain values for our changes.
3557          */
3558         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3559         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3560         if (write) {
3561                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3562                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3563                 obj->dirty = 1;
3564         }
3565
3566         trace_i915_gem_object_change_domain(obj,
3567                                             old_read_domains,
3568                                             old_write_domain);
3569
3570         /* And bump the LRU for this access */
3571         if (i915_gem_object_is_inactive(obj)) {
3572                 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3573                 if (vma)
3574                         list_move_tail(&vma->mm_list,
3575                                        &dev_priv->gtt.base.inactive_list);
3576
3577         }
3578
3579         return 0;
3580 }
3581
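/*
 * Change the object's caching mode, unbinding or rebinding its GTT/PPGTT
 * mappings (and dropping fences on pre-gen6) as required.
 */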
3582 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3583                                     enum i915_cache_level cache_level)
3584 {
3585         struct drm_device *dev = obj->base.dev;
3586         drm_i915_private_t *dev_priv = dev->dev_private;
3587         struct i915_vma *vma;
3588         int ret;
3589
3590         if (obj->cache_level == cache_level)
3591                 return 0;
3592
3593         if (obj->pin_count) {
3594                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3595                 return -EBUSY;
3596         }
3597
3598         list_for_each_entry(vma, &obj->vma_list, vma_link) {
3599                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3600                         ret = i915_vma_unbind(vma);
3601                         if (ret)
3602                                 return ret;
3603
3604                         break;
3605                 }
3606         }
3607
3608         if (i915_gem_obj_bound_any(obj)) {
3609                 ret = i915_gem_object_finish_gpu(obj);
3610                 if (ret)
3611                         return ret;
3612
3613                 i915_gem_object_finish_gtt(obj);
3614
3615                 /* Before SandyBridge, you could not use tiling or fence
3616                  * registers with snooped memory, so relinquish any fences
3617                  * currently pointing to our region in the aperture.
3618                  */
3619                 if (INTEL_INFO(dev)->gen < 6) {
3620                         ret = i915_gem_object_put_fence(obj);
3621                         if (ret)
3622                                 return ret;
3623                 }
3624
3625                 if (obj->has_global_gtt_mapping)
3626                         i915_gem_gtt_bind_object(obj, cache_level);
3627                 if (obj->has_aliasing_ppgtt_mapping)
3628                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3629                                                obj, cache_level);
3630         }
3631
3632         list_for_each_entry(vma, &obj->vma_list, vma_link)
3633                 vma->node.color = cache_level;
3634         obj->cache_level = cache_level;
3635
3636         if (cpu_write_needs_clflush(obj)) {
3637                 u32 old_read_domains, old_write_domain;
3638
3639                 /* If we're coming from LLC cached, then we haven't
3640                  * actually been tracking whether the data is in the
3641                  * CPU cache or not, since we only allow one bit set
3642                  * in obj->write_domain and have been skipping the clflushes.
3643                  * Just set it to the CPU cache for now.
3644                  */
3645                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3646
3647                 old_read_domains = obj->base.read_domains;
3648                 old_write_domain = obj->base.write_domain;
3649
3650                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3651                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3652
3653                 trace_i915_gem_object_change_domain(obj,
3654                                                     old_read_domains,
3655                                                     old_write_domain);
3656         }
3657
3658         i915_gem_verify_gtt(dev);
3659         return 0;
3660 }
3661
3662 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3663                                struct drm_file *file)
3664 {
3665         struct drm_i915_gem_caching *args = data;
3666         struct drm_i915_gem_object *obj;
3667         int ret;
3668
3669         ret = i915_mutex_lock_interruptible(dev);
3670         if (ret)
3671                 return ret;
3672
3673         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3674         if (&obj->base == NULL) {
3675                 ret = -ENOENT;
3676                 goto unlock;
3677         }
3678
3679         switch (obj->cache_level) {
3680         case I915_CACHE_LLC:
3681         case I915_CACHE_L3_LLC:
3682                 args->caching = I915_CACHING_CACHED;
3683                 break;
3684
3685         case I915_CACHE_WT:
3686                 args->caching = I915_CACHING_DISPLAY;
3687                 break;
3688
3689         default:
3690                 args->caching = I915_CACHING_NONE;
3691                 break;
3692         }
3693
3694         drm_gem_object_unreference(&obj->base);
3695 unlock:
3696         mutex_unlock(&dev->struct_mutex);
3697         return ret;
3698 }
3699
3700 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3701                                struct drm_file *file)
3702 {
3703         struct drm_i915_gem_caching *args = data;
3704         struct drm_i915_gem_object *obj;
3705         enum i915_cache_level level;
3706         int ret;
3707
3708         switch (args->caching) {
3709         case I915_CACHING_NONE:
3710                 level = I915_CACHE_NONE;
3711                 break;
3712         case I915_CACHING_CACHED:
3713                 level = I915_CACHE_LLC;
3714                 break;
3715         case I915_CACHING_DISPLAY:
3716                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3717                 break;
3718         default:
3719                 return -EINVAL;
3720         }
3721
3722         ret = i915_mutex_lock_interruptible(dev);
3723         if (ret)
3724                 return ret;
3725
3726         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3727         if (&obj->base == NULL) {
3728                 ret = -ENOENT;
3729                 goto unlock;
3730         }
3731
3732         ret = i915_gem_object_set_cache_level(obj, level);
3733
3734         drm_gem_object_unreference(&obj->base);
3735 unlock:
3736         mutex_unlock(&dev->struct_mutex);
3737         return ret;
3738 }
3739
3740 static bool is_pin_display(struct drm_i915_gem_object *obj)
3741 {
3742         /* There are 3 sources that pin objects:
3743          *   1. The display engine (scanouts, sprites, cursors);
3744          *   2. Reservations for execbuffer;
3745          *   3. The user.
3746          *
3747          * We can ignore reservations as we hold the struct_mutex and
3748          * are only called outside of the reservation path.  The user
3749          * can only increment pin_count once, and so if after
3750          * subtracting the potential reference by the user, any pin_count
3751          * remains, it must be due to another use by the display engine.
3752          */
3753         return obj->pin_count - !!obj->user_pin_count;
3754 }
3755
3756 /*
3757  * Prepare buffer for display plane (scanout, cursors, etc).
3758  * Can be called from an uninterruptible phase (modesetting) and allows
3759  * any flushes to be pipelined (for pageflips).
3760  */
3761 int
3762 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3763                                      u32 alignment,
3764                                      struct intel_ring_buffer *pipelined)
3765 {
3766         u32 old_read_domains, old_write_domain;
3767         int ret;
3768
3769         if (pipelined != obj->ring) {
3770                 ret = i915_gem_object_sync(obj, pipelined);
3771                 if (ret)
3772                         return ret;
3773         }
3774
3775         /* Mark the pin_display early so that we account for the
3776          * display coherency whilst setting up the cache domains.
3777          */
3778         obj->pin_display = true;
3779
3780         /* The display engine is not coherent with the LLC cache on gen6.  As
3781          * a result, we make sure that the pinning that is about to occur is
3782          * done with uncached PTEs. This is the lowest common denominator for all
3783          * chipsets.
3784          *
3785          * However for gen6+, we could do better by using the GFDT bit instead
3786          * of uncaching, which would allow us to flush all the LLC-cached data
3787          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3788          */
3789         ret = i915_gem_object_set_cache_level(obj,
3790                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3791         if (ret)
3792                 goto err_unpin_display;
3793
3794         /* As the user may map the buffer once pinned in the display plane
3795          * (e.g. libkms for the bootup splash), we have to ensure that we
3796          * always use map_and_fenceable for all scanout buffers.
3797          */
3798         ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3799         if (ret)
3800                 goto err_unpin_display;
3801
3802         i915_gem_object_flush_cpu_write_domain(obj, true);
3803
3804         old_write_domain = obj->base.write_domain;
3805         old_read_domains = obj->base.read_domains;
3806
3807         /* It should now be out of any other write domains, and we can update
3808          * the domain values for our changes.
3809          */
3810         obj->base.write_domain = 0;
3811         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3812
3813         trace_i915_gem_object_change_domain(obj,
3814                                             old_read_domains,
3815                                             old_write_domain);
3816
3817         return 0;
3818
3819 err_unpin_display:
3820         obj->pin_display = is_pin_display(obj);
3821         return ret;
3822 }
3823
3824 void
3825 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3826 {
3827         i915_gem_object_unpin(obj);
3828         obj->pin_display = is_pin_display(obj);
3829 }
3830
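/*
 * Wait for outstanding rendering and drop the GPU read domains so that the
 * GPU's caches and TLBs are invalidated on the next use.
 */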
3831 int
3832 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3833 {
3834         int ret;
3835
3836         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3837                 return 0;
3838
3839         ret = i915_gem_object_wait_rendering(obj, false);
3840         if (ret)
3841                 return ret;
3842
3843         /* Ensure that we invalidate the GPU's caches and TLBs. */
3844         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3845         return 0;
3846 }
3847
3848 /**
3849  * Moves a single object to the CPU read, and possibly write domain.
3850  *
3851  * This function returns when the move is complete, including waiting on
3852  * flushes to occur.
3853  */
3854 int
3855 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3856 {
3857         uint32_t old_write_domain, old_read_domains;
3858         int ret;
3859
3860         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3861                 return 0;
3862
3863         ret = i915_gem_object_wait_rendering(obj, !write);
3864         if (ret)
3865                 return ret;
3866
3867         i915_gem_object_flush_gtt_write_domain(obj);
3868
3869         old_write_domain = obj->base.write_domain;
3870         old_read_domains = obj->base.read_domains;
3871
3872         /* Flush the CPU cache if it's still invalid. */
3873         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3874                 i915_gem_clflush_object(obj, false);
3875
3876                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3877         }
3878
3879         /* It should now be out of any other write domains, and we can update
3880          * the domain values for our changes.
3881          */
3882         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3883
3884         /* If we're writing through the CPU, then the GPU read domains will
3885          * need to be invalidated at next use.
3886          */
3887         if (write) {
3888                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3889                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3890         }
3891
3892         trace_i915_gem_object_change_domain(obj,
3893                                             old_read_domains,
3894                                             old_write_domain);
3895
3896         return 0;
3897 }
3898
3899 /* Throttle our rendering by waiting until the ring has completed our requests
3900  * emitted over 20 msec ago.
3901  *
3902  * Note that if we were to use the current jiffies each time around the loop,
3903  * we wouldn't escape the function with any frames outstanding if the time to
3904  * render a frame was over 20ms.
3905  *
3906  * This should get us reasonable parallelism between CPU and GPU but also
3907  * relatively low latency when blocking on a particular request to finish.
3908  */
3909 static int
3910 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3911 {
3912         struct drm_i915_private *dev_priv = dev->dev_private;
3913         struct drm_i915_file_private *file_priv = file->driver_priv;
3914         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3915         struct drm_i915_gem_request *request;
3916         struct intel_ring_buffer *ring = NULL;
3917         unsigned reset_counter;
3918         u32 seqno = 0;
3919         int ret;
3920
3921         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3922         if (ret)
3923                 return ret;
3924
3925         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3926         if (ret)
3927                 return ret;
3928
3929         spin_lock(&file_priv->mm.lock);
3930         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3931                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3932                         break;
3933
3934                 ring = request->ring;
3935                 seqno = request->seqno;
3936         }
3937         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3938         spin_unlock(&file_priv->mm.lock);
3939
3940         if (seqno == 0)
3941                 return 0;
3942
3943         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3944         if (ret == 0)
3945                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3946
3947         return ret;
3948 }
3949
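/*
 * Bind @obj into @vm if necessary, honouring @alignment and
 * @map_and_fenceable, and increment its pin count.
 */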
3950 int
3951 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3952                     struct i915_address_space *vm,
3953                     uint32_t alignment,
3954                     bool map_and_fenceable,
3955                     bool nonblocking)
3956 {
3957         struct i915_vma *vma;
3958         int ret;
3959
3960         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3961                 return -EBUSY;
3962
3963         WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3964
3965         vma = i915_gem_obj_to_vma(obj, vm);
3966
3967         if (vma) {
3968                 if ((alignment &&
3969                      vma->node.start & (alignment - 1)) ||
3970                     (map_and_fenceable && !obj->map_and_fenceable)) {
3971                         WARN(obj->pin_count,
3972                              "bo is already pinned with incorrect alignment:"
3973                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3974                              " obj->map_and_fenceable=%d\n",
3975                              i915_gem_obj_offset(obj, vm), alignment,
3976                              map_and_fenceable,
3977                              obj->map_and_fenceable);
3978                         ret = i915_vma_unbind(vma);
3979                         if (ret)
3980                                 return ret;
3981                 }
3982         }
3983
3984         if (!i915_gem_obj_bound(obj, vm)) {
3985                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3986
3987                 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3988                                                  map_and_fenceable,
3989                                                  nonblocking);
3990                 if (ret)
3991                         return ret;
3992
3993                 if (!dev_priv->mm.aliasing_ppgtt)
3994                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3995         }
3996
3997         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3998                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3999
4000         obj->pin_count++;
4001         obj->pin_mappable |= map_and_fenceable;
4002
4003         return 0;
4004 }
4005
4006 void
4007 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
4008 {
4009         BUG_ON(obj->pin_count == 0);
4010         BUG_ON(!i915_gem_obj_bound_any(obj));
4011
4012         if (--obj->pin_count == 0)
4013                 obj->pin_mappable = false;
4014 }
4015
4016 int
4017 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4018                    struct drm_file *file)
4019 {
4020         struct drm_i915_gem_pin *args = data;
4021         struct drm_i915_gem_object *obj;
4022         int ret;
4023
4024         ret = i915_mutex_lock_interruptible(dev);
4025         if (ret)
4026                 return ret;
4027
4028         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4029         if (&obj->base == NULL) {
4030                 ret = -ENOENT;
4031                 goto unlock;
4032         }
4033
4034         if (obj->madv != I915_MADV_WILLNEED) {
4035                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4036                 ret = -EINVAL;
4037                 goto out;
4038         }
4039
4040         if (obj->pin_filp != NULL && obj->pin_filp != file) {
4041                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4042                           args->handle);
4043                 ret = -EINVAL;
4044                 goto out;
4045         }
4046
4047         if (obj->user_pin_count == ULONG_MAX) {
4048                 ret = -EBUSY;
4049                 goto out;
4050         }
4051
4052         if (obj->user_pin_count == 0) {
4053                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
4054                 if (ret)
4055                         goto out;
4056         }
4057
4058         obj->user_pin_count++;
4059         obj->pin_filp = file;
4060
4061         args->offset = i915_gem_obj_ggtt_offset(obj);
4062 out:
4063         drm_gem_object_unreference(&obj->base);
4064 unlock:
4065         mutex_unlock(&dev->struct_mutex);
4066         return ret;
4067 }
4068
4069 int
4070 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4071                      struct drm_file *file)
4072 {
4073         struct drm_i915_gem_pin *args = data;
4074         struct drm_i915_gem_object *obj;
4075         int ret;
4076
4077         ret = i915_mutex_lock_interruptible(dev);
4078         if (ret)
4079                 return ret;
4080
4081         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4082         if (&obj->base == NULL) {
4083                 ret = -ENOENT;
4084                 goto unlock;
4085         }
4086
4087         if (obj->pin_filp != file) {
4088                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4089                           args->handle);
4090                 ret = -EINVAL;
4091                 goto out;
4092         }
4093         obj->user_pin_count--;
4094         if (obj->user_pin_count == 0) {
4095                 obj->pin_filp = NULL;
4096                 i915_gem_object_unpin(obj);
4097         }
4098
4099 out:
4100         drm_gem_object_unreference(&obj->base);
4101 unlock:
4102         mutex_unlock(&dev->struct_mutex);
4103         return ret;
4104 }
4105
4106 int
4107 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4108                     struct drm_file *file)
4109 {
4110         struct drm_i915_gem_busy *args = data;
4111         struct drm_i915_gem_object *obj;
4112         int ret;
4113
4114         ret = i915_mutex_lock_interruptible(dev);
4115         if (ret)
4116                 return ret;
4117
4118         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4119         if (&obj->base == NULL) {
4120                 ret = -ENOENT;
4121                 goto unlock;
4122         }
4123
4124         /* Count all active objects as busy, even if they are currently not used
4125          * by the gpu. Users of this interface expect objects to eventually
4126          * become non-busy without any further actions, therefore emit any
4127          * necessary flushes here.
4128          */
4129         ret = i915_gem_object_flush_active(obj);
4130
4131         args->busy = obj->active;
4132         if (obj->ring) {
4133                 args->busy |= intel_ring_flag(obj->ring) << 16;
4134         }
4135
4136         drm_gem_object_unreference(&obj->base);
4137 unlock:
4138         mutex_unlock(&dev->struct_mutex);
4139         return ret;
4140 }
4141
4142 int
4143 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4144                         struct drm_file *file_priv)
4145 {
4146         return i915_gem_ring_throttle(dev, file_priv);
4147 }
4148
4149 int
4150 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4151                        struct drm_file *file_priv)
4152 {
4153         struct drm_i915_gem_madvise *args = data;
4154         struct drm_i915_gem_object *obj;
4155         int ret;
4156
4157         switch (args->madv) {
4158         case I915_MADV_DONTNEED:
4159         case I915_MADV_WILLNEED:
4160             break;
4161         default:
4162             return -EINVAL;
4163         }
4164
4165         ret = i915_mutex_lock_interruptible(dev);
4166         if (ret)
4167                 return ret;
4168
4169         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4170         if (&obj->base == NULL) {
4171                 ret = -ENOENT;
4172                 goto unlock;
4173         }
4174
4175         if (obj->pin_count) {
4176                 ret = -EINVAL;
4177                 goto out;
4178         }
4179
4180         if (obj->madv != __I915_MADV_PURGED)
4181                 obj->madv = args->madv;
4182
4183         /* if the object is no longer attached, discard its backing storage */
4184         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4185                 i915_gem_object_truncate(obj);
4186
4187         args->retained = obj->madv != __I915_MADV_PURGED;
4188
4189 out:
4190         drm_gem_object_unreference(&obj->base);
4191 unlock:
4192         mutex_unlock(&dev->struct_mutex);
4193         return ret;
4194 }
4195
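/* Initialise the common GEM object state (lists, fence slot, madvise) shared by all backing-storage types. */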
4196 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4197                           const struct drm_i915_gem_object_ops *ops)
4198 {
4199         INIT_LIST_HEAD(&obj->global_list);
4200         INIT_LIST_HEAD(&obj->ring_list);
4201         INIT_LIST_HEAD(&obj->obj_exec_link);
4202         INIT_LIST_HEAD(&obj->vma_list);
4203
4204         obj->ops = ops;
4205
4206         obj->fence_reg = I915_FENCE_REG_NONE;
4207         obj->madv = I915_MADV_WILLNEED;
4208         /* Avoid an unnecessary call to unbind on the first bind. */
4209         obj->map_and_fenceable = true;
4210
4211         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4212 }
4213
4214 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4215         .get_pages = i915_gem_object_get_pages_gtt,
4216         .put_pages = i915_gem_object_put_pages_gtt,
4217 };
4218
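/*
 * Allocate and initialise a shmem-backed GEM object of @size, starting in
 * the CPU domain and using LLC caching where the hardware provides it.
 */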
4219 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4220                                                   size_t size)
4221 {
4222         struct drm_i915_gem_object *obj;
4223 #if 0
4224         struct address_space *mapping;
4225         gfp_t mask;
4226 #endif
4227
4228         obj = i915_gem_object_alloc(dev);
4229         if (obj == NULL)
4230                 return NULL;
4231
4232         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4233                 i915_gem_object_free(obj);
4234                 return NULL;
4235         }
4236
4237 #if 0
4238         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4239         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4240                 /* 965gm cannot relocate objects above 4GiB. */
4241                 mask &= ~__GFP_HIGHMEM;
4242                 mask |= __GFP_DMA32;
4243         }
4244
4245         mapping = file_inode(obj->base.filp)->i_mapping;
4246         mapping_set_gfp_mask(mapping, mask);
4247 #endif
4248
4249         i915_gem_object_init(obj, &i915_gem_object_ops);
4250
4251         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4252         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4253
4254         if (HAS_LLC(dev)) {
4255                 /* On some devices, we can have the GPU use the LLC (the CPU
4256                  * cache) for about a 10% performance improvement
4257                  * compared to uncached.  Graphics requests other than
4258                  * display scanout are coherent with the CPU in
4259                  * accessing this cache.  This means in this mode we
4260                  * don't need to clflush on the CPU side, and on the
4261                  * GPU side we only need to flush internal caches to
4262                  * get data visible to the CPU.
4263                  *
4264                  * However, we maintain the display planes as UC, and so
4265                  * need to rebind when first used as such.
4266                  */
4267                 obj->cache_level = I915_CACHE_LLC;
4268         } else
4269                 obj->cache_level = I915_CACHE_NONE;
4270
4271         trace_i915_gem_object_create(obj);
4272
4273         return obj;
4274 }
4275
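/*
 * Final unreference: unbind all VMAs, release the backing pages and mmap
 * offset, then free the object itself.
 */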
4276 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4277 {
4278         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4279         struct drm_device *dev = obj->base.dev;
4280         drm_i915_private_t *dev_priv = dev->dev_private;
4281         struct i915_vma *vma, *next;
4282
4283         intel_runtime_pm_get(dev_priv);
4284
4285         trace_i915_gem_object_destroy(obj);
4286
4287         if (obj->phys_obj)
4288                 i915_gem_detach_phys_object(dev, obj);
4289
4290         obj->pin_count = 0;
4291         /* NB: 0 or 1 elements */
4292 #if 0
4293         WARN_ON(!list_empty(&obj->vma_list) &&
4294                 !list_is_singular(&obj->vma_list));
4295 #endif
4296         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4297                 int ret = i915_vma_unbind(vma);
4298                 if (WARN_ON(ret == -ERESTARTSYS)) {
4299                         bool was_interruptible;
4300
4301                         was_interruptible = dev_priv->mm.interruptible;
4302                         dev_priv->mm.interruptible = false;
4303
4304                         WARN_ON(i915_vma_unbind(vma));
4305
4306                         dev_priv->mm.interruptible = was_interruptible;
4307                 }
4308         }
4309
4310         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
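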
4311          * before progressing. */
4312         if (obj->stolen)
4313                 i915_gem_object_unpin_pages(obj);
4314
4315         if (WARN_ON(obj->pages_pin_count))
4316                 obj->pages_pin_count = 0;
4317         i915_gem_object_put_pages(obj);
4318         i915_gem_object_free_mmap_offset(obj);
4319
4320         BUG_ON(obj->pages);
4321
4322 #if 0
4323         if (obj->base.import_attach)
4324                 drm_prime_gem_destroy(&obj->base, NULL);
4325 #endif
4326
4327         drm_gem_object_release(&obj->base);
4328         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4329
4330         kfree(obj->bit_17);
4331         i915_gem_object_free(obj);
4332
4333         intel_runtime_pm_put(dev_priv);
4334 }
4335
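/* Return the VMA binding @obj into @vm, or NULL if there is none. */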
4336 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4337                                      struct i915_address_space *vm)
4338 {
4339         struct i915_vma *vma;
4340         list_for_each_entry(vma, &obj->vma_list, vma_link)
4341                 if (vma->vm == vm)
4342                         return vma;
4343
4344         return NULL;
4345 }
4346
4347 static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4348                                               struct i915_address_space *vm)
4349 {
4350         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4351         if (vma == NULL)
4352                 return ERR_PTR(-ENOMEM);
4353
4354         INIT_LIST_HEAD(&vma->vma_link);
4355         INIT_LIST_HEAD(&vma->mm_list);
4356         INIT_LIST_HEAD(&vma->exec_list);
4357         vma->vm = vm;
4358         vma->obj = obj;
4359
4360         /* Keep GGTT vmas first to make debug easier */
4361         if (i915_is_ggtt(vm))
4362                 list_add(&vma->vma_link, &obj->vma_list);
4363         else
4364                 list_add_tail(&vma->vma_link, &obj->vma_list);
4365
4366         return vma;
4367 }
4368
4369 struct i915_vma *
4370 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4371                                   struct i915_address_space *vm)
4372 {
4373         struct i915_vma *vma;
4374
4375         vma = i915_gem_obj_to_vma(obj, vm);
4376         if (!vma)
4377                 vma = __i915_gem_vma_create(obj, vm);
4378
4379         return vma;
4380 }
4381
4382 void i915_gem_vma_destroy(struct i915_vma *vma)
4383 {
4384         WARN_ON(vma->node.allocated);
4385
4386         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4387         if (!list_empty(&vma->exec_list))
4388                 return;
4389
4390         list_del(&vma->vma_link);
4391
4392         kfree(vma);
4393 }
4394
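/*
 * Quiesce the GPU for suspend: idle the rings, retire requests, tear down
 * the ringbuffers and stop the retire/idle work and hangcheck timer.
 */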
4395 int
4396 i915_gem_suspend(struct drm_device *dev)
4397 {
4398         drm_i915_private_t *dev_priv = dev->dev_private;
4399         int ret = 0;
4400
4401         mutex_lock(&dev->struct_mutex);
4402         if (dev_priv->ums.mm_suspended)
4403                 goto err;
4404
4405         ret = i915_gpu_idle(dev);
4406         if (ret)
4407                 goto err;
4408
4409         i915_gem_retire_requests(dev);
4410
4411         /* Under UMS, be paranoid and evict. */
4412         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4413                 i915_gem_evict_everything(dev);
4414
4415         i915_kernel_lost_context(dev);
4416         i915_gem_cleanup_ringbuffer(dev);
4417
4418         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4419          * We need to replace this with a semaphore, or something.
4420          * And not confound ums.mm_suspended!
4421          */
4422         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4423                                                              DRIVER_MODESET);
4424         mutex_unlock(&dev->struct_mutex);
4425
4426         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4427         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4428         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4429
4430         return 0;
4431
4432 err:
4433         mutex_unlock(&dev->struct_mutex);
4434         return ret;
4435 }
4436
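/*
 * Re-emit the saved L3 remapping information for @slice as LRI commands on
 * @ring (gen7 L3 DPF parity handling).
 */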
4437 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4438 {
4439         struct drm_device *dev = ring->dev;
4440         drm_i915_private_t *dev_priv = dev->dev_private;
4441         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4442         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4443         int i, ret;
4444
4445         if (!HAS_L3_DPF(dev) || !remap_info)
4446                 return 0;
4447
4448         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4449         if (ret)
4450                 return ret;
4451
4452         /*
4453          * Note: We do not worry about the concurrent register cacheline hang
4454          * here because no other code should access these registers other than
4455          * at initialization time.
4456          */
4457         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4458                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4459                 intel_ring_emit(ring, reg_base + i);
4460                 intel_ring_emit(ring, remap_info[i/4]);
4461         }
4462
4463         intel_ring_advance(ring);
4464
4465         return ret;
4466 }
4467
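/* Enable the hardware swizzling bits for tiled surfaces on gen5+ when bit-6 swizzling is in use. */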
4468 void i915_gem_init_swizzling(struct drm_device *dev)
4469 {
4470         drm_i915_private_t *dev_priv = dev->dev_private;
4471
4472         if (INTEL_INFO(dev)->gen < 5 ||
4473             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4474                 return;
4475
4476         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4477                                  DISP_TILE_SURFACE_SWIZZLING);
4478
4479         if (IS_GEN5(dev))
4480                 return;
4481
4482         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4483         if (IS_GEN6(dev))
4484                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4485         else if (IS_GEN7(dev))
4486                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4487         else if (IS_GEN8(dev))
4488                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4489         else
4490                 BUG();
4491 }
4492
4493 static bool
4494 intel_enable_blt(struct drm_device *dev)
4495 {
4496         int revision;
4497
4498         if (!HAS_BLT(dev))
4499                 return false;
4500
4501         /* The blitter was dysfunctional on early prototypes */
4502         revision = pci_read_config(dev->dev, PCIR_REVID, 1);
4503         if (IS_GEN6(dev) && revision < 8) {
4504                 DRM_INFO("BLT not supported on this pre-production hardware;"
4505                          " graphics performance will be degraded.\n");
4506                 return false;
4507         }
4508
4509         return true;
4510 }
4511
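/*
 * Initialise the render ring plus any BSD/BLT/VEBOX rings the hardware
 * provides, unwinding the already-initialised rings on failure.
 */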
4512 static int i915_gem_init_rings(struct drm_device *dev)
4513 {
4514         struct drm_i915_private *dev_priv = dev->dev_private;
4515         int ret;
4516
4517         ret = intel_init_render_ring_buffer(dev);
4518         if (ret)
4519                 return ret;
4520
4521         if (HAS_BSD(dev)) {
4522                 ret = intel_init_bsd_ring_buffer(dev);
4523                 if (ret)
4524                         goto cleanup_render_ring;
4525         }
4526
4527         if (intel_enable_blt(dev)) {
4528                 ret = intel_init_blt_ring_buffer(dev);
4529                 if (ret)
4530                         goto cleanup_bsd_ring;
4531         }
4532
4533         if (HAS_VEBOX(dev)) {
4534                 ret = intel_init_vebox_ring_buffer(dev);
4535                 if (ret)
4536                         goto cleanup_blt_ring;
4537         }
4538
4539
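        /*
         * Seed the seqno a little below the 32-bit wrap point so that the
         * wraparound handling gets exercised early rather than after days
         * of uptime.
         */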
4540         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4541         if (ret)
4542                 goto cleanup_vebox_ring;
4543
4544         return 0;
4545
4546 cleanup_vebox_ring:
4547         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4548 cleanup_blt_ring:
4549         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4550 cleanup_bsd_ring:
4551         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4552 cleanup_render_ring:
4553         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4554
4555         return ret;
4556 }
4557
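/*
 * Hardware (re)initialization for GEM: program the eLLC hashing and
 * platform workaround registers, set up swizzling, bring up the rings,
 * replay the L3 remap tables, initialize contexts and, if configured,
 * enable the aliasing PPGTT.  Called at load, resume and after GPU reset.
 */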
4558 int
4559 i915_gem_init_hw(struct drm_device *dev)
4560 {
4561         drm_i915_private_t *dev_priv = dev->dev_private;
4562         int ret, i;
4563
4564 #if 0
4565         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4566                 return -EIO;
4567 #endif
4568
4569         if (dev_priv->ellc_size)
4570                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4571
4572         if (IS_HASWELL(dev))
4573                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4574                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4575
4576         if (HAS_PCH_NOP(dev)) {
4577                 u32 temp = I915_READ(GEN7_MSG_CTL);
4578                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4579                 I915_WRITE(GEN7_MSG_CTL, temp);
4580         }
4581
4582         i915_gem_init_swizzling(dev);
4583
4584         ret = i915_gem_init_rings(dev);
4585         if (ret)
4586                 return ret;
4587
4588         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4589                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4590
4591         /*
4592          * XXX: There was a workaround (w/a) described somewhere suggesting
4593          * that contexts should be loaded before the PPGTT is enabled.
4594          */
4595         ret = i915_gem_context_init(dev);
4596         if (ret) {
4597                 i915_gem_cleanup_ringbuffer(dev);
4598                 DRM_ERROR("Context initialization failed %d\n", ret);
4599                 return ret;
4600         }
4601
4602         if (dev_priv->mm.aliasing_ppgtt) {
4603                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4604                 if (ret) {
4605                         i915_gem_cleanup_aliasing_ppgtt(dev);
4606                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4607                 }
4608         }
4609
4610         return 0;
4611 }
4612
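/*
 * Top-level GEM initialization at driver load: set up the global GTT and
 * run the full hardware bring-up under struct_mutex; on non-KMS setups
 * hardware batchbuffers are also allowed for the legacy DRI1 path.
 */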
4613 int i915_gem_init(struct drm_device *dev)
4614 {
4615         struct drm_i915_private *dev_priv = dev->dev_private;
4616         int ret;
4617
4618         mutex_lock(&dev->struct_mutex);
4619
4620         if (IS_VALLEYVIEW(dev)) {
4621                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4622                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4623                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4624                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4625         }
4626
4627         i915_gem_init_global_gtt(dev);
4628
4629         ret = i915_gem_init_hw(dev);
4630         mutex_unlock(&dev->struct_mutex);
4631         if (ret) {
4632                 i915_gem_cleanup_aliasing_ppgtt(dev);
4633                 return ret;
4634         }
4635
4636         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4637         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4638                 dev_priv->dri1.allow_batchbuffer = 1;
4639         return 0;
4640 }
4641
4642 void
4643 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4644 {
4645         drm_i915_private_t *dev_priv = dev->dev_private;
4646         struct intel_ring_buffer *ring;
4647         int i;
4648
4649         for_each_ring(ring, dev_priv, i)
4650                 intel_cleanup_ring_buffer(ring);
4651 }
4652
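/*
 * Legacy (non-KMS/UMS) VT-switch entry point: clear any wedged state,
 * re-enable the hardware and install interrupts when userspace takes over
 * the VT.  A no-op under kernel modesetting.
 */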
4653 int
4654 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4655                        struct drm_file *file_priv)
4656 {
4657         struct drm_i915_private *dev_priv = dev->dev_private;
4658         int ret;
4659
4660         if (drm_core_check_feature(dev, DRIVER_MODESET))
4661                 return 0;
4662
4663         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4664                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4665                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4666         }
4667
4668         mutex_lock(&dev->struct_mutex);
4669         dev_priv->ums.mm_suspended = 0;
4670
4671         ret = i915_gem_init_hw(dev);
4672         if (ret != 0) {
4673                 mutex_unlock(&dev->struct_mutex);
4674                 return ret;
4675         }
4676
4677         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4678         mutex_unlock(&dev->struct_mutex);
4679
4680         ret = drm_irq_install(dev);
4681         if (ret)
4682                 goto cleanup_ringbuffer;
4683
4684         return 0;
4685
4686 cleanup_ringbuffer:
4687         mutex_lock(&dev->struct_mutex);
4688         i915_gem_cleanup_ringbuffer(dev);
4689         dev_priv->ums.mm_suspended = 1;
4690         mutex_unlock(&dev->struct_mutex);
4691
4692         return ret;
4693 }
4694
4695 int
4696 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4697                        struct drm_file *file_priv)
4698 {
4699         if (drm_core_check_feature(dev, DRIVER_MODESET))
4700                 return 0;
4701
4702         drm_irq_uninstall(dev);
4703
4704         return i915_gem_suspend(dev);
4705 }
4706
4707 void
4708 i915_gem_lastclose(struct drm_device *dev)
4709 {
4710         int ret;
4711
4712         if (drm_core_check_feature(dev, DRIVER_MODESET))
4713                 return;
4714
4715         ret = i915_gem_suspend(dev);
4716         if (ret)
4717                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4718 }
4719
4720 static void
4721 init_ring_lists(struct intel_ring_buffer *ring)
4722 {
4723         INIT_LIST_HEAD(&ring->active_list);
4724         INIT_LIST_HEAD(&ring->request_list);
4725 }
4726
4727 static void i915_init_vm(struct drm_i915_private *dev_priv,
4728                          struct i915_address_space *vm)
4729 {
4730         vm->dev = dev_priv->dev;
4731         INIT_LIST_HEAD(&vm->active_list);
4732         INIT_LIST_HEAD(&vm->inactive_list);
4733         INIT_LIST_HEAD(&vm->global_link);
4734         list_add(&vm->global_link, &dev_priv->vm_list);
4735 }
4736
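/*
 * One-time, load-time setup of the GEM bookkeeping: address-space and
 * object lists, the retire/idle work handlers, the per-generation fence
 * register count, and bit-6 swizzle detection.
 */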
4737 void
4738 i915_gem_load(struct drm_device *dev)
4739 {
4740         drm_i915_private_t *dev_priv = dev->dev_private;
4741         int i;
4742
4743         INIT_LIST_HEAD(&dev_priv->vm_list);
4744         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4745
4746         INIT_LIST_HEAD(&dev_priv->context_list);
4747         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4748         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4749         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4750         for (i = 0; i < I915_NUM_RINGS; i++)
4751                 init_ring_lists(&dev_priv->ring[i]);
4752         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4753                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4754         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4755                           i915_gem_retire_work_handler);
4756         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4757                           i915_gem_idle_work_handler);
4758         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4759
4760         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4761         if (IS_GEN3(dev)) {
4762                 I915_WRITE(MI_ARB_STATE,
4763                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4764         }
4765
4766         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4767
4768         /* Old X drivers will take 0-2 for front, back, depth buffers */
4769         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4770                 dev_priv->fence_reg_start = 3;
4771
4772         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4773                 dev_priv->num_fence_regs = 32;
4774         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4775                 dev_priv->num_fence_regs = 16;
4776         else
4777                 dev_priv->num_fence_regs = 8;
4778
4779         /* Initialize fence registers to zero */
4780         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4781         i915_gem_restore_fences(dev);
4782
4783         i915_gem_detect_bit_6_swizzle(dev);
4784         init_waitqueue_head(&dev_priv->pending_flip_queue);
4785
4786         dev_priv->mm.interruptible = true;
4787
4788 #if 0
4789         dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4790         dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4791         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4792         register_shrinker(&dev_priv->mm.inactive_shrinker);
4793         /* Old FreeBSD code */
4794         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4795             i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4796 #endif
4797 }
4798
4799 /*
4800  * Create a physically contiguous memory object for this object,
4801  * e.g. for the cursor and overlay registers.
4802  */
4803 static int i915_gem_init_phys_object(struct drm_device *dev,
4804                                      int id, int size, int align)
4805 {
4806         drm_i915_private_t *dev_priv = dev->dev_private;
4807         struct drm_i915_gem_phys_object *phys_obj;
4808         int ret;
4809
4810         if (dev_priv->mm.phys_objs[id - 1] || !size)
4811                 return 0;
4812
4813         phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4814         if (!phys_obj)
4815                 return -ENOMEM;
4816
4817         phys_obj->id = id;
4818
4819         phys_obj->handle = drm_pci_alloc(dev, size, align);
4820         if (!phys_obj->handle) {
4821                 ret = -ENOMEM;
4822                 goto kfree_obj;
4823         }
4824 #ifdef CONFIG_X86
4825         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4826 #endif
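        /*
         * DragonFly: mark the backing pages write-combining, mirroring the
         * set_memory_wc() call above.
         */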
4827         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4828             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4829
4830         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4831
4832         return 0;
4833 kfree_obj:
4834         kfree(phys_obj);
4835         return ret;
4836 }
4837
4838 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4839 {
4840         drm_i915_private_t *dev_priv = dev->dev_private;
4841         struct drm_i915_gem_phys_object *phys_obj;
4842
4843         if (!dev_priv->mm.phys_objs[id - 1])
4844                 return;
4845
4846         phys_obj = dev_priv->mm.phys_objs[id - 1];
4847         if (phys_obj->cur_obj) {
4848                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4849         }
4850
4851 #ifdef CONFIG_X86
4852         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4853 #endif
4854         drm_pci_free(dev, phys_obj->handle);
4855         kfree(phys_obj);
4856         dev_priv->mm.phys_objs[id - 1] = NULL;
4857 }
4858
4859 void i915_gem_free_all_phys_object(struct drm_device *dev)
4860 {
4861         int i;
4862
4863         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4864                 i915_gem_free_phys_object(dev, i);
4865 }
4866
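/*
 * Copy the contents of a phys object back into the GEM object's backing
 * pages, flush and dirty them, and drop the association so the object is
 * paged normally again.
 */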
4867 void i915_gem_detach_phys_object(struct drm_device *dev,
4868                                  struct drm_i915_gem_object *obj)
4869 {
4870         struct vm_object *mapping = obj->base.vm_obj;
4871         char *vaddr;
4872         int i;
4873         int page_count;
4874
4875         if (!obj->phys_obj)
4876                 return;
4877         vaddr = obj->phys_obj->handle->vaddr;
4878
4879         page_count = obj->base.size / PAGE_SIZE;
4880         for (i = 0; i < page_count; i++) {
4881                 struct vm_page *page = shmem_read_mapping_page(mapping, i);
4882                 if (!IS_ERR(page)) {
4883                         char *dst = kmap_atomic(page);
4884                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4885                         kunmap_atomic(dst);
4886
4887                         drm_clflush_pages(&page, 1);
4888
4889                         set_page_dirty(page);
4890                         mark_page_accessed(page);
4891 #if 0
4892                         page_cache_release(page);
4893 #endif
4894                         vm_page_busy_wait(page, FALSE, "i915gem");
4895                         vm_page_unwire(page, 0);
4896                         vm_page_wakeup(page);
4897                 }
4898         }
4899         i915_gem_chipset_flush(dev);
4900
4901         obj->phys_obj->cur_obj = NULL;
4902         obj->phys_obj = NULL;
4903 }
4904
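/*
 * Back a GEM object with one of the physically contiguous phys objects
 * (allocating it on first use) and copy the object's current pages into
 * the contiguous buffer.
 */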
4905 int
4906 i915_gem_attach_phys_object(struct drm_device *dev,
4907                             struct drm_i915_gem_object *obj,
4908                             int id,
4909                             int align)
4910 {
4911         struct vm_object *mapping = obj->base.vm_obj;
4912         drm_i915_private_t *dev_priv = dev->dev_private;
4913         int ret = 0;
4914         int page_count;
4915         int i;
4916
4917         if (id > I915_MAX_PHYS_OBJECT)
4918                 return -EINVAL;
4919
4920         if (obj->phys_obj) {
4921                 if (obj->phys_obj->id == id)
4922                         return 0;
4923                 i915_gem_detach_phys_object(dev, obj);
4924         }
4925
4926         /* create a new object */
4927         if (!dev_priv->mm.phys_objs[id - 1]) {
4928                 ret = i915_gem_init_phys_object(dev, id,
4929                                                 obj->base.size, align);
4930                 if (ret) {
4931                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4932                                   id, obj->base.size);
4933                         return ret;
4934                 }
4935         }
4936
4937         /* bind to the object */
4938         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4939         obj->phys_obj->cur_obj = obj;
4940
4941         page_count = obj->base.size / PAGE_SIZE;
4942
4943         for (i = 0; i < page_count; i++) {
4944                 struct vm_page *page;
4945                 char *dst, *src;
4946
4947                 page = shmem_read_mapping_page(mapping, i);
4948                 if (IS_ERR(page))
4949                         return PTR_ERR(page);
4950
4951                 src = kmap_atomic(page);
4952                 dst = (char*)obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4953                 memcpy(dst, src, PAGE_SIZE);
4954                 kunmap_atomic(src);
4955
4956                 mark_page_accessed(page);
4957 #if 0
4958                 page_cache_release(page);
4959 #endif
4960                 vm_page_busy_wait(page, FALSE, "i915gem");
4961                 vm_page_unwire(page, 0);
4962                 vm_page_wakeup(page);
4963         }
4964
4965         return 0;
4966 }
4967
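/*
 * pwrite fast path for phys objects: try an atomic, non-blocking copy
 * straight into the contiguous backing store, falling back to a regular
 * copy_from_user with struct_mutex dropped if that cannot complete.
 */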
4968 static int
4969 i915_gem_phys_pwrite(struct drm_device *dev,
4970                      struct drm_i915_gem_object *obj,
4971                      struct drm_i915_gem_pwrite *args,
4972                      struct drm_file *file_priv)
4973 {
4974         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4975         char __user *user_data = to_user_ptr(args->data_ptr);
4976
4977         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4978                 unsigned long unwritten;
4979
4980                 /* The physical object once assigned is fixed for the lifetime
4981                  * of the obj, so we can safely drop the lock and continue
4982                  * to access vaddr.
4983                  */
4984                 mutex_unlock(&dev->struct_mutex);
4985                 unwritten = copy_from_user(vaddr, user_data, args->size);
4986                 mutex_lock(&dev->struct_mutex);
4987                 if (unwritten)
4988                         return -EFAULT;
4989         }
4990
4991         i915_gem_chipset_flush(dev);
4992         return 0;
4993 }
4994
4995 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4996 {
4997         struct drm_i915_file_private *file_priv = file->driver_priv;
4998
4999         /* Clean up our request list when the client is going away, so that
5000          * later retire_requests won't dereference our soon-to-be-gone
5001          * file_priv.
5002          */
5003         spin_lock(&file_priv->mm.lock);
5004         while (!list_empty(&file_priv->mm.request_list)) {
5005                 struct drm_i915_gem_request *request;
5006
5007                 request = list_first_entry(&file_priv->mm.request_list,
5008                                            struct drm_i915_gem_request,
5009                                            client_list);
5010                 list_del(&request->client_list);
5011                 request->file_priv = NULL;
5012         }
5013         spin_unlock(&file_priv->mm.lock);
5014 }
5015
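/*
 * DragonFly VM pager hooks for GEM mmap objects: the constructor only picks
 * a default page color, while the destructor tears down the mmap offset,
 * the GTT mapping and the GEM reference when the backing VM object goes
 * away.
 */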
5016 int
5017 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
5018     vm_ooffset_t foff, struct ucred *cred, u_short *color)
5019 {
5020         *color = 0; /* XXXKIB */
5021         return (0);
5022 }
5023
5024 void
5025 i915_gem_pager_dtor(void *handle)
5026 {
5027         struct drm_gem_object *obj;
5028         struct drm_device *dev;
5029
5030         obj = handle;
5031         dev = obj->dev;
5032
5033         mutex_lock(&dev->struct_mutex);
5034         drm_gem_free_mmap_offset(obj);
5035         i915_gem_release_mmap(to_intel_bo(obj));
5036         drm_gem_object_unreference(obj);
5037         mutex_unlock(&dev->struct_mutex);
5038 }
5039
5040 static void
5041 i915_gem_file_idle_work_handler(struct work_struct *work)
5042 {
5043         struct drm_i915_file_private *file_priv =
5044                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5045
5046         atomic_set(&file_priv->rps_wait_boost, false);
5047 }
5048
5049 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5050 {
5051         struct drm_i915_file_private *file_priv;
5052
5053         DRM_DEBUG_DRIVER("\n");
5054
5055         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5056         if (!file_priv)
5057                 return -ENOMEM;
5058
5059         file->driver_priv = file_priv;
5060         file_priv->dev_priv = dev->dev_private;
5061
5062         spin_init(&file_priv->mm.lock, "i915_priv");
5063         INIT_LIST_HEAD(&file_priv->mm.request_list);
5064         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5065                           i915_gem_file_idle_work_handler);
5066
5067         idr_init(&file_priv->context_idr);
5068
5069         return 0;
5070 }
5071
5072 #if 0
5073 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5074 {
5075         if (!mutex_is_locked(mutex))
5076                 return false;
5077
5078 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5079         return mutex->owner == task;
5080 #else
5081         /* Since UP may be pre-empted, we cannot assume that we own the lock */
5082         return false;
5083 #endif
5084 }
5085 #endif
5086
5087 #if 0
5088 static unsigned long
5089 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
5090 {
5091         struct drm_i915_private *dev_priv =
5092                 container_of(shrinker,
5093                              struct drm_i915_private,
5094                              mm.inactive_shrinker);
5095         struct drm_device *dev = dev_priv->dev;
5096         struct drm_i915_gem_object *obj;
5097         bool unlock = true;
5098         unsigned long count;
5099
5100         if (!mutex_trylock(&dev->struct_mutex)) {
5101                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5102                         return 0;
5103
5104                 if (dev_priv->mm.shrinker_no_lock_stealing)
5105                         return 0;
5106
5107                 unlock = false;
5108         }
5109
5110         count = 0;
5111         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5112                 if (obj->pages_pin_count == 0)
5113                         count += obj->base.size >> PAGE_SHIFT;
5114
5115         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5116                 if (obj->active)
5117                         continue;
5118
5119                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
5120                         count += obj->base.size >> PAGE_SHIFT;
5121         }
5122
5123         if (unlock)
5124                 mutex_unlock(&dev->struct_mutex);
5125
5126         return count;
5127 }
5128 #endif
5129
5130 /* Helpers for objects that can be bound into multiple address spaces (VMs) */
5131 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5132                                   struct i915_address_space *vm)
5133 {
5134         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5135         struct i915_vma *vma;
5136
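        /*
         * The aliasing PPGTT mirrors the global GTT layout, so offset
         * queries against it are answered from the object's GGTT binding.
         */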
5137         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
5138                 vm = &dev_priv->gtt.base;
5139
5140         BUG_ON(list_empty(&o->vma_list));
5141         list_for_each_entry(vma, &o->vma_list, vma_link) {
5142                 if (vma->vm == vm)
5143                         return vma->node.start;
5144
5145         }
5146         return -1;
5147 }
5148
5149 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5150                         struct i915_address_space *vm)
5151 {
5152         struct i915_vma *vma;
5153
5154         list_for_each_entry(vma, &o->vma_list, vma_link)
5155                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5156                         return true;
5157
5158         return false;
5159 }
5160
5161 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5162 {
5163         struct i915_vma *vma;
5164
5165         list_for_each_entry(vma, &o->vma_list, vma_link)
5166                 if (drm_mm_node_allocated(&vma->node))
5167                         return true;
5168
5169         return false;
5170 }
5171
5172 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5173                                 struct i915_address_space *vm)
5174 {
5175         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5176         struct i915_vma *vma;
5177
5178         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
5179                 vm = &dev_priv->gtt.base;
5180
5181         BUG_ON(list_empty(&o->vma_list));
5182
5183         list_for_each_entry(vma, &o->vma_list, vma_link)
5184                 if (vma->vm == vm)
5185                         return vma->node.size;
5186
5187         return 0;
5188 }
5189
5190 #if 0
5191 static unsigned long
5192 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5193 {
5194         struct drm_i915_private *dev_priv =
5195                 container_of(shrinker,
5196                              struct drm_i915_private,
5197                              mm.inactive_shrinker);
5198         struct drm_device *dev = dev_priv->dev;
5199         unsigned long freed;
5200         bool unlock = true;
5201
5202         if (!mutex_trylock(&dev->struct_mutex)) {
5203                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5204                         return SHRINK_STOP;
5205
5206                 if (dev_priv->mm.shrinker_no_lock_stealing)
5207                         return SHRINK_STOP;
5208
5209                 unlock = false;
5210         }
5211
5212         freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5213         if (freed < sc->nr_to_scan)
5214                 freed += __i915_gem_shrink(dev_priv,
5215                                            sc->nr_to_scan - freed,
5216                                            false);
5217         if (freed < sc->nr_to_scan)
5218                 freed += i915_gem_shrink_all(dev_priv);
5219
5220         if (unlock)
5221                 mutex_unlock(&dev->struct_mutex);
5222
5223         return freed;
5224 }
5225 #endif
5226
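/*
 * Return the GGTT VMA of an object, relying on the convention that the
 * global-GTT VMA, when it exists, is the first entry on the object's
 * vma_list; the WARNs below catch violations of that assumption.
 */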
5227 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5228 {
5229         struct i915_vma *vma;
5230
5231         if (WARN_ON(list_empty(&obj->vma_list)))
5232                 return NULL;
5233
5234         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5235         if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
5236                 return NULL;
5237
5238         return vma;
5239 }