1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  *
53  */
54
55 #include <machine/md_var.h>
56
57 #include <drm/drmP.h>
58 #include <drm/drm_vma_manager.h>
59 #include <drm/i915_drm.h>
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_drv.h"
63 #include <linux/shmem_fs.h>
64 #include <linux/slab.h>
65 #include <linux/swap.h>
66 #include <linux/pci.h>
67
68 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
69 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
70                                                    bool force);
71 static __must_check int
72 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
73                                bool readonly);
74 static __must_check int
75 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
76                            struct i915_address_space *vm,
77                            unsigned alignment,
78                            bool map_and_fenceable,
79                            bool nonblocking);
80 static int i915_gem_phys_pwrite(struct drm_device *dev,
81                                 struct drm_i915_gem_object *obj,
82                                 struct drm_i915_gem_pwrite *args,
83                                 struct drm_file *file);
84
85 static void i915_gem_write_fence(struct drm_device *dev, int reg,
86                                  struct drm_i915_gem_object *obj);
87 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
88                                          struct drm_i915_fence_reg *fence,
89                                          bool enable);
90
91 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
92 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
93 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
94
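/* A CPU read of the object is coherent (no clflush needed) when the chip
 * shares its last-level cache with the GPU, or when the object uses any
 * caching mode other than I915_CACHE_NONE. */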
95 static bool cpu_cache_is_coherent(struct drm_device *dev,
96                                   enum i915_cache_level level)
97 {
98         return HAS_LLC(dev) || level != I915_CACHE_NONE;
99 }
100
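/* CPU writes need a clflush when the CPU cache is not coherent with the
 * GPU, and also for objects pinned for display, since scanout is not
 * coherent with the CPU caches. */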
101 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
102 {
103         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
104                 return true;
105
106         return obj->pin_display;
107 }
108
109 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
110 {
111         if (obj->tiling_mode)
112                 i915_gem_release_mmap(obj);
113
114         /* As we do not have an associated fence register, we will force
115          * a tiling change if we ever need to acquire one.
116          */
117         obj->fence_dirty = false;
118         obj->fence_reg = I915_FENCE_REG_NONE;
119 }
120
121 /* some bookkeeping */
122 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
123                                   size_t size)
124 {
125         spin_lock(&dev_priv->mm.object_stat_lock);
126         dev_priv->mm.object_count++;
127         dev_priv->mm.object_memory += size;
128         spin_unlock(&dev_priv->mm.object_stat_lock);
129 }
130
131 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
132                                      size_t size)
133 {
134         spin_lock(&dev_priv->mm.object_stat_lock);
135         dev_priv->mm.object_count--;
136         dev_priv->mm.object_memory -= size;
137         spin_unlock(&dev_priv->mm.object_stat_lock);
138 }
139
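/* Block until any in-progress GPU reset has completed (or the GPU has been
 * declared terminally wedged).  Gives up with -EIO after ten seconds so a
 * stuck reset cannot hang userspace forever. */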
140 static int
141 i915_gem_wait_for_error(struct i915_gpu_error *error)
142 {
143         int ret;
144
145 #define EXIT_COND (!i915_reset_in_progress(error) || \
146                    i915_terminally_wedged(error))
147         if (EXIT_COND)
148                 return 0;
149
150         /*
151          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
152          * userspace. If it takes that long something really bad is going on and
153          * we should simply try to bail out and fail as gracefully as possible.
154          */
155         ret = wait_event_interruptible_timeout(error->reset_queue,
156                                                EXIT_COND,
157                                                10*HZ);
158         if (ret == 0) {
159                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
160                 return -EIO;
161         } else if (ret < 0) {
162                 return ret;
163         }
164 #undef EXIT_COND
165
166         return 0;
167 }
168
169 int i915_mutex_lock_interruptible(struct drm_device *dev)
170 {
171         struct drm_i915_private *dev_priv = dev->dev_private;
172         int ret;
173
174         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
175         if (ret)
176                 return ret;
177
178         ret = lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_SLEEPFAIL);
179         if (ret)
180                 return -EINTR;
181
182         WARN_ON(i915_verify_lists(dev));
183         return 0;
184 }
185
186 static inline bool
187 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
188 {
189         return i915_gem_obj_bound_any(obj) && !obj->active;
190 }
191
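/* Legacy user-mode-setting ioctl that carves out the GTT range for GEM to
 * manage.  Rejected outright under KMS and on gen5+ hardware, where UMS GEM
 * was never supported. */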
192 int
193 i915_gem_init_ioctl(struct drm_device *dev, void *data,
194                     struct drm_file *file)
195 {
196         struct drm_i915_private *dev_priv = dev->dev_private;
197         struct drm_i915_gem_init *args = data;
198
199         if (drm_core_check_feature(dev, DRIVER_MODESET))
200                 return -ENODEV;
201
202         if (args->gtt_start >= args->gtt_end ||
203             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
204                 return -EINVAL;
205
206         /* GEM with user mode setting was never supported on ilk and later. */
207         if (INTEL_INFO(dev)->gen >= 5)
208                 return -ENODEV;
209
210         mutex_lock(&dev->struct_mutex);
211         dev_priv->gtt.mappable_end = args->gtt_end;
212         kprintf("INITGLOBALGTT GTT_START %016jx\n", (uintmax_t)args->gtt_start);
213         i915_gem_init_global_gtt(dev);
214 #if 0
215         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
216                                   args->gtt_end);
217 #endif
218         mutex_unlock(&dev->struct_mutex);
219
220         return 0;
221 }
222
223 int
224 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
225                             struct drm_file *file)
226 {
227         struct drm_i915_private *dev_priv = dev->dev_private;
228         struct drm_i915_gem_get_aperture *args = data;
229         struct drm_i915_gem_object *obj;
230         size_t pinned;
231
232         pinned = 0;
233         mutex_lock(&dev->struct_mutex);
234         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
235                 if (obj->pin_count)
236                         pinned += i915_gem_obj_ggtt_size(obj);
237         mutex_unlock(&dev->struct_mutex);
238
239         args->aper_size = dev_priv->gtt.base.total;
240         args->aper_available_size = args->aper_size - pinned;
241
242         return 0;
243 }
244
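/* DragonFly-specific allocation of the GEM object structure: a zeroed
 * buffer from the M_DRM malloc type; M_WAITOK means this may sleep. */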
245 void *i915_gem_object_alloc(struct drm_device *dev)
246 {
247         return kmalloc(sizeof(struct drm_i915_gem_object),
248             M_DRM, M_WAITOK | M_ZERO);
249 }
250
251 void i915_gem_object_free(struct drm_i915_gem_object *obj)
252 {
253         kfree(obj);
254 }
255
256 static int
257 i915_gem_create(struct drm_file *file,
258                 struct drm_device *dev,
259                 uint64_t size,
260                 uint32_t *handle_p)
261 {
262         struct drm_i915_gem_object *obj;
263         int ret;
264         u32 handle;
265
266         size = roundup(size, PAGE_SIZE);
267         if (size == 0)
268                 return -EINVAL;
269
270         /* Allocate the new object */
271         obj = i915_gem_alloc_object(dev, size);
272         if (obj == NULL)
273                 return -ENOMEM;
274
275         ret = drm_gem_handle_create(file, &obj->base, &handle);
276         /* drop reference from allocate - handle holds it now */
277         drm_gem_object_unreference_unlocked(&obj->base);
278         if (ret)
279                 return ret;
280
281         *handle_p = handle;
282         return 0;
283 }
284
285 int
286 i915_gem_dumb_create(struct drm_file *file,
287                      struct drm_device *dev,
288                      struct drm_mode_create_dumb *args)
289 {
290         /* have to work out size/pitch and return them */
291         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
292         args->size = args->pitch * args->height;
293         return i915_gem_create(file, dev,
294                                args->size, &args->handle);
295 }
296
297 /**
298  * Creates a new mm object and returns a handle to it.
299  */
300 int
301 i915_gem_create_ioctl(struct drm_device *dev, void *data,
302                       struct drm_file *file)
303 {
304         struct drm_i915_gem_create *args = data;
305
306         return i915_gem_create(file, dev,
307                                args->size, &args->handle);
308 }
309
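/* Copy helpers for objects whose backing pages are bit-17 swizzled: the two
 * 64-byte cachelines of each 128-byte span are swapped, so the GPU-side
 * offset is XORed with 64 for every cacheline-sized chunk copied. */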
310 static inline int
311 __copy_to_user_swizzled(char __user *cpu_vaddr,
312                         const char *gpu_vaddr, int gpu_offset,
313                         int length)
314 {
315         int ret, cpu_offset = 0;
316
317         while (length > 0) {
318                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
319                 int this_length = min(cacheline_end - gpu_offset, length);
320                 int swizzled_gpu_offset = gpu_offset ^ 64;
321
322                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
323                                      gpu_vaddr + swizzled_gpu_offset,
324                                      this_length);
325                 if (ret)
326                         return ret + length;
327
328                 cpu_offset += this_length;
329                 gpu_offset += this_length;
330                 length -= this_length;
331         }
332
333         return 0;
334 }
335
336 static inline int
337 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
338                           const char __user *cpu_vaddr,
339                           int length)
340 {
341         int ret, cpu_offset = 0;
342
343         while (length > 0) {
344                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
345                 int this_length = min(cacheline_end - gpu_offset, length);
346                 int swizzled_gpu_offset = gpu_offset ^ 64;
347
348                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
349                                        cpu_vaddr + cpu_offset,
350                                        this_length);
351                 if (ret)
352                         return ret + length;
353
354                 cpu_offset += this_length;
355                 gpu_offset += this_length;
356                 length -= this_length;
357         }
358
359         return 0;
360 }
361
362 /* Per-page copy function for the shmem pread fastpath.
363  * Flushes invalid cachelines before reading the target if
364  * needs_clflush is set. */
365 static int
366 shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
367                  char __user *user_data,
368                  bool page_do_bit17_swizzling, bool needs_clflush)
369 {
370         char *vaddr;
371         int ret;
372
373         if (unlikely(page_do_bit17_swizzling))
374                 return -EINVAL;
375
376         vaddr = kmap_atomic(page);
377         if (needs_clflush)
378                 drm_clflush_virt_range(vaddr + shmem_page_offset,
379                                        page_length);
380         ret = __copy_to_user_inatomic(user_data,
381                                       vaddr + shmem_page_offset,
382                                       page_length);
383         kunmap_atomic(vaddr);
384
385         return ret ? -EFAULT : 0;
386 }
387
388 static void
389 shmem_clflush_swizzled_range(char *addr, unsigned long length,
390                              bool swizzled)
391 {
392         if (unlikely(swizzled)) {
393                 unsigned long start = (unsigned long) addr;
394                 unsigned long end = (unsigned long) addr + length;
395
396                 /* For swizzling simply ensure that we always flush both
397                  * channels. Lame, but simple and it works. Swizzled
398                  * pwrite/pread is far from a hotpath - current userspace
399                  * doesn't use it at all. */
400                 start = round_down(start, 128);
401                 end = round_up(end, 128);
402
403                 drm_clflush_virt_range((void *)start, end - start);
404         } else {
405                 drm_clflush_virt_range(addr, length);
406         }
407
408 }
409
410 /* Only difference to the fast-path function is that this can handle bit17
411  * and uses non-atomic copy and kmap functions. */
412 static int
413 shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
414                  char __user *user_data,
415                  bool page_do_bit17_swizzling, bool needs_clflush)
416 {
417         char *vaddr;
418         int ret;
419
420         vaddr = kmap(page);
421         if (needs_clflush)
422                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
423                                              page_length,
424                                              page_do_bit17_swizzling);
425
426         if (page_do_bit17_swizzling)
427                 ret = __copy_to_user_swizzled(user_data,
428                                               vaddr, shmem_page_offset,
429                                               page_length);
430         else
431                 ret = __copy_to_user(user_data,
432                                      vaddr + shmem_page_offset,
433                                      page_length);
434         kunmap(page);
435
436         return ret ? -EFAULT : 0;
437 }
438
439 static int
440 i915_gem_shmem_pread(struct drm_device *dev,
441                      struct drm_i915_gem_object *obj,
442                      struct drm_i915_gem_pread *args,
443                      struct drm_file *file)
444 {
445         char __user *user_data;
446         ssize_t remain;
447         loff_t offset;
448         int shmem_page_offset, page_length, ret = 0;
449         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
450         int prefaulted = 0;
451         int needs_clflush = 0;
452         int i;
453
454         user_data = to_user_ptr(args->data_ptr);
455         remain = args->size;
456
457         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
458
459         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
460                 /* If we're not in the cpu read domain, set ourselves into the gtt
461                  * read domain and manually flush cachelines (if required). This
462                  * optimizes for the case when the gpu will dirty the data
463                  * anyway again before the next pread happens. */
464                 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
465                 ret = i915_gem_object_wait_rendering(obj, true);
466                 if (ret)
467                         return ret;
468         }
469
470         ret = i915_gem_object_get_pages(obj);
471         if (ret)
472                 return ret;
473
474         i915_gem_object_pin_pages(obj);
475
476         offset = args->offset;
477
478         for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
479                 struct vm_page *page = obj->pages[i];
480
481                 if (remain <= 0)
482                         break;
483
484                 /* Operation in this page
485                  *
486                  * shmem_page_offset = offset within page in shmem file
487                  * page_length = bytes to copy for this page
488                  */
489                 shmem_page_offset = offset_in_page(offset);
490                 page_length = remain;
491                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
492                         page_length = PAGE_SIZE - shmem_page_offset;
493
494                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
495                         (page_to_phys(page) & (1 << 17)) != 0;
496
497                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
498                                        user_data, page_do_bit17_swizzling,
499                                        needs_clflush);
500                 if (ret == 0)
501                         goto next_page;
502
503                 mutex_unlock(&dev->struct_mutex);
504
505                 if (likely(!i915_prefault_disable) && !prefaulted) {
506                         ret = fault_in_multipages_writeable(user_data, remain);
507                         /* Userspace is tricking us, but we've already clobbered
508                          * its pages with the prefault and promised to write the
509                          * data up to the first fault. Hence ignore any errors
510                          * and just continue. */
511                         (void)ret;
512                         prefaulted = 1;
513                 }
514
515                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
516                                        user_data, page_do_bit17_swizzling,
517                                        needs_clflush);
518
519                 mutex_lock(&dev->struct_mutex);
520
521 next_page:
522                 mark_page_accessed(page);
523
524                 if (ret)
525                         goto out;
526
527                 remain -= page_length;
528                 user_data += page_length;
529                 offset += page_length;
530         }
531
532 out:
533         i915_gem_object_unpin_pages(obj);
534
535         return ret;
536 }
537
538 /**
539  * Reads data from the object referenced by handle.
540  *
541  * On error, the contents of *data are undefined.
542  */
543 int
544 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
545                      struct drm_file *file)
546 {
547         struct drm_i915_gem_pread *args = data;
548         struct drm_i915_gem_object *obj;
549         int ret = 0;
550
551         if (args->size == 0)
552                 return 0;
553
554         ret = i915_mutex_lock_interruptible(dev);
555         if (ret)
556                 return ret;
557
558         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
559         if (&obj->base == NULL) {
560                 ret = -ENOENT;
561                 goto unlock;
562         }
563
564         /* Bounds check source.  */
565         if (args->offset > obj->base.size ||
566             args->size > obj->base.size - args->offset) {
567                 ret = -EINVAL;
568                 goto out;
569         }
570
571         trace_i915_gem_object_pread(obj, args->offset, args->size);
572
573         ret = i915_gem_shmem_pread(dev, obj, args, file);
574
575 out:
576         drm_gem_object_unreference(&obj->base);
577 unlock:
578         mutex_unlock(&dev->struct_mutex);
579         return ret;
580 }
581
582 /* This is the fast write path which cannot handle
583  * page faults in the source data
584  */
585
586 #if 0   /* XXX: buggy on core2 machines */
587 static inline int
588 fast_user_write(struct io_mapping *mapping,
589                 loff_t page_base, int page_offset,
590                 char __user *user_data,
591                 int length)
592 {
593         void __iomem *vaddr_atomic;
594         void *vaddr;
595         unsigned long unwritten;
596
597         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
598         /* We can use the cpu mem copy function because this is X86. */
599         vaddr = (char __force*)vaddr_atomic + page_offset;
600         unwritten = __copy_from_user_inatomic_nocache(vaddr,
601                                                       user_data, length);
602         io_mapping_unmap_atomic(vaddr_atomic);
603         return unwritten;
604 }
605 #endif
606
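/* DragonFly replacement for the Linux fast_user_write() path above: map the
 * target GTT range write-combined with pmap_mapdev_attr() and copy the user
 * data in with copyin_nofault(), returning a negative errno on fault. */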
607 static int
608 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
609     uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
610 {
611         vm_offset_t mkva;
612         int ret;
613
614         /*
615          * Pass the unaligned physical address and size to pmap_mapdev_attr()
616          * so it can properly calculate whether an extra page needs to be
617          * mapped or not to cover the requested range.  The function will
618          * add the page offset into the returned mkva for us.
619          */
620         mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base +
621             i915_gem_obj_ggtt_offset(obj) + offset, size, PAT_WRITE_COMBINING);
622         ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
623         pmap_unmapdev(mkva, size);
624         return ret;
625 }
626
627 /**
628  * This is the fast pwrite path, where we copy the data directly from the
629  * user into the GTT, uncached.
630  */
631 static int
632 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
633                          struct drm_i915_gem_object *obj,
634                          struct drm_i915_gem_pwrite *args,
635                          struct drm_file *file)
636 {
637         ssize_t remain;
638         loff_t offset, page_base;
639         char __user *user_data;
640         int page_offset, page_length, ret;
641
642         ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
643         if (ret)
644                 goto out;
645
646         ret = i915_gem_object_set_to_gtt_domain(obj, true);
647         if (ret)
648                 goto out_unpin;
649
650         ret = i915_gem_object_put_fence(obj);
651         if (ret)
652                 goto out_unpin;
653
654         user_data = to_user_ptr(args->data_ptr);
655         remain = args->size;
656
657         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
658
659         while (remain > 0) {
660                 /* Operation in this page
661                  *
662                  * page_base = page offset within aperture
663                  * page_offset = offset within page
664                  * page_length = bytes to copy for this page
665                  */
666                 page_base = offset & ~PAGE_MASK;
667                 page_offset = offset_in_page(offset);
668                 page_length = remain;
669                 if ((page_offset + remain) > PAGE_SIZE)
670                         page_length = PAGE_SIZE - page_offset;
671
672                 /* If we get a fault while copying data, then (presumably) our
673                  * source page isn't available.  Return the error and we'll
674                  * retry in the slow path.
675                  */
676 #if 0
677                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
678                                     page_offset, user_data, page_length)) {
679 #else
680                 if (i915_gem_gtt_write(dev, obj, args->data_ptr, args->size, args->offset, file)) {
681 #endif
682                         ret = -EFAULT;
683                         goto out_unpin;
684                 }
685
686                 remain -= page_length;
687                 user_data += page_length;
688                 offset += page_length;
689         }
690
691 out_unpin:
692         i915_gem_object_unpin(obj);
693 out:
694         return ret;
695 }
696
697 /* Per-page copy function for the shmem pwrite fastpath.
698  * Flushes invalid cachelines before writing to the target if
699  * needs_clflush_before is set and flushes out any written cachelines after
700  * writing if needs_clflush is set. */
701 static int
702 shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
703                   char __user *user_data,
704                   bool page_do_bit17_swizzling,
705                   bool needs_clflush_before,
706                   bool needs_clflush_after)
707 {
708         char *vaddr;
709         int ret;
710
711         if (unlikely(page_do_bit17_swizzling))
712                 return -EINVAL;
713
714         vaddr = kmap_atomic(page);
715         if (needs_clflush_before)
716                 drm_clflush_virt_range(vaddr + shmem_page_offset,
717                                        page_length);
718         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
719                                                 user_data,
720                                                 page_length);
721         if (needs_clflush_after)
722                 drm_clflush_virt_range(vaddr + shmem_page_offset,
723                                        page_length);
724         kunmap_atomic(vaddr);
725
726         return ret ? -EFAULT : 0;
727 }
728
729 /* Only difference to the fast-path function is that this can handle bit17
730  * and uses non-atomic copy and kmap functions. */
731 static int
732 shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
733                   char __user *user_data,
734                   bool page_do_bit17_swizzling,
735                   bool needs_clflush_before,
736                   bool needs_clflush_after)
737 {
738         char *vaddr;
739         int ret;
740
741         vaddr = kmap(page);
742         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
743                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
744                                              page_length,
745                                              page_do_bit17_swizzling);
746         if (page_do_bit17_swizzling)
747                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
748                                                 user_data,
749                                                 page_length);
750         else
751                 ret = __copy_from_user(vaddr + shmem_page_offset,
752                                        user_data,
753                                        page_length);
754         if (needs_clflush_after)
755                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
756                                              page_length,
757                                              page_do_bit17_swizzling);
758         kunmap(page);
759
760         return ret ? -EFAULT : 0;
761 }
762
763 static int
764 i915_gem_shmem_pwrite(struct drm_device *dev,
765                       struct drm_i915_gem_object *obj,
766                       struct drm_i915_gem_pwrite *args,
767                       struct drm_file *file)
768 {
769         ssize_t remain;
770         loff_t offset;
771         char __user *user_data;
772         int shmem_page_offset, page_length, ret = 0;
773         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
774         int hit_slowpath = 0;
775         int needs_clflush_after = 0;
776         int needs_clflush_before = 0;
777         int i;
778
779         user_data = to_user_ptr(args->data_ptr);
780         remain = args->size;
781
782         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
783
784         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
785                 /* If we're not in the cpu write domain, set ourselves into the gtt
786                  * write domain and manually flush cachelines (if required). This
787                  * optimizes for the case when the gpu will use the data
788                  * right away and we therefore have to clflush anyway. */
789                 needs_clflush_after = cpu_write_needs_clflush(obj);
790                 ret = i915_gem_object_wait_rendering(obj, false);
791                 if (ret)
792                         return ret;
793         }
794         /* Same trick applies to invalidate partially written cachelines read
795          * before writing. */
796         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
797                 needs_clflush_before =
798                         !cpu_cache_is_coherent(dev, obj->cache_level);
799
800         ret = i915_gem_object_get_pages(obj);
801         if (ret)
802                 return ret;
803
804         i915_gem_object_pin_pages(obj);
805
806         offset = args->offset;
807         obj->dirty = 1;
808
809         VM_OBJECT_LOCK(obj->base.vm_obj);
810         vm_object_pip_add(obj->base.vm_obj, 1);
811         for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
812                 struct vm_page *page = obj->pages[i];
813                 int partial_cacheline_write;
814
815                 if (i < offset >> PAGE_SHIFT)
816                         continue;
817
818                 if (remain <= 0)
819                         break;
820
821                 /* Operation in this page
822                  *
823                  * shmem_page_offset = offset within page in shmem file
824                  * page_length = bytes to copy for this page
825                  */
826                 shmem_page_offset = offset_in_page(offset);
827
828                 page_length = remain;
829                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
830                         page_length = PAGE_SIZE - shmem_page_offset;
831
832                 /* If we don't overwrite a cacheline completely we need to be
833                  * careful to have up-to-date data by first clflushing. Don't
834          * overcomplicate things and flush the entire page. */
835                 partial_cacheline_write = needs_clflush_before &&
836                         ((shmem_page_offset | page_length)
837                                 & (cpu_clflush_line_size - 1));
838
839                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
840                         (page_to_phys(page) & (1 << 17)) != 0;
841
842                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
843                                         user_data, page_do_bit17_swizzling,
844                                         partial_cacheline_write,
845                                         needs_clflush_after);
846                 if (ret == 0)
847                         goto next_page;
848
849                 hit_slowpath = 1;
850                 mutex_unlock(&dev->struct_mutex);
851                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
852                                         user_data, page_do_bit17_swizzling,
853                                         partial_cacheline_write,
854                                         needs_clflush_after);
855
856                 mutex_lock(&dev->struct_mutex);
857
858 next_page:
859                 set_page_dirty(page);
860                 mark_page_accessed(page);
861
862                 if (ret)
863                         goto out;
864
865                 remain -= page_length;
866                 user_data += page_length;
867                 offset += page_length;
868         }
869         vm_object_pip_wakeup(obj->base.vm_obj);
870         VM_OBJECT_UNLOCK(obj->base.vm_obj);
871
872 out:
873         i915_gem_object_unpin_pages(obj);
874
875         if (hit_slowpath) {
876                 /*
877                  * Fixup: Flush cpu caches in case we didn't flush the dirty
878                  * cachelines in-line while writing and the object moved
879                  * out of the cpu write domain while we've dropped the lock.
880                  */
881                 if (!needs_clflush_after &&
882                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
883                         if (i915_gem_clflush_object(obj, obj->pin_display))
884                                 i915_gem_chipset_flush(dev);
885                 }
886         }
887
888         if (needs_clflush_after)
889                 i915_gem_chipset_flush(dev);
890
891         return ret;
892 }
893
894 /**
895  * Writes data to the object referenced by handle.
896  *
897  * On error, the contents of the buffer that were to be modified are undefined.
898  */
899 int
900 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
901                       struct drm_file *file)
902 {
903         struct drm_i915_gem_pwrite *args = data;
904         struct drm_i915_gem_object *obj;
905         int ret;
906
907         if (args->size == 0)
908                 return 0;
909
910         if (likely(!i915_prefault_disable)) {
911                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
912                                                    args->size);
913                 if (ret)
914                         return -EFAULT;
915         }
916
917         ret = i915_mutex_lock_interruptible(dev);
918         if (ret)
919                 return ret;
920
921         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
922         if (&obj->base == NULL) {
923                 ret = -ENOENT;
924                 goto unlock;
925         }
926
927         /* Bounds check destination. */
928         if (args->offset > obj->base.size ||
929             args->size > obj->base.size - args->offset) {
930                 ret = -EINVAL;
931                 goto out;
932         }
933
934         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
935
936         ret = -EFAULT;
937         /* We can only do the GTT pwrite on untiled buffers, as otherwise
938          * it would end up going through the fenced access, and we'll get
939          * different detiling behavior between reading and writing.
940          * pread/pwrite currently are reading and writing from the CPU
941          * perspective, requiring manual detiling by the client.
942          */
943         if (obj->phys_obj) {
944                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
945                 goto out;
946         }
947
948         if (obj->tiling_mode == I915_TILING_NONE &&
949             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
950             cpu_write_needs_clflush(obj)) {
951                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
952                 /* Note that the gtt paths might fail with non-page-backed user
953                  * pointers (e.g. gtt mappings when moving data between
954                  * textures). Fall back to the shmem path in that case. */
955         }
956
957         if (ret == -EFAULT || ret == -ENOSPC)
958                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
959
960 out:
961         drm_gem_object_unreference(&obj->base);
962 unlock:
963         mutex_unlock(&dev->struct_mutex);
964         return ret;
965 }
966
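/* Translate the current GPU reset state into an errno: -EAGAIN while a reset
 * is pending so interruptible callers can back off and retry, -EIO if the
 * caller cannot handle that or the GPU is terminally wedged, 0 otherwise. */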
967 int
968 i915_gem_check_wedge(struct i915_gpu_error *error,
969                      bool interruptible)
970 {
971         if (i915_reset_in_progress(error)) {
972                 /* Non-interruptible callers can't handle -EAGAIN, hence return
973                  * -EIO unconditionally for these. */
974                 if (!interruptible)
975                         return -EIO;
976
977                 /* Recovery complete, but the reset failed ... */
978                 if (i915_terminally_wedged(error))
979                         return -EIO;
980
981                 return -EAGAIN;
982         }
983
984         return 0;
985 }
986
987 /*
988  * Compare seqno against outstanding lazy request. Emit a request if they are
989  * equal.
990  */
991 static int
992 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
993 {
994         int ret;
995
996         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
997
998         ret = 0;
999         if (seqno == ring->outstanding_lazy_seqno)
1000                 ret = i915_add_request(ring, NULL);
1001
1002         return ret;
1003 }
1004
1005 #if 0
1006 static void fake_irq(unsigned long data)
1007 {
1008         wake_up_process((struct task_struct *)data);
1009 }
1010
1011 static bool missed_irq(struct drm_i915_private *dev_priv,
1012                        struct intel_ring_buffer *ring)
1013 {
1014         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1015 }
1016
1017 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1018 {
1019         if (file_priv == NULL)
1020                 return true;
1021
1022         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1023 }
1024 #endif
1025
1026 /**
1027  * __wait_seqno - wait until execution of seqno has finished
1028  * @ring: the ring expected to report seqno
1029  * @seqno: duh!
1030  * @reset_counter: reset sequence associated with the given seqno
1031  * @interruptible: do an interruptible wait (normally yes)
1032  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1033  *
1034  * Note: It is of utmost importance that the passed in seqno and reset_counter
1035  * values have been read by the caller in an smp safe manner. Where read-side
1036  * locks are involved, it is sufficient to read the reset_counter before
1037  * unlocking the lock that protects the seqno. For lockless tricks, the
1038  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1039  * inserted.
1040  *
1041  * Returns 0 if the seqno was found within the allotted time. Else returns the
1042  * errno with remaining time filled in timeout argument.
1043  */
1044 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1045                         unsigned reset_counter,
1046                         bool interruptible, struct timespec *timeout)
1047 {
1048         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1049         struct timespec before, now, wait_time={1,0};
1050         unsigned long timeout_jiffies;
1051         long end;
1052         bool wait_forever = true;
1053         int ret;
1054
1055         WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1056
1057         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1058                 return 0;
1059
1060         trace_i915_gem_request_wait_begin(ring, seqno);
1061
1062         if (timeout != NULL) {
1063                 wait_time = *timeout;
1064                 wait_forever = false;
1065         }
1066
1067         timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1068
1069         if (WARN_ON(!ring->irq_get(ring)))
1070                 return -ENODEV;
1071
1072         /* Record current time in case interrupted by signal, or wedged */
1073         getrawmonotonic(&before);
1074
1075 #define EXIT_COND \
1076         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1077          i915_reset_in_progress(&dev_priv->gpu_error) || \
1078          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1079         do {
1080                 if (interruptible)
1081                         end = wait_event_interruptible_timeout(ring->irq_queue,
1082                                                                EXIT_COND,
1083                                                                timeout_jiffies);
1084                 else
1085                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1086                                                  timeout_jiffies);
1087
1088                 /* We need to check whether any gpu reset happened in between
1089                  * the caller grabbing the seqno and now ... */
1090                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1091                         end = -EAGAIN;
1092
1093                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1094                  * gone. */
1095                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1096                 if (ret)
1097                         end = ret;
1098         } while (end == 0 && wait_forever);
1099
1100         getrawmonotonic(&now);
1101
1102         ring->irq_put(ring);
1103         trace_i915_gem_request_wait_end(ring, seqno);
1104 #undef EXIT_COND
1105
1106         if (timeout) {
1107                 struct timespec sleep_time = timespec_sub(now, before);
1108                 *timeout = timespec_sub(*timeout, sleep_time);
1109                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1110                         set_normalized_timespec(timeout, 0, 0);
1111         }
1112
1113         switch (end) {
1114         case -EIO:
1115         case -EAGAIN: /* Wedged */
1116         case -ERESTARTSYS: /* Signal */
1117                 return (int)end;
1118         case 0: /* Timeout */
1119                 return -ETIMEDOUT;      /* -ETIME on Linux */
1120         default: /* Completed */
1121                 WARN_ON(end < 0); /* We're not aware of other errors */
1122                 return 0;
1123         }
1124 }
1125
1126 /**
1127  * Waits for a sequence number to be signaled, and cleans up the
1128  * request and object lists appropriately for that event.
1129  */
1130 int
1131 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1132 {
1133         struct drm_device *dev = ring->dev;
1134         struct drm_i915_private *dev_priv = dev->dev_private;
1135         bool interruptible = dev_priv->mm.interruptible;
1136         int ret;
1137
1138         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1139         BUG_ON(seqno == 0);
1140
1141         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1142         if (ret)
1143                 return ret;
1144
1145         ret = i915_gem_check_olr(ring, seqno);
1146         if (ret)
1147                 return ret;
1148
1149         return __wait_seqno(ring, seqno,
1150                             atomic_read(&dev_priv->gpu_error.reset_counter),
1151                             interruptible, NULL);
1152 }
1153
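/* Common tail for the rendering waits below: retire whatever the ring has
 * completed and drop the object's stale GPU write tracking, since a
 * successful wait implies the last write has already landed. */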
1154 static int
1155 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1156                                      struct intel_ring_buffer *ring)
1157 {
1158         i915_gem_retire_requests_ring(ring);
1159
1160         /* Manually manage the write flush as we may have not yet
1161          * retired the buffer.
1162          *
1163          * Note that the last_write_seqno is always the earlier of
1164          * the two (read/write) seqno, so if we have successfully waited,
1165          * we know we have passed the last write.
1166          */
1167         obj->last_write_seqno = 0;
1168         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1169
1170         return 0;
1171 }
1172
1173 /**
1174  * Ensures that all rendering to the object has completed and the object is
1175  * safe to unbind from the GTT or access from the CPU.
1176  */
1177 static __must_check int
1178 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1179                                bool readonly)
1180 {
1181         struct intel_ring_buffer *ring = obj->ring;
1182         u32 seqno;
1183         int ret;
1184
1185         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1186         if (seqno == 0)
1187                 return 0;
1188
1189         ret = i915_wait_seqno(ring, seqno);
1190         if (ret)
1191                 return ret;
1192
1193         return i915_gem_object_wait_rendering__tail(obj, ring);
1194 }
1195
1196 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1197  * as the object state may change during this call.
1198  */
1199 static __must_check int
1200 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1201                                             bool readonly)
1202 {
1203         struct drm_device *dev = obj->base.dev;
1204         struct drm_i915_private *dev_priv = dev->dev_private;
1205         struct intel_ring_buffer *ring = obj->ring;
1206         unsigned reset_counter;
1207         u32 seqno;
1208         int ret;
1209
1210         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1211         BUG_ON(!dev_priv->mm.interruptible);
1212
1213         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1214         if (seqno == 0)
1215                 return 0;
1216
1217         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1218         if (ret)
1219                 return ret;
1220
1221         ret = i915_gem_check_olr(ring, seqno);
1222         if (ret)
1223                 return ret;
1224
1225         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1226         mutex_unlock(&dev->struct_mutex);
1227         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1228         mutex_lock(&dev->struct_mutex);
1229         if (ret)
1230                 return ret;
1231
1232         return i915_gem_object_wait_rendering__tail(obj, ring);
1233 }
1234
1235 /**
1236  * Called when user space prepares to use an object with the CPU, either
1237  * through the mmap ioctl's mapping or a GTT mapping.
1238  */
1239 int
1240 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1241                           struct drm_file *file)
1242 {
1243         struct drm_i915_gem_set_domain *args = data;
1244         struct drm_i915_gem_object *obj;
1245         uint32_t read_domains = args->read_domains;
1246         uint32_t write_domain = args->write_domain;
1247         int ret;
1248
1249         /* Only handle setting domains to types used by the CPU. */
1250         if (write_domain & I915_GEM_GPU_DOMAINS)
1251                 return -EINVAL;
1252
1253         if (read_domains & I915_GEM_GPU_DOMAINS)
1254                 return -EINVAL;
1255
1256         /* Having something in the write domain implies it's in the read
1257          * domain, and only that read domain.  Enforce that in the request.
1258          */
1259         if (write_domain != 0 && read_domains != write_domain)
1260                 return -EINVAL;
1261
1262         ret = i915_mutex_lock_interruptible(dev);
1263         if (ret)
1264                 return ret;
1265
1266         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1267         if (&obj->base == NULL) {
1268                 ret = -ENOENT;
1269                 goto unlock;
1270         }
1271
1272         /* Try to flush the object off the GPU without holding the lock.
1273          * We will repeat the flush holding the lock in the normal manner
1274          * to catch cases where we are gazumped.
1275          */
1276         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1277         if (ret)
1278                 goto unref;
1279
1280         if (read_domains & I915_GEM_DOMAIN_GTT) {
1281                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1282
1283                 /* Silently promote "you're not bound, there was nothing to do"
1284                  * to success, since the client was just asking us to
1285                  * make sure everything was done.
1286                  */
1287                 if (ret == -EINVAL)
1288                         ret = 0;
1289         } else {
1290                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1291         }
1292
1293 unref:
1294         drm_gem_object_unreference(&obj->base);
1295 unlock:
1296         mutex_unlock(&dev->struct_mutex);
1297         return ret;
1298 }
1299
1300 /**
1301  * Called when user space has done writes to this buffer
1302  */
1303 int
1304 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1305                          struct drm_file *file)
1306 {
1307         struct drm_i915_gem_sw_finish *args = data;
1308         struct drm_i915_gem_object *obj;
1309         int ret = 0;
1310
1311         ret = i915_mutex_lock_interruptible(dev);
1312         if (ret)
1313                 return ret;
1314
1315         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1316         if (&obj->base == NULL) {
1317                 ret = -ENOENT;
1318                 goto unlock;
1319         }
1320
1321         /* Pinned buffers may be scanout, so flush the cache */
1322         if (obj->pin_display)
1323                 i915_gem_object_flush_cpu_write_domain(obj, true);
1324
1325         drm_gem_object_unreference(&obj->base);
1326 unlock:
1327         mutex_unlock(&dev->struct_mutex);
1328         return ret;
1329 }
1330
1331 /**
1332  * Maps the contents of an object, returning the address it is mapped
1333  * into.
1334  *
1335  * While the mapping holds a reference on the contents of the object, it doesn't
1336  * imply a ref on the object itself.
1337  */
1338 int
1339 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1340                     struct drm_file *file)
1341 {
1342         struct drm_i915_gem_mmap *args = data;
1343         struct drm_gem_object *obj;
1344         unsigned long addr;
1345         struct proc *p = curproc;
1346         vm_map_t map = &p->p_vmspace->vm_map;
1347         vm_size_t size;
1348         int error = 0, rv;
1349
1350         obj = drm_gem_object_lookup(dev, file, args->handle);
1351         if (obj == NULL)
1352                 return -ENOENT;
1353
1354         if (args->size == 0)
1355                 goto out;
1356
1357         size = round_page(args->size);
1358         if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
1359                 error = -ENOMEM;
1360                 goto out;
1361         }
1362
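        /*
         * Grab a reference on the backing VM object and map it shared,
         * read/write, into the calling process at a kernel-chosen address.
         */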
1363         addr = 0;
1364         vm_object_hold(obj->vm_obj);
1365         vm_object_reference_locked(obj->vm_obj);
1366         vm_object_drop(obj->vm_obj);
1367         rv = vm_map_find(map, obj->vm_obj, NULL,
1368                          args->offset, &addr, args->size,
1369                          PAGE_SIZE, /* align */
1370                          TRUE, /* fitit */
1371                          VM_MAPTYPE_NORMAL, /* maptype */
1372                          VM_PROT_READ | VM_PROT_WRITE, /* prot */
1373                          VM_PROT_READ | VM_PROT_WRITE, /* max */
1374                          MAP_SHARED /* cow */);
1375         if (rv != KERN_SUCCESS) {
1376                 vm_object_deallocate(obj->vm_obj);
1377                 error = -vm_mmap_to_errno(rv);
1378         } else {
1379                 args->addr_ptr = (uint64_t)addr;
1380         }
1381 out:
1382         drm_gem_object_unreference(obj);
1383         return (error);
1384 }
1385
1386 /**
1387  * i915_gem_fault - fault a page into the GTT
1388  * vma: VMA in question
1389  * vmf: fault info
1390  *
1391  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1392  * from userspace.  The fault handler takes care of binding the object to
1393  * the GTT (if needed), allocating and programming a fence register (again,
1394  * only if needed based on whether the old reg is still valid or the object
1395  * is tiled) and inserting a new PTE into the faulting process.
1396  *
1397  * Note that the faulting process may involve evicting existing objects
1398  * from the GTT and/or fence registers to make room.  So performance may
1399  * suffer if the GTT working set is large or there are few fence registers
1400  * left.
1401  */
1402 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres)
1403 {
1404         struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle);
1405         struct drm_device *dev = obj->base.dev;
1406         drm_i915_private_t *dev_priv = dev->dev_private;
1407         unsigned long page_offset;
1408         vm_page_t m, oldm = NULL;
1409         int ret = 0;
1410         bool write = !!(prot & VM_PROT_WRITE);
1411
1412         intel_runtime_pm_get(dev_priv);
1413
1414         /* We don't use vmf->pgoff since that has the fake offset */
1415         page_offset = (unsigned long)offset;
1416
1417 /* Magic FreeBSD VM stuff */
1418         vm_object_pip_add(vm_obj, 1);
1419
1420         /*
1421          * Remove the placeholder page inserted by vm_fault() from the
1422          * object before dropping the object lock. If
1423          * i915_gem_release_mmap() is active in parallel on this gem
1424          * object, then it owns the drm device sx and might find the
1425          * placeholder already. Then, since the page is busy,
1426          * i915_gem_release_mmap() sleeps waiting for the busy state
1427          * of the page to be cleared. We will not be able to acquire the
1428          * drm device lock until i915_gem_release_mmap() is able to make
1429          * progress.
1430          */
1431         if (*mres != NULL) {
1432                 oldm = *mres;
1433                 vm_page_remove(oldm);
1434                 *mres = NULL;
1435         } else
1436                 oldm = NULL;
1437 retry:
1438         VM_OBJECT_UNLOCK(vm_obj);
1439 unlocked_vmobj:
1440         ret = 0;
1441         m = NULL;
1442
1443         mutex_lock(&dev->struct_mutex);
1444
1445         /*
1446          * Since the object lock was dropped, another thread might have
1447          * faulted on the same GTT address and instantiated the
1448          * mapping for the page.  Recheck.
1449          */
1450         VM_OBJECT_LOCK(vm_obj);
1451         m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1452         if (m != NULL) {
1453                 if ((m->flags & PG_BUSY) != 0) {
1454                         mutex_unlock(&dev->struct_mutex);
1455                         goto retry;
1456                 }
1457                 goto have_page;
1458         } else
1459                 VM_OBJECT_UNLOCK(vm_obj);
1460 /* End magic VM stuff */
1461
1462         trace_i915_gem_object_fault(obj, page_offset, true, write);
1463
1464         /* Access to snoopable pages through the GTT is incoherent. */
1465         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1466                 ret = -EINVAL;
1467                 goto unlock;
1468         }
1469
1470         /* Now bind it into the GTT if needed */
1471         ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
1472         if (ret)
1473                 goto unlock;
1474
1475         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1476         if (ret)
1477                 goto unpin;
1478
1479         ret = i915_gem_object_get_fence(obj);
1480         if (ret)
1481                 goto unpin;
1482
1483         obj->fault_mappable = true;
1484
1485         VM_OBJECT_LOCK(vm_obj);
1486         m = vm_phys_fictitious_to_vm_page(dev->agp->base +
1487             i915_gem_obj_ggtt_offset(obj) + offset);
1488         if (m == NULL) {
1489                 ret = -EFAULT;
1490                 goto unpin;
1491         }
1492         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1493             ("not fictitious %p", m));
1494         KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1495
1496         if ((m->flags & PG_BUSY) != 0) {
1497                 i915_gem_object_unpin(obj);
1498                 mutex_unlock(&dev->struct_mutex);
1499                 goto retry;
1500         }
1501         m->valid = VM_PAGE_BITS_ALL;
1502
1503         /* Finally, remap it using the new GTT offset */
1504         vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
1505 have_page:
1506         *mres = m;
1507         vm_page_busy_try(m, false);
1508
1509         i915_gem_object_unpin(obj);
1510         mutex_unlock(&dev->struct_mutex);
1511         if (oldm != NULL) {
1512                 vm_page_free(oldm);
1513         }
1514         vm_object_pip_wakeup(vm_obj);
        intel_runtime_pm_put(dev_priv);
1515         return (VM_PAGER_OK);
1516
1517 unpin:
1518         i915_gem_object_unpin(obj);
1519 unlock:
1520         mutex_unlock(&dev->struct_mutex);
1521
1522         KASSERT(ret != 0, ("i915_gem_fault: wrong return"));
1523         switch (ret) {
1524         case -EIO:
1525         case -EAGAIN:
1526         case -EINTR:
1527                 goto unlocked_vmobj;
1528         default:
1529                 VM_OBJECT_LOCK(vm_obj);
1530                 vm_object_pip_wakeup(vm_obj);
1531                 ret = VM_PAGER_ERROR;
1532         }
1533
1534         intel_runtime_pm_put(dev_priv);
1535         return ret;
1536 }
1537
1538 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1539 {
1540         struct i915_vma *vma;
1541
1542         /*
1543          * Only the global gtt is relevant for gtt memory mappings, so restrict
1544          * list traversal to objects bound into the global address space. Note
1545          * that the active list should be empty, but better safe than sorry.
1546          */
1547         WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1548         list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1549                 i915_gem_release_mmap(vma->obj);
1550         list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1551                 i915_gem_release_mmap(vma->obj);
1552 }
1553
1554 /**
1555  * i915_gem_release_mmap - remove physical page mappings
1556  * @obj: obj in question
1557  *
1558  * Preserve the reservation of the mmapping with the DRM core code, but
1559  * relinquish ownership of the pages back to the system.
1560  *
1561  * It is vital that we remove the page mapping if we have mapped a tiled
1562  * object through the GTT and then lose the fence register due to
1563  * resource pressure. Similarly if the object has been moved out of the
1564  * aperture, then pages mapped into userspace must be revoked. Removing the
1565  * mapping will then trigger a page fault on the next user access, allowing
1566  * fixup by i915_gem_fault().
1567  */
1568 void
1569 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1570 {
1571         vm_object_t devobj;
1572         vm_page_t m;
1573         int i, page_count;
1574
1575         if (!obj->fault_mappable)
1576                 return;
1577
1578         devobj = cdev_pager_lookup(obj);
1579         if (devobj != NULL) {
1580                 page_count = OFF_TO_IDX(obj->base.size);
1581
1582                 VM_OBJECT_LOCK(devobj);
1583                 for (i = 0; i < page_count; i++) {
1584                         m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1585                         if (m == NULL)
1586                                 continue;
1587                         cdev_pager_free_page(devobj, m);
1588                 }
1589                 VM_OBJECT_UNLOCK(devobj);
1590                 vm_object_deallocate(devobj);
1591         }
1592
1593         obj->fault_mappable = false;
1594 }
1595
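/*
 * i915_gem_get_gtt_size - return the size the object will occupy in the GTT
 *
 * On gen4+ hardware, and for untiled objects, this is simply the object
 * size.  Older chips need a power-of-two fence region when tiling, at least
 * 1MB on gen3 and 512KB on gen2, so the size is rounded up accordingly.
 */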
1596 uint32_t
1597 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1598 {
1599         uint32_t gtt_size;
1600
1601         if (INTEL_INFO(dev)->gen >= 4 ||
1602             tiling_mode == I915_TILING_NONE)
1603                 return size;
1604
1605         /* Previous chips need a power-of-two fence region when tiling */
1606         if (INTEL_INFO(dev)->gen == 3)
1607                 gtt_size = 1024*1024;
1608         else
1609                 gtt_size = 512*1024;
1610
1611         while (gtt_size < size)
1612                 gtt_size <<= 1;
1613
1614         return gtt_size;
1615 }
1616
1617 /**
1618  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1619  * @size, @tiling_mode, @fenced: object size, tiling mode, and whether a fence register will be used
1620  *
1621  * Return the required GTT alignment for an object, taking into account
1622  * potential fence register mapping.
1623  */
1624 uint32_t
1625 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1626                            int tiling_mode, bool fenced)
1627 {
1628         /*
1629          * Minimum alignment is 4k (GTT page size), but might be greater
1630          * if a fence register is needed for the object.
1631          */
1632         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1633             tiling_mode == I915_TILING_NONE)
1634                 return 4096;
1635
1636         /*
1637          * Previous chips need to be aligned to the size of the smallest
1638          * fence register that can contain the object.
1639          */
1640         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1641 }
1642
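/*
 * Reserve a fake mmap offset for the object with the DRM core.  If the mmap
 * address space is exhausted (-ENOSPC), reclaim space and retry: first purge
 * purgeable objects, then shrink everything.  Shrinker lock stealing is
 * disabled for the duration of the allocation attempts.
 */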
1643 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1644 {
1645         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1646         int ret;
1647
1648 #if 0
1649         if (drm_vma_node_has_offset(&obj->base.vma_node))
1650                 return 0;
1651 #endif
1652
1653         dev_priv->mm.shrinker_no_lock_stealing = true;
1654
1655         ret = drm_gem_create_mmap_offset(&obj->base);
1656         if (ret != -ENOSPC)
1657                 goto out;
1658
1659         /* Badly fragmented mmap space? The only way we can recover
1660          * space is by destroying unwanted objects. We can't randomly release
1661          * mmap_offsets as userspace expects them to be persistent for the
1662          * lifetime of the objects. The closest we can do is to release the
1663          * offsets on purgeable objects by truncating them and marking them purged,
1664          * which prevents userspace from ever using that object again.
1665          */
1666         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1667         ret = drm_gem_create_mmap_offset(&obj->base);
1668         if (ret != -ENOSPC)
1669                 goto out;
1670
1671         i915_gem_shrink_all(dev_priv);
1672         ret = drm_gem_create_mmap_offset(&obj->base);
1673 out:
1674         dev_priv->mm.shrinker_no_lock_stealing = false;
1675
1676         return ret;
1677 }
1678
1679 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1680 {
1681         drm_gem_free_mmap_offset(&obj->base);
1682 }
1683
1684 int
1685 i915_gem_mmap_gtt(struct drm_file *file,
1686                   struct drm_device *dev,
1687                   uint32_t handle,
1688                   uint64_t *offset)
1689 {
1690         struct drm_i915_private *dev_priv = dev->dev_private;
1691         struct drm_i915_gem_object *obj;
1692         int ret;
1693
1694         ret = i915_mutex_lock_interruptible(dev);
1695         if (ret)
1696                 return ret;
1697
1698         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1699         if (&obj->base == NULL) {
1700                 ret = -ENOENT;
1701                 goto unlock;
1702         }
1703
1704         if (obj->base.size > dev_priv->gtt.mappable_end) {
1705                 ret = -E2BIG;
1706                 goto out;
1707         }
1708
1709         if (obj->madv != I915_MADV_WILLNEED) {
1710                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1711                 ret = -EINVAL;
1712                 goto out;
1713         }
1714
1715         ret = i915_gem_object_create_mmap_offset(obj);
1716         if (ret)
1717                 goto out;
1718
1719         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1720             DRM_GEM_MAPPING_KEY;
1721
1722 out:
1723         drm_gem_object_unreference(&obj->base);
1724 unlock:
1725         mutex_unlock(&dev->struct_mutex);
1726         return ret;
1727 }
1728
1729 /**
1730  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1731  * @dev: DRM device
1732  * @data: GTT mapping ioctl data
1733  * @file: GEM object info
1734  *
1735  * Simply returns the fake offset to userspace so it can mmap it.
1736  * The mmap call will end up in drm_gem_mmap(), which will set things
1737  * up so we can get faults in the handler above.
1738  *
1739  * The fault handler will take care of binding the object into the GTT
1740  * (since it may have been evicted to make room for something), allocating
1741  * a fence register, and mapping the appropriate aperture address into
1742  * userspace.
1743  */
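/*
 * Userspace usage sketch (hypothetical caller: handle, size and fd are
 * assumed to be set up already, using the standard libdrm ioctl wrapper):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, arg.offset);
 */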
1744 int
1745 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1746                         struct drm_file *file)
1747 {
1748         struct drm_i915_gem_mmap_gtt *args = data;
1749
1750         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1751 }
1752
1753 /* Immediately discard the backing storage */
1754 static void
1755 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1756 {
1757         vm_object_t vm_obj;
1758
1759         vm_obj = obj->base.vm_obj;
1760         VM_OBJECT_LOCK(vm_obj);
1761         vm_object_page_remove(vm_obj, 0, 0, false);
1762         VM_OBJECT_UNLOCK(vm_obj);
1763
1764         obj->madv = __I915_MADV_PURGED;
1765 }
1766
1767 static inline int
1768 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1769 {
1770         return obj->madv == I915_MADV_DONTNEED;
1771 }
1772
1773 static void
1774 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1775 {
1776         int page_count = obj->base.size / PAGE_SIZE;
1777         int i, ret;
1778
1779         if (!obj->pages)
1780                 return;
1781
1782         BUG_ON(obj->madv == __I915_MADV_PURGED);
1783
1784         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1785         if (ret) {
1786                 /* In the event of a disaster, abandon all caches and
1787                  * hope for the best.
1788                  */
1789                 WARN_ON(ret != -EIO);
1790                 i915_gem_clflush_object(obj, true);
1791                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1792         }
1793
1794         if (i915_gem_object_needs_bit17_swizzle(obj))
1795                 i915_gem_object_save_bit_17_swizzle(obj);
1796
1797         if (obj->madv == I915_MADV_DONTNEED)
1798                 obj->dirty = 0;
1799
1800         for (i = 0; i < page_count; i++) {
1801                 struct vm_page *page = obj->pages[i];
1802
1803                 if (obj->dirty)
1804                         set_page_dirty(page);
1805
1806                 if (obj->madv == I915_MADV_WILLNEED)
1807                         mark_page_accessed(page);
1808
1809                 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1810                 vm_page_unwire(obj->pages[i], 1);
1811                 vm_page_wakeup(obj->pages[i]);
1812         }
1813         obj->dirty = 0;
1814
1815         kfree(obj->pages);
1816         obj->pages = NULL;
1817 }
1818
1819 int
1820 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1821 {
1822         const struct drm_i915_gem_object_ops *ops = obj->ops;
1823
1824         if (obj->pages == NULL)
1825                 return 0;
1826
1827         if (obj->pages_pin_count)
1828                 return -EBUSY;
1829
1830         BUG_ON(i915_gem_obj_bound_any(obj));
1831
1832         /* ->put_pages might need to allocate memory for the bit17 swizzle
1833          * array, hence protect them from being reaped by removing them from gtt
1834          * lists early. */
1835         list_del(&obj->global_list);
1836
1837         ops->put_pages(obj);
1838         obj->pages = NULL;
1839
1840         if (i915_gem_object_is_purgeable(obj))
1841                 i915_gem_object_truncate(obj);
1842
1843         return 0;
1844 }
1845
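/*
 * Release the backing pages of objects until roughly @target pages have been
 * freed.  Objects on the unbound list are dropped first; bound objects are
 * then unbound from their VMAs and dropped as well.  With @purgeable_only
 * set, only objects marked I915_MADV_DONTNEED are considered.  Returns the
 * number of pages released.
 */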
1846 static unsigned long
1847 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1848                   bool purgeable_only)
1849 {
1850         struct drm_i915_gem_object *obj, *next;
1851         unsigned long count = 0;
1852
1853         list_for_each_entry_safe(obj, next,
1854                                  &dev_priv->mm.unbound_list,
1855                                  global_list) {
1856                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1857                     i915_gem_object_put_pages(obj) == 0) {
1858                         count += obj->base.size >> PAGE_SHIFT;
1859                         if (count >= target)
1860                                 return count;
1861                 }
1862         }
1863
1864         list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1865                                  global_list) {
1866                 struct i915_vma *vma, *v;
1867
1868                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1869                         continue;
1870
1871                 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1872                         if (i915_vma_unbind(vma))
1873                                 break;
1874
1875                 if (!i915_gem_object_put_pages(obj)) {
1876                         count += obj->base.size >> PAGE_SHIFT;
1877                         if (count >= target)
1878                                 return count;
1879                 }
1880         }
1881
1882         return count;
1883 }
1884
1885 static unsigned long
1886 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1887 {
1888         return __i915_gem_shrink(dev_priv, target, true);
1889 }
1890
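/*
 * Last-resort reclaim: evict everything from the GPU address spaces, then
 * drop the backing pages of all objects left on the unbound list.  Returns
 * the number of pages freed.
 */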
1891 static unsigned long
1892 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1893 {
1894         struct drm_i915_gem_object *obj, *next;
1895         long freed = 0;
1896
1897         i915_gem_evict_everything(dev_priv->dev);
1898
1899         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1900                                  global_list) {
1901                 if (i915_gem_object_put_pages(obj) == 0)
1902                         freed += obj->base.size >> PAGE_SHIFT;
1903         }
1904         return freed;
1905 }
1906
1907 static int
1908 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1909 {
1910         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1911         int page_count, i, j;
1912         vm_object_t vm_obj;
1913         struct vm_page *page;
1914
1915         /* Assert that the object is not currently in any GPU domain. As it
1916          * wasn't in the GTT, there shouldn't be any way it could have been in
1917          * a GPU cache
1918          */
1919         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1920         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1921
1922         page_count = obj->base.size / PAGE_SIZE;
1923         obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1924             M_WAITOK);
1925
1926         /* Get the list of pages out of our struct file.  They'll be pinned
1927          * at this point until we release them.
1928          *
1929          * Fail silently without starting the shrinker
1930          */
1931         vm_obj = obj->base.vm_obj;
1932         VM_OBJECT_LOCK(vm_obj);
1933         for (i = 0; i < page_count; i++) {
1934                 page = shmem_read_mapping_page(vm_obj, i);
1935                 if (IS_ERR(page)) {
1936                         i915_gem_purge(dev_priv, page_count);
1937                         page = shmem_read_mapping_page(vm_obj, i);
1938                 }
1939                 if (IS_ERR(page)) {
1940                         /* We've tried hard to allocate the memory by reaping
1941                          * our own buffer, now let the real VM do its job and
1942                          * go down in flames if truly OOM.
1943                          */
1944
1945                         i915_gem_shrink_all(dev_priv);
1946                         page = shmem_read_mapping_page(vm_obj, i);
1947                         if (IS_ERR(page))
1948                                 goto err_pages;
1949                 }
1950 #ifdef CONFIG_SWIOTLB
1951                 if (swiotlb_nr_tbl()) {
1952                         st->nents++;
1953                         sg_set_page(sg, page, PAGE_SIZE, 0);
1954                         sg = sg_next(sg);
1955                         continue;
1956                 }
1957 #endif
1958                 obj->pages[i] = page;
1959         }
1960 #ifdef CONFIG_SWIOTLB
1961         if (!swiotlb_nr_tbl())
1962 #endif
1963         VM_OBJECT_UNLOCK(vm_obj);
1964
1965         if (i915_gem_object_needs_bit17_swizzle(obj))
1966                 i915_gem_object_do_bit_17_swizzle(obj);
1967
1968         return 0;
1969
1970 err_pages:
1971         for (j = 0; j < i; j++) {
1972                 page = obj->pages[j];
1973                 vm_page_busy_wait(page, FALSE, "i915gem");
1974                 vm_page_unwire(page, 0);
1975                 vm_page_wakeup(page);
1976         }
1977         VM_OBJECT_UNLOCK(vm_obj);
1978         kfree(obj->pages);
1979         obj->pages = NULL;
1980         return (-EIO);
1981 }
1982
1983 /* Ensure that the associated pages are gathered from the backing storage
1984  * and pinned into our object. i915_gem_object_get_pages() may be called
1985  * multiple times before they are released by a single call to
1986  * i915_gem_object_put_pages() - once the pages are no longer referenced
1987  * either as a result of memory pressure (reaping pages under the shrinker)
1988  * or as the object is itself released.
1989  */
1990 int
1991 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1992 {
1993         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1994         const struct drm_i915_gem_object_ops *ops = obj->ops;
1995         int ret;
1996
1997         if (obj->pages)
1998                 return 0;
1999
2000         if (obj->madv != I915_MADV_WILLNEED) {
2001                 DRM_ERROR("Attempting to obtain a purgeable object\n");
2002                 return -EINVAL;
2003         }
2004
2005         BUG_ON(obj->pages_pin_count);
2006
2007         ret = ops->get_pages(obj);
2008         if (ret)
2009                 return ret;
2010
2011         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2012         return 0;
2013 }
2014
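/*
 * Mark the object as in use by the GPU on @ring.  A reference is taken the
 * first time the object becomes active and it is moved to the tail of the
 * ring's active list; the last read seqno (and, for fenced GPU access, the
 * fence seqno and fence LRU position) is updated so that retirement knows
 * when the GPU has finished with it.
 */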
2015 static void
2016 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2017                                struct intel_ring_buffer *ring)
2018 {
2019         struct drm_device *dev = obj->base.dev;
2020         struct drm_i915_private *dev_priv = dev->dev_private;
2021         u32 seqno = intel_ring_get_seqno(ring);
2022
2023         BUG_ON(ring == NULL);
2024         if (obj->ring != ring && obj->last_write_seqno) {
2025                 /* Keep the seqno relative to the current ring */
2026                 obj->last_write_seqno = seqno;
2027         }
2028         obj->ring = ring;
2029
2030         /* Add a reference if we're newly entering the active list. */
2031         if (!obj->active) {
2032                 drm_gem_object_reference(&obj->base);
2033                 obj->active = 1;
2034         }
2035
2036         list_move_tail(&obj->ring_list, &ring->active_list);
2037
2038         obj->last_read_seqno = seqno;
2039
2040         if (obj->fenced_gpu_access) {
2041                 obj->last_fenced_seqno = seqno;
2042
2043                 /* Bump MRU to take account of the delayed flush */
2044                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2045                         struct drm_i915_fence_reg *reg;
2046
2047                         reg = &dev_priv->fence_regs[obj->fence_reg];
2048                         list_move_tail(&reg->lru_list,
2049                                        &dev_priv->mm.fence_list);
2050                 }
2051         }
2052 }
2053
2054 void i915_vma_move_to_active(struct i915_vma *vma,
2055                              struct intel_ring_buffer *ring)
2056 {
2057         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2058         return i915_gem_object_move_to_active(vma->obj, ring);
2059 }
2060
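/*
 * The GPU is done with the object: move it to the inactive list of the
 * global GTT, clear its ring association, seqnos and write domain, and
 * drop the reference taken when it became active.
 */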
2061 static void
2062 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2063 {
2064         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2065         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
2066         struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2067
2068         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2069         BUG_ON(!obj->active);
2070
2071         list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2072
2073         list_del_init(&obj->ring_list);
2074         obj->ring = NULL;
2075
2076         obj->last_read_seqno = 0;
2077         obj->last_write_seqno = 0;
2078         obj->base.write_domain = 0;
2079
2080         obj->last_fenced_seqno = 0;
2081         obj->fenced_gpu_access = false;
2082
2083         obj->active = 0;
2084         drm_gem_object_unreference(&obj->base);
2085
2086         WARN_ON(i915_verify_lists(dev));
2087 }
2088
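/*
 * Prepare all rings for a new seqno value: idle each ring and retire all
 * outstanding requests without emitting anything new, then reset each
 * ring's seqno bookkeeping and clear the inter-ring semaphore sync values.
 */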
2089 static int
2090 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2091 {
2092         struct drm_i915_private *dev_priv = dev->dev_private;
2093         struct intel_ring_buffer *ring;
2094         int ret, i, j;
2095
2096         /* Carefully retire all requests without writing to the rings */
2097         for_each_ring(ring, dev_priv, i) {
2098                 ret = intel_ring_idle(ring);
2099                 if (ret)
2100                         return ret;
2101         }
2102         i915_gem_retire_requests(dev);
2103
2104         /* Finally reset hw state */
2105         for_each_ring(ring, dev_priv, i) {
2106                 intel_ring_init_seqno(ring, seqno);
2107
2108                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2109                         ring->sync_seqno[j] = 0;
2110         }
2111
2112         return 0;
2113 }
2114
2115 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2116 {
2117         struct drm_i915_private *dev_priv = dev->dev_private;
2118         int ret;
2119
2120         if (seqno == 0)
2121                 return -EINVAL;
2122
2123         /* The seqno in the HWS page needs to be set to a value less than
2124          * the one we will inject into the ring
2125          */
2126         ret = i915_gem_init_seqno(dev, seqno - 1);
2127         if (ret)
2128                 return ret;
2129
2130         /* Carefully set the last_seqno value so that wrap
2131          * detection still works
2132          */
2133         dev_priv->next_seqno = seqno;
2134         dev_priv->last_seqno = seqno - 1;
2135         if (dev_priv->last_seqno == 0)
2136                 dev_priv->last_seqno--;
2137
2138         return 0;
2139 }
2140
2141 int
2142 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2143 {
2144         struct drm_i915_private *dev_priv = dev->dev_private;
2145
2146         /* reserve 0 for non-seqno */
2147         if (dev_priv->next_seqno == 0) {
2148                 int ret = i915_gem_init_seqno(dev, 0);
2149                 if (ret)
2150                         return ret;
2151
2152                 dev_priv->next_seqno = 1;
2153         }
2154
2155         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2156         return 0;
2157 }
2158
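/*
 * Emit the request that was prepared while commands were written to @ring:
 * flush any outstanding GPU caches, emit the commands that write and signal
 * the new seqno, and queue the request on the ring's (and, if given, the
 * file's) request list so it can be retired later.  Hangcheck is kicked
 * and, if the request list was empty, the retire worker is scheduled.  The
 * new seqno is returned through @out_seqno.
 */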
2159 int __i915_add_request(struct intel_ring_buffer *ring,
2160                        struct drm_file *file,
2161                        struct drm_i915_gem_object *obj,
2162                        u32 *out_seqno)
2163 {
2164         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2165         struct drm_i915_gem_request *request;
2166         u32 request_ring_position, request_start;
2167         int was_empty;
2168         int ret;
2169
2170         request_start = intel_ring_get_tail(ring);
2171         /*
2172          * Emit any outstanding flushes - execbuf can fail to emit the flush
2173          * after having emitted the batchbuffer command. Hence we need to fix
2174          * things up similar to emitting the lazy request. The difference here
2175          * is that the flush _must_ happen before the next request, no matter
2176          * what.
2177          */
2178         ret = intel_ring_flush_all_caches(ring);
2179         if (ret)
2180                 return ret;
2181
2182         request = ring->preallocated_lazy_request;
2183         if (WARN_ON(request == NULL))
2184                 return -ENOMEM;
2185
2186         /* Record the position of the start of the request so that
2187          * should we detect the updated seqno part-way through the
2188          * GPU processing the request, we never over-estimate the
2189          * position of the head.
2190          */
2191         request_ring_position = intel_ring_get_tail(ring);
2192
2193         ret = ring->add_request(ring);
2194         if (ret)
2195                 return ret;
2196
2197         request->seqno = intel_ring_get_seqno(ring);
2198         request->ring = ring;
2199         request->head = request_start;
2200         request->tail = request_ring_position;
2201
2202         /* Whilst this request exists, batch_obj will be on the
2203          * active_list, and so will hold the active reference. Only when this
2204          * request is retired will the the batch_obj be moved onto the
2205          * request is retired will the batch_obj be moved onto the
2206          * to explicitly hold another reference here.
2207          */
2208         request->batch_obj = obj;
2209
2210         /* Hold a reference to the current context so that we can inspect
2211          * it later in case a hangcheck error event fires.
2212          */
2213         request->ctx = ring->last_context;
2214         if (request->ctx)
2215                 i915_gem_context_reference(request->ctx);
2216
2217         request->emitted_jiffies = jiffies;
2218         was_empty = list_empty(&ring->request_list);
2219         list_add_tail(&request->list, &ring->request_list);
2220         request->file_priv = NULL;
2221
2222         if (file) {
2223                 struct drm_i915_file_private *file_priv = file->driver_priv;
2224
2225                 spin_lock(&file_priv->mm.lock);
2226                 request->file_priv = file_priv;
2227                 list_add_tail(&request->client_list,
2228                               &file_priv->mm.request_list);
2229                 spin_unlock(&file_priv->mm.lock);
2230         }
2231
2232         trace_i915_gem_request_add(ring, request->seqno);
2233         ring->outstanding_lazy_seqno = 0;
2234         ring->preallocated_lazy_request = NULL;
2235
2236         if (!dev_priv->ums.mm_suspended) {
2237                 i915_queue_hangcheck(ring->dev);
2238
2239                 if (was_empty) {
2240                         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2241                         queue_delayed_work(dev_priv->wq,
2242                                            &dev_priv->mm.retire_work,
2243                                            round_jiffies_up_relative(HZ));
2244                         intel_mark_busy(dev_priv->dev);
2245                 }
2246         }
2247
2248         if (out_seqno)
2249                 *out_seqno = request->seqno;
2250         return 0;
2251 }
2252
2253 static inline void
2254 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2255 {
2256         struct drm_i915_file_private *file_priv = request->file_priv;
2257
2258         if (!file_priv)
2259                 return;
2260
2261         spin_lock(&file_priv->mm.lock);
2262         if (request->file_priv) {
2263                 list_del(&request->client_list);
2264                 request->file_priv = NULL;
2265         }
2266         spin_unlock(&file_priv->mm.lock);
2267 }
2268
2269 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2270                                     struct i915_address_space *vm)
2271 {
2272         if (acthd >= i915_gem_obj_offset(obj, vm) &&
2273             acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2274                 return true;
2275
2276         return false;
2277 }
2278
2279 static bool i915_head_inside_request(const u32 acthd_unmasked,
2280                                      const u32 request_start,
2281                                      const u32 request_end)
2282 {
2283         const u32 acthd = acthd_unmasked & HEAD_ADDR;
2284
2285         if (request_start < request_end) {
2286                 if (acthd >= request_start && acthd < request_end)
2287                         return true;
2288         } else if (request_start > request_end) {
2289                 if (acthd >= request_start || acthd < request_end)
2290                         return true;
2291         }
2292
2293         return false;
2294 }
2295
2296 static struct i915_address_space *
2297 request_to_vm(struct drm_i915_gem_request *request)
2298 {
2299         struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2300         struct i915_address_space *vm;
2301
2302         vm = &dev_priv->gtt.base;
2303
2304         return vm;
2305 }
2306
2307 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2308                                 const u32 acthd, bool *inside)
2309 {
2310         /* There is a possibility that the unmasked head address, while
2311          * pointing inside the ring, matches the batch_obj address range.
2312          * However this is extremely unlikely.
2313          */
2314         if (request->batch_obj) {
2315                 if (i915_head_inside_object(acthd, request->batch_obj,
2316                                             request_to_vm(request))) {
2317                         *inside = true;
2318                         return true;
2319                 }
2320         }
2321
2322         if (i915_head_inside_request(acthd, request->head, request->tail)) {
2323                 *inside = false;
2324                 return true;
2325         }
2326
2327         return false;
2328 }
2329
2330 static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
2331 {
2332         const unsigned long elapsed = get_seconds() - hs->guilty_ts;
2333
2334         if (hs->banned)
2335                 return true;
2336
2337         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2338                 DRM_ERROR("context hanging too fast, declaring banned!\n");
2339                 return true;
2340         }
2341
2342         return false;
2343 }
2344
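/*
 * After a hang, decide whether @request was the guilty party by checking
 * whether the GPU head was inside its batch buffer or request, then update
 * the hang statistics (and possible ban) of the owning context or file.
 */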
2345 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2346                                   struct drm_i915_gem_request *request,
2347                                   u32 acthd)
2348 {
2349         struct i915_ctx_hang_stats *hs = NULL;
2350         bool inside, guilty;
2351         unsigned long offset = 0;
2352
2353         /* Innocent until proven guilty */
2354         guilty = false;
2355
2356         if (request->batch_obj)
2357                 offset = i915_gem_obj_offset(request->batch_obj,
2358                                              request_to_vm(request));
2359
2360         if (ring->hangcheck.action != HANGCHECK_WAIT &&
2361             i915_request_guilty(request, acthd, &inside)) {
2362                 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2363                           ring->name,
2364                           inside ? "inside" : "flushing",
2365                           offset,
2366                           request->ctx ? request->ctx->id : 0,
2367                           acthd);
2368
2369                 guilty = true;
2370         }
2371
2372         /* If contexts are disabled or this is the default context, use
2373          * file_priv->hang_stats
2374          */
2375         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2376                 hs = &request->ctx->hang_stats;
2377         else if (request->file_priv)
2378                 hs = &request->file_priv->hang_stats;
2379
2380         if (hs) {
2381                 if (guilty) {
2382                         hs->banned = i915_context_is_banned(hs);
2383                         hs->batch_active++;
2384                         hs->guilty_ts = get_seconds();
2385                 } else {
2386                         hs->batch_pending++;
2387                 }
2388         }
2389 }
2390
2391 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2392 {
2393         list_del(&request->list);
2394         i915_gem_request_remove_from_client(request);
2395
2396         if (request->ctx)
2397                 i915_gem_context_unreference(request->ctx);
2398
2399         kfree(request);
2400 }
2401
2402 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2403                                        struct intel_ring_buffer *ring)
2404 {
2405         u32 completed_seqno = ring->get_seqno(ring, false);
2406         u32 acthd = intel_ring_get_active_head(ring);
2407         struct drm_i915_gem_request *request;
2408
2409         list_for_each_entry(request, &ring->request_list, list) {
2410                 if (i915_seqno_passed(completed_seqno, request->seqno))
2411                         continue;
2412
2413                 i915_set_reset_status(ring, request, acthd);
2414         }
2415 }
2416
2417 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2418                                         struct intel_ring_buffer *ring)
2419 {
2420         while (!list_empty(&ring->active_list)) {
2421                 struct drm_i915_gem_object *obj;
2422
2423                 obj = list_first_entry(&ring->active_list,
2424                                        struct drm_i915_gem_object,
2425                                        ring_list);
2426
2427                 i915_gem_object_move_to_inactive(obj);
2428         }
2429
2430         /*
2431          * We must free the requests after all the corresponding objects have
2432          * been moved off active lists. Which is the same order as the normal
2433          * retire_requests function does. This is important if objects hold
2434          * implicit references on things like e.g. ppgtt address spaces through
2435          * the request.
2436          */
2437         while (!list_empty(&ring->request_list)) {
2438                 struct drm_i915_gem_request *request;
2439
2440                 request = list_first_entry(&ring->request_list,
2441                                            struct drm_i915_gem_request,
2442                                            list);
2443
2444                 i915_gem_free_request(request);
2445         }
2446 }
2447
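/*
 * Rewrite every fence register from the software state in dev_priv,
 * e.g. after a GPU reset: re-enable fences that still have an object
 * attached and clear the rest.
 */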
2448 void i915_gem_restore_fences(struct drm_device *dev)
2449 {
2450         struct drm_i915_private *dev_priv = dev->dev_private;
2451         int i;
2452
2453         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2454                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2455
2456                 /*
2457                  * Commit delayed tiling changes if we have an object still
2458                  * attached to the fence, otherwise just clear the fence.
2459                  */
2460                 if (reg->obj) {
2461                         i915_gem_object_update_fence(reg->obj, reg,
2462                                                      reg->obj->tiling_mode);
2463                 } else {
2464                         i915_gem_write_fence(dev, i, NULL);
2465                 }
2466         }
2467 }
2468
2469 void i915_gem_reset(struct drm_device *dev)
2470 {
2471         struct drm_i915_private *dev_priv = dev->dev_private;
2472         struct intel_ring_buffer *ring;
2473         int i;
2474
2475         /*
2476          * Before we free the objects from the requests, we need to inspect
2477          * them for finding the guilty party. As the requests only borrow
2478          * their reference to the objects, the inspection must be done first.
2479          */
2480         for_each_ring(ring, dev_priv, i)
2481                 i915_gem_reset_ring_status(dev_priv, ring);
2482
2483         for_each_ring(ring, dev_priv, i)
2484                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2485
2486         i915_gem_cleanup_ringbuffer(dev);
2487
2488         i915_gem_restore_fences(dev);
2489 }
2490
2491 /**
2492  * This function clears the request list as sequence numbers are passed.
2493  */
2494 void
2495 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2496 {
2497         uint32_t seqno;
2498
2499         if (list_empty(&ring->request_list))
2500                 return;
2501
2502         WARN_ON(i915_verify_lists(ring->dev));
2503
2504         seqno = ring->get_seqno(ring, true);
2505
2506         while (!list_empty(&ring->request_list)) {
2507                 struct drm_i915_gem_request *request;
2508
2509                 request = list_first_entry(&ring->request_list,
2510                                            struct drm_i915_gem_request,
2511                                            list);
2512
2513                 if (!i915_seqno_passed(seqno, request->seqno))
2514                         break;
2515
2516                 trace_i915_gem_request_retire(ring, request->seqno);
2517                 /* We know the GPU must have read the request to have
2518                  * sent us the seqno + interrupt, so use the position
2519                  * of tail of the request to update the last known position
2520          * of the tail of the request to update the last known position
2521                  */
2522                 ring->last_retired_head = request->tail;
2523
2524                 i915_gem_free_request(request);
2525         }
2526
2527         /* Move any buffers on the active list that are no longer referenced
2528          * by the ringbuffer to the flushing/inactive lists as appropriate.
2529          */
2530         while (!list_empty(&ring->active_list)) {
2531                 struct drm_i915_gem_object *obj;
2532
2533                 obj = list_first_entry(&ring->active_list,
2534                                       struct drm_i915_gem_object,
2535                                       ring_list);
2536
2537                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2538                         break;
2539
2540                 i915_gem_object_move_to_inactive(obj);
2541         }
2542
2543         if (unlikely(ring->trace_irq_seqno &&
2544                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2545                 ring->irq_put(ring);
2546                 ring->trace_irq_seqno = 0;
2547         }
2548
2549         WARN_ON(i915_verify_lists(ring->dev));
2550 }
2551
2552 bool
2553 i915_gem_retire_requests(struct drm_device *dev)
2554 {
2555         drm_i915_private_t *dev_priv = dev->dev_private;
2556         struct intel_ring_buffer *ring;
2557         bool idle = true;
2558         int i;
2559
2560         for_each_ring(ring, dev_priv, i) {
2561                 i915_gem_retire_requests_ring(ring);
2562                 idle &= list_empty(&ring->request_list);
2563         }
2564
2565         return idle;
2566 }
2567
2568 static void
2569 i915_gem_retire_work_handler(struct work_struct *work)
2570 {
2571         drm_i915_private_t *dev_priv;
2572         struct drm_device *dev;
2573         struct intel_ring_buffer *ring;
2574         bool idle;
2575         int i;
2576
2577         dev_priv = container_of(work, drm_i915_private_t,
2578                                 mm.retire_work.work);
2579         dev = dev_priv->dev;
2580
2581         /* Come back later if the device is busy... */
2582         if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT)) {
2583                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2584                                    round_jiffies_up_relative(HZ));
2585                 return;
2586         }
2587
2588         i915_gem_retire_requests(dev);
2589
2590         /* Send a periodic flush down the ring so we don't hold onto GEM
2591          * objects indefinitely.
2592          */
2593         idle = true;
2594         for_each_ring(ring, dev_priv, i) {
2595                 if (ring->gpu_caches_dirty)
2596                         i915_add_request(ring, NULL);
2597
2598                 idle &= list_empty(&ring->request_list);
2599         }
2600
2601         if (!dev_priv->ums.mm_suspended && !idle)
2602                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2603                                    round_jiffies_up_relative(HZ));
2604         if (idle)
2605                 intel_mark_idle(dev);
2606
2607         mutex_unlock(&dev->struct_mutex);
2608 }
2609
2610 static void
2611 i915_gem_idle_work_handler(struct work_struct *work)
2612 {
2613         struct drm_i915_private *dev_priv =
2614                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2615
2616         intel_mark_idle(dev_priv->dev);
2617 }
2618
2619 /**
2620  * Ensures that an object will eventually get non-busy by flushing any required
2621  * write domains, emitting any outstanding lazy request and retiring any
2622  * completed requests.
2623  */
2624 static int
2625 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2626 {
2627         int ret;
2628
2629         if (obj->active) {
2630                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2631                 if (ret)
2632                         return ret;
2633
2634                 i915_gem_retire_requests_ring(obj->ring);
2635         }
2636
2637         return 0;
2638 }
2639
2640 /**
2641  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2642  * @DRM_IOCTL_ARGS: standard ioctl arguments
2643  *
2644  * Returns 0 if successful, else an error is returned with the remaining time in
2645  * the timeout parameter.
2646  *  -ETIME: object is still busy after timeout
2647  *  -ERESTARTSYS: signal interrupted the wait
2648  *  -ENOENT: object doesn't exist
2649  * Also possible, but rare:
2650  *  -EAGAIN: GPU wedged
2651  *  -ENOMEM: damn
2652  *  -ENODEV: Internal IRQ fail
2653  *  -E?: The add request failed
2654  *
2655  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2656  * non-zero timeout parameter the wait ioctl will wait for the given number of
2657  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2658  * without holding struct_mutex the object may become re-busied before this
2659  * function completes. A similar but shorter * race condition exists in the busy
2660  * function completes. A similar but shorter race condition exists in the busy
2661  * ioctl.
2662 int
2663 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2664 {
2665         drm_i915_private_t *dev_priv = dev->dev_private;
2666         struct drm_i915_gem_wait *args = data;
2667         struct drm_i915_gem_object *obj;
2668         struct intel_ring_buffer *ring = NULL;
2669         struct timespec timeout_stack, *timeout = NULL;
2670         unsigned reset_counter;
2671         u32 seqno = 0;
2672         int ret = 0;
2673
2674         if (args->timeout_ns >= 0) {
2675                 timeout_stack = ns_to_timespec(args->timeout_ns);
2676                 timeout = &timeout_stack;
2677         }
2678
2679         ret = i915_mutex_lock_interruptible(dev);
2680         if (ret)
2681                 return ret;
2682
2683         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2684         if (&obj->base == NULL) {
2685                 mutex_unlock(&dev->struct_mutex);
2686                 return -ENOENT;
2687         }
2688
2689         /* Need to make sure the object gets inactive eventually. */
2690         ret = i915_gem_object_flush_active(obj);
2691         if (ret)
2692                 goto out;
2693
2694         if (obj->active) {
2695                 seqno = obj->last_read_seqno;
2696                 ring = obj->ring;
2697         }
2698
2699         if (seqno == 0)
2700                  goto out;
2701
2702         /* Do this after OLR check to make sure we make forward progress polling
2703          * on this IOCTL with a 0 timeout (like busy ioctl)
2704          */
2705         if (!args->timeout_ns) {
2706                 ret = -ETIMEDOUT;
2707                 goto out;
2708         }
2709
2710         drm_gem_object_unreference(&obj->base);
2711         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2712         mutex_unlock(&dev->struct_mutex);
2713
2714         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2715         if (timeout)
2716                 args->timeout_ns = timespec_to_ns(timeout);
2717         return ret;
2718
2719 out:
2720         drm_gem_object_unreference(&obj->base);
2721         mutex_unlock(&dev->struct_mutex);
2722         return ret;
2723 }
2724
2725 /**
2726  * i915_gem_object_sync - sync an object to a ring.
2727  *
2728  * @obj: object which may be in use on another ring.
2729  * @to: ring we wish to use the object on. May be NULL.
2730  *
2731  * This code is meant to abstract object synchronization with the GPU.
2732  * Calling with NULL implies synchronizing the object with the CPU
2733  * rather than a particular GPU ring.
2734  *
2735  * Returns 0 if successful, else propagates up the lower layer error.
2736  */
2737 int
2738 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2739                      struct intel_ring_buffer *to)
2740 {
2741         struct intel_ring_buffer *from = obj->ring;
2742         u32 seqno;
2743         int ret, idx;
2744
2745         if (from == NULL || to == from)
2746                 return 0;
2747
2748         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2749                 return i915_gem_object_wait_rendering(obj, false);
2750
2751         idx = intel_ring_sync_index(from, to);
2752
2753         seqno = obj->last_read_seqno;
2754         if (seqno <= from->sync_seqno[idx])
2755                 return 0;
2756
2757         ret = i915_gem_check_olr(obj->ring, seqno);
2758         if (ret)
2759                 return ret;
2760
2761         trace_i915_gem_ring_sync_to(from, to, seqno);
2762         ret = to->sync_to(to, from, seqno);
2763         if (!ret)
2764                 /* We use last_read_seqno because sync_to()
2765                  * might have just caused seqno wrap under
2766                  * the radar.
2767                  */
2768                 from->sync_seqno[idx] = obj->last_read_seqno;
2769
2770         return ret;
2771 }
2772
2773 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2774 {
2775         u32 old_write_domain, old_read_domains;
2776
2777         /* Force a pagefault for domain tracking on next user access */
2778         i915_gem_release_mmap(obj);
2779
2780         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2781                 return;
2782
2783         /* Wait for any direct GTT access to complete */
2784         mb();
2785
2786         old_read_domains = obj->base.read_domains;
2787         old_write_domain = obj->base.write_domain;
2788
2789         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2790         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2791
2792         trace_i915_gem_object_change_domain(obj,
2793                                             old_read_domains,
2794                                             old_write_domain);
2795 }
2796
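/*
 * Unbind a VMA from its address space: wait for any GPU access to finish,
 * flush the GTT domain, release the fence register, tear down the GTT and
 * aliasing PPGTT mappings and remove the drm_mm node.  The object's pages
 * are unpinned, and once no VMAs remain the object is moved to the global
 * unbound list so the shrinker may reap it.
 */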
2797 int i915_vma_unbind(struct i915_vma *vma)
2798 {
2799         struct drm_i915_gem_object *obj = vma->obj;
2800         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2801         int ret;
2802
2803         /* For now we only ever use 1 vma per object */
2804 #if 0
2805         WARN_ON(!list_is_singular(&obj->vma_list));
2806 #endif
2807
2808         if (list_empty(&vma->vma_link))
2809                 return 0;
2810
2811         if (!drm_mm_node_allocated(&vma->node)) {
2812                 i915_gem_vma_destroy(vma);
2813
2814                 return 0;
2815         }
2816
2817         if (obj->pin_count)
2818                 return -EBUSY;
2819
2820         BUG_ON(obj->pages == NULL);
2821
2822         ret = i915_gem_object_finish_gpu(obj);
2823         if (ret)
2824                 return ret;
2825         /* Continue on if we fail due to EIO, the GPU is hung so we
2826          * should be safe and we need to cleanup or else we might
2827          * cause memory corruption through use-after-free.
2828          */
2829
2830         i915_gem_object_finish_gtt(obj);
2831
2832         /* release the fence reg _after_ flushing */
2833         ret = i915_gem_object_put_fence(obj);
2834         if (ret)
2835                 return ret;
2836
2837         trace_i915_vma_unbind(vma);
2838
2839         if (obj->has_global_gtt_mapping)
2840                 i915_gem_gtt_unbind_object(obj);
2841         if (obj->has_aliasing_ppgtt_mapping) {
2842                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2843                 obj->has_aliasing_ppgtt_mapping = 0;
2844         }
2845         i915_gem_gtt_finish_object(obj);
2846
2847         list_del(&vma->mm_list);
2848         /* Avoid an unnecessary call to unbind on rebind. */
2849         if (i915_is_ggtt(vma->vm))
2850                 obj->map_and_fenceable = true;
2851
2852         drm_mm_remove_node(&vma->node);
2853         i915_gem_vma_destroy(vma);
2854
2855         /* Since the unbound list is global, only move to that list if
2856          * no more VMAs exist. */
2857         if (list_empty(&obj->vma_list))
2858                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2859
2860         /* And finally now the object is completely decoupled from this vma,
2861          * we can drop its hold on the backing storage and allow it to be
2862          * reaped by the shrinker.
2863          */
2864         i915_gem_object_unpin_pages(obj);
2865
2866         return 0;
2867 }
2868
2869 /**
2870  * Unbinds an object from the global GTT aperture.
2871  */
2872 int
2873 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2874 {
2875         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2876         struct i915_address_space *ggtt = &dev_priv->gtt.base;
2877
2878         if (!i915_gem_obj_ggtt_bound(obj))
2879                 return 0;
2880
2881         if (obj->pin_count)
2882                 return -EBUSY;
2883
2884         BUG_ON(obj->pages == NULL);
2885
2886         return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2887 }
2888
2889 int i915_gpu_idle(struct drm_device *dev)
2890 {
2891         drm_i915_private_t *dev_priv = dev->dev_private;
2892         struct intel_ring_buffer *ring;
2893         int ret, i;
2894
2895         /* Flush everything onto the inactive list. */
2896         for_each_ring(ring, dev_priv, i) {
2897                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2898                 if (ret)
2899                         return ret;
2900
2901                 ret = intel_ring_idle(ring);
2902                 if (ret)
2903                         return ret;
2904         }
2905
2906         return 0;
2907 }
2908
2909 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2910                                  struct drm_i915_gem_object *obj)
2911 {
2912         drm_i915_private_t *dev_priv = dev->dev_private;
2913         int fence_reg;
2914         int fence_pitch_shift;
2915
2916         if (INTEL_INFO(dev)->gen >= 6) {
2917                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2918                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2919         } else {
2920                 fence_reg = FENCE_REG_965_0;
2921                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2922         }
2923
2924         fence_reg += reg * 8;
2925
2926         /* To w/a incoherency with non-atomic 64-bit register updates,
2927          * we split the 64-bit update into two 32-bit writes. In order
2928          * for a partial fence not to be evaluated between writes, we
2929          * precede the update with write to turn off the fence register,
2930          * and only enable the fence as the last step.
2931          *
2932          * For extra levels of paranoia, we make sure each step lands
2933          * before applying the next step.
2934          */
2935         I915_WRITE(fence_reg, 0);
2936         POSTING_READ(fence_reg);
2937
2938         if (obj) {
2939                 u32 size = i915_gem_obj_ggtt_size(obj);
2940                 uint64_t val;
2941
2942                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2943                                  0xfffff000) << 32;
2944                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2945                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2946                 if (obj->tiling_mode == I915_TILING_Y)
2947                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2948                 val |= I965_FENCE_REG_VALID;
2949
2950                 I915_WRITE(fence_reg + 4, val >> 32);
2951                 POSTING_READ(fence_reg + 4);
2952
2953                 I915_WRITE(fence_reg + 0, val);
2954                 POSTING_READ(fence_reg);
2955         } else {
2956                 I915_WRITE(fence_reg + 4, 0);
2957                 POSTING_READ(fence_reg + 4);
2958         }
2959 }
2960
2961 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2962                                  struct drm_i915_gem_object *obj)
2963 {
2964         drm_i915_private_t *dev_priv = dev->dev_private;
2965         u32 val;
2966
2967         if (obj) {
2968                 u32 size = i915_gem_obj_ggtt_size(obj);
2969                 int pitch_val;
2970                 int tile_width;
2971
2972                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2973                      (size & -size) != size ||
2974                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2975                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2976                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2977
2978                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2979                         tile_width = 128;
2980                 else
2981                         tile_width = 512;
2982
2983                 /* Note: pitch better be a power of two tile widths */
2984                 pitch_val = obj->stride / tile_width;
2985                 pitch_val = ffs(pitch_val) - 1;
2986
2987                 val = i915_gem_obj_ggtt_offset(obj);
2988                 if (obj->tiling_mode == I915_TILING_Y)
2989                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2990                 val |= I915_FENCE_SIZE_BITS(size);
2991                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2992                 val |= I830_FENCE_REG_VALID;
2993         } else
2994                 val = 0;
2995
2996         if (reg < 8)
2997                 reg = FENCE_REG_830_0 + reg * 4;
2998         else
2999                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3000
3001         I915_WRITE(reg, val);
3002         POSTING_READ(reg);
3003 }
3004
3005 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3006                                 struct drm_i915_gem_object *obj)
3007 {
3008         drm_i915_private_t *dev_priv = dev->dev_private;
3009         uint32_t val;
3010
3011         if (obj) {
3012                 u32 size = i915_gem_obj_ggtt_size(obj);
3013                 uint32_t pitch_val;
3014
3015                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3016                      (size & -size) != size ||
3017                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3018                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3019                      i915_gem_obj_ggtt_offset(obj), size);
3020
3021                 pitch_val = obj->stride / 128;
3022                 pitch_val = ffs(pitch_val) - 1;
3023
3024                 val = i915_gem_obj_ggtt_offset(obj);
3025                 if (obj->tiling_mode == I915_TILING_Y)
3026                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3027                 val |= I830_FENCE_SIZE_BITS(size);
3028                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3029                 val |= I830_FENCE_REG_VALID;
3030         } else
3031                 val = 0;
3032
3033         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3034         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3035 }
3036
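/* Objects currently readable through the GTT need a memory barrier around
 * fence register updates; see i915_gem_write_fence().
 */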
3037 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3038 {
3039         return obj && (obj->base.read_domains & I915_GEM_DOMAIN_GTT);
3040 }
3041
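/* Update fence register @reg for @obj (or clear it when @obj is NULL) via the
 * generation-specific routine, with memory barriers around the update as needed.
 */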
3042 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3043                                  struct drm_i915_gem_object *obj)
3044 {
3045         struct drm_i915_private *dev_priv = dev->dev_private;
3046
3047         /* Ensure that all CPU reads are completed before installing a fence
3048          * and all writes before removing the fence.
3049          */
3050         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3051                 mb();
3052
3053         WARN(obj && (!obj->stride || !obj->tiling_mode),
3054              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3055              obj->stride, obj->tiling_mode);
3056
3057         switch (INTEL_INFO(dev)->gen) {
3058         case 8:
3059         case 7:
3060         case 6:
3061         case 5:
3062         case 4: i965_write_fence_reg(dev, reg, obj); break;
3063         case 3: i915_write_fence_reg(dev, reg, obj); break;
3064         case 2: i830_write_fence_reg(dev, reg, obj); break;
3065         default: BUG();
3066         }
3067
3068         /* And similarly be paranoid that no direct access to this region
3069          * is reordered to before the fence is installed.
3070          */
3071         if (i915_gem_object_needs_mb(obj))
3072                 mb();
3073 }
3074
3075 static inline int fence_number(struct drm_i915_private *dev_priv,
3076                                struct drm_i915_fence_reg *fence)
3077 {
3078         return fence - dev_priv->fence_regs;
3079 }
3080
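/* Point the given fence register at @obj (or release it when @enable is false),
 * keeping the object, fence and fence-LRU bookkeeping in sync.
 */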
3081 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3082                                          struct drm_i915_fence_reg *fence,
3083                                          bool enable)
3084 {
3085         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3086         int reg = fence_number(dev_priv, fence);
3087
3088         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3089
3090         if (enable) {
3091                 obj->fence_reg = reg;
3092                 fence->obj = obj;
3093                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3094         } else {
3095                 obj->fence_reg = I915_FENCE_REG_NONE;
3096                 fence->obj = NULL;
3097                 list_del_init(&fence->lru_list);
3098         }
3099         obj->fence_dirty = false;
3100 }
3101
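/* Wait for any GPU access still outstanding through this object's fence before
 * the fence register may be reused or rewritten.
 */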
3102 static int
3103 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3104 {
3105         if (obj->last_fenced_seqno) {
3106                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3107                 if (ret)
3108                         return ret;
3109
3110                 obj->last_fenced_seqno = 0;
3111         }
3112
3113         obj->fenced_gpu_access = false;
3114         return 0;
3115 }
3116
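/**
 * i915_gem_object_put_fence - release the fence register used by an object
 * @obj: object to release the fence for
 *
 * Waits for outstanding fenced GPU access, then clears the fence register and
 * the object's claim on it. Returns 0 on success or a negative error code.
 */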
3117 int
3118 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3119 {
3120         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3121         struct drm_i915_fence_reg *fence;
3122         int ret;
3123
3124         ret = i915_gem_object_wait_fence(obj);
3125         if (ret)
3126                 return ret;
3127
3128         if (obj->fence_reg == I915_FENCE_REG_NONE)
3129                 return 0;
3130
3131         fence = &dev_priv->fence_regs[obj->fence_reg];
3132
3133         i915_gem_object_fence_lost(obj);
3134         i915_gem_object_update_fence(obj, fence, false);
3135
3136         return 0;
3137 }
3138
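/* Find a fence register to use: prefer a free one, otherwise pick the least
 * recently used unpinned register for stealing. Returns an ERR_PTR if every
 * fence is pinned (-EAGAIN while pending pageflips still hold fences,
 * -EDEADLK otherwise).
 */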
3139 static struct drm_i915_fence_reg *
3140 i915_find_fence_reg(struct drm_device *dev)
3141 {
3142         struct drm_i915_private *dev_priv = dev->dev_private;
3143         struct drm_i915_fence_reg *reg, *avail;
3144         int i;
3145
3146         /* First try to find a free reg */
3147         avail = NULL;
3148         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3149                 reg = &dev_priv->fence_regs[i];
3150                 if (!reg->obj)
3151                         return reg;
3152
3153                 if (!reg->pin_count)
3154                         avail = reg;
3155         }
3156
3157         if (avail == NULL)
3158                 goto deadlock;
3159
3160         /* None available, try to steal one or wait for a user to finish */
3161         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3162                 if (reg->pin_count)
3163                         continue;
3164
3165                 return reg;
3166         }
3167
3168 deadlock:
3169         /* Wait for completion of pending flips which consume fences */
3170         if (intel_has_pending_fb_unpin(dev))
3171                 return ERR_PTR(-EAGAIN);
3172
3173         return ERR_PTR(-EDEADLK);
3174 }
3175
3176 /**
3177  * i915_gem_object_get_fence - set up fencing for an object
3178  * @obj: object to map through a fence reg
3179  *
3180  * When mapping objects through the GTT, userspace wants to be able to write
3181  * to them without having to worry about swizzling if the object is tiled.
3182  * This function walks the fence regs looking for a free one for @obj,
3183  * stealing one if it can't find any.
3184  *
3185  * It then sets up the reg based on the object's properties: address, pitch
3186  * and tiling format.
3187  *
3188  * For an untiled surface, this removes any existing fence.
3189  */
3190 int
3191 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3192 {
3193         struct drm_device *dev = obj->base.dev;
3194         struct drm_i915_private *dev_priv = dev->dev_private;
3195         bool enable = obj->tiling_mode != I915_TILING_NONE;
3196         struct drm_i915_fence_reg *reg;
3197         int ret;
3198
3199         /* Have the object's tiling parameters been updated, so that we
3200          * need to serialise the write to the associated fence register?
3201          */
3202         if (obj->fence_dirty) {
3203                 ret = i915_gem_object_wait_fence(obj);
3204                 if (ret)
3205                         return ret;
3206         }
3207
3208         /* Just update our place in the LRU if our fence is getting reused. */
3209         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3210                 reg = &dev_priv->fence_regs[obj->fence_reg];
3211                 if (!obj->fence_dirty) {
3212                         list_move_tail(&reg->lru_list,
3213                                        &dev_priv->mm.fence_list);
3214                         return 0;
3215                 }
3216         } else if (enable) {
3217                 reg = i915_find_fence_reg(dev);
3218                 if (IS_ERR(reg))
3219                         return PTR_ERR(reg);
3220
3221                 if (reg->obj) {
3222                         struct drm_i915_gem_object *old = reg->obj;
3223
3224                         ret = i915_gem_object_wait_fence(old);
3225                         if (ret)
3226                                 return ret;
3227
3228                         i915_gem_object_fence_lost(old);
3229                 }
3230         } else
3231                 return 0;
3232
3233         i915_gem_object_update_fence(obj, reg, enable);
3234
3235         return 0;
3236 }
3237
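/* On non-LLC machines, verify that binding @gtt_space with the given cache
 * level would not leave it directly abutting a neighbour of a different
 * colour, which the prefetcher must not cross.
 */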
3238 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3239                                      struct drm_mm_node *gtt_space,
3240                                      unsigned long cache_level)
3241 {
3242         struct drm_mm_node *other;
3243
3244         /* On non-LLC machines we have to be careful when putting differing
3245          * types of snoopable memory together to avoid the prefetcher
3246          * crossing memory domains and dying.
3247          */
3248         if (HAS_LLC(dev))
3249                 return true;
3250
3251         if (!drm_mm_node_allocated(gtt_space))
3252                 return true;
3253
3254         if (list_empty(&gtt_space->node_list))
3255                 return true;
3256
3257         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3258         if (other->allocated && !other->hole_follows && other->color != cache_level)
3259                 return false;
3260
3261         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3262         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3263                 return false;
3264
3265         return true;
3266 }
3267
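/* Debug-only (WATCH_GTT) consistency check of every bound object's GTT node
 * and cache-level colouring; compiled out by default.
 */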
3268 static void i915_gem_verify_gtt(struct drm_device *dev)
3269 {
3270 #if WATCH_GTT
3271         struct drm_i915_private *dev_priv = dev->dev_private;
3272         struct drm_i915_gem_object *obj;
3273         int err = 0;
3274
3275         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3276                 if (obj->gtt_space == NULL) {
3277                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3278                         err++;
3279                         continue;
3280                 }
3281
3282                 if (obj->cache_level != obj->gtt_space->color) {
3283                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3284                                i915_gem_obj_ggtt_offset(obj),
3285                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3286                                obj->cache_level,
3287                                obj->gtt_space->color);
3288                         err++;
3289                         continue;
3290                 }
3291
3292                 if (!i915_gem_valid_gtt_space(dev,
3293                                               obj->gtt_space,
3294                                               obj->cache_level)) {
3295                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3296                                i915_gem_obj_ggtt_offset(obj),
3297                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3298                                obj->cache_level);
3299                         err++;
3300                         continue;
3301                 }
3302         }
3303
3304         WARN_ON(err);
3305 #endif
3306 }
3307
3308 /**
3309  * Finds free space in the GTT aperture and binds the object there.
3310  */
3311 static int
3312 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3313                            struct i915_address_space *vm,
3314                            unsigned alignment,
3315                            bool map_and_fenceable,
3316                            bool nonblocking)
3317 {
3318         struct drm_device *dev = obj->base.dev;
3319         drm_i915_private_t *dev_priv = dev->dev_private;
3320         u32 size, fence_size, fence_alignment, unfenced_alignment;
3321         size_t gtt_max =
3322                 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3323         struct i915_vma *vma;
3324         int ret;
3325
3326         fence_size = i915_gem_get_gtt_size(dev,
3327                                            obj->base.size,
3328                                            obj->tiling_mode);
3329         fence_alignment = i915_gem_get_gtt_alignment(dev,
3330                                                      obj->base.size,
3331                                                      obj->tiling_mode, true);
3332         unfenced_alignment =
3333                 i915_gem_get_gtt_alignment(dev,
3334                                            obj->base.size,
3335                                            obj->tiling_mode, false);
3336
3337         if (alignment == 0)
3338                 alignment = map_and_fenceable ? fence_alignment :
3339                                                 unfenced_alignment;
3340         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3341                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3342                 return -EINVAL;
3343         }
3344
3345         size = map_and_fenceable ? fence_size : obj->base.size;
3346
3347         /* If the object is bigger than the entire aperture, reject it early
3348          * before evicting everything in a vain attempt to find space.
3349          */
3350         if (obj->base.size > gtt_max) {
3351                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3352                           obj->base.size,
3353                           map_and_fenceable ? "mappable" : "total",
3354                           gtt_max);
3355                 return -E2BIG;
3356         }
3357
3358         ret = i915_gem_object_get_pages(obj);
3359         if (ret)
3360                 return ret;
3361
3362         i915_gem_object_pin_pages(obj);
3363
3364         BUG_ON(!i915_is_ggtt(vm));
3365
3366         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3367         if (IS_ERR(vma)) {
3368                 ret = PTR_ERR(vma);
3369                 goto err_unpin;
3370         }
3371
3372         /* For now we only ever use 1 vma per object */
3373 #if 0
3374         WARN_ON(!list_is_singular(&obj->vma_list));
3375 #endif
3376
3377 search_free:
3378         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3379                                                   size, alignment,
3380                                                   obj->cache_level, 0, gtt_max,
3381                                                   DRM_MM_SEARCH_DEFAULT);
3382         if (ret) {
3383                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3384                                                obj->cache_level,
3385                                                map_and_fenceable,
3386                                                nonblocking);
3387                 if (ret == 0)
3388                         goto search_free;
3389
3390                 goto err_free_vma;
3391         }
3392         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3393                                               obj->cache_level))) {
3394                 ret = -EINVAL;
3395                 goto err_remove_node;
3396         }
3397
3398         ret = i915_gem_gtt_prepare_object(obj);
3399         if (ret)
3400                 goto err_remove_node;
3401
3402         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3403         list_add_tail(&vma->mm_list, &vm->inactive_list);
3404
3405         if (i915_is_ggtt(vm)) {
3406                 bool mappable, fenceable;
3407
3408                 fenceable = (vma->node.size == fence_size &&
3409                              (vma->node.start & (fence_alignment - 1)) == 0);
3410
3411                 mappable = (vma->node.start + obj->base.size <=
3412                             dev_priv->gtt.mappable_end);
3413
3414                 obj->map_and_fenceable = mappable && fenceable;
3415         }
3416
3417         WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3418
3419         trace_i915_vma_bind(vma, map_and_fenceable);
3420         i915_gem_verify_gtt(dev);
3421         return 0;
3422
3423 err_remove_node:
3424         drm_mm_remove_node(&vma->node);
3425 err_free_vma:
3426         i915_gem_vma_destroy(vma);
3427 err_unpin:
3428         i915_gem_object_unpin_pages(obj);
3429         return ret;
3430 }
3431
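/**
 * i915_gem_clflush_object - flush an object's pages out of the CPU caches
 * @obj: object to flush
 * @force: flush even if the object is coherent with the CPU cache
 *
 * Returns true if any cachelines were flushed, so the caller knows whether a
 * chipset flush is also required.
 */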
3432 bool
3433 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3434                         bool force)
3435 {
3436         /* If we don't have a page list set up, then we're not pinned
3437          * to GPU, and we can ignore the cache flush because it'll happen
3438          * again at bind time.
3439          */
3440         if (obj->pages == NULL)
3441                 return false;
3442
3443         /*
3444          * Stolen memory is always coherent with the GPU as it is explicitly
3445          * marked as wc by the system, or the system is cache-coherent.
3446          */
3447         if (obj->stolen)
3448                 return false;
3449
3450         /* If the GPU is snooping the contents of the CPU cache,
3451          * we do not need to manually clear the CPU cache lines.  However,
3452          * the caches are only snooped when the render cache is
3453          * flushed/invalidated.  As we always have to emit invalidations
3454          * and flushes when moving into and out of the RENDER domain, correct
3455          * snooping behaviour occurs naturally as the result of our domain
3456          * tracking.
3457          */
3458         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3459                 return false;
3460
3461         trace_i915_gem_object_clflush(obj);
3462         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3463
3464         return true;
3465 }
3466
3467 /** Flushes the GTT write domain for the object if it's dirty. */
3468 static void
3469 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3470 {
3471         uint32_t old_write_domain;
3472
3473         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3474                 return;
3475
3476         /* No actual flushing is required for the GTT write domain.  Writes
3477          * to it immediately go to main memory as far as we know, so there's
3478          * no chipset flush.  It also doesn't land in render cache.
3479          *
3480          * However, we do have to enforce the order so that all writes through
3481          * the GTT land before any writes to the device, such as updates to
3482          * the GATT itself.
3483          */
3484         wmb();
3485
3486         old_write_domain = obj->base.write_domain;
3487         obj->base.write_domain = 0;
3488
3489         trace_i915_gem_object_change_domain(obj,
3490                                             obj->base.read_domains,
3491                                             old_write_domain);
3492 }
3493
3494 /** Flushes the CPU write domain for the object if it's dirty. */
3495 static void
3496 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3497                                        bool force)
3498 {
3499         uint32_t old_write_domain;
3500
3501         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3502                 return;
3503
3504         if (i915_gem_clflush_object(obj, force))
3505                 i915_gem_chipset_flush(obj->base.dev);
3506
3507         old_write_domain = obj->base.write_domain;
3508         obj->base.write_domain = 0;
3509
3510         trace_i915_gem_object_change_domain(obj,
3511                                             obj->base.read_domains,
3512                                             old_write_domain);
3513 }
3514
3515 /**
3516  * Moves a single object to the GTT read, and possibly write domain.
3517  *
3518  * This function returns when the move is complete, including waiting on
3519  * flushes to occur.
3520  */
3521 int
3522 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3523 {
3524         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3525         uint32_t old_write_domain, old_read_domains;
3526         int ret;
3527
3528         /* Not valid to be called on unbound objects. */
3529         if (!i915_gem_obj_bound_any(obj))
3530                 return -EINVAL;
3531
3532         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3533                 return 0;
3534
3535         ret = i915_gem_object_wait_rendering(obj, !write);
3536         if (ret)
3537                 return ret;
3538
3539         i915_gem_object_flush_cpu_write_domain(obj, false);
3540
3541         /* Serialise direct access to this object with the barriers for
3542          * coherent writes from the GPU, by effectively invalidating the
3543          * GTT domain upon first access.
3544          */
3545         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3546                 mb();
3547
3548         old_write_domain = obj->base.write_domain;
3549         old_read_domains = obj->base.read_domains;
3550
3551         /* It should now be out of any other write domains, and we can update
3552          * the domain values for our changes.
3553          */
3554         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3555         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3556         if (write) {
3557                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3558                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3559                 obj->dirty = 1;
3560         }
3561
3562         trace_i915_gem_object_change_domain(obj,
3563                                             old_read_domains,
3564                                             old_write_domain);
3565
3566         /* And bump the LRU for this access */
3567         if (i915_gem_object_is_inactive(obj)) {
3568                 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3569                 if (vma)
3570                         list_move_tail(&vma->mm_list,
3571                                        &dev_priv->gtt.base.inactive_list);
3572
3573         }
3574
3575         return 0;
3576 }
3577
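/* Change the cache level (GTT PTE caching attributes) of an object, unbinding
 * or rebinding its mappings and dropping an incompatible fence as needed.
 */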
3578 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3579                                     enum i915_cache_level cache_level)
3580 {
3581         struct drm_device *dev = obj->base.dev;
3582         drm_i915_private_t *dev_priv = dev->dev_private;
3583         struct i915_vma *vma;
3584         int ret;
3585
3586         if (obj->cache_level == cache_level)
3587                 return 0;
3588
3589         if (obj->pin_count) {
3590                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3591                 return -EBUSY;
3592         }
3593
3594         list_for_each_entry(vma, &obj->vma_list, vma_link) {
3595                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3596                         ret = i915_vma_unbind(vma);
3597                         if (ret)
3598                                 return ret;
3599
3600                         break;
3601                 }
3602         }
3603
3604         if (i915_gem_obj_bound_any(obj)) {
3605                 ret = i915_gem_object_finish_gpu(obj);
3606                 if (ret)
3607                         return ret;
3608
3609                 i915_gem_object_finish_gtt(obj);
3610
3611                 /* Before SandyBridge, you could not use tiling or fence
3612                  * registers with snooped memory, so relinquish any fences
3613                  * currently pointing to our region in the aperture.
3614                  */
3615                 if (INTEL_INFO(dev)->gen < 6) {
3616                         ret = i915_gem_object_put_fence(obj);
3617                         if (ret)
3618                                 return ret;
3619                 }
3620
3621                 if (obj->has_global_gtt_mapping)
3622                         i915_gem_gtt_bind_object(obj, cache_level);
3623                 if (obj->has_aliasing_ppgtt_mapping)
3624                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3625                                                obj, cache_level);
3626         }
3627
3628         list_for_each_entry(vma, &obj->vma_list, vma_link)
3629                 vma->node.color = cache_level;
3630         obj->cache_level = cache_level;
3631
3632         if (cpu_write_needs_clflush(obj)) {
3633                 u32 old_read_domains, old_write_domain;
3634
3635                 /* If we're coming from LLC cached, then we haven't
3636                  * actually been tracking whether the data is in the
3637                  * CPU cache or not, since we only allow one bit set
3638                  * in obj->write_domain and have been skipping the clflushes.
3639                  * Just set it to the CPU cache for now.
3640                  */
3641                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3642
3643                 old_read_domains = obj->base.read_domains;
3644                 old_write_domain = obj->base.write_domain;
3645
3646                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3647                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3648
3649                 trace_i915_gem_object_change_domain(obj,
3650                                                     old_read_domains,
3651                                                     old_write_domain);
3652         }
3653
3654         i915_gem_verify_gtt(dev);
3655         return 0;
3656 }
3657
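/* Ioctl: report the current caching mode of a GEM object to userspace. */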
3658 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3659                                struct drm_file *file)
3660 {
3661         struct drm_i915_gem_caching *args = data;
3662         struct drm_i915_gem_object *obj;
3663         int ret;
3664
3665         ret = i915_mutex_lock_interruptible(dev);
3666         if (ret)
3667                 return ret;
3668
3669         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3670         if (&obj->base == NULL) {
3671                 ret = -ENOENT;
3672                 goto unlock;
3673         }
3674
3675         switch (obj->cache_level) {
3676         case I915_CACHE_LLC:
3677         case I915_CACHE_L3_LLC:
3678                 args->caching = I915_CACHING_CACHED;
3679                 break;
3680
3681         case I915_CACHE_WT:
3682                 args->caching = I915_CACHING_DISPLAY;
3683                 break;
3684
3685         default:
3686                 args->caching = I915_CACHING_NONE;
3687                 break;
3688         }
3689
3690         drm_gem_object_unreference(&obj->base);
3691 unlock:
3692         mutex_unlock(&dev->struct_mutex);
3693         return ret;
3694 }
3695
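/* Ioctl: set the caching mode requested by userspace, mapped onto a cache level. */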
3696 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3697                                struct drm_file *file)
3698 {
3699         struct drm_i915_gem_caching *args = data;
3700         struct drm_i915_gem_object *obj;
3701         enum i915_cache_level level;
3702         int ret;
3703
3704         switch (args->caching) {
3705         case I915_CACHING_NONE:
3706                 level = I915_CACHE_NONE;
3707                 break;
3708         case I915_CACHING_CACHED:
3709                 level = I915_CACHE_LLC;
3710                 break;
3711         case I915_CACHING_DISPLAY:
3712                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3713                 break;
3714         default:
3715                 return -EINVAL;
3716         }
3717
3718         ret = i915_mutex_lock_interruptible(dev);
3719         if (ret)
3720                 return ret;
3721
3722         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3723         if (&obj->base == NULL) {
3724                 ret = -ENOENT;
3725                 goto unlock;
3726         }
3727
3728         ret = i915_gem_object_set_cache_level(obj, level);
3729
3730         drm_gem_object_unreference(&obj->base);
3731 unlock:
3732         mutex_unlock(&dev->struct_mutex);
3733         return ret;
3734 }
3735
3736 static bool is_pin_display(struct drm_i915_gem_object *obj)
3737 {
3738         /* There are 3 sources that pin objects:
3739          *   1. The display engine (scanouts, sprites, cursors);
3740          *   2. Reservations for execbuffer;
3741          *   3. The user.
3742          *
3743          * We can ignore reservations as we hold the struct_mutex and
3744          * are only called outside of the reservation path.  The user
3745          * can only increment pin_count once, and so if after
3746          * subtracting the potential reference by the user, any pin_count
3747          * remains, it must be due to another use by the display engine.
3748          */
3749         return obj->pin_count - !!obj->user_pin_count;
3750 }
3751
3752 /*
3753  * Prepare buffer for display plane (scanout, cursors, etc).
3754  * Can be called from an uninterruptible phase (modesetting) and allows
3755  * any flushes to be pipelined (for pageflips).
3756  */
3757 int
3758 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3759                                      u32 alignment,
3760                                      struct intel_ring_buffer *pipelined)
3761 {
3762         u32 old_read_domains, old_write_domain;
3763         int ret;
3764
3765         if (pipelined != obj->ring) {
3766                 ret = i915_gem_object_sync(obj, pipelined);
3767                 if (ret)
3768                         return ret;
3769         }
3770
3771         /* Mark the pin_display early so that we account for the
3772          * display coherency whilst setting up the cache domains.
3773          */
3774         obj->pin_display = true;
3775
3776         /* The display engine is not coherent with the LLC cache on gen6.  As
3777          * a result, we make sure that the pinning that is about to occur is
3778          * done with uncached PTEs. This is lowest common denominator for all
3779          * chipsets.
3780          *
3781          * However for gen6+, we could do better by using the GFDT bit instead
3782          * of uncaching, which would allow us to flush all the LLC-cached data
3783          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3784          */
3785         ret = i915_gem_object_set_cache_level(obj,
3786                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3787         if (ret)
3788                 goto err_unpin_display;
3789
3790         /* As the user may map the buffer once pinned in the display plane
3791          * (e.g. libkms for the bootup splash), we have to ensure that we
3792          * always use map_and_fenceable for all scanout buffers.
3793          */
3794         ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3795         if (ret)
3796                 goto err_unpin_display;
3797
3798         i915_gem_object_flush_cpu_write_domain(obj, true);
3799
3800         old_write_domain = obj->base.write_domain;
3801         old_read_domains = obj->base.read_domains;
3802
3803         /* It should now be out of any other write domains, and we can update
3804          * the domain values for our changes.
3805          */
3806         obj->base.write_domain = 0;
3807         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3808
3809         trace_i915_gem_object_change_domain(obj,
3810                                             old_read_domains,
3811                                             old_write_domain);
3812
3813         return 0;
3814
3815 err_unpin_display:
3816         obj->pin_display = is_pin_display(obj);
3817         return ret;
3818 }
3819
3820 void
3821 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3822 {
3823         i915_gem_object_unpin(obj);
3824         obj->pin_display = is_pin_display(obj);
3825 }
3826
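/* Wait for outstanding rendering and drop the object's GPU read domains so the
 * GPU's caches and TLBs are invalidated before the next use.
 */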
3827 int
3828 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3829 {
3830         int ret;
3831
3832         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3833                 return 0;
3834
3835         ret = i915_gem_object_wait_rendering(obj, false);
3836         if (ret)
3837                 return ret;
3838
3839         /* Ensure that we invalidate the GPU's caches and TLBs. */
3840         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3841         return 0;
3842 }
3843
3844 /**
3845  * Moves a single object to the CPU read, and possibly write domain.
3846  *
3847  * This function returns when the move is complete, including waiting on
3848  * flushes to occur.
3849  */
3850 int
3851 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3852 {
3853         uint32_t old_write_domain, old_read_domains;
3854         int ret;
3855
3856         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3857                 return 0;
3858
3859         ret = i915_gem_object_wait_rendering(obj, !write);
3860         if (ret)
3861                 return ret;
3862
3863         i915_gem_object_flush_gtt_write_domain(obj);
3864
3865         old_write_domain = obj->base.write_domain;
3866         old_read_domains = obj->base.read_domains;
3867
3868         /* Flush the CPU cache if it's still invalid. */
3869         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3870                 i915_gem_clflush_object(obj, false);
3871
3872                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3873         }
3874
3875         /* It should now be out of any other write domains, and we can update
3876          * the domain values for our changes.
3877          */
3878         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3879
3880         /* If we're writing through the CPU, then the GPU read domains will
3881          * need to be invalidated at next use.
3882          */
3883         if (write) {
3884                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3885                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3886         }
3887
3888         trace_i915_gem_object_change_domain(obj,
3889                                             old_read_domains,
3890                                             old_write_domain);
3891
3892         return 0;
3893 }
3894
3895 /* Throttle our rendering by waiting until the ring has completed our requests
3896  * emitted over 20 msec ago.
3897  *
3898  * Note that if we were to use the current jiffies each time around the loop,
3899  * we wouldn't escape the function with any frames outstanding if the time to
3900  * render a frame was over 20ms.
3901  *
3902  * This should get us reasonable parallelism between CPU and GPU but also
3903  * relatively low latency when blocking on a particular request to finish.
3904  */
3905 static int
3906 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3907 {
3908         struct drm_i915_private *dev_priv = dev->dev_private;
3909         struct drm_i915_file_private *file_priv = file->driver_priv;
3910         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3911         struct drm_i915_gem_request *request;
3912         struct intel_ring_buffer *ring = NULL;
3913         unsigned reset_counter;
3914         u32 seqno = 0;
3915         int ret;
3916
3917         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3918         if (ret)
3919                 return ret;
3920
3921         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3922         if (ret)
3923                 return ret;
3924
3925         spin_lock(&file_priv->mm.lock);
3926         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3927                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3928                         break;
3929
3930                 ring = request->ring;
3931                 seqno = request->seqno;
3932         }
3933         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3934         spin_unlock(&file_priv->mm.lock);
3935
3936         if (seqno == 0)
3937                 return 0;
3938
3939         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3940         if (ret == 0)
3941                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3942
3943         return ret;
3944 }
3945
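/* Pin an object into the given address space, first binding it (or rebinding it
 * if the current placement violates the alignment or mappability constraints).
 */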
3946 int
3947 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3948                     struct i915_address_space *vm,
3949                     uint32_t alignment,
3950                     bool map_and_fenceable,
3951                     bool nonblocking)
3952 {
3953         struct i915_vma *vma;
3954         int ret;
3955
3956         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3957                 return -EBUSY;
3958
3959         WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3960
3961         vma = i915_gem_obj_to_vma(obj, vm);
3962
3963         if (vma) {
3964                 if ((alignment &&
3965                      vma->node.start & (alignment - 1)) ||
3966                     (map_and_fenceable && !obj->map_and_fenceable)) {
3967                         WARN(obj->pin_count,
3968                              "bo is already pinned with incorrect alignment:"
3969                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3970                              " obj->map_and_fenceable=%d\n",
3971                              i915_gem_obj_offset(obj, vm), alignment,
3972                              map_and_fenceable,
3973                              obj->map_and_fenceable);
3974                         ret = i915_vma_unbind(vma);
3975                         if (ret)
3976                                 return ret;
3977                 }
3978         }
3979
3980         if (!i915_gem_obj_bound(obj, vm)) {
3981                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3982
3983                 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3984                                                  map_and_fenceable,
3985                                                  nonblocking);
3986                 if (ret)
3987                         return ret;
3988
3989                 if (!dev_priv->mm.aliasing_ppgtt)
3990                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3991         }
3992
3993         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3994                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3995
3996         obj->pin_count++;
3997         obj->pin_mappable |= map_and_fenceable;
3998
3999         return 0;
4000 }
4001
4002 void
4003 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
4004 {
4005         BUG_ON(obj->pin_count == 0);
4006         BUG_ON(!i915_gem_obj_bound_any(obj));
4007
4008         if (--obj->pin_count == 0)
4009                 obj->pin_mappable = false;
4010 }
4011
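/* Ioctl: pin a buffer object into the GGTT on behalf of userspace and return
 * its GGTT offset.
 */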
4012 int
4013 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4014                    struct drm_file *file)
4015 {
4016         struct drm_i915_gem_pin *args = data;
4017         struct drm_i915_gem_object *obj;
4018         int ret;
4019
4020         ret = i915_mutex_lock_interruptible(dev);
4021         if (ret)
4022                 return ret;
4023
4024         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4025         if (&obj->base == NULL) {
4026                 ret = -ENOENT;
4027                 goto unlock;
4028         }
4029
4030         if (obj->madv != I915_MADV_WILLNEED) {
4031                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4032                 ret = -EINVAL;
4033                 goto out;
4034         }
4035
4036         if (obj->pin_filp != NULL && obj->pin_filp != file) {
4037                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4038                           args->handle);
4039                 ret = -EINVAL;
4040                 goto out;
4041         }
4042
4043         if (obj->user_pin_count == ULONG_MAX) {
4044                 ret = -EBUSY;
4045                 goto out;
4046         }
4047
4048         if (obj->user_pin_count == 0) {
4049                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
4050                 if (ret)
4051                         goto out;
4052         }
4053
4054         obj->user_pin_count++;
4055         obj->pin_filp = file;
4056
4057         args->offset = i915_gem_obj_ggtt_offset(obj);
4058 out:
4059         drm_gem_object_unreference(&obj->base);
4060 unlock:
4061         mutex_unlock(&dev->struct_mutex);
4062         return ret;
4063 }
4064
4065 int
4066 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4067                      struct drm_file *file)
4068 {
4069         struct drm_i915_gem_pin *args = data;
4070         struct drm_i915_gem_object *obj;
4071         int ret;
4072
4073         ret = i915_mutex_lock_interruptible(dev);
4074         if (ret)
4075                 return ret;
4076
4077         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4078         if (&obj->base == NULL) {
4079                 ret = -ENOENT;
4080                 goto unlock;
4081         }
4082
4083         if (obj->pin_filp != file) {
4084                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
4085                           args->handle);
4086                 ret = -EINVAL;
4087                 goto out;
4088         }
4089         obj->user_pin_count--;
4090         if (obj->user_pin_count == 0) {
4091                 obj->pin_filp = NULL;
4092                 i915_gem_object_unpin(obj);
4093         }
4094
4095 out:
4096         drm_gem_object_unreference(&obj->base);
4097 unlock:
4098         mutex_unlock(&dev->struct_mutex);
4099         return ret;
4100 }
4101
4102 int
4103 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4104                     struct drm_file *file)
4105 {
4106         struct drm_i915_gem_busy *args = data;
4107         struct drm_i915_gem_object *obj;
4108         int ret;
4109
4110         ret = i915_mutex_lock_interruptible(dev);
4111         if (ret)
4112                 return ret;
4113
4114         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4115         if (&obj->base == NULL) {
4116                 ret = -ENOENT;
4117                 goto unlock;
4118         }
4119
4120         /* Count all active objects as busy, even if they are currently not used
4121          * by the gpu. Users of this interface expect objects to eventually
4122          * become non-busy without any further actions, therefore emit any
4123          * necessary flushes here.
4124          */
4125         ret = i915_gem_object_flush_active(obj);
4126
4127         args->busy = obj->active;
4128         if (obj->ring) {
4129                 args->busy |= intel_ring_flag(obj->ring) << 16;
4130         }
4131
4132         drm_gem_object_unreference(&obj->base);
4133 unlock:
4134         mutex_unlock(&dev->struct_mutex);
4135         return ret;
4136 }
4137
4138 int
4139 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4140                         struct drm_file *file_priv)
4141 {
4142         return i915_gem_ring_throttle(dev, file_priv);
4143 }
4144
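/* Ioctl: mark an object's backing storage as needed or discardable and report
 * whether its contents are still retained.
 */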
4145 int
4146 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4147                        struct drm_file *file_priv)
4148 {
4149         struct drm_i915_gem_madvise *args = data;
4150         struct drm_i915_gem_object *obj;
4151         int ret;
4152
4153         switch (args->madv) {
4154         case I915_MADV_DONTNEED:
4155         case I915_MADV_WILLNEED:
4156             break;
4157         default:
4158             return -EINVAL;
4159         }
4160
4161         ret = i915_mutex_lock_interruptible(dev);
4162         if (ret)
4163                 return ret;
4164
4165         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4166         if (&obj->base == NULL) {
4167                 ret = -ENOENT;
4168                 goto unlock;
4169         }
4170
4171         if (obj->pin_count) {
4172                 ret = -EINVAL;
4173                 goto out;
4174         }
4175
4176         if (obj->madv != __I915_MADV_PURGED)
4177                 obj->madv = args->madv;
4178
4179         /* if the object is no longer attached, discard its backing storage */
4180         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4181                 i915_gem_object_truncate(obj);
4182
4183         args->retained = obj->madv != __I915_MADV_PURGED;
4184
4185 out:
4186         drm_gem_object_unreference(&obj->base);
4187 unlock:
4188         mutex_unlock(&dev->struct_mutex);
4189         return ret;
4190 }
4191
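/* Common initialisation shared by all newly allocated GEM objects. */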
4192 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4193                           const struct drm_i915_gem_object_ops *ops)
4194 {
4195         INIT_LIST_HEAD(&obj->global_list);
4196         INIT_LIST_HEAD(&obj->ring_list);
4197         INIT_LIST_HEAD(&obj->obj_exec_link);
4198         INIT_LIST_HEAD(&obj->vma_list);
4199
4200         obj->ops = ops;
4201
4202         obj->fence_reg = I915_FENCE_REG_NONE;
4203         obj->madv = I915_MADV_WILLNEED;
4204         /* Avoid an unnecessary call to unbind on the first bind. */
4205         obj->map_and_fenceable = true;
4206
4207         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4208 }
4209
4210 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4211         .get_pages = i915_gem_object_get_pages_gtt,
4212         .put_pages = i915_gem_object_put_pages_gtt,
4213 };
4214
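/* Allocate and initialise a new GEM object of the given size, starting in the
 * CPU domain and using LLC caching where the hardware supports it.
 */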
4215 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4216                                                   size_t size)
4217 {
4218         struct drm_i915_gem_object *obj;
4219 #if 0
4220         struct address_space *mapping;
4221         gfp_t mask;
4222 #endif
4223
4224         obj = i915_gem_object_alloc(dev);
4225         if (obj == NULL)
4226                 return NULL;
4227
4228         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4229                 i915_gem_object_free(obj);
4230                 return NULL;
4231         }
4232
4233 #if 0
4234         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4235         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4236                 /* 965gm cannot relocate objects above 4GiB. */
4237                 mask &= ~__GFP_HIGHMEM;
4238                 mask |= __GFP_DMA32;
4239         }
4240
4241         mapping = file_inode(obj->base.filp)->i_mapping;
4242         mapping_set_gfp_mask(mapping, mask);
4243 #endif
4244
4245         i915_gem_object_init(obj, &i915_gem_object_ops);
4246
4247         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4248         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4249
4250         if (HAS_LLC(dev)) {
4251                 /* On some devices, we can have the GPU use the LLC (the CPU
4252                  * cache) for about a 10% performance improvement
4253                  * compared to uncached.  Graphics requests other than
4254                  * display scanout are coherent with the CPU in
4255                  * accessing this cache.  This means in this mode we
4256                  * don't need to clflush on the CPU side, and on the
4257                  * GPU side we only need to flush internal caches to
4258                  * get data visible to the CPU.
4259                  *
4260                  * However, we maintain the display planes as UC, and so
4261                  * need to rebind when first used as such.
4262                  */
4263                 obj->cache_level = I915_CACHE_LLC;
4264         } else
4265                 obj->cache_level = I915_CACHE_NONE;
4266
4267         trace_i915_gem_object_create(obj);
4268
4269         return obj;
4270 }
4271
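/* Final teardown of a GEM object: unbind all of its VMAs, release its pages and
 * backing storage, and free the object itself.
 */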
4272 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4273 {
4274         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4275         struct drm_device *dev = obj->base.dev;
4276         drm_i915_private_t *dev_priv = dev->dev_private;
4277         struct i915_vma *vma, *next;
4278
4279         intel_runtime_pm_get(dev_priv);
4280
4281         trace_i915_gem_object_destroy(obj);
4282
4283         if (obj->phys_obj)
4284                 i915_gem_detach_phys_object(dev, obj);
4285
4286         obj->pin_count = 0;
4287         /* NB: 0 or 1 elements */
4288 #if 0
4289         WARN_ON(!list_empty(&obj->vma_list) &&
4290                 !list_is_singular(&obj->vma_list));
4291 #endif
4292         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4293                 int ret = i915_vma_unbind(vma);
4294                 if (WARN_ON(ret == -ERESTARTSYS)) {
4295                         bool was_interruptible;
4296
4297                         was_interruptible = dev_priv->mm.interruptible;
4298                         dev_priv->mm.interruptible = false;
4299
4300                         WARN_ON(i915_vma_unbind(vma));
4301
4302                         dev_priv->mm.interruptible = was_interruptible;
4303                 }
4304         }
4305
4306         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4307          * before progressing. */
4308         if (obj->stolen)
4309                 i915_gem_object_unpin_pages(obj);
4310
4311         if (WARN_ON(obj->pages_pin_count))
4312                 obj->pages_pin_count = 0;
4313         i915_gem_object_put_pages(obj);
4314         i915_gem_object_free_mmap_offset(obj);
4315
4316         BUG_ON(obj->pages);
4317
4318 #if 0
4319         if (obj->base.import_attach)
4320                 drm_prime_gem_destroy(&obj->base, NULL);
4321 #endif
4322
4323         drm_gem_object_release(&obj->base);
4324         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4325
4326         kfree(obj->bit_17);
4327         i915_gem_object_free(obj);
4328
4329         intel_runtime_pm_put(dev_priv);
4330 }
4331
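/* Return the VMA linking @obj into @vm, or NULL if no such VMA exists. */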
4332 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4333                                      struct i915_address_space *vm)
4334 {
4335         struct i915_vma *vma;
4336         list_for_each_entry(vma, &obj->vma_list, vma_link)
4337                 if (vma->vm == vm)
4338                         return vma;
4339
4340         return NULL;
4341 }
4342
4343 static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4344                                               struct i915_address_space *vm)
4345 {
4346         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4347         if (vma == NULL)
4348                 return ERR_PTR(-ENOMEM);
4349
4350         INIT_LIST_HEAD(&vma->vma_link);
4351         INIT_LIST_HEAD(&vma->mm_list);
4352         INIT_LIST_HEAD(&vma->exec_list);
4353         vma->vm = vm;
4354         vma->obj = obj;
4355
4356         /* Keep GGTT vmas first to make debug easier */
4357         if (i915_is_ggtt(vm))
4358                 list_add(&vma->vma_link, &obj->vma_list);
4359         else
4360                 list_add_tail(&vma->vma_link, &obj->vma_list);
4361
4362         return vma;
4363 }
4364
4365 struct i915_vma *
4366 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4367                                   struct i915_address_space *vm)
4368 {
4369         struct i915_vma *vma;
4370
4371         vma = i915_gem_obj_to_vma(obj, vm);
4372         if (!vma)
4373                 vma = __i915_gem_vma_create(obj, vm);
4374
4375         return vma;
4376 }
4377
4378 void i915_gem_vma_destroy(struct i915_vma *vma)
4379 {
4380         WARN_ON(vma->node.allocated);
4381
4382         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4383         if (!list_empty(&vma->exec_list))
4384                 return;
4385
4386         list_del(&vma->vma_link);
4387
4388         kfree(vma);
4389 }
4390
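/* Quiesce the GPU, retire outstanding requests and tear down the rings in
 * preparation for suspend.
 */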
4391 int
4392 i915_gem_suspend(struct drm_device *dev)
4393 {
4394         drm_i915_private_t *dev_priv = dev->dev_private;
4395         int ret = 0;
4396
4397         mutex_lock(&dev->struct_mutex);
4398         if (dev_priv->ums.mm_suspended)
4399                 goto err;
4400
4401         ret = i915_gpu_idle(dev);
4402         if (ret)
4403                 goto err;
4404
4405         i915_gem_retire_requests(dev);
4406
4407         /* Under UMS, be paranoid and evict. */
4408         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4409                 i915_gem_evict_everything(dev);
4410
4411         i915_kernel_lost_context(dev);
4412         i915_gem_cleanup_ringbuffer(dev);
4413
4414         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4415          * We need to replace this with a semaphore, or something.
4416          * And not confound ums.mm_suspended!
4417          */
4418         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4419                                                              DRIVER_MODESET);
4420         mutex_unlock(&dev->struct_mutex);
4421
4422         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4423         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4424         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4425
4426         return 0;
4427
4428 err:
4429         mutex_unlock(&dev->struct_mutex);
4430         return ret;
4431 }
4432
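/* Re-emit the saved L3 remapping information for @slice through the render
 * ring; a no-op when HAS_L3_DPF() is false or no remap info was saved.
 */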
4433 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4434 {
4435         struct drm_device *dev = ring->dev;
4436         drm_i915_private_t *dev_priv = dev->dev_private;
4437         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4438         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4439         int i, ret;
4440
4441         if (!HAS_L3_DPF(dev) || !remap_info)
4442                 return 0;
4443
4444         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4445         if (ret)
4446                 return ret;
4447
4448         /*
4449          * Note: We do not worry about the concurrent register cacheline hang
4450          * here because no other code should access these registers other than
4451          * at initialization time.
4452          */
4453         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4454                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4455                 intel_ring_emit(ring, reg_base + i);
4456                 intel_ring_emit(ring, remap_info[i/4]);
4457         }
4458
4459         intel_ring_advance(ring);
4460
4461         return ret;
4462 }
4463
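/* Enable the per-generation tiling/swizzle control bits when bit-6 swizzling
 * is in use.
 */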
4464 void i915_gem_init_swizzling(struct drm_device *dev)
4465 {
4466         drm_i915_private_t *dev_priv = dev->dev_private;
4467
4468         if (INTEL_INFO(dev)->gen < 5 ||
4469             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4470                 return;
4471
4472         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4473                                  DISP_TILE_SURFACE_SWIZZLING);
4474
4475         if (IS_GEN5(dev))
4476                 return;
4477
4478         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4479         if (IS_GEN6(dev))
4480                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4481         else if (IS_GEN7(dev))
4482                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4483         else if (IS_GEN8(dev))
4484                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4485         else
4486                 BUG();
4487 }
4488
4489 static bool
4490 intel_enable_blt(struct drm_device *dev)
4491 {
4492         int revision;
4493
4494         if (!HAS_BLT(dev))
4495                 return false;
4496
4497         /* The blitter was dysfunctional on early prototypes */
4498         revision = pci_read_config(dev->dev, PCIR_REVID, 1);
4499         if (IS_GEN6(dev) && revision < 8) {
4500                 DRM_INFO("BLT not supported on this pre-production hardware;"
4501                          " graphics performance will be degraded.\n");
4502                 return false;
4503         }
4504
4505         return true;
4506 }
4507
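/* Initialise every ring present on this device, unwinding on failure. */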
4508 static int i915_gem_init_rings(struct drm_device *dev)
4509 {
4510         struct drm_i915_private *dev_priv = dev->dev_private;
4511         int ret;
4512
4513         ret = intel_init_render_ring_buffer(dev);
4514         if (ret)
4515                 return ret;
4516
4517         if (HAS_BSD(dev)) {
4518                 ret = intel_init_bsd_ring_buffer(dev);
4519                 if (ret)
4520                         goto cleanup_render_ring;
4521         }
4522
4523         if (intel_enable_blt(dev)) {
4524                 ret = intel_init_blt_ring_buffer(dev);
4525                 if (ret)
4526                         goto cleanup_bsd_ring;
4527         }
4528
4529         if (HAS_VEBOX(dev)) {
4530                 ret = intel_init_vebox_ring_buffer(dev);
4531                 if (ret)
4532                         goto cleanup_blt_ring;
4533         }
4534
4536         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4537         if (ret)
4538                 goto cleanup_vebox_ring;
4539
4540         return 0;
4541
4542 cleanup_vebox_ring:
4543         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4544 cleanup_blt_ring:
4545         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4546 cleanup_bsd_ring:
4547         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4548 cleanup_render_ring:
4549         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4550
4551         return ret;
4552 }
4553
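/*
 * i915_gem_init_hw - (re)initialize GEM-related hardware state
 *
 * Called from i915_gem_init() and from the UMS entervt path below.
 * Applies a few platform workarounds (eLLC hashing mask, Haswell GT3
 * slice predicate, PCH-NOP message control), programs swizzling, brings
 * up the rings, replays the L3 remap data for every slice, and finally
 * initializes hardware contexts and, when present, the aliasing PPGTT.
 * A PPGTT enable failure is reported but not treated as fatal.
 */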
4554 int
4555 i915_gem_init_hw(struct drm_device *dev)
4556 {
4557         drm_i915_private_t *dev_priv = dev->dev_private;
4558         int ret, i;
4559
4560 #if 0
4561         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4562                 return -EIO;
4563 #endif
4564
4565         if (dev_priv->ellc_size)
4566                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4567
4568         if (IS_HASWELL(dev))
4569                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4570                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4571
4572         if (HAS_PCH_NOP(dev)) {
4573                 u32 temp = I915_READ(GEN7_MSG_CTL);
4574                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4575                 I915_WRITE(GEN7_MSG_CTL, temp);
4576         }
4577
4578         i915_gem_init_swizzling(dev);
4579
4580         ret = i915_gem_init_rings(dev);
4581         if (ret)
4582                 return ret;
4583
4584         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4585                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4586
4587         /*
4588          * XXX: There was some w/a described somewhere suggesting loading
4589          * contexts before PPGTT.
4590          */
4591         ret = i915_gem_context_init(dev);
4592         if (ret) {
4593                 i915_gem_cleanup_ringbuffer(dev);
4594                 DRM_ERROR("Context initialization failed %d\n", ret);
4595                 return ret;
4596         }
4597
4598         if (dev_priv->mm.aliasing_ppgtt) {
4599                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4600                 if (ret) {
4601                         i915_gem_cleanup_aliasing_ppgtt(dev);
4602                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4603                 }
4604         }
4605
4606         return 0;
4607 }
4608
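/*
 * i915_gem_init - top-level GEM initialization at driver load
 *
 * Runs under struct_mutex: apply the Valleyview "allow wake" workaround,
 * set up the global GTT and then do the hardware initialization above.
 * Without KMS, hardware batchbuffers are allowed by default for legacy
 * (DRI1) userspace.
 */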
4609 int i915_gem_init(struct drm_device *dev)
4610 {
4611         struct drm_i915_private *dev_priv = dev->dev_private;
4612         int ret;
4613
4614         mutex_lock(&dev->struct_mutex);
4615
4616         if (IS_VALLEYVIEW(dev)) {
4617                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4618                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4619                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4620                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4621         }
4622
4623         i915_gem_init_global_gtt(dev);
4624
4625         ret = i915_gem_init_hw(dev);
4626         mutex_unlock(&dev->struct_mutex);
4627         if (ret) {
4628                 i915_gem_cleanup_aliasing_ppgtt(dev);
4629                 return ret;
4630         }
4631
4632         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4633         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4634                 dev_priv->dri1.allow_batchbuffer = 1;
4635         return 0;
4636 }
4637
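/* Tear down every ring that i915_gem_init_rings() brought up. */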
4638 void
4639 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4640 {
4641         drm_i915_private_t *dev_priv = dev->dev_private;
4642         struct intel_ring_buffer *ring;
4643         int i;
4644
4645         for_each_ring(ring, dev_priv, i)
4646                 intel_cleanup_ring_buffer(ring);
4647 }
4648
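/*
 * Legacy UMS entry/exit points; both are no-ops under KMS.  entervt
 * reinitializes the hardware (optimistically clearing any wedged state)
 * and installs the interrupt handler, while leavevt uninstalls the IRQ
 * and idles the GPU via i915_gem_suspend().
 */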
4649 int
4650 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4651                        struct drm_file *file_priv)
4652 {
4653         struct drm_i915_private *dev_priv = dev->dev_private;
4654         int ret;
4655
4656         if (drm_core_check_feature(dev, DRIVER_MODESET))
4657                 return 0;
4658
4659         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4660                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4661                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4662         }
4663
4664         mutex_lock(&dev->struct_mutex);
4665         dev_priv->ums.mm_suspended = 0;
4666
4667         ret = i915_gem_init_hw(dev);
4668         if (ret != 0) {
4669                 mutex_unlock(&dev->struct_mutex);
4670                 return ret;
4671         }
4672
4673         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4674         mutex_unlock(&dev->struct_mutex);
4675
4676         ret = drm_irq_install(dev);
4677         if (ret)
4678                 goto cleanup_ringbuffer;
4679
4680         return 0;
4681
4682 cleanup_ringbuffer:
4683         mutex_lock(&dev->struct_mutex);
4684         i915_gem_cleanup_ringbuffer(dev);
4685         dev_priv->ums.mm_suspended = 1;
4686         mutex_unlock(&dev->struct_mutex);
4687
4688         return ret;
4689 }
4690
4691 int
4692 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4693                        struct drm_file *file_priv)
4694 {
4695         if (drm_core_check_feature(dev, DRIVER_MODESET))
4696                 return 0;
4697
4698         drm_irq_uninstall(dev);
4699
4700         return i915_gem_suspend(dev);
4701 }
4702
4703 void
4704 i915_gem_lastclose(struct drm_device *dev)
4705 {
4706         int ret;
4707
4708         if (drm_core_check_feature(dev, DRIVER_MODESET))
4709                 return;
4710
4711         ret = i915_gem_suspend(dev);
4712         if (ret)
4713                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4714 }
4715
4716 static void
4717 init_ring_lists(struct intel_ring_buffer *ring)
4718 {
4719         INIT_LIST_HEAD(&ring->active_list);
4720         INIT_LIST_HEAD(&ring->request_list);
4721 }
4722
4723 static void i915_init_vm(struct drm_i915_private *dev_priv,
4724                          struct i915_address_space *vm)
4725 {
4726         vm->dev = dev_priv->dev;
4727         INIT_LIST_HEAD(&vm->active_list);
4728         INIT_LIST_HEAD(&vm->inactive_list);
4729         INIT_LIST_HEAD(&vm->global_link);
4730         list_add(&vm->global_link, &dev_priv->vm_list);
4731 }
4732
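/*
 * i915_gem_load - one-time setup of GEM bookkeeping at driver load
 *
 * Initializes the VM and object lists, per-ring lists, fence-register
 * LRU lists, the retire/idle delayed work and the reset wait queue,
 * applies the GEN3 ARB C3 LP workaround and detects the bit-6 swizzle
 * mode.  The number of fence registers depends on the generation:
 *
 *     gen >= 7 (except Valleyview)        32
 *     gen >= 4, 945G/GM, G33              16
 *     everything older                     8
 *
 * Legacy (non-KMS) userspace additionally keeps fence regs 0-2 for the
 * front, back and depth buffers, so allocation starts at 3 there.
 */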
4733 void
4734 i915_gem_load(struct drm_device *dev)
4735 {
4736         drm_i915_private_t *dev_priv = dev->dev_private;
4737         int i;
4738
4739         INIT_LIST_HEAD(&dev_priv->vm_list);
4740         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4741
4742         INIT_LIST_HEAD(&dev_priv->context_list);
4743         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4744         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4745         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4746         for (i = 0; i < I915_NUM_RINGS; i++)
4747                 init_ring_lists(&dev_priv->ring[i]);
4748         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4749                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4750         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4751                           i915_gem_retire_work_handler);
4752         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4753                           i915_gem_idle_work_handler);
4754         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4755
4756         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4757         if (IS_GEN3(dev)) {
4758                 I915_WRITE(MI_ARB_STATE,
4759                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4760         }
4761
4762         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4763
4764         /* Old X drivers will take 0-2 for front, back, depth buffers */
4765         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4766                 dev_priv->fence_reg_start = 3;
4767
4768         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4769                 dev_priv->num_fence_regs = 32;
4770         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4771                 dev_priv->num_fence_regs = 16;
4772         else
4773                 dev_priv->num_fence_regs = 8;
4774
4775         /* Initialize fence registers to zero */
4776         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4777         i915_gem_restore_fences(dev);
4778
4779         i915_gem_detect_bit_6_swizzle(dev);
4780         init_waitqueue_head(&dev_priv->pending_flip_queue);
4781
4782         dev_priv->mm.interruptible = true;
4783
4784 #if 0
4785         dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4786         dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4787         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4788         register_shrinker(&dev_priv->mm.inactive_shrinker);
4789         /* Old FreeBSD code */
4790         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4791             i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4792 #endif
4793 }
4794
4795 /*
4796  * Create a physically contiguous memory object for a given phys-object
4797  * slot, e.g. to back cursor and overlay registers.
4798  */
4799 static int i915_gem_init_phys_object(struct drm_device *dev,
4800                                      int id, int size, int align)
4801 {
4802         drm_i915_private_t *dev_priv = dev->dev_private;
4803         struct drm_i915_gem_phys_object *phys_obj;
4804         int ret;
4805
4806         if (dev_priv->mm.phys_objs[id - 1] || !size)
4807                 return 0;
4808
4809         phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4810         if (!phys_obj)
4811                 return -ENOMEM;
4812
4813         phys_obj->id = id;
4814
4815         phys_obj->handle = drm_pci_alloc(dev, size, align);
4816         if (!phys_obj->handle) {
4817                 ret = -ENOMEM;
4818                 goto kfree_obj;
4819         }
4820 #ifdef CONFIG_X86
4821         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4822 #endif
4823         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4824             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4825
4826         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4827
4828         return 0;
4829 kfree_obj:
4830         kfree(phys_obj);
4831         return ret;
4832 }
4833
4834 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4835 {
4836         drm_i915_private_t *dev_priv = dev->dev_private;
4837         struct drm_i915_gem_phys_object *phys_obj;
4838
4839         if (!dev_priv->mm.phys_objs[id - 1])
4840                 return;
4841
4842         phys_obj = dev_priv->mm.phys_objs[id - 1];
4843         if (phys_obj->cur_obj) {
4844                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4845         }
4846
4847 #ifdef CONFIG_X86
4848         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4849 #endif
4850         drm_pci_free(dev, phys_obj->handle);
4851         kfree(phys_obj);
4852         dev_priv->mm.phys_objs[id - 1] = NULL;
4853 }
4854
4855 void i915_gem_free_all_phys_object(struct drm_device *dev)
4856 {
4857         int i;
4858
4859         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4860                 i915_gem_free_phys_object(dev, i);
4861 }
4862
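/*
 * Copy the contents of the physically contiguous backing store back into
 * the object's regular VM pages, clflush them, and unwire/release the
 * pages again before detaching the phys object.
 */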
4863 void i915_gem_detach_phys_object(struct drm_device *dev,
4864                                  struct drm_i915_gem_object *obj)
4865 {
4866         struct vm_object *mapping = obj->base.vm_obj;
4867         char *vaddr;
4868         int i;
4869         int page_count;
4870
4871         if (!obj->phys_obj)
4872                 return;
4873         vaddr = obj->phys_obj->handle->vaddr;
4874
4875         page_count = obj->base.size / PAGE_SIZE;
4876         for (i = 0; i < page_count; i++) {
4877                 struct vm_page *page = shmem_read_mapping_page(mapping, i);
4878                 if (!IS_ERR(page)) {
4879                         char *dst = kmap_atomic(page);
4880                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4881                         kunmap_atomic(dst);
4882
4883                         drm_clflush_pages(&page, 1);
4884
4885                         set_page_dirty(page);
4886                         mark_page_accessed(page);
4887 #if 0
4888                         page_cache_release(page);
4889 #endif
4890                         vm_page_busy_wait(page, FALSE, "i915gem");
4891                         vm_page_unwire(page, 0);
4892                         vm_page_wakeup(page);
4893                 }
4894         }
4895         i915_gem_chipset_flush(dev);
4896
4897         obj->phys_obj->cur_obj = NULL;
4898         obj->phys_obj = NULL;
4899 }
4900
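/*
 * Bind an object to one of the driver's physically contiguous buffers
 * (creating the phys object on first use) and copy the object's current
 * page contents into that buffer.  Used for objects the hardware reads
 * from contiguous physical memory, e.g. cursor and overlay registers.
 */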
4901 int
4902 i915_gem_attach_phys_object(struct drm_device *dev,
4903                             struct drm_i915_gem_object *obj,
4904                             int id,
4905                             int align)
4906 {
4907         struct vm_object *mapping = obj->base.vm_obj;
4908         drm_i915_private_t *dev_priv = dev->dev_private;
4909         int ret = 0;
4910         int page_count;
4911         int i;
4912
4913         if (id > I915_MAX_PHYS_OBJECT)
4914                 return -EINVAL;
4915
4916         if (obj->phys_obj) {
4917                 if (obj->phys_obj->id == id)
4918                         return 0;
4919                 i915_gem_detach_phys_object(dev, obj);
4920         }
4921
4922         /* create a new object */
4923         if (!dev_priv->mm.phys_objs[id - 1]) {
4924                 ret = i915_gem_init_phys_object(dev, id,
4925                                                 obj->base.size, align);
4926                 if (ret) {
4927                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4928                                   id, obj->base.size);
4929                         return ret;
4930                 }
4931         }
4932
4933         /* bind to the object */
4934         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4935         obj->phys_obj->cur_obj = obj;
4936
4937         page_count = obj->base.size / PAGE_SIZE;
4938
4939         for (i = 0; i < page_count; i++) {
4940                 struct vm_page *page;
4941                 char *dst, *src;
4942
4943                 page = shmem_read_mapping_page(mapping, i);
4944                 if (IS_ERR(page))
4945                         return PTR_ERR(page);
4946
4947                 src = kmap_atomic(page);
4948                 dst = (char*)obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4949                 memcpy(dst, src, PAGE_SIZE);
4950                 kunmap_atomic(src);
4951
4952                 mark_page_accessed(page);
4953 #if 0
4954                 page_cache_release(page);
4955 #endif
4956                 vm_page_busy_wait(page, FALSE, "i915gem");
4957                 vm_page_unwire(page, 0);
4958                 vm_page_wakeup(page);
4959         }
4960
4961         return 0;
4962 }
4963
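/*
 * pwrite fast path for phys-object-backed buffers: try a non-faulting
 * copy first and only fall back to a regular copy_from_user(), with
 * struct_mutex temporarily dropped, when that fails.
 */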
4964 static int
4965 i915_gem_phys_pwrite(struct drm_device *dev,
4966                      struct drm_i915_gem_object *obj,
4967                      struct drm_i915_gem_pwrite *args,
4968                      struct drm_file *file_priv)
4969 {
4970         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4971         char __user *user_data = to_user_ptr(args->data_ptr);
4972
4973         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4974                 unsigned long unwritten;
4975
4976                 /* The physical object once assigned is fixed for the lifetime
4977                  * of the obj, so we can safely drop the lock and continue
4978                  * to access vaddr.
4979                  */
4980                 mutex_unlock(&dev->struct_mutex);
4981                 unwritten = copy_from_user(vaddr, user_data, args->size);
4982                 mutex_lock(&dev->struct_mutex);
4983                 if (unwritten)
4984                         return -EFAULT;
4985         }
4986
4987         i915_gem_chipset_flush(dev);
4988         return 0;
4989 }
4990
4991 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4992 {
4993         struct drm_i915_file_private *file_priv = file->driver_priv;
4994
4995         /* Clean up our request list when the client is going away, so that
4996          * later retire_requests won't dereference our soon-to-be-gone
4997          * file_priv.
4998          */
4999         spin_lock(&file_priv->mm.lock);
5000         while (!list_empty(&file_priv->mm.request_list)) {
5001                 struct drm_i915_gem_request *request;
5002
5003                 request = list_first_entry(&file_priv->mm.request_list,
5004                                            struct drm_i915_gem_request,
5005                                            client_list);
5006                 list_del(&request->client_list);
5007                 request->file_priv = NULL;
5008         }
5009         spin_unlock(&file_priv->mm.lock);
5010 }
5011
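/*
 * DragonFly VM pager glue for GEM mmap objects: the constructor merely
 * picks a pager color, while the destructor releases the mmap offset,
 * tears down any GTT mapping and drops the GEM reference under
 * struct_mutex.
 */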
5012 int
5013 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
5014     vm_ooffset_t foff, struct ucred *cred, u_short *color)
5015 {
5016         *color = 0; /* XXXKIB */
5017         return (0);
5018 }
5019
5020 void
5021 i915_gem_pager_dtor(void *handle)
5022 {
5023         struct drm_gem_object *obj;
5024         struct drm_device *dev;
5025
5026         obj = handle;
5027         dev = obj->dev;
5028
5029         mutex_lock(&dev->struct_mutex);
5030         drm_gem_free_mmap_offset(obj);
5031         i915_gem_release_mmap(to_intel_bo(obj));
5032         drm_gem_object_unreference(obj);
5033         mutex_unlock(&dev->struct_mutex);
5034 }
5035
5036 static void
5037 i915_gem_file_idle_work_handler(struct work_struct *work)
5038 {
5039         struct drm_i915_file_private *file_priv =
5040                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5041
5042         atomic_set(&file_priv->rps_wait_boost, false);
5043 }
5044
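/*
 * Per-open-file setup: allocate the file private data and initialize its
 * request list, RPS-boost idle work and context idr.
 */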
5045 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5046 {
5047         struct drm_i915_file_private *file_priv;
5048
5049         DRM_DEBUG_DRIVER("\n");
5050
5051         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5052         if (!file_priv)
5053                 return -ENOMEM;
5054
5055         file->driver_priv = file_priv;
5056         file_priv->dev_priv = dev->dev_private;
5057
5058         spin_init(&file_priv->mm.lock, "i915_priv");
5059         INIT_LIST_HEAD(&file_priv->mm.request_list);
5060         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5061                           i915_gem_file_idle_work_handler);
5062
5063         idr_init(&file_priv->context_idr);
5064
5065         return 0;
5066 }
5067
5068 #if 0
5069 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5070 {
5071         if (!mutex_is_locked(mutex))
5072                 return false;
5073
5074 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5075         return mutex->owner == task;
5076 #else
5077         /* Since UP may be pre-empted, we cannot assume that we own the lock */
5078         return false;
5079 #endif
5080 }
5081 #endif
5082
5083 #if 0
5084 static unsigned long
5085 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
5086 {
5087         struct drm_i915_private *dev_priv =
5088                 container_of(shrinker,
5089                              struct drm_i915_private,
5090                              mm.inactive_shrinker);
5091         struct drm_device *dev = dev_priv->dev;
5092         struct drm_i915_gem_object *obj;
5093         bool unlock = true;
5094         unsigned long count;
5095
5096         if (!mutex_trylock(&dev->struct_mutex)) {
5097                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5098                         return 0;
5099
5100                 if (dev_priv->mm.shrinker_no_lock_stealing)
5101                         return 0;
5102
5103                 unlock = false;
5104         }
5105
5106         count = 0;
5107         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5108                 if (obj->pages_pin_count == 0)
5109                         count += obj->base.size >> PAGE_SHIFT;
5110
5111         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5112                 if (obj->active)
5113                         continue;
5114
5115                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
5116                         count += obj->base.size >> PAGE_SHIFT;
5117         }
5118
5119         if (unlock)
5120                 mutex_unlock(&dev->struct_mutex);
5121
5122         return count;
5123 }
5124 #endif
5125
5126 /* Helpers for per-address-space (VM) object tracking */
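/*
 * Return the start offset of @o within address space @vm.  Lookups
 * against the aliasing PPGTT are redirected to the global GTT, since the
 * two share the same addresses in this scheme.  Returns -1 (as an
 * unsigned long) when the object has no VMA in that VM.
 */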
5127 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5128                                   struct i915_address_space *vm)
5129 {
5130         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5131         struct i915_vma *vma;
5132
5133         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
5134                 vm = &dev_priv->gtt.base;
5135
5136         BUG_ON(list_empty(&o->vma_list));
5137         list_for_each_entry(vma, &o->vma_list, vma_link) {
5138                 if (vma->vm == vm)
5139                         return vma->node.start;
5141         }
5142         return -1;
5143 }
5144
5145 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5146                         struct i915_address_space *vm)
5147 {
5148         struct i915_vma *vma;
5149
5150         list_for_each_entry(vma, &o->vma_list, vma_link)
5151                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5152                         return true;
5153
5154         return false;
5155 }
5156
5157 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5158 {
5159         struct i915_vma *vma;
5160
5161         list_for_each_entry(vma, &o->vma_list, vma_link)
5162                 if (drm_mm_node_allocated(&vma->node))
5163                         return true;
5164
5165         return false;
5166 }
5167
5168 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5169                                 struct i915_address_space *vm)
5170 {
5171         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5172         struct i915_vma *vma;
5173
5174         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
5175                 vm = &dev_priv->gtt.base;
5176
5177         BUG_ON(list_empty(&o->vma_list));
5178
5179         list_for_each_entry(vma, &o->vma_list, vma_link)
5180                 if (vma->vm == vm)
5181                         return vma->node.size;
5182
5183         return 0;
5184 }
5185
5186 #if 0
5187 static unsigned long
5188 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5189 {
5190         struct drm_i915_private *dev_priv =
5191                 container_of(shrinker,
5192                              struct drm_i915_private,
5193                              mm.inactive_shrinker);
5194         struct drm_device *dev = dev_priv->dev;
5195         unsigned long freed;
5196         bool unlock = true;
5197
5198         if (!mutex_trylock(&dev->struct_mutex)) {
5199                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5200                         return SHRINK_STOP;
5201
5202                 if (dev_priv->mm.shrinker_no_lock_stealing)
5203                         return SHRINK_STOP;
5204
5205                 unlock = false;
5206         }
5207
5208         freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5209         if (freed < sc->nr_to_scan)
5210                 freed += __i915_gem_shrink(dev_priv,
5211                                            sc->nr_to_scan - freed,
5212                                            false);
5213         if (freed < sc->nr_to_scan)
5214                 freed += i915_gem_shrink_all(dev_priv);
5215
5216         if (unlock)
5217                 mutex_unlock(&dev->struct_mutex);
5218
5219         return freed;
5220 }
5221 #endif
5222
5223 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5224 {
5225         struct i915_vma *vma;
5226
5227         if (WARN_ON(list_empty(&obj->vma_list)))
5228                 return NULL;
5229
5230         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5231         if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
5232                 return NULL;
5233
5234         return vma;
5235 }