drm/i915: Sync i915_gem_pwrite_ioctl() with Linux 3.11
sys/dev/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  *
53  */
54
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
57 #include <machine/md_var.h>
58
59 #include <drm/drmP.h>
60 #include <drm/i915_drm.h>
61 #include "i915_drv.h"
62 #include "i915_trace.h"
63 #include "intel_drv.h"
64 #include <linux/shmem_fs.h>
65 #include <linux/slab.h>
66 #include <linux/pci.h>
67
68 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
69 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
70 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
71                                                     unsigned alignment,
72                                                     bool map_and_fenceable,
73                                                     bool nonblocking);
74 static int i915_gem_phys_pwrite(struct drm_device *dev,
75                                 struct drm_i915_gem_object *obj,
76                                 struct drm_i915_gem_pwrite *args,
77                                 struct drm_file *file);
78
79 static void i915_gem_write_fence(struct drm_device *dev, int reg,
80                                  struct drm_i915_gem_object *obj);
81 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
82                                          struct drm_i915_fence_reg *fence,
83                                          bool enable);
84
85 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
86 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
87
88 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
89 {
90         if (obj->tiling_mode)
91                 i915_gem_release_mmap(obj);
92
93         /* As we do not have an associated fence register, we will force
94          * a tiling change if we ever need to acquire one.
95          */
96         obj->fence_dirty = false;
97         obj->fence_reg = I915_FENCE_REG_NONE;
98 }
99
100 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
101 static void i915_gem_lowmem(void *arg);
102
103 /* some bookkeeping */
104 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
105                                   size_t size)
106 {
107         dev_priv->mm.object_count++;
108         dev_priv->mm.object_memory += size;
109 }
110
111 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
112                                      size_t size)
113 {
114         dev_priv->mm.object_count--;
115         dev_priv->mm.object_memory -= size;
116 }
117
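/*
 * Wait for an in-flight GPU reset to complete before touching GEM state.
 * Returns 0 immediately if no reset is pending (or the GPU is terminally
 * wedged), -EIO if the reset has not finished within 10 seconds, or the
 * error from an interrupted wait.
 */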
118 static int
119 i915_gem_wait_for_error(struct i915_gpu_error *error)
120 {
121         int ret;
122
123 #define EXIT_COND (!i915_reset_in_progress(error) || \
124                    i915_terminally_wedged(error))
125         if (EXIT_COND)
126                 return 0;
127
128         /*
129          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
130          * userspace. If it takes that long something really bad is going on and
131          * we should simply try to bail out and fail as gracefully as possible.
132          */
133         ret = wait_event_interruptible_timeout(error->reset_queue,
134                                                EXIT_COND,
135                                                10*HZ);
136         if (ret == 0) {
137                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
138                 return -EIO;
139         } else if (ret < 0) {
140                 return ret;
141         }
142 #undef EXIT_COND
143
144         return 0;
145 }
146
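/*
 * Take dev->struct_mutex, but only after any pending GPU reset has
 * finished.  A failed LK_SLEEPFAIL acquisition is reported as -EINTR so
 * ioctl callers can back out and restart.
 */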
147 int i915_mutex_lock_interruptible(struct drm_device *dev)
148 {
149         struct drm_i915_private *dev_priv = dev->dev_private;
150         int ret;
151
152         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
153         if (ret)
154                 return ret;
155
156         ret = lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_SLEEPFAIL);
157         if (ret)
158                 return -EINTR;
159
160         WARN_ON(i915_verify_lists(dev));
161         return 0;
162 }
163
164 static inline bool
165 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
166 {
167         return !obj->active;
168 }
169
170 int
171 i915_gem_init_ioctl(struct drm_device *dev, void *data,
172                     struct drm_file *file)
173 {
174         struct drm_i915_private *dev_priv = dev->dev_private;
175         struct drm_i915_gem_init *args = data;
176
177         if (drm_core_check_feature(dev, DRIVER_MODESET))
178                 return -ENODEV;
179
180         if (args->gtt_start >= args->gtt_end ||
181             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
182                 return -EINVAL;
183
184         /* GEM with user mode setting was never supported on ilk and later. */
185         if (INTEL_INFO(dev)->gen >= 5)
186                 return -ENODEV;
187
188         mutex_lock(&dev->struct_mutex);
189         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
190                                   args->gtt_end);
191         dev_priv->gtt.mappable_end = args->gtt_end;
192         mutex_unlock(&dev->struct_mutex);
193
194         return 0;
195 }
196
197 int
198 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
199                             struct drm_file *file)
200 {
201         struct drm_i915_private *dev_priv = dev->dev_private;
202         struct drm_i915_gem_get_aperture *args = data;
203         struct drm_i915_gem_object *obj;
204         size_t pinned;
205
206         pinned = 0;
207         mutex_lock(&dev->struct_mutex);
208         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
209                 if (obj->pin_count)
210                         pinned += obj->gtt_space->size;
211         mutex_unlock(&dev->struct_mutex);
212
213         args->aper_size = dev_priv->gtt.total;
214         args->aper_available_size = args->aper_size - pinned;
215
216         return 0;
217 }
218
219 void i915_gem_object_free(struct drm_i915_gem_object *obj)
220 {
221         kfree(obj);
222 }
223
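/*
 * Common backend for the create and dumb_create ioctls: round the size up
 * to a whole page, allocate the GEM object and a handle for it, then drop
 * the allocation reference (the handle now holds its own reference).
 */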
224 static int
225 i915_gem_create(struct drm_file *file,
226                 struct drm_device *dev,
227                 uint64_t size,
228                 uint32_t *handle_p)
229 {
230         struct drm_i915_gem_object *obj;
231         int ret;
232         u32 handle;
233
234         size = roundup(size, PAGE_SIZE);
235         if (size == 0)
236                 return -EINVAL;
237
238         /* Allocate the new object */
239         obj = i915_gem_alloc_object(dev, size);
240         if (obj == NULL)
241                 return -ENOMEM;
242
243         ret = drm_gem_handle_create(file, &obj->base, &handle);
244         if (ret) {
245                 drm_gem_object_release(&obj->base);
246                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
247                 i915_gem_object_free(obj);
248                 return ret;
249         }
250
251         /* drop reference from allocate - handle holds it now */
252         drm_gem_object_unreference(&obj->base);
253         trace_i915_gem_object_create(obj);
254
255         *handle_p = handle;
256         return 0;
257 }
258
259 int
260 i915_gem_dumb_create(struct drm_file *file,
261                      struct drm_device *dev,
262                      struct drm_mode_create_dumb *args)
263 {
264
265         /* have to work out size/pitch and return them */
266         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
267         args->size = args->pitch * args->height;
268         return i915_gem_create(file, dev,
269                                args->size, &args->handle);
270 }
271
272 int i915_gem_dumb_destroy(struct drm_file *file,
273                           struct drm_device *dev,
274                           uint32_t handle)
275 {
276
277         return drm_gem_handle_delete(file, handle);
278 }
279
280 /**
281  * Creates a new mm object and returns a handle to it.
282  */
283 int
284 i915_gem_create_ioctl(struct drm_device *dev, void *data,
285                       struct drm_file *file)
286 {
287         struct drm_i915_gem_create *args = data;
288
289         return i915_gem_create(file, dev,
290                                args->size, &args->handle);
291 }
292
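/*
 * Copy helpers for bit-17 swizzled objects: the copy proceeds in chunks
 * that never cross a 64-byte cacheline, and the GPU-side offset of each
 * chunk is XORed with 64 so the data is read from (or written to) the
 * location the swizzled layout actually uses.
 */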
293 static inline int
294 __copy_to_user_swizzled(char __user *cpu_vaddr,
295                         const char *gpu_vaddr, int gpu_offset,
296                         int length)
297 {
298         int ret, cpu_offset = 0;
299
300         while (length > 0) {
301                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
302                 int this_length = min(cacheline_end - gpu_offset, length);
303                 int swizzled_gpu_offset = gpu_offset ^ 64;
304
305                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
306                                      gpu_vaddr + swizzled_gpu_offset,
307                                      this_length);
308                 if (ret)
309                         return ret + length;
310
311                 cpu_offset += this_length;
312                 gpu_offset += this_length;
313                 length -= this_length;
314         }
315
316         return 0;
317 }
318
319 static inline int
320 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
321                           const char __user *cpu_vaddr,
322                           int length)
323 {
324         int ret, cpu_offset = 0;
325
326         while (length > 0) {
327                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
328                 int this_length = min(cacheline_end - gpu_offset, length);
329                 int swizzled_gpu_offset = gpu_offset ^ 64;
330
331                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
332                                        cpu_vaddr + cpu_offset,
333                                        this_length);
334                 if (ret)
335                         return ret + length;
336
337                 cpu_offset += this_length;
338                 gpu_offset += this_length;
339                 length -= this_length;
340         }
341
342         return 0;
343 }
344
345 /* Per-page copy function for the shmem pread fastpath.
346  * Flushes invalid cachelines before reading the target if
347  * needs_clflush is set. */
348 static int
349 shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
350                  char __user *user_data,
351                  bool page_do_bit17_swizzling, bool needs_clflush)
352 {
353         char *vaddr;
354         int ret;
355
356         if (unlikely(page_do_bit17_swizzling))
357                 return -EINVAL;
358
359         vaddr = kmap_atomic(page);
360         if (needs_clflush)
361                 drm_clflush_virt_range(vaddr + shmem_page_offset,
362                                        page_length);
363         ret = __copy_to_user_inatomic(user_data,
364                                       vaddr + shmem_page_offset,
365                                       page_length);
366         kunmap_atomic(vaddr);
367
368         return ret ? -EFAULT : 0;
369 }
370
371 static void
372 shmem_clflush_swizzled_range(char *addr, unsigned long length,
373                              bool swizzled)
374 {
375         if (unlikely(swizzled)) {
376                 unsigned long start = (unsigned long) addr;
377                 unsigned long end = (unsigned long) addr + length;
378
379                 /* For swizzling simply ensure that we always flush both
380                  * channels. Lame, but simple and it works. Swizzled
381                  * pwrite/pread is far from a hotpath - current userspace
382                  * doesn't use it at all. */
383                 start = round_down(start, 128);
384                 end = round_up(end, 128);
385
386                 drm_clflush_virt_range((void *)start, end - start);
387         } else {
388                 drm_clflush_virt_range(addr, length);
389         }
390
391 }
392
393 /* Only difference to the fast-path function is that this can handle bit17
394  * and uses non-atomic copy and kmap functions. */
395 static int
396 shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
397                  char __user *user_data,
398                  bool page_do_bit17_swizzling, bool needs_clflush)
399 {
400         char *vaddr;
401         int ret;
402
403         vaddr = kmap(page);
404         if (needs_clflush)
405                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
406                                              page_length,
407                                              page_do_bit17_swizzling);
408
409         if (page_do_bit17_swizzling)
410                 ret = __copy_to_user_swizzled(user_data,
411                                               vaddr, shmem_page_offset,
412                                               page_length);
413         else
414                 ret = __copy_to_user(user_data,
415                                      vaddr + shmem_page_offset,
416                                      page_length);
417         kunmap(page);
418
419         return ret ? -EFAULT : 0;
420 }
421
422 static inline void vm_page_reference(vm_page_t m)
423 {
424         vm_page_flag_set(m, PG_REFERENCED);
425 }
426
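/*
 * pread through the shmem backing store.  If the object is outside the CPU
 * read domain, uncached objects get their cachelines flushed first; the
 * backing pages are then pinned and each page is copied with the atomic
 * fastpath.  When that fails, or bit-17 swizzling is required, we drop
 * struct_mutex and fall back to the sleeping slowpath.
 */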
427 static int
428 i915_gem_shmem_pread(struct drm_device *dev,
429                      struct drm_i915_gem_object *obj,
430                      struct drm_i915_gem_pread *args,
431                      struct drm_file *file)
432 {
433         char __user *user_data;
434         ssize_t remain;
435         off_t offset;
436         int shmem_page_offset, page_length, ret = 0;
437         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
438         int hit_slowpath = 0;
439         int needs_clflush = 0;
440         int i;
441
442         user_data = (char __user *) (uintptr_t) args->data_ptr;
443         remain = args->size;
444
445         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
446
447         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
448                 /* If we're not in the cpu read domain, set ourselves into the gtt
449                  * read domain and manually flush cachelines (if required). This
450                  * optimizes for the case when the gpu will dirty the data
451                  * anyway again before the next pread happens. */
452                 if (obj->cache_level == I915_CACHE_NONE)
453                         needs_clflush = 1;
454                 if (obj->gtt_space) {
455                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
456                         if (ret)
457                                 return ret;
458                 }
459         }
460
461         ret = i915_gem_object_get_pages(obj);
462         if (ret)
463                 return ret;
464
465         i915_gem_object_pin_pages(obj);
466
467         offset = args->offset;
468
469         for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
470                 struct vm_page *page;
471
472                 if (i < offset >> PAGE_SHIFT)
473                         continue;
474
475                 if (remain <= 0)
476                         break;
477
478                 /* Operation in this page
479                  *
480                  * shmem_page_offset = offset within page in shmem file
481                  * page_length = bytes to copy for this page
482                  */
483                 shmem_page_offset = offset_in_page(offset);
484                 page_length = remain;
485                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
486                         page_length = PAGE_SIZE - shmem_page_offset;
487
488 #ifdef __linux__
489                 page = sg_page(sg);
490                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
491                         (page_to_phys(page) & (1 << 17)) != 0;
492 #else
493                 page = obj->pages[i];
494                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
495                         (VM_PAGE_TO_PHYS(page) & (1 << 17)) != 0;
496 #endif
497
498                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
499                                        user_data, page_do_bit17_swizzling,
500                                        needs_clflush);
501                 if (ret == 0)
502                         goto next_page;
503
504                 hit_slowpath = 1;
505                 mutex_unlock(&dev->struct_mutex);
506
507 #ifdef __linux__
508                 if (!prefaulted) {
509                         ret = fault_in_multipages_writeable(user_data, remain);
510                         /* Userspace is tricking us, but we've already clobbered
511                          * its pages with the prefault and promised to write the
512                          * data up to the first fault. Hence ignore any errors
513                          * and just continue. */
514                         (void)ret;
515                         prefaulted = 1;
516                 }
517 #endif
518
519                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
520                                        user_data, page_do_bit17_swizzling,
521                                        needs_clflush);
522
523                 mutex_lock(&dev->struct_mutex);
524
525 next_page:
526 #ifdef __linux__
527                 mark_page_accessed(page);
528 #endif
529
530                 if (ret)
531                         goto out;
532
533                 remain -= page_length;
534                 user_data += page_length;
535                 offset += page_length;
536         }
537
538 out:
539         i915_gem_object_unpin_pages(obj);
540
541         if (hit_slowpath) {
542                 /* Fixup: Kill any reinstated backing storage pages */
543                 if (obj->madv == __I915_MADV_PURGED)
544                         i915_gem_object_truncate(obj);
545         }
546
547         return ret;
548 }
549
550 /**
551  * Reads data from the object referenced by handle.
552  *
553  * On error, the contents of *data are undefined.
554  */
555 int
556 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
557                      struct drm_file *file)
558 {
559         struct drm_i915_gem_pread *args = data;
560         struct drm_i915_gem_object *obj;
561         int ret = 0;
562
563         if (args->size == 0)
564                 return 0;
565
566         ret = i915_mutex_lock_interruptible(dev);
567         if (ret)
568                 return ret;
569
570         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
571         if (&obj->base == NULL) {
572                 ret = -ENOENT;
573                 goto unlock;
574         }
575
576         /* Bounds check source.  */
577         if (args->offset > obj->base.size ||
578             args->size > obj->base.size - args->offset) {
579                 ret = -EINVAL;
580                 goto out;
581         }
582
583         ret = i915_gem_shmem_pread(dev, obj, args, file);
584 out:
585         drm_gem_object_unreference(&obj->base);
586 unlock:
587         mutex_unlock(&dev->struct_mutex);
588         return ret;
589 }
590
591 /* This is the fast write path which cannot handle
592  * page faults in the source data
593  */
594
595 static inline int
596 fast_user_write(struct io_mapping *mapping,
597                 loff_t page_base, int page_offset,
598                 char __user *user_data,
599                 int length)
600 {
601         void __iomem *vaddr_atomic;
602         void *vaddr;
603         unsigned long unwritten;
604
605         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
606         /* We can use the cpu mem copy function because this is X86. */
607         vaddr = (char __force*)vaddr_atomic + page_offset;
608         unwritten = __copy_from_user_inatomic_nocache(vaddr,
609                                                       user_data, length);
610         io_mapping_unmap_atomic(vaddr_atomic);
611         return unwritten;
612 }
613
614 /**
615  * This is the fast pwrite path, where we copy the data directly from the
616  * user into the GTT, uncached.
617  */
618 static int
619 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
620                          struct drm_i915_gem_object *obj,
621                          struct drm_i915_gem_pwrite *args,
622                          struct drm_file *file)
623 {
624         drm_i915_private_t *dev_priv = dev->dev_private;
625         ssize_t remain;
626         loff_t offset, page_base;
627         char __user *user_data;
628         int page_offset, page_length, ret;
629
630         ret = i915_gem_object_pin(obj, 0, true, true);
631         if (ret)
632                 goto out;
633
634         ret = i915_gem_object_set_to_gtt_domain(obj, true);
635         if (ret)
636                 goto out_unpin;
637
638         ret = i915_gem_object_put_fence(obj);
639         if (ret)
640                 goto out_unpin;
641
642         user_data = to_user_ptr(args->data_ptr);
643         remain = args->size;
644
645         offset = obj->gtt_offset + args->offset;
646
647         while (remain > 0) {
648                 /* Operation in this page
649                  *
650                  * page_base = page offset within aperture
651                  * page_offset = offset within page
652                  * page_length = bytes to copy for this page
653                  */
654                 page_base = offset & PAGE_MASK;
655                 page_offset = offset_in_page(offset);
656                 page_length = remain;
657                 if ((page_offset + remain) > PAGE_SIZE)
658                         page_length = PAGE_SIZE - page_offset;
659
660                 /* If we get a fault while copying data, then (presumably) our
661                  * source page isn't available.  Return the error and we'll
662                  * retry in the slow path.
663                  */
664                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
665                                     page_offset, user_data, page_length)) {
666                         ret = -EFAULT;
667                         goto out_unpin;
668                 }
669
670                 remain -= page_length;
671                 user_data += page_length;
672                 offset += page_length;
673         }
674
675 out_unpin:
676         i915_gem_object_unpin(obj);
677 out:
678         return ret;
679 }
680
681 #if 0
682 /* Per-page copy function for the shmem pwrite fastpath.
683  * Flushes invalid cachelines before writing to the target if
684  * needs_clflush_before is set and flushes out any written cachelines after
685  * writing if needs_clflush is set. */
686 static int
687 shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
688                   char __user *user_data,
689                   bool page_do_bit17_swizzling,
690                   bool needs_clflush_before,
691                   bool needs_clflush_after)
692 {
693         char *vaddr;
694         int ret;
695
696         if (unlikely(page_do_bit17_swizzling))
697                 return -EINVAL;
698
699         vaddr = kmap_atomic(page);
700         if (needs_clflush_before)
701                 drm_clflush_virt_range(vaddr + shmem_page_offset,
702                                        page_length);
703         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
704                                                 user_data,
705                                                 page_length);
706         if (needs_clflush_after)
707                 drm_clflush_virt_range(vaddr + shmem_page_offset,
708                                        page_length);
709         kunmap_atomic(vaddr);
710
711         return ret ? -EFAULT : 0;
712 }
713
714 /* Only difference to the fast-path function is that this can handle bit17
715  * and uses non-atomic copy and kmap functions. */
716 static int
717 shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
718                   char __user *user_data,
719                   bool page_do_bit17_swizzling,
720                   bool needs_clflush_before,
721                   bool needs_clflush_after)
722 {
723         char *vaddr;
724         int ret;
725
726         vaddr = kmap(page);
727         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
728                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
729                                              page_length,
730                                              page_do_bit17_swizzling);
731         if (page_do_bit17_swizzling)
732                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
733                                                 user_data,
734                                                 page_length);
735         else
736                 ret = __copy_from_user(vaddr + shmem_page_offset,
737                                        user_data,
738                                        page_length);
739         if (needs_clflush_after)
740                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
741                                              page_length,
742                                              page_do_bit17_swizzling);
743         kunmap(page);
744
745         return ret ? -EFAULT : 0;
746 }
747 #endif
748
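/*
 * DragonFly shmem pwrite path: each backing page is looked up through the
 * object's VM object, mapped via an sf_buf, and the user data is copied in
 * with copyin_nofault().  The bit-17 swizzle branch below adjusts the
 * destination offset per 64-byte chunk, but do_bit17_swizzling is
 * currently hard-wired to 0 in this path.
 */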
749 static int
750 i915_gem_shmem_pwrite(struct drm_device *dev,
751                       struct drm_i915_gem_object *obj,
752                       struct drm_i915_gem_pwrite *args,
753                       struct drm_file *file)
754 {
755         vm_object_t vm_obj;
756         vm_page_t m;
757         struct sf_buf *sf;
758         vm_offset_t mkva;
759         vm_pindex_t obj_pi;
760         int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
761
762         do_bit17_swizzling = 0;
763
764         obj->dirty = 1;
765         vm_obj = obj->base.vm_obj;
766         ret = 0;
767
768         VM_OBJECT_LOCK(vm_obj);
769         vm_object_pip_add(vm_obj, 1);
770         while (args->size > 0) {
771                 obj_pi = OFF_TO_IDX(args->offset);
772                 obj_po = args->offset & PAGE_MASK;
773
774                 m = shmem_read_mapping_page(vm_obj, obj_pi);
775                 VM_OBJECT_UNLOCK(vm_obj);
776
777                 sf = sf_buf_alloc(m);
778                 mkva = sf_buf_kva(sf);
779                 length = min(args->size, PAGE_SIZE - obj_po);
780                 while (length > 0) {
781                         if (do_bit17_swizzling &&
782                             (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
783                                 cnt = roundup2(obj_po + 1, 64);
784                                 cnt = min(cnt - obj_po, length);
785                                 swizzled_po = obj_po ^ 64;
786                         } else {
787                                 cnt = length;
788                                 swizzled_po = obj_po;
789                         }
790                         ret = -copyin_nofault(
791                             (void *)(uintptr_t)args->data_ptr,
792                             (char *)mkva + swizzled_po, cnt);
793                         if (ret != 0)
794                                 break;
795                         args->data_ptr += cnt;
796                         args->size -= cnt;
797                         length -= cnt;
798                         args->offset += cnt;
799                         obj_po += cnt;
800                 }
801                 sf_buf_free(sf);
802                 VM_OBJECT_LOCK(vm_obj);
803                 vm_page_dirty(m);
804                 vm_page_reference(m);
805                 vm_page_busy_wait(m, FALSE, "i915gem");
806                 vm_page_unwire(m, 1);
807                 vm_page_wakeup(m);
808
809                 if (ret != 0)
810                         break;
811         }
812         vm_object_pip_wakeup(vm_obj);
813         VM_OBJECT_UNLOCK(vm_obj);
814
815         return (ret);
816 }
817
818 /**
819  * Writes data to the object referenced by handle.
820  *
821  * On error, the contents of the buffer that were to be modified are undefined.
822  */
823 int
824 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
825                       struct drm_file *file)
826 {
827         struct drm_i915_gem_pwrite *args = data;
828         struct drm_i915_gem_object *obj;
829         int ret;
830
831         if (args->size == 0)
832                 return 0;
833
834 #if 0
835         if (!access_ok(VERIFY_READ,
836                        to_user_ptr(args->data_ptr),
837                        args->size))
838                 return -EFAULT;
839
840         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
841                                            args->size);
842         if (ret)
843                 return -EFAULT;
844 #endif
845
846         ret = i915_mutex_lock_interruptible(dev);
847         if (ret)
848                 return ret;
849
850         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
851         if (&obj->base == NULL) {
852                 ret = -ENOENT;
853                 goto unlock;
854         }
855
856         /* Bounds check destination. */
857         if (args->offset > obj->base.size ||
858             args->size > obj->base.size - args->offset) {
859                 ret = -EINVAL;
860                 goto out;
861         }
862
863         /* prime objects have no backing filp to GEM pread/pwrite
864          * pages from.
865          */
866 #if 0
867         if (!obj->base.filp) {
868                 ret = -EINVAL;
869                 goto out;
870         }
871 #endif
872
873         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
874
875         ret = -EFAULT;
876         /* We can only do the GTT pwrite on untiled buffers, as otherwise
877          * it would end up going through the fenced access, and we'll get
878          * different detiling behavior between reading and writing.
879          * pread/pwrite currently are reading and writing from the CPU
880          * perspective, requiring manual detiling by the client.
881          */
882         if (obj->phys_obj) {
883                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
884                 goto out;
885         }
886
887         if (obj->cache_level == I915_CACHE_NONE &&
888             obj->tiling_mode == I915_TILING_NONE &&
889             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
890                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
891                 /* Note that the gtt paths might fail with non-page-backed user
892                  * pointers (e.g. gtt mappings when moving data between
893                  * textures). Fall back to the shmem path in that case. */
894         }
895
896         if (ret == -EFAULT || ret == -ENOSPC)
897                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
898
899 out:
900         drm_gem_object_unreference(&obj->base);
901 unlock:
902         mutex_unlock(&dev->struct_mutex);
903         return ret;
904 }
905
906 int
907 i915_gem_check_wedge(struct i915_gpu_error *error,
908                      bool interruptible)
909 {
910         if (i915_reset_in_progress(error)) {
911                 /* Non-interruptible callers can't handle -EAGAIN, hence return
912                  * -EIO unconditionally for these. */
913                 if (!interruptible)
914                         return -EIO;
915
916                 /* Recovery complete, but the reset failed ... */
917                 if (i915_terminally_wedged(error))
918                         return -EIO;
919
920                 return -EAGAIN;
921         }
922
923         return 0;
924 }
925
926 /*
927  * Compare seqno against outstanding lazy request. Emit a request if they are
928  * equal.
929  */
930 static int
931 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
932 {
933         int ret;
934
935         DRM_LOCK_ASSERT(ring->dev);
936
937         ret = 0;
938         if (seqno == ring->outstanding_lazy_request)
939                 ret = i915_add_request(ring, NULL);
940
941         return ret;
942 }
943
944 /**
945  * __wait_seqno - wait until execution of seqno has finished
946  * @ring: the ring expected to report seqno
947  * @seqno: duh!
948  * @reset_counter: reset sequence associated with the given seqno
949  * @interruptible: do an interruptible wait (normally yes)
950  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
951  *
952  * Note: It is of utmost importance that the passed in seqno and reset_counter
953  * values have been read by the caller in an smp safe manner. Where read-side
954  * locks are involved, it is sufficient to read the reset_counter before
955  * unlocking the lock that protects the seqno. For lockless tricks, the
956  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
957  * inserted.
958  *
959  * Returns 0 if the seqno was found within the allotted time. Else returns the
960  * errno with remaining time filled in timeout argument.
961  */
962 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
963                         unsigned reset_counter,
964                         bool interruptible, struct timespec *timeout)
965 {
966         drm_i915_private_t *dev_priv = ring->dev->dev_private;
967         struct timespec before, now, wait_time={1,0};
968         unsigned long timeout_jiffies;
969         long end;
970         bool wait_forever = true;
971         int ret;
972
973         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
974                 return 0;
975
976         if (timeout != NULL) {
977                 wait_time = *timeout;
978                 wait_forever = false;
979         }
980
981         timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
982
983         if (WARN_ON(!ring->irq_get(ring)))
984                 return -ENODEV;
985
986         /* Record current time in case interrupted by signal, or wedged */
987         getrawmonotonic(&before);
988
989 #define EXIT_COND \
990         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
991          i915_reset_in_progress(&dev_priv->gpu_error) || \
992          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
993         do {
994                 if (interruptible)
995                         end = wait_event_interruptible_timeout(ring->irq_queue,
996                                                                EXIT_COND,
997                                                                timeout_jiffies);
998                 else
999                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1000                                                  timeout_jiffies);
1001
1002                 /* We need to check whether any gpu reset happened in between
1003                  * the caller grabbing the seqno and now ... */
1004                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1005                         end = -EAGAIN;
1006
1007                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1008                  * gone. */
1009                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1010                 if (ret)
1011                         end = ret;
1012         } while (end == 0 && wait_forever);
1013
1014         getrawmonotonic(&now);
1015
1016         ring->irq_put(ring);
1017 #undef EXIT_COND
1018
1019         if (timeout) {
1020                 struct timespec sleep_time = timespec_sub(now, before);
1021                 *timeout = timespec_sub(*timeout, sleep_time);
1022                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1023                         set_normalized_timespec(timeout, 0, 0);
1024         }
1025
1026         switch (end) {
1027         case -EIO:
1028         case -EAGAIN: /* Wedged */
1029         case -ERESTARTSYS: /* Signal */
1030                 return (int)end;
1031         case 0: /* Timeout */
1032                 return -ETIMEDOUT;      /* -ETIME on Linux */
1033         default: /* Completed */
1034                 WARN_ON(end < 0); /* We're not aware of other errors */
1035                 return 0;
1036         }
1037 }
1038
1039 /**
1040  * Waits for a sequence number to be signaled, and cleans up the
1041  * request and object lists appropriately for that event.
1042  */
1043 int
1044 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1045 {
1046         struct drm_device *dev = ring->dev;
1047         struct drm_i915_private *dev_priv = dev->dev_private;
1048         bool interruptible = dev_priv->mm.interruptible;
1049         int ret;
1050
1051         DRM_LOCK_ASSERT(dev);
1052         BUG_ON(seqno == 0);
1053
1054         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1055         if (ret)
1056                 return ret;
1057
1058         ret = i915_gem_check_olr(ring, seqno);
1059         if (ret)
1060                 return ret;
1061
1062         return __wait_seqno(ring, seqno,
1063                             atomic_read(&dev_priv->gpu_error.reset_counter),
1064                             interruptible, NULL);
1065 }
1066
1067 static int
1068 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1069                                      struct intel_ring_buffer *ring)
1070 {
1071         i915_gem_retire_requests_ring(ring);
1072
1073         /* Manually manage the write flush as we may not yet have
1074          * retired the buffer.
1075          *
1076          * Note that the last_write_seqno is always the earlier of
1077          * the two (read/write) seqno, so if we have successfully waited,
1078          * we know we have passed the last write.
1079          */
1080         obj->last_write_seqno = 0;
1081         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1082
1083         return 0;
1084 }
1085
1086 /**
1087  * Ensures that all rendering to the object has completed and the object is
1088  * safe to unbind from the GTT or access from the CPU.
1089  */
1090 static __must_check int
1091 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1092                                bool readonly)
1093 {
1094         struct intel_ring_buffer *ring = obj->ring;
1095         u32 seqno;
1096         int ret;
1097
1098         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1099         if (seqno == 0)
1100                 return 0;
1101
1102         ret = i915_wait_seqno(ring, seqno);
1103         if (ret)
1104                 return ret;
1105
1106         return i915_gem_object_wait_rendering__tail(obj, ring);
1107 }
1108
1109 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1110  * as the object state may change during this call.
1111  */
1112 static __must_check int
1113 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1114                                             bool readonly)
1115 {
1116         struct drm_device *dev = obj->base.dev;
1117         struct drm_i915_private *dev_priv = dev->dev_private;
1118         struct intel_ring_buffer *ring = obj->ring;
1119         unsigned reset_counter;
1120         u32 seqno;
1121         int ret;
1122
1123         DRM_LOCK_ASSERT(dev);
1124         BUG_ON(!dev_priv->mm.interruptible);
1125
1126         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1127         if (seqno == 0)
1128                 return 0;
1129
1130         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1131         if (ret)
1132                 return ret;
1133
1134         ret = i915_gem_check_olr(ring, seqno);
1135         if (ret)
1136                 return ret;
1137
1138         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1139         mutex_unlock(&dev->struct_mutex);
1140         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1141         mutex_lock(&dev->struct_mutex);
1142         if (ret)
1143                 return ret;
1144
1145         return i915_gem_object_wait_rendering__tail(obj, ring);
1146 }
1147
1148 /**
1149  * Called when user space prepares to use an object with the CPU, either
1150  * through the mmap ioctl's mapping or a GTT mapping.
1151  */
1152 int
1153 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1154                           struct drm_file *file)
1155 {
1156         struct drm_i915_gem_set_domain *args = data;
1157         struct drm_i915_gem_object *obj;
1158         uint32_t read_domains = args->read_domains;
1159         uint32_t write_domain = args->write_domain;
1160         int ret;
1161
1162         /* Only handle setting domains to types used by the CPU. */
1163         if (write_domain & I915_GEM_GPU_DOMAINS)
1164                 return -EINVAL;
1165
1166         if (read_domains & I915_GEM_GPU_DOMAINS)
1167                 return -EINVAL;
1168
1169         /* Having something in the write domain implies it's in the read
1170          * domain, and only that read domain.  Enforce that in the request.
1171          */
1172         if (write_domain != 0 && read_domains != write_domain)
1173                 return -EINVAL;
1174
1175         ret = i915_mutex_lock_interruptible(dev);
1176         if (ret)
1177                 return ret;
1178
1179         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1180         if (&obj->base == NULL) {
1181                 ret = -ENOENT;
1182                 goto unlock;
1183         }
1184
1185         /* Try to flush the object off the GPU without holding the lock.
1186          * We will repeat the flush holding the lock in the normal manner
1187          * to catch cases where we are gazumped.
1188          */
1189         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1190         if (ret)
1191                 goto unref;
1192
1193         if (read_domains & I915_GEM_DOMAIN_GTT) {
1194                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1195
1196                 /* Silently promote "you're not bound, there was nothing to do"
1197                  * to success, since the client was just asking us to
1198                  * make sure everything was done.
1199                  */
1200                 if (ret == -EINVAL)
1201                         ret = 0;
1202         } else {
1203                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1204         }
1205
1206 unref:
1207         drm_gem_object_unreference(&obj->base);
1208 unlock:
1209         mutex_unlock(&dev->struct_mutex);
1210         return ret;
1211 }
1212
1213 /**
1214  * Called when user space has done writes to this buffer
1215  */
1216 int
1217 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1218                          struct drm_file *file)
1219 {
1220         struct drm_i915_gem_sw_finish *args = data;
1221         struct drm_i915_gem_object *obj;
1222         int ret = 0;
1223
1224         ret = i915_mutex_lock_interruptible(dev);
1225         if (ret)
1226                 return ret;
1227         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1228         if (&obj->base == NULL) {
1229                 ret = -ENOENT;
1230                 goto unlock;
1231         }
1232
1233         /* Pinned buffers may be scanout, so flush the cache */
1234         if (obj->pin_count)
1235                 i915_gem_object_flush_cpu_write_domain(obj);
1236
1237         drm_gem_object_unreference(&obj->base);
1238 unlock:
1239         mutex_unlock(&dev->struct_mutex);
1240         return ret;
1241 }
1242
1243 /**
1244  * Maps the contents of an object, returning the address it is mapped
1245  * into.
1246  *
1247  * While the mapping holds a reference on the contents of the object, it doesn't
1248  * imply a ref on the object itself.
1249  */
1250 int
1251 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1252                     struct drm_file *file)
1253 {
1254         struct drm_i915_gem_mmap *args = data;
1255         struct drm_gem_object *obj;
1256         struct proc *p = curproc;
1257         vm_map_t map = &p->p_vmspace->vm_map;
1258         vm_offset_t addr;
1259         vm_size_t size;
1260         int error = 0, rv;
1261
1262         obj = drm_gem_object_lookup(dev, file, args->handle);
1263         if (obj == NULL)
1264                 return -ENOENT;
1265
1266         if (args->size == 0)
1267                 goto out;
1268
1269         size = round_page(args->size);
1270         if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
1271                 error = ENOMEM;
1272                 goto out;
1273         }
1274
1275         addr = 0;
1276         vm_object_hold(obj->vm_obj);
1277         vm_object_reference_locked(obj->vm_obj);
1278         vm_object_drop(obj->vm_obj);
1279         rv = vm_map_find(map, obj->vm_obj, NULL,
1280                          args->offset, &addr, args->size,
1281                          PAGE_SIZE, /* align */
1282                          TRUE, /* fitit */
1283                          VM_MAPTYPE_NORMAL, /* maptype */
1284                          VM_PROT_READ | VM_PROT_WRITE, /* prot */
1285                          VM_PROT_READ | VM_PROT_WRITE, /* max */
1286                          MAP_SHARED /* cow */);
1287         if (rv != KERN_SUCCESS) {
1288                 vm_object_deallocate(obj->vm_obj);
1289                 error = -vm_mmap_to_errno(rv);
1290         } else {
1291                 args->addr_ptr = (uint64_t)addr;
1292         }
1293 out:
1294         drm_gem_object_unreference(obj);
1295         return (error);
1296 }
1297
1298 int i915_intr_pf;
1299
1300 /**
1301  * i915_gem_fault - fault a page into the GTT
1302  * vm_obj, offset: VM object and byte offset of the faulting GTT mapping
1303  * prot: fault access type; mres: in/out page pointer for the pager
1304  *
1305  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1306  * from userspace.  The fault handler takes care of binding the object to
1307  * the GTT (if needed), allocating and programming a fence register (again,
1308  * only if needed based on whether the old reg is still valid or the object
1309  * is tiled) and inserting a new PTE into the faulting process.
1310  *
1311  * Note that the faulting process may involve evicting existing objects
1312  * from the GTT and/or fence registers to make room.  So performance may
1313  * suffer if the GTT working set is large or there are few fence registers
1314  * left.
1315  */
1316 int
1317 i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
1318     vm_page_t *mres)
1319 {
1320         struct drm_gem_object *gem_obj;
1321         struct drm_i915_gem_object *obj;
1322         struct drm_device *dev;
1323         drm_i915_private_t *dev_priv;
1324         vm_page_t m, oldm;
1325         int cause, ret;
1326         bool write;
1327
1328         gem_obj = vm_obj->handle;
1329         obj = to_intel_bo(gem_obj);
1330         dev = obj->base.dev;
1331         dev_priv = dev->dev_private;
1332 #if 0
1333         write = (prot & VM_PROT_WRITE) != 0;
1334 #else
1335         write = true;
1336 #endif
1337         vm_object_pip_add(vm_obj, 1);
1338
1339         /*
1340          * Remove the placeholder page inserted by vm_fault() from the
1341          * object before dropping the object lock. If
1342          * i915_gem_release_mmap() is active in parallel on this gem
1343          * object, then it owns the drm device sx and might find the
1344          * object, then it owns the drm device lock and might find the
1345          * i915_gem_release_mmap() sleeps waiting for the busy state
1346          * of the page cleared. We will be not able to acquire drm
1347          * of the page to be cleared. We will not be able to acquire the
1348          * drm device lock until i915_gem_release_mmap() is able to make
1349          * progress.
1350         if (*mres != NULL) {
1351                 oldm = *mres;
1352                 vm_page_remove(oldm);
1353                 *mres = NULL;
1354         } else
1355                 oldm = NULL;
1356 retry:
1357         VM_OBJECT_UNLOCK(vm_obj);
1358 unlocked_vmobj:
1359         cause = ret = 0;
1360         m = NULL;
1361
1362         if (i915_intr_pf) {
1363                 ret = i915_mutex_lock_interruptible(dev);
1364                 if (ret != 0) {
1365                         cause = 10;
1366                         goto out;
1367                 }
1368         } else
1369                 mutex_lock(&dev->struct_mutex);
1370
1371         /*
1372          * Since the object lock was dropped, another thread might have
1373          * faulted on the same GTT address and instantiated the
1374          * mapping for the page.  Recheck.
1375          */
1376         VM_OBJECT_LOCK(vm_obj);
1377         m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1378         if (m != NULL) {
1379                 if ((m->flags & PG_BUSY) != 0) {
1380                         mutex_unlock(&dev->struct_mutex);
1381 #if 0 /* XXX */
1382                         vm_page_sleep(m, "915pee");
1383 #endif
1384                         goto retry;
1385                 }
1386                 goto have_page;
1387         } else
1388                 VM_OBJECT_UNLOCK(vm_obj);
1389
1390         /* Access to snoopable pages through the GTT is incoherent. */
1391         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1392                 ret = -EINVAL;
1393                 goto unlock;
1394         }
1395
1396         /* Now bind it into the GTT if needed */
1397         if (!obj->map_and_fenceable) {
1398                 ret = i915_gem_object_unbind(obj);
1399                 if (ret != 0) {
1400                         cause = 20;
1401                         goto unlock;
1402                 }
1403         }
1404         if (!obj->gtt_space) {
1405                 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
1406                 if (ret != 0) {
1407                         cause = 30;
1408                         goto unlock;
1409                 }
1410
1411                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1412                 if (ret != 0) {
1413                         cause = 40;
1414                         goto unlock;
1415                 }
1416         }
1417
1418         if (obj->tiling_mode == I915_TILING_NONE)
1419                 ret = i915_gem_object_put_fence(obj);
1420         else
1421                 ret = i915_gem_object_get_fence(obj);
1422         if (ret != 0) {
1423                 cause = 50;
1424                 goto unlock;
1425         }
1426
1427         if (i915_gem_object_is_inactive(obj))
1428                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1429
1430         obj->fault_mappable = true;
1431         VM_OBJECT_LOCK(vm_obj);
1432         m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
1433             offset);
1434         if (m == NULL) {
1435                 cause = 60;
1436                 ret = -EFAULT;
1437                 goto unlock;
1438         }
1439         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1440             ("not fictitious %p", m));
1441         KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1442
1443         if ((m->flags & PG_BUSY) != 0) {
1444                 mutex_unlock(&dev->struct_mutex);
1445 #if 0 /* XXX */
1446                 vm_page_sleep(m, "915pbs");
1447 #endif
1448                 goto retry;
1449         }
1450         m->valid = VM_PAGE_BITS_ALL;
1451         vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
1452 have_page:
1453         *mres = m;
1454         vm_page_busy_try(m, false);
1455
1456         mutex_unlock(&dev->struct_mutex);
1457         if (oldm != NULL) {
1458                 vm_page_free(oldm);
1459         }
1460         vm_object_pip_wakeup(vm_obj);
1461         return (VM_PAGER_OK);
1462
1463 unlock:
1464         mutex_unlock(&dev->struct_mutex);
1465 out:
1466         KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
1467         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1468                 goto unlocked_vmobj;
1469         }
1470         VM_OBJECT_LOCK(vm_obj);
1471         vm_object_pip_wakeup(vm_obj);
1472         return (VM_PAGER_ERROR);
1473 }
1474
1475 /**
1476  * i915_gem_release_mmap - remove physical page mappings
1477  * @obj: obj in question
1478  *
1479  * Preserve the reservation of the mmapping with the DRM core code, but
1480  * relinquish ownership of the pages back to the system.
1481  *
1482  * It is vital that we remove the page mapping if we have mapped a tiled
1483  * object through the GTT and then lose the fence register due to
1484  * resource pressure. Similarly if the object has been moved out of the
1485  * aperture, then pages mapped into userspace must be revoked. Removing the
1486  * mapping will then trigger a page fault on the next user access, allowing
1487  * fixup by i915_gem_fault().
1488  */
1489 void
1490 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1491 {
1492         vm_object_t devobj;
1493         vm_page_t m;
1494         int i, page_count;
1495
1496         if (!obj->fault_mappable)
1497                 return;
1498
1499         devobj = cdev_pager_lookup(obj);
1500         if (devobj != NULL) {
1501                 page_count = OFF_TO_IDX(obj->base.size);
1502
1503                 VM_OBJECT_LOCK(devobj);
1504                 for (i = 0; i < page_count; i++) {
1505                         m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1506                         if (m == NULL)
1507                                 continue;
1508                         cdev_pager_free_page(devobj, m);
1509                 }
1510                 VM_OBJECT_UNLOCK(devobj);
1511                 vm_object_deallocate(devobj);
1512         }
1513
1514         obj->fault_mappable = false;
1515 }
1516
1517 uint32_t
1518 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1519 {
1520         uint32_t gtt_size;
1521
1522         if (INTEL_INFO(dev)->gen >= 4 ||
1523             tiling_mode == I915_TILING_NONE)
1524                 return size;
1525
1526         /* Previous chips need a power-of-two fence region when tiling */
1527         if (INTEL_INFO(dev)->gen == 3)
1528                 gtt_size = 1024*1024;
1529         else
1530                 gtt_size = 512*1024;
1531
1532         while (gtt_size < size)
1533                 gtt_size <<= 1;
1534
1535         return gtt_size;
1536 }
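
/*
 * Worked example (illustrative only, not used by the code): on gen3 a
 * 600 KiB tiled object fits within the 1 MiB starting size, so its fence
 * region is 1 MiB, while a 3 MiB tiled object doubles 1 MiB -> 2 MiB ->
 * 4 MiB and needs a 4 MiB region.  On gen2 the same 600 KiB object starts
 * from 512 KiB and rounds up to 1 MiB.  On gen4+, or for untiled objects,
 * the object size is returned unchanged.
 */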
1537
1538 /**
1539  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1540  * @obj: object to check
1541  *
1542  * Return the required GTT alignment for an object, taking into account
1543  * potential fence register mapping.
1544  */
1545 uint32_t
1546 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1547                            int tiling_mode, bool fenced)
1548 {
1549
1550         /*
1551          * Minimum alignment is 4k (GTT page size), but might be greater
1552          * if a fence register is needed for the object.
1553          */
1554         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1555             tiling_mode == I915_TILING_NONE)
1556                 return 4096;
1557
1558         /*
1559          * Previous chips need to be aligned to the size of the smallest
1560          * fence register that can contain the object.
1561          */
1562         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1563 }
1564
1565 int
1566 i915_gem_mmap_gtt(struct drm_file *file,
1567                   struct drm_device *dev,
1568                   uint32_t handle,
1569                   uint64_t *offset)
1570 {
1571         struct drm_i915_private *dev_priv = dev->dev_private;
1572         struct drm_i915_gem_object *obj;
1573         int ret;
1574
1575         ret = i915_mutex_lock_interruptible(dev);
1576         if (ret)
1577                 return ret;
1578
1579         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1580         if (&obj->base == NULL) {
1581                 ret = -ENOENT;
1582                 goto unlock;
1583         }
1584
1585         if (obj->base.size > dev_priv->gtt.mappable_end) {
1586                 ret = -E2BIG;
1587                 goto out;
1588         }
1589
1590         if (obj->madv != I915_MADV_WILLNEED) {
1591                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1592                 ret = -EINVAL;
1593                 goto out;
1594         }
1595
1596         ret = drm_gem_create_mmap_offset(&obj->base);
1597         if (ret)
1598                 goto out;
1599
1600         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1601             DRM_GEM_MAPPING_KEY;
1602 out:
1603         drm_gem_object_unreference(&obj->base);
1604 unlock:
1605         mutex_unlock(&dev->struct_mutex);
1606         return ret;
1607 }
1608
1609 /**
1610  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1611  * @dev: DRM device
1612  * @data: GTT mapping ioctl data
1613  * @file: GEM object info
1614  *
1615  * Simply returns the fake offset to userspace so it can mmap it.
1616  * The mmap call will end up in drm_gem_mmap(), which will set things
1617  * up so we can get faults in the handler above.
1618  *
1619  * The fault handler will take care of binding the object into the GTT
1620  * (since it may have been evicted to make room for something), allocating
1621  * a fence register, and mapping the appropriate aperture address into
1622  * userspace.
1623  */
1624 int
1625 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1626                         struct drm_file *file)
1627 {
1628         struct drm_i915_gem_mmap_gtt *args = data;
1629
1630         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1631 }
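
/*
 * Illustrative userspace usage of the GTT mmap path above (a sketch, not
 * part of the driver; "fd", "handle" and "size" are assumed to come from
 * an earlier GEM create call):
 */
#if 0
	struct drm_i915_gem_mmap_gtt mmap_arg;
	void *ptr;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
		/* The returned offset is a fake token, only valid for mmap(). */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, mmap_arg.offset);
	}
#endif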
1632
1633 /* Immediately discard the backing storage */
1634 static void
1635 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1636 {
1637         vm_object_t vm_obj;
1638
1639         vm_obj = obj->base.vm_obj;
1640         VM_OBJECT_LOCK(vm_obj);
1641         vm_object_page_remove(vm_obj, 0, 0, false);
1642         VM_OBJECT_UNLOCK(vm_obj);
1643         obj->madv = __I915_MADV_PURGED;
1644 }
1645
1646 static inline int
1647 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1648 {
1649         return obj->madv == I915_MADV_DONTNEED;
1650 }
1651
1652 static void
1653 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1654 {
1655         vm_page_t m;
1656         int page_count, i;
1657
1658         BUG_ON(obj->madv == __I915_MADV_PURGED);
1659
1660         if (obj->tiling_mode != I915_TILING_NONE)
1661                 i915_gem_object_save_bit_17_swizzle(obj);
1662         if (obj->madv == I915_MADV_DONTNEED)
1663                 obj->dirty = 0;
1664         page_count = obj->base.size / PAGE_SIZE;
1665         VM_OBJECT_LOCK(obj->base.vm_obj);
1666 #if GEM_PARANOID_CHECK_GTT
1667         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1668 #endif
1669         for (i = 0; i < page_count; i++) {
1670                 m = obj->pages[i];
1671                 if (obj->dirty)
1672                         vm_page_dirty(m);
1673                 if (obj->madv == I915_MADV_WILLNEED)
1674                         vm_page_reference(m);
1675                 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1676                 vm_page_unwire(obj->pages[i], 1);
1677                 vm_page_wakeup(obj->pages[i]);
1678         }
1679         VM_OBJECT_UNLOCK(obj->base.vm_obj);
1680         obj->dirty = 0;
1681         drm_free(obj->pages, M_DRM);
1682         obj->pages = NULL;
1683 }
1684
1685 int
1686 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1687 {
1688         const struct drm_i915_gem_object_ops *ops = obj->ops;
1689
1690         if (obj->pages == NULL)
1691                 return 0;
1692
1693         BUG_ON(obj->gtt_space);
1694
1695         if (obj->pages_pin_count)
1696                 return -EBUSY;
1697
1698         /* ->put_pages might need to allocate memory for the bit17 swizzle
1699          * array, hence protect them from being reaped by removing them from gtt
1700          * lists early. */
1701         list_del(&obj->global_list);
1702
1703         ops->put_pages(obj);
1704         obj->pages = NULL;
1705
1706         if (i915_gem_object_is_purgeable(obj))
1707                 i915_gem_object_truncate(obj);
1708
1709         return 0;
1710 }
1711
1712 static int
1713 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1714 {
1715         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1716         struct drm_device *dev;
1717         vm_object_t vm_obj;
1718         int page_count, i, j;
1719         struct vm_page *page;
1720
1721         dev = obj->base.dev;
1722         KASSERT(obj->pages == NULL, ("Obj already has pages"));
1723         page_count = obj->base.size / PAGE_SIZE;
1724         obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1725             M_WAITOK);
1726
1727         vm_obj = obj->base.vm_obj;
1728         VM_OBJECT_LOCK(vm_obj);
1729
1730         for (i = 0; i < page_count; i++) {
1731                 page = shmem_read_mapping_page(vm_obj, i);
1732                 if (IS_ERR(page)) {
1733                         i915_gem_purge(dev_priv, page_count);
1734                         goto err_pages;
1735                 }
1736
1737                 obj->pages[i] = page;
1738         }
1739
1740         VM_OBJECT_UNLOCK(vm_obj);
1741         if (i915_gem_object_needs_bit17_swizzle(obj))
1742                 i915_gem_object_do_bit_17_swizzle(obj);
1743
1744         return 0;
1745
1746 err_pages:
1747         for (j = 0; j < i; j++) {
1748                 page = obj->pages[j];
1749                 vm_page_busy_wait(page, FALSE, "i915gem");
1750                 vm_page_unwire(page, 0);
1751                 vm_page_wakeup(page);
1752         }
1753         VM_OBJECT_UNLOCK(vm_obj);
1754         drm_free(obj->pages, M_DRM);
1755         obj->pages = NULL;
1756         return (-EIO);
1757 }
1758
1759 /* Ensure that the associated pages are gathered from the backing storage
1760  * and pinned into our object. i915_gem_object_get_pages() may be called
1761  * multiple times before they are released by a single call to
1762  * i915_gem_object_put_pages() - once the pages are no longer referenced
1763  * either as a result of memory pressure (reaping pages under the shrinker)
1764  * or as the object is itself released.
1765  */
1766 int
1767 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1768 {
1769         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1770         const struct drm_i915_gem_object_ops *ops = obj->ops;
1771         int ret;
1772
1773         if (obj->pages)
1774                 return 0;
1775
1776         if (obj->madv != I915_MADV_WILLNEED) {
1777                 DRM_ERROR("Attempting to obtain a purgeable object\n");
1778                 return -EINVAL;
1779         }
1780
1781         BUG_ON(obj->pages_pin_count);
1782
1783         ret = ops->get_pages(obj);
1784         if (ret)
1785                 return ret;
1786
1787         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1788         return 0;
1789 }
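
/*
 * Typical in-kernel usage (a sketch, assuming the usual
 * i915_gem_object_pin_pages()/i915_gem_object_unpin_pages() helpers from
 * i915_drv.h): callers that need the backing pages to stay resident
 * bracket their access with a pin, e.g.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		... access obj->pages[] ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 *
 * A non-zero obj->pages_pin_count makes i915_gem_object_put_pages()
 * return -EBUSY above, so pinned pages are never reaped.
 */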
1790
1791 void
1792 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1793                                struct intel_ring_buffer *ring)
1794 {
1795         struct drm_device *dev = obj->base.dev;
1796         struct drm_i915_private *dev_priv = dev->dev_private;
1797         u32 seqno = intel_ring_get_seqno(ring);
1798
1799         BUG_ON(ring == NULL);
1800         if (obj->ring != ring && obj->last_write_seqno) {
1801                 /* Keep the seqno relative to the current ring */
1802                 obj->last_write_seqno = seqno;
1803         }
1804         obj->ring = ring;
1805
1806         /* Add a reference if we're newly entering the active list. */
1807         if (!obj->active) {
1808                 drm_gem_object_reference(&obj->base);
1809                 obj->active = 1;
1810         }
1811
1812         /* Move from whatever list we were on to the tail of execution. */
1813         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1814         list_move_tail(&obj->ring_list, &ring->active_list);
1815
1816         obj->last_read_seqno = seqno;
1817
1818         if (obj->fenced_gpu_access) {
1819                 obj->last_fenced_seqno = seqno;
1820
1821                 /* Bump MRU to take account of the delayed flush */
1822                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1823                         struct drm_i915_fence_reg *reg;
1824
1825                         reg = &dev_priv->fence_regs[obj->fence_reg];
1826                         list_move_tail(&reg->lru_list,
1827                                        &dev_priv->mm.fence_list);
1828                 }
1829         }
1830 }
1831
1832 static void
1833 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1834 {
1835         struct drm_device *dev = obj->base.dev;
1836         struct drm_i915_private *dev_priv = dev->dev_private;
1837
1838         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1839         BUG_ON(!obj->active);
1840
1841         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1842
1843         list_del_init(&obj->ring_list);
1844         obj->ring = NULL;
1845
1846         obj->last_read_seqno = 0;
1847         obj->last_write_seqno = 0;
1848         obj->base.write_domain = 0;
1849
1850         obj->last_fenced_seqno = 0;
1851         obj->fenced_gpu_access = false;
1852
1853         obj->active = 0;
1854         drm_gem_object_unreference(&obj->base);
1855
1856         WARN_ON(i915_verify_lists(dev));
1857 }
1858
1859 static int
1860 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1861 {
1862         struct drm_i915_private *dev_priv = dev->dev_private;
1863         struct intel_ring_buffer *ring;
1864         int ret, i, j;
1865
1866         /* Carefully retire all requests without writing to the rings */
1867         for_each_ring(ring, dev_priv, i) {
1868                 ret = intel_ring_idle(ring);
1869                 if (ret)
1870                         return ret;
1871         }
1872         i915_gem_retire_requests(dev);
1873
1874         /* Finally reset hw state */
1875         for_each_ring(ring, dev_priv, i) {
1876                 intel_ring_init_seqno(ring, seqno);
1877
1878                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1879                         ring->sync_seqno[j] = 0;
1880         }
1881
1882         return 0;
1883 }
1884
1885 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1886 {
1887         struct drm_i915_private *dev_priv = dev->dev_private;
1888         int ret;
1889
1890         if (seqno == 0)
1891                 return -EINVAL;
1892
1893         /* The seqno in the HWS page needs to be set to less than what we
1894          * will inject into the ring.
1895          */
1896         ret = i915_gem_init_seqno(dev, seqno - 1);
1897         if (ret)
1898                 return ret;
1899
1900         /* Carefully set the last_seqno value so that wrap
1901          * detection still works
1902          */
1903         dev_priv->next_seqno = seqno;
1904         dev_priv->last_seqno = seqno - 1;
1905         if (dev_priv->last_seqno == 0)
1906                 dev_priv->last_seqno--;
1907
1908         return 0;
1909 }
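
/*
 * Worked example (illustrative only): i915_gem_set_seqno(dev, 1) first
 * idles the rings and seeds them with seqno 0 via i915_gem_init_seqno(),
 * then sets next_seqno = 1 and last_seqno = 0.  Because 0 is reserved,
 * last_seqno is wrapped back to 0xffffffff so that the wrap-detection
 * comparisons against last_seqno keep working.
 */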
1910
1911 int
1912 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1913 {
1914         struct drm_i915_private *dev_priv = dev->dev_private;
1915
1916         /* reserve 0 for non-seqno */
1917         if (dev_priv->next_seqno == 0) {
1918                 int ret = i915_gem_init_seqno(dev, 0);
1919                 if (ret)
1920                         return ret;
1921
1922                 dev_priv->next_seqno = 1;
1923         }
1924
1925         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
1926         return 0;
1927 }
1928
1929 int __i915_add_request(struct intel_ring_buffer *ring,
1930                        struct drm_file *file,
1931                        struct drm_i915_gem_object *obj,
1932                        u32 *out_seqno)
1933 {
1934         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1935         struct drm_i915_gem_request *request;
1936         u32 request_ring_position, request_start;
1937         int was_empty;
1938         int ret;
1939
1940         request_start = intel_ring_get_tail(ring);
1941         /*
1942          * Emit any outstanding flushes - execbuf can fail to emit the flush
1943          * after having emitted the batchbuffer command. Hence we need to fix
1944          * things up similar to emitting the lazy request. The difference here
1945          * is that the flush _must_ happen before the next request, no matter
1946          * what.
1947          */
1948         ret = intel_ring_flush_all_caches(ring);
1949         if (ret)
1950                 return ret;
1951
1952         request = kmalloc(sizeof(*request), M_DRM, M_WAITOK);
1953         if (request == NULL)
1954                 return -ENOMEM;
1955
1956
1957         /* Record the position of the start of the request so that
1958          * should we detect the updated seqno part-way through the
1959          * GPU processing the request, we never over-estimate the
1960          * position of the head.
1961          */
1962         request_ring_position = intel_ring_get_tail(ring);
1963
1964         ret = ring->add_request(ring);
1965         if (ret) {
1966                 kfree(request);
1967                 return ret;
1968         }
1969
1970         request->seqno = intel_ring_get_seqno(ring);
1971         request->ring = ring;
1972         request->head = request_start;
1973         request->tail = request_ring_position;
1974         request->ctx = ring->last_context;
1975         request->batch_obj = obj;
1976
1977         /* Whilst this request exists, batch_obj will be on the
1978          * active_list, and so will hold the active reference. Only when this
1979          * request is retired will the batch_obj be moved onto the
1980          * inactive_list and lose its active reference. Hence we do not need
1981          * to explicitly hold another reference here.
1982          */
1983
1984         if (request->ctx)
1985                 i915_gem_context_reference(request->ctx);
1986
1987         request->emitted_jiffies = jiffies;
1988         was_empty = list_empty(&ring->request_list);
1989         list_add_tail(&request->list, &ring->request_list);
1990         request->file_priv = NULL;
1991
1992         if (file) {
1993                 struct drm_i915_file_private *file_priv = file->driver_priv;
1994
1995                 spin_lock(&file_priv->mm.lock);
1996                 request->file_priv = file_priv;
1997                 list_add_tail(&request->client_list,
1998                               &file_priv->mm.request_list);
1999                 spin_unlock(&file_priv->mm.lock);
2000         }
2001
2002         ring->outstanding_lazy_request = 0;
2003
2004         if (!dev_priv->mm.suspended) {
2005                 if (i915_enable_hangcheck) {
2006                         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2007                                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2008                 }
2009                 if (was_empty) {
2010                         queue_delayed_work(dev_priv->wq,
2011                                            &dev_priv->mm.retire_work,
2012                                            round_jiffies_up_relative(hz));
2013                         intel_mark_busy(dev_priv->dev);
2014                 }
2015         }
2016
2017         if (out_seqno)
2018                 *out_seqno = request->seqno;
2019         return 0;
2020 }
2021
2022 static inline void
2023 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2024 {
2025         struct drm_i915_file_private *file_priv = request->file_priv;
2026
2027         if (!file_priv)
2028                 return;
2029
2030         spin_lock(&file_priv->mm.lock);
2031         if (request->file_priv) {
2032                 list_del(&request->client_list);
2033                 request->file_priv = NULL;
2034         }
2035         spin_unlock(&file_priv->mm.lock);
2036 }
2037
2038 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
2039 {
2040         if (acthd >= obj->gtt_offset &&
2041             acthd < obj->gtt_offset + obj->base.size)
2042                 return true;
2043
2044         return false;
2045 }
2046
2047 static bool i915_head_inside_request(const u32 acthd_unmasked,
2048                                      const u32 request_start,
2049                                      const u32 request_end)
2050 {
2051         const u32 acthd = acthd_unmasked & HEAD_ADDR;
2052
2053         if (request_start < request_end) {
2054                 if (acthd >= request_start && acthd < request_end)
2055                         return true;
2056         } else if (request_start > request_end) {
2057                 if (acthd >= request_start || acthd < request_end)
2058                         return true;
2059         }
2060
2061         return false;
2062 }
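
/*
 * Worked example (illustrative values): for a request that wrapped the
 * ring, say request_start == 0xf000 and request_end == 0x0100, a masked
 * ACTHD of 0xf800 or 0x0040 counts as inside the request, while 0x8000
 * does not.  For a non-wrapped request (start < end) only the plain
 * start <= acthd < end test applies.
 */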
2063
2064 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2065                                 const u32 acthd, bool *inside)
2066 {
2067         /* There is a possibility that the unmasked head address, while
2068          * pointing inside the ring, also matches the batch_obj address range.
2069          * However, this is extremely unlikely.
2070          */
2071
2072         if (request->batch_obj) {
2073                 if (i915_head_inside_object(acthd, request->batch_obj)) {
2074                         *inside = true;
2075                         return true;
2076                 }
2077         }
2078
2079         if (i915_head_inside_request(acthd, request->head, request->tail)) {
2080                 *inside = false;
2081                 return true;
2082         }
2083
2084         return false;
2085 }
2086
2087 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2088                                   struct drm_i915_gem_request *request,
2089                                   u32 acthd)
2090 {
2091         struct i915_ctx_hang_stats *hs = NULL;
2092         bool inside, guilty;
2093
2094         /* Innocent until proven guilty */
2095         guilty = false;
2096
2097         if (ring->hangcheck.action != wait &&
2098             i915_request_guilty(request, acthd, &inside)) {
2099                 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
2100                           ring->name,
2101                           inside ? "inside" : "flushing",
2102                           request->batch_obj ?
2103                           request->batch_obj->gtt_offset : 0,
2104                           request->ctx ? request->ctx->id : 0,
2105                           acthd);
2106
2107                 guilty = true;
2108         }
2109
2110         /* If contexts are disabled or this is the default context, use
2111          * file_priv->hang_stats.
2112          */
2113         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2114                 hs = &request->ctx->hang_stats;
2115         else if (request->file_priv)
2116                 hs = &request->file_priv->hang_stats;
2117
2118         if (hs) {
2119                 if (guilty)
2120                         hs->batch_active++;
2121                 else
2122                         hs->batch_pending++;
2123         }
2124 }
2125
2126 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2127 {
2128         list_del(&request->list);
2129         i915_gem_request_remove_from_client(request);
2130
2131         if (request->ctx)
2132                 i915_gem_context_unreference(request->ctx);
2133
2134         kfree(request);
2135 }
2136
2137 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2138                                       struct intel_ring_buffer *ring)
2139 {
2140         u32 completed_seqno;
2141         u32 acthd;
2142
2143         acthd = intel_ring_get_active_head(ring);
2144         completed_seqno = ring->get_seqno(ring, false);
2145
2146         while (!list_empty(&ring->request_list)) {
2147                 struct drm_i915_gem_request *request;
2148
2149                 request = list_first_entry(&ring->request_list,
2150                                            struct drm_i915_gem_request,
2151                                            list);
2152
2153                 if (request->seqno > completed_seqno)
2154                         i915_set_reset_status(ring, request, acthd);
2155
2156                 i915_gem_free_request(request);
2157         }
2158
2159         while (!list_empty(&ring->active_list)) {
2160                 struct drm_i915_gem_object *obj;
2161
2162                 obj = list_first_entry(&ring->active_list,
2163                                        struct drm_i915_gem_object,
2164                                        ring_list);
2165
2166                 i915_gem_object_move_to_inactive(obj);
2167         }
2168 }
2169
2170 void i915_gem_restore_fences(struct drm_device *dev)
2171 {
2172         struct drm_i915_private *dev_priv = dev->dev_private;
2173         int i;
2174
2175         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2176                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2177
2178                 /*
2179                  * Commit delayed tiling changes if we have an object still
2180                  * attached to the fence, otherwise just clear the fence.
2181                  */
2182                 if (reg->obj) {
2183                         i915_gem_object_update_fence(reg->obj, reg,
2184                                                      reg->obj->tiling_mode);
2185                 } else {
2186                         i915_gem_write_fence(dev, i, NULL);
2187                 }
2188         }
2189 }
2190
2191 void i915_gem_reset(struct drm_device *dev)
2192 {
2193         struct drm_i915_private *dev_priv = dev->dev_private;
2194         struct drm_i915_gem_object *obj;
2195         struct intel_ring_buffer *ring;
2196         int i;
2197
2198         for_each_ring(ring, dev_priv, i)
2199                 i915_gem_reset_ring_lists(dev_priv, ring);
2200
2201         /* Move everything out of the GPU domains to ensure we do any
2202          * necessary invalidation upon reuse.
2203          */
2204         list_for_each_entry(obj,
2205                             &dev_priv->mm.inactive_list,
2206                             mm_list)
2207         {
2208                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2209         }
2210
2211         i915_gem_restore_fences(dev);
2212 }
2213
2214 /**
2215  * This function clears the request list as sequence numbers are passed.
2216  */
2217 void
2218 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2219 {
2220         uint32_t seqno;
2221
2222         if (list_empty(&ring->request_list))
2223                 return;
2224
2225         WARN_ON(i915_verify_lists(ring->dev));
2226
2227         seqno = ring->get_seqno(ring, true);
2228
2229         while (!list_empty(&ring->request_list)) {
2230                 struct drm_i915_gem_request *request;
2231
2232                 request = list_first_entry(&ring->request_list,
2233                                            struct drm_i915_gem_request,
2234                                            list);
2235
2236                 if (!i915_seqno_passed(seqno, request->seqno))
2237                         break;
2238
2239                 /* We know the GPU must have read the request to have
2240                  * sent us the seqno + interrupt, so use the position
2241                  * of tail of the request to update the last known position
2242                  * of the GPU head.
2243                  */
2244                 ring->last_retired_head = request->tail;
2245
2246                 i915_gem_free_request(request);
2247         }
2248
2249         /* Move any buffers on the active list that are no longer referenced
2250          * by the ringbuffer to the flushing/inactive lists as appropriate.
2251          */
2252         while (!list_empty(&ring->active_list)) {
2253                 struct drm_i915_gem_object *obj;
2254
2255                 obj = list_first_entry(&ring->active_list,
2256                                       struct drm_i915_gem_object,
2257                                       ring_list);
2258
2259                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2260                         break;
2261
2262                 i915_gem_object_move_to_inactive(obj);
2263         }
2264
2265         if (unlikely(ring->trace_irq_seqno &&
2266                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2267                 ring->irq_put(ring);
2268                 ring->trace_irq_seqno = 0;
2269         }
2270
2271 }
2272
2273 void
2274 i915_gem_retire_requests(struct drm_device *dev)
2275 {
2276         drm_i915_private_t *dev_priv = dev->dev_private;
2277         struct intel_ring_buffer *ring;
2278         int i;
2279
2280         for_each_ring(ring, dev_priv, i)
2281                 i915_gem_retire_requests_ring(ring);
2282 }
2283
2284 static long
2285 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
2286                   bool purgeable_only)
2287 {
2288         struct drm_i915_gem_object *obj, *next;
2289         long count = 0;
2290
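        /*
         * Note: both reclaim loops below are compiled out (#if 0), so this
         * function currently always returns 0 and i915_gem_purge() is
         * effectively a no-op in this port.
         */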
2291         list_for_each_entry_safe(obj, next,
2292                                  &dev_priv->mm.unbound_list,
2293                                  global_list) {
2294 #if 0
2295                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2296                     i915_gem_object_put_pages(obj) == 0) {
2297                         count += obj->base.size >> PAGE_SHIFT;
2298                         if (count >= target)
2299                                 return count;
2300                 }
2301 #endif
2302         }
2303
2304         list_for_each_entry_safe(obj, next,
2305                                  &dev_priv->mm.inactive_list,
2306                                  mm_list) {
2307 #if 0
2308                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2309                     i915_gem_object_unbind(obj) == 0 &&
2310                     i915_gem_object_put_pages(obj) == 0) {
2311                         count += obj->base.size >> PAGE_SHIFT;
2312                         if (count >= target)
2313                                 return count;
2314                 }
2315 #endif
2316         }
2317
2318         return count;
2319 }
2320
2321 static long
2322 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2323 {
2324         return __i915_gem_shrink(dev_priv, target, true);
2325 }
2326
2327 static void
2328 i915_gem_retire_work_handler(struct work_struct *work)
2329 {
2330         drm_i915_private_t *dev_priv;
2331         struct drm_device *dev;
2332         struct intel_ring_buffer *ring;
2333         bool idle;
2334         int i;
2335
2336         dev_priv = container_of(work, drm_i915_private_t,
2337                                 mm.retire_work.work);
2338         dev = dev_priv->dev;
2339
2340         /* Come back later if the device is busy... */
2341         if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT)) {
2342                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2343                                    round_jiffies_up_relative(hz));
2344                 return;
2345         }
2346
2347         i915_gem_retire_requests(dev);
2348
2349         /* Send a periodic flush down the ring so we don't hold onto GEM
2350          * objects indefinitely.
2351          */
2352         idle = true;
2353         for_each_ring(ring, dev_priv, i) {
2354                 if (ring->gpu_caches_dirty)
2355                         i915_add_request(ring, NULL);
2356
2357                 idle &= list_empty(&ring->request_list);
2358         }
2359
2360         if (!dev_priv->mm.suspended && !idle)
2361                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2362                                    round_jiffies_up_relative(hz));
2363         if (idle)
2364                 intel_mark_idle(dev);
2365
2366         mutex_unlock(&dev->struct_mutex);
2367 }
2368 /**
2369  * Ensures that an object will eventually become non-busy by flushing any required
2370  * write domains, emitting any outstanding lazy request and retiring any
2371  * completed requests.
2372  */
2373 static int
2374 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2375 {
2376         int ret;
2377
2378         if (obj->active) {
2379                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2380                 if (ret)
2381                         return ret;
2382
2383                 i915_gem_retire_requests_ring(obj->ring);
2384         }
2385
2386         return 0;
2387 }
2388
2389 /**
2390  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2391  * @DRM_IOCTL_ARGS: standard ioctl arguments
2392  *
2393  * Returns 0 if successful, else an error is returned with the remaining time in
2394  * the timeout parameter.
2395  *  -ETIME: object is still busy after timeout
2396  *  -ERESTARTSYS: signal interrupted the wait
2397  *  -ENOENT: object doesn't exist
2398  * Also possible, but rare:
2399  *  -EAGAIN: GPU wedged
2400  *  -ENOMEM: damn
2401  *  -ENODEV: Internal IRQ fail
2402  *  -E?: The add request failed
2403  *
2404  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2405  * non-zero timeout parameter the wait ioctl will wait for the given number of
2406  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2407  * without holding struct_mutex the object may become re-busied before this
2408  * function completes. A similar but shorter * race condition exists in the busy
2409  * ioctl
2410  */
2411 int
2412 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2413 {
2414         drm_i915_private_t *dev_priv = dev->dev_private;
2415         struct drm_i915_gem_wait *args = data;
2416         struct drm_i915_gem_object *obj;
2417         struct intel_ring_buffer *ring = NULL;
2418         struct timespec timeout_stack, *timeout = NULL;
2419         unsigned reset_counter;
2420         u32 seqno = 0;
2421         int ret = 0;
2422
2423         if (args->timeout_ns >= 0) {
2424                 timeout_stack = ns_to_timespec(args->timeout_ns);
2425                 timeout = &timeout_stack;
2426         }
2427
2428         ret = i915_mutex_lock_interruptible(dev);
2429         if (ret)
2430                 return ret;
2431
2432         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2433         if (&obj->base == NULL) {
2434                 mutex_unlock(&dev->struct_mutex);
2435                 return -ENOENT;
2436         }
2437
2438         /* Need to make sure the object gets inactive eventually. */
2439         ret = i915_gem_object_flush_active(obj);
2440         if (ret)
2441                 goto out;
2442
2443         if (obj->active) {
2444                 seqno = obj->last_read_seqno;
2445                 ring = obj->ring;
2446         }
2447
2448         if (seqno == 0)
2449                  goto out;
2450
2451         /* Do this after OLR check to make sure we make forward progress polling
2452          * on this IOCTL with a 0 timeout (like busy ioctl)
2453          */
2454         if (!args->timeout_ns) {
2455                 ret = -ETIMEDOUT;
2456                 goto out;
2457         }
2458
2459         drm_gem_object_unreference(&obj->base);
2460         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2461         mutex_unlock(&dev->struct_mutex);
2462
2463         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2464         if (timeout)
2465                 args->timeout_ns = timespec_to_ns(timeout);
2466         return ret;
2467
2468 out:
2469         drm_gem_object_unreference(&obj->base);
2470         mutex_unlock(&dev->struct_mutex);
2471         return ret;
2472 }
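
/*
 * Illustrative userspace usage of the wait ioctl above (a sketch, not part
 * of the driver; "fd" and "handle" are assumed to come from the caller):
 */
#if 0
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = 1000000000LL;	/* give up after roughly one second */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0) {
		/*
		 * The object is still busy or the wait failed; when the wait
		 * actually ran, wait.timeout_ns has been updated with the
		 * remaining time.  A timeout of 0 turns the call into a pure
		 * busy check, as the comment above explains.
		 */
	}
#endif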
2473
2474 /**
2475  * i915_gem_object_sync - sync an object to a ring.
2476  *
2477  * @obj: object which may be in use on another ring.
2478  * @to: ring we wish to use the object on. May be NULL.
2479  *
2480  * This code is meant to abstract object synchronization with the GPU.
2481  * Calling with NULL implies synchronizing the object with the CPU
2482  * rather than a particular GPU ring.
2483  *
2484  * Returns 0 if successful, else propagates up the lower layer error.
2485  */
2486 int
2487 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2488                      struct intel_ring_buffer *to)
2489 {
2490         struct intel_ring_buffer *from = obj->ring;
2491         u32 seqno;
2492         int ret, idx;
2493
2494         if (from == NULL || to == from)
2495                 return 0;
2496
2497         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2498                 return i915_gem_object_wait_rendering(obj, false);
2499
2500         idx = intel_ring_sync_index(from, to);
2501
2502         seqno = obj->last_read_seqno;
2503         if (seqno <= from->sync_seqno[idx])
2504                 return 0;
2505
2506         ret = i915_gem_check_olr(obj->ring, seqno);
2507         if (ret)
2508                 return ret;
2509
2510         ret = to->sync_to(to, from, seqno);
2511         if (!ret)
2512                 /* We use last_read_seqno because sync_to()
2513                  * might have just caused seqno wrap under
2514                  * the radar.
2515                  */
2516                 from->sync_seqno[idx] = obj->last_read_seqno;
2517
2518         return ret;
2519 }
2520
2521 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2522 {
2523         u32 old_write_domain, old_read_domains;
2524
2525         /* Force a pagefault for domain tracking on next user access */
2526         i915_gem_release_mmap(obj);
2527
2528         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2529                 return;
2530
2531         /* Wait for any direct GTT access to complete */
2532         cpu_mfence();
2533
2534         old_read_domains = obj->base.read_domains;
2535         old_write_domain = obj->base.write_domain;
2536
2537         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2538         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
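        /*
         * Note: old_read_domains/old_write_domain are captured above for the
         * domain-change tracepoint that the Linux original emits at this
         * point; nothing consumes them in this port yet.
         */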
2539
2540 }
2541
2542 /**
2543  * Unbinds an object from the GTT aperture.
2544  */
2545 int
2546 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2547 {
2548         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2549         int ret;
2550
2551         if (obj->gtt_space == NULL)
2552                 return 0;
2553
2554         if (obj->pin_count)
2555                 return -EBUSY;
2556
2557         BUG_ON(obj->pages == NULL);
2558
2559         ret = i915_gem_object_finish_gpu(obj);
2560         if (ret)
2561                 return ret;
2562         /* Continue on if we fail due to EIO: the GPU is hung, so we
2563          * should be safe, and we need to clean up or else we might
2564          * cause memory corruption through use-after-free.
2565          */
2566
2567         i915_gem_object_finish_gtt(obj);
2568
2569         /* Move the object to the CPU domain to ensure that
2570          * any possible CPU writes while it's not in the GTT
2571          * are flushed when we go to remap it.
2572          */
2573         if (ret == 0)
2574                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2575         if (ret == -ERESTARTSYS)
2576                 return ret;
2577         if (ret) {
2578                 /* In the event of a disaster, abandon all caches and
2579                  * hope for the best.
2580                  */
2581                 i915_gem_clflush_object(obj);
2582                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2583         }
2584
2585         /* release the fence reg _after_ flushing */
2586         ret = i915_gem_object_put_fence(obj);
2587         if (ret)
2588                 return ret;
2589
2590         if (obj->has_global_gtt_mapping)
2591                 i915_gem_gtt_unbind_object(obj);
2592         if (obj->has_aliasing_ppgtt_mapping) {
2593                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2594                 obj->has_aliasing_ppgtt_mapping = 0;
2595         }
2596         i915_gem_gtt_finish_object(obj);
2597
2598         i915_gem_object_put_pages_gtt(obj);
2599
2600         list_del_init(&obj->global_list);
2601         list_del_init(&obj->mm_list);
2602         /* Avoid an unnecessary call to unbind on rebind. */
2603         obj->map_and_fenceable = true;
2604
2605         drm_mm_put_block(obj->gtt_space);
2606         obj->gtt_space = NULL;
2607         obj->gtt_offset = 0;
2608
2609         if (i915_gem_object_is_purgeable(obj))
2610                 i915_gem_object_truncate(obj);
2611
2612         return ret;
2613 }
2614
2615 int i915_gpu_idle(struct drm_device *dev)
2616 {
2617         drm_i915_private_t *dev_priv = dev->dev_private;
2618         struct intel_ring_buffer *ring;
2619         int ret, i;
2620
2621         /* Flush everything onto the inactive list. */
2622         for_each_ring(ring, dev_priv, i) {
2623                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2624                 if (ret)
2625                         return ret;
2626
2627                 ret = intel_ring_idle(ring);
2628                 if (ret)
2629                         return ret;
2630         }
2631
2632         return 0;
2633 }
2634
2635 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2636                                  struct drm_i915_gem_object *obj)
2637 {
2638         drm_i915_private_t *dev_priv = dev->dev_private;
2639         int fence_reg;
2640         int fence_pitch_shift;
2641
2642         if (INTEL_INFO(dev)->gen >= 6) {
2643                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2644                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2645         } else {
2646                 fence_reg = FENCE_REG_965_0;
2647                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2648         }
2649
2650         fence_reg += reg * 8;
2651
2652         /* To work around incoherency with non-atomic 64-bit register updates,
2653          * we split the 64-bit update into two 32-bit writes. In order
2654          * for a partial fence not to be evaluated between writes, we
2655          * precede the update with write to turn off the fence register,
2656          * and only enable the fence as the last step.
2657          *
2658          * For extra levels of paranoia, we make sure each step lands
2659          * before applying the next step.
2660          */
2661         I915_WRITE(fence_reg, 0);
2662         POSTING_READ(fence_reg);
2663
2664         if (obj) {
2665                 u32 size = obj->gtt_space->size;
2666                 uint64_t val;
2667
2668                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2669                                  0xfffff000) << 32;
2670                 val |= obj->gtt_offset & 0xfffff000;
2671                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2672                 if (obj->tiling_mode == I915_TILING_Y)
2673                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2674                 val |= I965_FENCE_REG_VALID;
2675
2676                 I915_WRITE(fence_reg + 4, val >> 32);
2677                 POSTING_READ(fence_reg + 4);
2678
2679                 I915_WRITE(fence_reg + 0, val);
2680                 POSTING_READ(fence_reg);
2681         } else {
2682                 I915_WRITE(fence_reg + 4, 0);
2683                 POSTING_READ(fence_reg + 4);
2684         }
2685 }
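
/*
 * Worked example (illustrative numbers): for an X-tiled object at
 * gtt_offset 0x00100000 with size 0x00040000 and a 512-byte stride, the
 * value assembled above is
 *
 *	((u64)0x0013f000 << 32)			last page of the object
 *	| 0x00100000				first page of the object
 *	| ((512 / 128 - 1) << fence_pitch_shift)
 *	| I965_FENCE_REG_VALID
 *
 * and it is written high dword first, with the register cleared
 * beforehand, so the hardware never observes a half-updated fence.
 */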
2686
2687 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2688                                  struct drm_i915_gem_object *obj)
2689 {
2690         drm_i915_private_t *dev_priv = dev->dev_private;
2691         u32 val;
2692
2693         if (obj) {
2694                 u32 size = obj->gtt_space->size;
2695                 int pitch_val;
2696                 int tile_width;
2697
2698                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2699                      (size & -size) != size ||
2700                      (obj->gtt_offset & (size - 1)),
2701                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2702                      obj->gtt_offset, obj->map_and_fenceable, size);
2703
2704                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2705                         tile_width = 128;
2706                 else
2707                         tile_width = 512;
2708
2709                 /* Note: the pitch must be a power-of-two number of tile widths */
2710                 pitch_val = obj->stride / tile_width;
2711                 pitch_val = ffs(pitch_val) - 1;
2712
2713                 val = obj->gtt_offset;
2714                 if (obj->tiling_mode == I915_TILING_Y)
2715                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2716                 val |= I915_FENCE_SIZE_BITS(size);
2717                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2718                 val |= I830_FENCE_REG_VALID;
2719         } else
2720                 val = 0;
2721
2722         if (reg < 8)
2723                 reg = FENCE_REG_830_0 + reg * 4;
2724         else
2725                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2726
2727         I915_WRITE(reg, val);
2728         POSTING_READ(reg);
2729 }
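
/*
 * Worked example (illustrative numbers): a Y-tiled object with a 2048-byte
 * stride on a chip with 128-byte Y tiles gives
 * pitch_val = ffs(2048 / 128) - 1 = ffs(16) - 1 = 4, i.e. the log2 of the
 * pitch expressed in tile widths, which is what the register expects.
 */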
2730
2731 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2732                                 struct drm_i915_gem_object *obj)
2733 {
2734         drm_i915_private_t *dev_priv = dev->dev_private;
2735         uint32_t val;
2736
2737         if (obj) {
2738                 u32 size = obj->gtt_space->size;
2739                 uint32_t pitch_val;
2740
2741                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2742                      (size & -size) != size ||
2743                      (obj->gtt_offset & (size - 1)),
2744                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2745                      obj->gtt_offset, size);
2746
2747                 pitch_val = obj->stride / 128;
2748                 pitch_val = ffs(pitch_val) - 1;
2749
2750                 val = obj->gtt_offset;
2751                 if (obj->tiling_mode == I915_TILING_Y)
2752                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2753                 val |= I830_FENCE_SIZE_BITS(size);
2754                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2755                 val |= I830_FENCE_REG_VALID;
2756         } else
2757                 val = 0;
2758
2759         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2760         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2761 }
2762
2763 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2764 {
2765         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2766 }
2767
2768 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2769                                  struct drm_i915_gem_object *obj)
2770 {
2771         struct drm_i915_private *dev_priv = dev->dev_private;
2772
2773         /* Ensure that all CPU reads are completed before installing a fence
2774          * and all writes before removing the fence.
2775          */
2776         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2777                 cpu_mfence();
2778
2779         WARN(obj && (!obj->stride || !obj->tiling_mode),
2780              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2781              obj->stride, obj->tiling_mode);
2782
2783         switch (INTEL_INFO(dev)->gen) {
2784         case 7:
2785         case 6:
2786         case 5:
2787         case 4: i965_write_fence_reg(dev, reg, obj); break;
2788         case 3: i915_write_fence_reg(dev, reg, obj); break;
2789         case 2: i830_write_fence_reg(dev, reg, obj); break;
2790         default: BUG();
2791         }
2792
2793         /* And similarly be paranoid that no direct access to this region
2794          * is reordered to before the fence is installed.
2795          */
2796         if (i915_gem_object_needs_mb(obj))
2797                 cpu_mfence();
2798 }
2799
2800 static inline int fence_number(struct drm_i915_private *dev_priv,
2801                                struct drm_i915_fence_reg *fence)
2802 {
2803         return fence - dev_priv->fence_regs;
2804 }
2805
2806 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2807                                          struct drm_i915_fence_reg *fence,
2808                                          bool enable)
2809 {
2810         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2811         int reg = fence_number(dev_priv, fence);
2812
2813         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2814
2815         if (enable) {
2816                 obj->fence_reg = reg;
2817                 fence->obj = obj;
2818                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2819         } else {
2820                 obj->fence_reg = I915_FENCE_REG_NONE;
2821                 fence->obj = NULL;
2822                 list_del_init(&fence->lru_list);
2823         }
2824         obj->fence_dirty = false;
2825 }
2826
2827 static int
2828 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2829 {
2830         if (obj->last_fenced_seqno) {
2831                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2832                 if (ret)
2833                         return ret;
2834
2835                 obj->last_fenced_seqno = 0;
2836         }
2837
2838         obj->fenced_gpu_access = false;
2839         return 0;
2840 }
2841
2842 int
2843 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2844 {
2845         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2846         struct drm_i915_fence_reg *fence;
2847         int ret;
2848
2849         ret = i915_gem_object_wait_fence(obj);
2850         if (ret)
2851                 return ret;
2852
2853         if (obj->fence_reg == I915_FENCE_REG_NONE)
2854                 return 0;
2855
2856         fence = &dev_priv->fence_regs[obj->fence_reg];
2857
2858         i915_gem_object_fence_lost(obj);
2859         i915_gem_object_update_fence(obj, fence, false);
2860
2861         return 0;
2862 }
2863
2864 static struct drm_i915_fence_reg *
2865 i915_find_fence_reg(struct drm_device *dev)
2866 {
2867         struct drm_i915_private *dev_priv = dev->dev_private;
2868         struct drm_i915_fence_reg *reg, *avail;
2869         int i;
2870
2871         /* First try to find a free reg */
2872         avail = NULL;
2873         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2874                 reg = &dev_priv->fence_regs[i];
2875                 if (!reg->obj)
2876                         return reg;
2877
2878                 if (!reg->pin_count)
2879                         avail = reg;
2880         }
2881
2882         if (avail == NULL)
2883                 return NULL;
2884
2885         /* None available, try to steal one or wait for a user to finish */
2886         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2887                 if (reg->pin_count)
2888                         continue;
2889
2890                 return reg;
2891         }
2892
2893         return NULL;
2894 }
2895
2896 /**
2897  * i915_gem_object_get_fence - set up fencing for an object
2898  * @obj: object to map through a fence reg
2899  *
2900  * When mapping objects through the GTT, userspace wants to be able to write
2901  * to them without having to worry about swizzling if the object is tiled.
2902  * This function walks the fence regs looking for a free one for @obj,
2903  * stealing one if it can't find any.
2904  *
2905  * It then sets up the reg based on the object's properties: address, pitch
2906  * and tiling format.
2907  *
2908  * For an untiled surface, this removes any existing fence.
2909  */
2910 int
2911 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2912 {
2913         struct drm_device *dev = obj->base.dev;
2914         struct drm_i915_private *dev_priv = dev->dev_private;
2915         bool enable = obj->tiling_mode != I915_TILING_NONE;
2916         struct drm_i915_fence_reg *reg;
2917         int ret;
2918
2919         /* Have we updated the tiling parameters upon the object and so
2920          * will need to serialise the write to the associated fence register?
2921          */
2922         if (obj->fence_dirty) {
2923                 ret = i915_gem_object_wait_fence(obj);
2924                 if (ret)
2925                         return ret;
2926         }
2927
2928         /* Just update our place in the LRU if our fence is getting reused. */
2929         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2930                 reg = &dev_priv->fence_regs[obj->fence_reg];
2931                 if (!obj->fence_dirty) {
2932                         list_move_tail(&reg->lru_list,
2933                                        &dev_priv->mm.fence_list);
2934                         return 0;
2935                 }
2936         } else if (enable) {
2937                 reg = i915_find_fence_reg(dev);
2938                 if (reg == NULL)
2939                         return -EDEADLK;
2940
2941                 if (reg->obj) {
2942                         struct drm_i915_gem_object *old = reg->obj;
2943
2944                         ret = i915_gem_object_wait_fence(old);
2945                         if (ret)
2946                                 return ret;
2947
2948                         i915_gem_object_fence_lost(old);
2949                 }
2950         } else
2951                 return 0;
2952
2953         i915_gem_object_update_fence(obj, reg, enable);
2954
2955         return 0;
2956 }
2957
2958 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2959                                      struct drm_mm_node *gtt_space,
2960                                      unsigned long cache_level)
2961 {
2962         struct drm_mm_node *other;
2963
2964         /* On non-LLC machines we have to be careful when putting differing
2965          * types of snoopable memory together to avoid the prefetcher
2966          * crossing memory domains and dying.
2967          */
2968         if (HAS_LLC(dev))
2969                 return true;
2970
2971         if (gtt_space == NULL)
2972                 return true;
2973
2974         if (list_empty(&gtt_space->node_list))
2975                 return true;
2976
2977         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2978         if (other->allocated && !other->hole_follows && other->color != cache_level)
2979                 return false;
2980
2981         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2982         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2983                 return false;
2984
2985         return true;
2986 }
2987
2988 static void i915_gem_verify_gtt(struct drm_device *dev)
2989 {
2990 #if WATCH_GTT
2991         struct drm_i915_private *dev_priv = dev->dev_private;
2992         struct drm_i915_gem_object *obj;
2993         int err = 0;
2994
2995         list_for_each_entry(obj, &dev_priv->mm.global_list, global_list) {
2996                 if (obj->gtt_space == NULL) {
2997                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
2998                         err++;
2999                         continue;
3000                 }
3001
3002                 if (obj->cache_level != obj->gtt_space->color) {
3003                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3004                                obj->gtt_space->start,
3005                                obj->gtt_space->start + obj->gtt_space->size,
3006                                obj->cache_level,
3007                                obj->gtt_space->color);
3008                         err++;
3009                         continue;
3010                 }
3011
3012                 if (!i915_gem_valid_gtt_space(dev,
3013                                               obj->gtt_space,
3014                                               obj->cache_level)) {
3015                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3016                                obj->gtt_space->start,
3017                                obj->gtt_space->start + obj->gtt_space->size,
3018                                obj->cache_level);
3019                         err++;
3020                         continue;
3021                 }
3022         }
3023
3024         WARN_ON(err);
3025 #endif
3026 }
3027
3028 /**
3029  * Finds free space in the GTT aperture and binds the object there.
3030  */
3031 static int
3032 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3033                             unsigned alignment,
3034                             bool map_and_fenceable,
3035                             bool nonblocking)
3036 {
3037         struct drm_device *dev = obj->base.dev;
3038         drm_i915_private_t *dev_priv = dev->dev_private;
3039         struct drm_mm_node *node;
3040         u32 size, fence_size, fence_alignment, unfenced_alignment;
3041         bool mappable, fenceable;
3042         size_t gtt_max = map_and_fenceable ?
3043                 dev_priv->gtt.mappable_end : dev_priv->gtt.total;
3044         int ret;
3045
3046         fence_size = i915_gem_get_gtt_size(dev,
3047                                            obj->base.size,
3048                                            obj->tiling_mode);
3049         fence_alignment = i915_gem_get_gtt_alignment(dev,
3050                                                      obj->base.size,
3051                                                      obj->tiling_mode, true);
3052         unfenced_alignment =
3053                 i915_gem_get_gtt_alignment(dev,
3054                                            obj->base.size,
3055                                            obj->tiling_mode, false);
3056
3057         if (alignment == 0)
3058                 alignment = map_and_fenceable ? fence_alignment :
3059                                                 unfenced_alignment;
3060         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3061                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3062                 return -EINVAL;
3063         }
3064
3065         size = map_and_fenceable ? fence_size : obj->base.size;
3066
3067         /* If the object is bigger than the entire aperture, reject it early
3068          * before evicting everything in a vain attempt to find space.
3069          */
3070         if (obj->base.size > gtt_max) {
3071                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3072                           obj->base.size,
3073                           map_and_fenceable ? "mappable" : "total",
3074                           gtt_max);
3075                 return -E2BIG;
3076         }
3077
3078  search_free:
3079         if (map_and_fenceable)
3080                 node = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
3081                                                           size, alignment, obj->cache_level,
3082                                                           0, dev_priv->gtt.mappable_end,
3083                                                           false);
3084         else
3085                 node = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
3086                                                       size, alignment, obj->cache_level,
3087                                                       false);
3088         if (node != NULL) {
3089                 if (map_and_fenceable)
3090                         obj->gtt_space =
3091                                 drm_mm_get_block_range_generic(node,
3092                                                                size, alignment, obj->cache_level,
3093                                                                0, dev_priv->gtt.mappable_end,
3094                                                                false);
3095                 else
3096                         obj->gtt_space =
3097                                 drm_mm_get_block_generic(node,
3098                                                          size, alignment, obj->cache_level,
3099                                                          false);
3100         }
3101         if (obj->gtt_space == NULL) {
3102                 ret = i915_gem_evict_something(dev, size, alignment,
3103                                                obj->cache_level,
3104                                                map_and_fenceable,
3105                                                nonblocking);
3106                 if (ret)
3107                         return ret;
3108
3109                 goto search_free;
3110         }
3111
3112         /*
3113          * NOTE: i915_gem_object_get_pages_gtt() cannot
3114          *       return ENOMEM, since we used VM_ALLOC_RETRY.
3115          */
3116         ret = i915_gem_object_get_pages_gtt(obj);
3117         if (ret != 0) {
3118                 drm_mm_put_block(obj->gtt_space);
3119                 obj->gtt_space = NULL;
3120                 return ret;
3121         }
3122
3123         i915_gem_gtt_bind_object(obj, obj->cache_level);
3132
3133         list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
3134         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3135
3136         obj->gtt_offset = obj->gtt_space->start;
3137
3138         fenceable =
3139                 obj->gtt_space->size == fence_size &&
3140                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
3141
3142         mappable =
3143                 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
3144
3145         obj->map_and_fenceable = mappable && fenceable;
3146
3147         trace_i915_gem_object_bind(obj, map_and_fenceable);
3148         i915_gem_verify_gtt(dev);
3149         return 0;
3150 }
3151
3152 void
3153 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3154 {
3155
3156         /* If we don't have a page list set up, then we're not pinned
3157          * to GPU, and we can ignore the cache flush because it'll happen
3158          * again at bind time.
3159          */
3160         if (obj->pages == NULL)
3161                 return;
3162
3163         /*
3164          * Stolen memory is always coherent with the GPU as it is explicitly
3165          * marked as wc by the system, or the system is cache-coherent.
3166          */
3167         if (obj->stolen)
3168                 return;
3169
3170         /* If the GPU is snooping the contents of the CPU cache,
3171          * we do not need to manually clear the CPU cache lines.  However,
3172          * the caches are only snooped when the render cache is
3173          * flushed/invalidated.  As we always have to emit invalidations
3174          * and flushes when moving into and out of the RENDER domain, correct
3175          * snooping behaviour occurs naturally as the result of our domain
3176          * tracking.
3177          */
3178         if (obj->cache_level != I915_CACHE_NONE)
3179                 return;
3180
3181         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3182 }
3183
3184 /** Flushes the GTT write domain for the object if it's dirty. */
3185 static void
3186 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3187 {
3188         uint32_t old_write_domain;
3189
3190         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3191                 return;
3192
3193         /* No actual flushing is required for the GTT write domain.  Writes
3194          * to it immediately go to main memory as far as we know, so there's
3195          * no chipset flush.  It also doesn't land in render cache.
3196          *
3197          * However, we do have to enforce the order so that all writes through
3198          * the GTT land before any writes to the device, such as updates to
3199          * the GATT itself.
3200          */
3201         cpu_sfence();
3202
3203         old_write_domain = obj->base.write_domain;
3204         obj->base.write_domain = 0;
3205 }
3206
3207 /** Flushes the CPU write domain for the object if it's dirty. */
3208 static void
3209 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3210 {
3211         uint32_t old_write_domain;
3212
3213         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3214                 return;
3215
3216         i915_gem_clflush_object(obj);
3217         i915_gem_chipset_flush(obj->base.dev);
3218         old_write_domain = obj->base.write_domain;
3219         obj->base.write_domain = 0;
3220 }
3221
3222 /**
3223  * Moves a single object to the GTT read, and possibly write domain.
3224  *
3225  * This function returns when the move is complete, including waiting on
3226  * flushes to occur.
3227  */
3228 int
3229 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3230 {
3231         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3232         uint32_t old_write_domain, old_read_domains;
3233         int ret;
3234
3235         /* Not valid to be called on unbound objects. */
3236         if (obj->gtt_space == NULL)
3237                 return -EINVAL;
3238
3239         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3240                 return 0;
3241
3242         ret = i915_gem_object_wait_rendering(obj, !write);
3243         if (ret)
3244                 return ret;
3245
3246         i915_gem_object_flush_cpu_write_domain(obj);
3247
3248         /* Serialise direct access to this object with the barriers for
3249          * coherent writes from the GPU, by effectively invalidating the
3250          * GTT domain upon first access.
3251          */
3252         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3253                 cpu_mfence();
3254
3255         old_write_domain = obj->base.write_domain;
3256         old_read_domains = obj->base.read_domains;
3257
3258         /* It should now be out of any other write domains, and we can update
3259          * the domain values for our changes.
3260          */
3261         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3262         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3263         if (write) {
3264                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3265                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3266                 obj->dirty = 1;
3267         }
3268
3269         /* And bump the LRU for this access */
3270         if (i915_gem_object_is_inactive(obj))
3271                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3272
3273         return 0;
3274 }
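
/*
 * Usage sketch (illustrative only; `obj' is a hypothetical, already looked-up
 * object and dev->struct_mutex is assumed to be held): a caller that wants to
 * write through the GTT aperture pins the object, moves it into the GTT
 * domain for writing, and unpins it when done.
 *
 *	ret = i915_gem_object_pin(obj, 0, true, false);
 *	if (ret == 0) {
 *		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *		if (ret == 0) {
 *			... write through the aperture at obj->gtt_offset ...
 *		}
 *		i915_gem_object_unpin(obj);
 *	}
 */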
3275
3276 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3277                                     enum i915_cache_level cache_level)
3278 {
3279         struct drm_device *dev = obj->base.dev;
3280         drm_i915_private_t *dev_priv = dev->dev_private;
3281         int ret;
3282
3283         if (obj->cache_level == cache_level)
3284                 return 0;
3285
3286         if (obj->pin_count) {
3287                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3288                 return -EBUSY;
3289         }
3290
3291         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3292                 ret = i915_gem_object_unbind(obj);
3293                 if (ret)
3294                         return ret;
3295         }
3296
3297         if (obj->gtt_space) {
3298                 ret = i915_gem_object_finish_gpu(obj);
3299                 if (ret)
3300                         return ret;
3301
3302                 i915_gem_object_finish_gtt(obj);
3303
3304                 /* Before SandyBridge, you could not use tiling or fence
3305                  * registers with snooped memory, so relinquish any fences
3306                  * currently pointing to our region in the aperture.
3307                  */
3308                 if (INTEL_INFO(dev)->gen < 6) {
3309                         ret = i915_gem_object_put_fence(obj);
3310                         if (ret)
3311                                 return ret;
3312                 }
3313
3314                 if (obj->has_global_gtt_mapping)
3315                         i915_gem_gtt_bind_object(obj, cache_level);
3316                 if (obj->has_aliasing_ppgtt_mapping)
3317                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3318                                                obj, cache_level);
3319
3320                 obj->gtt_space->color = cache_level;
3321         }
3322
3323         if (cache_level == I915_CACHE_NONE) {
3324                 u32 old_read_domains, old_write_domain;
3325
3326                 /* If we're coming from LLC cached, then we haven't
3327                  * actually been tracking whether the data is in the
3328                  * CPU cache or not, since we only allow one bit set
3329                  * in obj->write_domain and have been skipping the clflushes.
3330                  * Just set it to the CPU cache for now.
3331                  */
3332                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3333                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3334
3335                 old_read_domains = obj->base.read_domains;
3336                 old_write_domain = obj->base.write_domain;
3337
3338                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3339                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3340
3341         }
3342
3343         obj->cache_level = cache_level;
3344         i915_gem_verify_gtt(dev);
3345         return 0;
3346 }
3347
3348 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3349                                struct drm_file *file)
3350 {
3351         struct drm_i915_gem_caching *args = data;
3352         struct drm_i915_gem_object *obj;
3353         int ret;
3354
3355         ret = i915_mutex_lock_interruptible(dev);
3356         if (ret)
3357                 return ret;
3358
3359         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3360         if (&obj->base == NULL) {
3361                 ret = -ENOENT;
3362                 goto unlock;
3363         }
3364
3365         args->caching = obj->cache_level != I915_CACHE_NONE;
3366
3367         drm_gem_object_unreference(&obj->base);
3368 unlock:
3369         mutex_unlock(&dev->struct_mutex);
3370         return ret;
3371 }
3372
3373 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3374                                struct drm_file *file)
3375 {
3376         struct drm_i915_gem_caching *args = data;
3377         struct drm_i915_gem_object *obj;
3378         enum i915_cache_level level;
3379         int ret;
3380
3381         switch (args->caching) {
3382         case I915_CACHING_NONE:
3383                 level = I915_CACHE_NONE;
3384                 break;
3385         case I915_CACHING_CACHED:
3386                 level = I915_CACHE_LLC;
3387                 break;
3388         default:
3389                 return -EINVAL;
3390         }
3391
3392         ret = i915_mutex_lock_interruptible(dev);
3393         if (ret)
3394                 return ret;
3395
3396         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3397         if (&obj->base == NULL) {
3398                 ret = -ENOENT;
3399                 goto unlock;
3400         }
3401
3402         ret = i915_gem_object_set_cache_level(obj, level);
3403
3404         drm_gem_object_unreference(&obj->base);
3405 unlock:
3406         mutex_unlock(&dev->struct_mutex);
3407         return ret;
3408 }
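
/*
 * Userspace sketch (assumes libdrm's drmIoctl() and a GEM handle obtained by
 * the caller; the ioctl number and argument struct are the ones declared in
 * i915_drm.h): request snooped (LLC) caching for a buffer and fall back to
 * treating it as uncached if the kernel refuses.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) != 0)
 *		... keep treating the buffer as uncached ...
 */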
3409
3410 /*
3411  * Prepare buffer for display plane (scanout, cursors, etc).
3412  * Can be called from an uninterruptible phase (modesetting) and allows
3413  * any flushes to be pipelined (for pageflips).
3414  */
3415 int
3416 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3417                                      u32 alignment,
3418                                      struct intel_ring_buffer *pipelined)
3419 {
3420         u32 old_read_domains, old_write_domain;
3421         int ret;
3422
3423         if (pipelined != obj->ring) {
3424                 ret = i915_gem_object_sync(obj, pipelined);
3425                 if (ret)
3426                         return ret;
3427         }
3428
3429         /* The display engine is not coherent with the LLC cache on gen6.  As
3430          * a result, we make sure that the pinning that is about to occur is
3431          * done with uncached PTEs. This is lowest common denominator for all
3432          * chipsets.
3433          *
3434          * However for gen6+, we could do better by using the GFDT bit instead
3435          * of uncaching, which would allow us to flush all the LLC-cached data
3436          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3437          */
3438         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3439         if (ret)
3440                 return ret;
3441
3442         /* As the user may map the buffer once pinned in the display plane
3443          * (e.g. libkms for the bootup splash), we have to ensure that we
3444          * always use map_and_fenceable for all scanout buffers.
3445          */
3446         ret = i915_gem_object_pin(obj, alignment, true, false);
3447         if (ret)
3448                 return ret;
3449
3450         i915_gem_object_flush_cpu_write_domain(obj);
3451
3452         old_write_domain = obj->base.write_domain;
3453         old_read_domains = obj->base.read_domains;
3454
3455         /* It should now be out of any other write domains, and we can update
3456          * the domain values for our changes.
3457          */
3458         obj->base.write_domain = 0;
3459         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3460
3461         return 0;
3462 }
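
/*
 * Usage sketch (illustrative only; `fb_obj' is a hypothetical framebuffer
 * object): the modesetting code pins a buffer for scanout and programs the
 * hardware with the resulting GTT offset.  An alignment of 0 lets the bind
 * code fall back to the fence alignment, and a NULL ring asks for no
 * pipelining of the flushes.
 *
 *	ret = i915_gem_object_pin_to_display_plane(fb_obj, 0, NULL);
 *	if (ret == 0) {
 *		... write fb_obj->gtt_offset into the display base register,
 *		    and call i915_gem_object_unpin(fb_obj) when the plane is
 *		    disabled ...
 *	}
 */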
3463
3464 int
3465 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3466 {
3467         int ret;
3468
3469         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3470                 return 0;
3471
3472         ret = i915_gem_object_wait_rendering(obj, false);
3473         if (ret)
3474                 return ret;
3475
3476         /* Ensure that we invalidate the GPU's caches and TLBs. */
3477         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3478         return 0;
3479 }
3480
3481 /**
3482  * Moves a single object to the CPU read, and possibly write domain.
3483  *
3484  * This function returns when the move is complete, including waiting on
3485  * flushes to occur.
3486  */
3487 int
3488 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3489 {
3490         uint32_t old_write_domain, old_read_domains;
3491         int ret;
3492
3493         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3494                 return 0;
3495
3496         ret = i915_gem_object_wait_rendering(obj, !write);
3497         if (ret)
3498                 return ret;
3499
3500         i915_gem_object_flush_gtt_write_domain(obj);
3501
3502         old_write_domain = obj->base.write_domain;
3503         old_read_domains = obj->base.read_domains;
3504
3505         /* Flush the CPU cache if it's still invalid. */
3506         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3507                 i915_gem_clflush_object(obj);
3508
3509                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3510         }
3511
3512         /* It should now be out of any other write domains, and we can update
3513          * the domain values for our changes.
3514          */
3515         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3516
3517         /* If we're writing through the CPU, then the GPU read domains will
3518          * need to be invalidated at next use.
3519          */
3520         if (write) {
3521                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3522                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3523         }
3524
3525         return 0;
3526 }
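
/*
 * Usage sketch (illustrative only; `obj' is hypothetical and dev->struct_mutex
 * is assumed to be held): before the CPU reads back data the GPU produced,
 * the object is moved into the CPU read domain so that stale cache lines are
 * flushed or invalidated as needed.
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret == 0)
 *		... read the object's pages through a CPU mapping ...
 */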
3527
3528 /* Throttle our rendering by waiting until the ring has completed our requests
3529  * emitted over 20 msec ago.
3530  *
3531  * Note that if we were to use the current jiffies each time around the loop,
3532  * we wouldn't escape the function with any frames outstanding if the time to
3533  * render a frame was over 20ms.
3534  *
3535  * This should get us reasonable parallelism between CPU and GPU but also
3536  * relatively low latency when blocking on a particular request to finish.
3537  */
3538 static int
3539 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3540 {
3541         struct drm_i915_private *dev_priv = dev->dev_private;
3542         struct drm_i915_file_private *file_priv = file->driver_priv;
3543         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3544         struct drm_i915_gem_request *request;
3545         struct intel_ring_buffer *ring = NULL;
3546         unsigned reset_counter;
3547         u32 seqno = 0;
3548         int ret;
3549
3550         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3551         if (ret)
3552                 return ret;
3553
3554         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3555         if (ret)
3556                 return ret;
3557
3558         spin_lock(&file_priv->mm.lock);
3559         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3560                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3561                         break;
3562
3563                 ring = request->ring;
3564                 seqno = request->seqno;
3565         }
3566         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3567         spin_unlock(&file_priv->mm.lock);
3568
3569         if (seqno == 0)
3570                 return 0;
3571
3572         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3573         if (ret == 0)
3574                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3575
3576         return ret;
3577 }
3578
3579 int
3580 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3581                     uint32_t alignment,
3582                     bool map_and_fenceable,
3583                     bool nonblocking)
3584 {
3585         int ret;
3586
3587         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3588                 return -EBUSY;
3589
3590         if (obj->gtt_space != NULL) {
3591                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3592                     (map_and_fenceable && !obj->map_and_fenceable)) {
3593                         WARN(obj->pin_count,
3594                              "bo is already pinned with incorrect alignment:"
3595                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3596                              " obj->map_and_fenceable=%d\n",
3597                              obj->gtt_offset, alignment,
3598                              map_and_fenceable,
3599                              obj->map_and_fenceable);
3600                         ret = i915_gem_object_unbind(obj);
3601                         if (ret)
3602                                 return ret;
3603                 }
3604         }
3605
3606         if (obj->gtt_space == NULL) {
3607                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3608
3609                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3610                                                   map_and_fenceable,
3611                                                   nonblocking);
3612                 if (ret)
3613                         return ret;
3614
3615                 if (!dev_priv->mm.aliasing_ppgtt)
3616                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3617         }
3618
3619         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3620                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3621
3622         obj->pin_count++;
3623         obj->pin_mappable |= map_and_fenceable;
3624
3625         return 0;
3626 }
3627
3628 void
3629 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3630 {
3631         BUG_ON(obj->pin_count == 0);
3632         BUG_ON(obj->gtt_space == NULL);
3633
3634         if (--obj->pin_count == 0)
3635                 obj->pin_mappable = false;
3636 }
3637
3638 int
3639 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3640                    struct drm_file *file)
3641 {
3642         struct drm_i915_gem_pin *args = data;
3643         struct drm_i915_gem_object *obj;
3644         int ret;
3645
3646         ret = i915_mutex_lock_interruptible(dev);
3647         if (ret)
3648                 return ret;
3649
3650         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3651         if (&obj->base == NULL) {
3652                 ret = -ENOENT;
3653                 goto unlock;
3654         }
3655
3656         if (obj->madv != I915_MADV_WILLNEED) {
3657                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3658                 ret = -EINVAL;
3659                 goto out;
3660         }
3661
3662         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3663                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3664                           args->handle);
3665                 ret = -EINVAL;
3666                 goto out;
3667         }
3668
3669         if (obj->user_pin_count == 0) {
3670                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3671                 if (ret)
3672                         goto out;
3673         }
3674
3675         obj->user_pin_count++;
3676         obj->pin_filp = file;
3677
3678         /* XXX - flush the CPU caches for pinned objects
3679          * as the X server doesn't manage domains yet
3680          */
3681         i915_gem_object_flush_cpu_write_domain(obj);
3682         args->offset = obj->gtt_offset;
3683 out:
3684         drm_gem_object_unreference(&obj->base);
3685 unlock:
3686         mutex_unlock(&dev->struct_mutex);
3687         return ret;
3688 }
3689
3690 int
3691 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3692                      struct drm_file *file)
3693 {
3694         struct drm_i915_gem_pin *args = data;
3695         struct drm_i915_gem_object *obj;
3696         int ret;
3697
3698         ret = i915_mutex_lock_interruptible(dev);
3699         if (ret)
3700                 return ret;
3701
3702         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3703         if (&obj->base == NULL) {
3704                 ret = -ENOENT;
3705                 goto unlock;
3706         }
3707
3708         if (obj->pin_filp != file) {
3709                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3710                           args->handle);
3711                 ret = -EINVAL;
3712                 goto out;
3713         }
3714         obj->user_pin_count--;
3715         if (obj->user_pin_count == 0) {
3716                 obj->pin_filp = NULL;
3717                 i915_gem_object_unpin(obj);
3718         }
3719
3720 out:
3721         drm_gem_object_unreference(&obj->base);
3722 unlock:
3723         mutex_unlock(&dev->struct_mutex);
3724         return ret;
3725 }
3726
3727 int
3728 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3729                     struct drm_file *file)
3730 {
3731         struct drm_i915_gem_busy *args = data;
3732         struct drm_i915_gem_object *obj;
3733         int ret;
3734
3735         ret = i915_mutex_lock_interruptible(dev);
3736         if (ret)
3737                 return ret;
3738
3739         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3740         if (&obj->base == NULL) {
3741                 ret = -ENOENT;
3742                 goto unlock;
3743         }
3744
3745         /* Count all active objects as busy, even if they are currently not used
3746          * by the gpu. Users of this interface expect objects to eventually
3747          * become non-busy without any further actions, therefore emit any
3748          * necessary flushes here.
3749          */
3750         ret = i915_gem_object_flush_active(obj);
3751
3752         args->busy = obj->active;
3753         if (obj->ring) {
3754                 args->busy |= intel_ring_flag(obj->ring) << 16;
3755         }
3756
3757         drm_gem_object_unreference(&obj->base);
3758 unlock:
3759         mutex_unlock(&dev->struct_mutex);
3760         return ret;
3761 }
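
/*
 * Userspace sketch (assumes libdrm's drmIoctl() and a valid GEM handle): poll
 * whether a buffer is still in use.  The low bit of `busy' reports activity
 * and, as set up above, the ring that last used the object is encoded in the
 * upper 16 bits.
 *
 *	struct drm_i915_gem_busy arg = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &arg) == 0 && !arg.busy)
 *		... the buffer can be reused without stalling ...
 */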
3762
3763 int
3764 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3765                         struct drm_file *file_priv)
3766 {
3767         return i915_gem_ring_throttle(dev, file_priv);
3768 }
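
/*
 * Userspace sketch (assumes libdrm's drmIoctl(); the throttle ioctl carries
 * no argument structure): a client that renders continuously can call this
 * once per frame so that it never queues much more than ~20ms of work ahead
 * of the GPU.
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */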
3769
3770 int
3771 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3772                        struct drm_file *file_priv)
3773 {
3774         struct drm_i915_gem_madvise *args = data;
3775         struct drm_i915_gem_object *obj;
3776         int ret;
3777
3778         switch (args->madv) {
3779         case I915_MADV_DONTNEED:
3780         case I915_MADV_WILLNEED:
3781             break;
3782         default:
3783             return -EINVAL;
3784         }
3785
3786         ret = i915_mutex_lock_interruptible(dev);
3787         if (ret)
3788                 return ret;
3789
3790         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3791         if (&obj->base == NULL) {
3792                 ret = -ENOENT;
3793                 goto unlock;
3794         }
3795
3796         if (obj->pin_count) {
3797                 ret = -EINVAL;
3798                 goto out;
3799         }
3800
3801         if (obj->madv != __I915_MADV_PURGED)
3802                 obj->madv = args->madv;
3803
3804         /* if the object is no longer attached, discard its backing storage */
3805         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3806                 i915_gem_object_truncate(obj);
3807
3808         args->retained = obj->madv != __I915_MADV_PURGED;
3809
3810 out:
3811         drm_gem_object_unreference(&obj->base);
3812 unlock:
3813         mutex_unlock(&dev->struct_mutex);
3814         return ret;
3815 }
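
/*
 * Userspace sketch (assumes libdrm's drmIoctl()): a buffer cache marks idle
 * buffers I915_MADV_DONTNEED and, before reusing one, marks it needed again
 * and checks `retained' to learn whether the kernel purged the backing pages
 * in the meantime.
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_WILLNEED,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) == 0 && !arg.retained)
 *		... contents were discarded; reinitialize the buffer ...
 */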
3816
3817 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3818                           const struct drm_i915_gem_object_ops *ops)
3819 {
3820         INIT_LIST_HEAD(&obj->mm_list);
3821         INIT_LIST_HEAD(&obj->global_list);
3822         INIT_LIST_HEAD(&obj->ring_list);
3823         INIT_LIST_HEAD(&obj->exec_list);
3824
3825         obj->ops = ops;
3826
3827         obj->fence_reg = I915_FENCE_REG_NONE;
3828         obj->madv = I915_MADV_WILLNEED;
3829         /* Avoid an unnecessary call to unbind on the first bind. */
3830         obj->map_and_fenceable = true;
3831
3832         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3833 }
3834
3835 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3836         .get_pages = i915_gem_object_get_pages_gtt,
3837         .put_pages = i915_gem_object_put_pages_gtt,
3838 };
3839
3840 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3841                                                   size_t size)
3842 {
3843         struct drm_i915_gem_object *obj;
3844 #if 0
3845         struct address_space *mapping;
3846         u32 mask;
3847 #endif
3848
3849         obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
3850         if (obj == NULL)
3851                 return NULL;
3852
3853         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3854                 kfree(obj);
3855                 return NULL;
3856         }
3857
3858 #if 0
3859         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3860         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3861                 /* 965gm cannot relocate objects above 4GiB. */
3862                 mask &= ~__GFP_HIGHMEM;
3863                 mask |= __GFP_DMA32;
3864         }
3865
3866         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3867         mapping_set_gfp_mask(mapping, mask);
3868 #endif
3869
3870         i915_gem_object_init(obj, &i915_gem_object_ops);
3871
3872         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3873         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3874
3875         if (HAS_LLC(dev)) {
3876                 /* On some devices, we can have the GPU use the LLC (the CPU
3877                  * cache) for about a 10% performance improvement
3878                  * compared to uncached.  Graphics requests other than
3879                  * display scanout are coherent with the CPU in
3880                  * accessing this cache.  This means in this mode we
3881                  * don't need to clflush on the CPU side, and on the
3882                  * GPU side we only need to flush internal caches to
3883                  * get data visible to the CPU.
3884                  *
3885                  * However, we maintain the display planes as UC, and so
3886                  * need to rebind when first used as such.
3887                  */
3888                 obj->cache_level = I915_CACHE_LLC;
3889         } else
3890                 obj->cache_level = I915_CACHE_NONE;
3891
3892         return obj;
3893 }
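
/*
 * Usage sketch (illustrative only; the size is an arbitrary example): driver
 * code allocates a GEM object, which starts out in the CPU domain with the
 * default cache level chosen above, and drops it through the usual GEM
 * reference counting (here with dev->struct_mutex assumed held).
 *
 *	obj = i915_gem_alloc_object(dev, PAGE_SIZE);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	...
 *	drm_gem_object_unreference(&obj->base);
 */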
3894
3895 int i915_gem_init_object(struct drm_gem_object *obj)
3896 {
3897         BUG();
3898
3899         return 0;
3900 }
3901
3902 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3903 {
3904         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3905         struct drm_device *dev = obj->base.dev;
3906         drm_i915_private_t *dev_priv = dev->dev_private;
3907
3908         if (obj->phys_obj)
3909                 i915_gem_detach_phys_object(dev, obj);
3910
3911         obj->pin_count = 0;
3912         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3913                 bool was_interruptible;
3914
3915                 was_interruptible = dev_priv->mm.interruptible;
3916                 dev_priv->mm.interruptible = false;
3917
3918                 WARN_ON(i915_gem_object_unbind(obj));
3919
3920                 dev_priv->mm.interruptible = was_interruptible;
3921         }
3922
3923         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
3924          * before progressing. */
3925         if (obj->stolen)
3926                 i915_gem_object_unpin_pages(obj);
3927
3928         if (WARN_ON(obj->pages_pin_count))
3929                 obj->pages_pin_count = 0;
3930         i915_gem_object_put_pages(obj);
3931         drm_gem_free_mmap_offset(&obj->base);
3932
3933         BUG_ON(obj->pages);
3934
3935         drm_gem_object_release(&obj->base);
3936         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3937
3938         kfree(obj->bit_17);
3939         i915_gem_object_free(obj);
3940 }
3941
3942 int
3943 i915_gem_idle(struct drm_device *dev)
3944 {
3945         drm_i915_private_t *dev_priv = dev->dev_private;
3946         int ret;
3947
3948         mutex_lock(&dev->struct_mutex);
3949
3950         if (dev_priv->mm.suspended) {
3951                 mutex_unlock(&dev->struct_mutex);
3952                 return 0;
3953         }
3954
3955         ret = i915_gpu_idle(dev);
3956         if (ret) {
3957                 mutex_unlock(&dev->struct_mutex);
3958                 return ret;
3959         }
3960         i915_gem_retire_requests(dev);
3961
3962         /* Under UMS, be paranoid and evict. */
3963         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3964                 i915_gem_evict_everything(dev);
3965
3966         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3967          * We need to replace this with a semaphore, or something.
3968          * And not confound mm.suspended!
3969          */
3970         dev_priv->mm.suspended = 1;
3971         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3972
3973         i915_kernel_lost_context(dev);
3974         i915_gem_cleanup_ringbuffer(dev);
3975
3976         mutex_unlock(&dev->struct_mutex);
3977
3978         /* Cancel the retire work handler, which should be idle now. */
3979         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3980
3981         return 0;
3982 }
3983
3984 void i915_gem_l3_remap(struct drm_device *dev)
3985 {
3986         drm_i915_private_t *dev_priv = dev->dev_private;
3987         u32 misccpctl;
3988         int i;
3989
3990         if (!HAS_L3_GPU_CACHE(dev))
3991                 return;
3992
3993         if (!dev_priv->l3_parity.remap_info)
3994                 return;
3995
3996         misccpctl = I915_READ(GEN7_MISCCPCTL);
3997         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3998         POSTING_READ(GEN7_MISCCPCTL);
3999
4000         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4001                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4002                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4003                         DRM_DEBUG("0x%x was already programmed to %x\n",
4004                                   GEN7_L3LOG_BASE + i, remap);
4005                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4006                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4007                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4008         }
4009
4010         /* Make sure all the writes land before disabling dop clock gating */
4011         POSTING_READ(GEN7_L3LOG_BASE);
4012
4013         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4014 }
4015
4016 void i915_gem_init_swizzling(struct drm_device *dev)
4017 {
4018         drm_i915_private_t *dev_priv = dev->dev_private;
4019
4020         if (INTEL_INFO(dev)->gen < 5 ||
4021             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4022                 return;
4023
4024         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4025                                  DISP_TILE_SURFACE_SWIZZLING);
4026
4027         if (IS_GEN5(dev))
4028                 return;
4029
4030         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4031         if (IS_GEN6(dev))
4032                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4033         else if (IS_GEN7(dev))
4034                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4035         else
4036                 BUG();
4037 }
4038
4039 static bool
4040 intel_enable_blt(struct drm_device *dev)
4041 {
4042         int revision;
4043
4044         if (!HAS_BLT(dev))
4045                 return false;
4046
4047         /* The blitter was dysfunctional on early prototypes */
4048         revision = pci_read_config(dev->dev, PCIR_REVID, 1);
4049         if (IS_GEN6(dev) && revision < 8) {
4050                 DRM_INFO("BLT not supported on this pre-production hardware;"
4051                          " graphics performance will be degraded.\n");
4052                 return false;
4053         }
4054
4055         return true;
4056 }
4057
4058 static int i915_gem_init_rings(struct drm_device *dev)
4059 {
4060         struct drm_i915_private *dev_priv = dev->dev_private;
4061         int ret;
4062
4063         ret = intel_init_render_ring_buffer(dev);
4064         if (ret)
4065                 return ret;
4066
4067         if (HAS_BSD(dev)) {
4068                 ret = intel_init_bsd_ring_buffer(dev);
4069                 if (ret)
4070                         goto cleanup_render_ring;
4071         }
4072
4073         if (intel_enable_blt(dev)) {
4074                 ret = intel_init_blt_ring_buffer(dev);
4075                 if (ret)
4076                         goto cleanup_bsd_ring;
4077         }
4078
4079         if (HAS_VEBOX(dev)) {
4080                 ret = intel_init_vebox_ring_buffer(dev);
4081                 if (ret)
4082                         goto cleanup_blt_ring;
4083         }
4084
4086         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4087         if (ret)
4088                 goto cleanup_vebox_ring;
4089
4090         return 0;
4091
4092 cleanup_vebox_ring:
4093         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4094 cleanup_blt_ring:
4095         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4096 cleanup_bsd_ring:
4097         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4098 cleanup_render_ring:
4099         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4100
4101         return ret;
4102 }
4103
4104 int
4105 i915_gem_init_hw(struct drm_device *dev)
4106 {
4107         drm_i915_private_t *dev_priv = dev->dev_private;
4108         int ret;
4109
4110 #if 0
4111         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4112                 return -EIO;
4113 #endif
4114
4115         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4116                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4117
4118         if (HAS_PCH_NOP(dev)) {
4119                 u32 temp = I915_READ(GEN7_MSG_CTL);
4120                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4121                 I915_WRITE(GEN7_MSG_CTL, temp);
4122         }
4123
4124         i915_gem_l3_remap(dev);
4125
4126         i915_gem_init_swizzling(dev);
4127
4128         ret = i915_gem_init_rings(dev);
4129         if (ret)
4130                 return ret;
4131
4132         /*
4133          * XXX: There was some w/a described somewhere suggesting loading
4134          * contexts before PPGTT.
4135          */
4136         i915_gem_context_init(dev);
4137         if (dev_priv->mm.aliasing_ppgtt) {
4138                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4139                 if (ret) {
4140                         i915_gem_cleanup_aliasing_ppgtt(dev);
4141                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4142                 }
4143         }
4144
4145         return 0;
4146 }
4147
4148 int i915_gem_init(struct drm_device *dev)
4149 {
4150         struct drm_i915_private *dev_priv = dev->dev_private;
4151         int ret;
4152
4153         mutex_lock(&dev->struct_mutex);
4154
4155         if (IS_VALLEYVIEW(dev)) {
4156                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4157                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4158                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4159                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4160         }
4161
4162         i915_gem_init_global_gtt(dev);
4163
4164         ret = i915_gem_init_hw(dev);
4165         mutex_unlock(&dev->struct_mutex);
4166         if (ret) {
4167                 i915_gem_cleanup_aliasing_ppgtt(dev);
4168                 return ret;
4169         }
4170
4171         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4172         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4173                 dev_priv->dri1.allow_batchbuffer = 1;
4174         return 0;
4175 }
4176
4177 void
4178 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4179 {
4180         drm_i915_private_t *dev_priv = dev->dev_private;
4181         struct intel_ring_buffer *ring;
4182         int i;
4183
4184         for_each_ring(ring, dev_priv, i)
4185                 intel_cleanup_ring_buffer(ring);
4186 }
4187
4188 int
4189 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4190                        struct drm_file *file_priv)
4191 {
4192         drm_i915_private_t *dev_priv = dev->dev_private;
4193         int ret;
4194
4195         if (drm_core_check_feature(dev, DRIVER_MODESET))
4196                 return 0;
4197
4198         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4199                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4200                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4201         }
4202
4203         mutex_lock(&dev->struct_mutex);
4204         dev_priv->mm.suspended = 0;
4205
4206         ret = i915_gem_init_hw(dev);
4207         if (ret != 0) {
4208                 mutex_unlock(&dev->struct_mutex);
4209                 return ret;
4210         }
4211
4212         KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
4213         mutex_unlock(&dev->struct_mutex);
4214
4215         ret = drm_irq_install(dev);
4216         if (ret)
4217                 goto cleanup_ringbuffer;
4218
4219         return 0;
4220
4221 cleanup_ringbuffer:
4222         mutex_lock(&dev->struct_mutex);
4223         i915_gem_cleanup_ringbuffer(dev);
4224         dev_priv->mm.suspended = 1;
4225         mutex_unlock(&dev->struct_mutex);
4226
4227         return ret;
4228 }
4229
4230 int
4231 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4232                        struct drm_file *file_priv)
4233 {
4234         if (drm_core_check_feature(dev, DRIVER_MODESET))
4235                 return 0;
4236
4237         drm_irq_uninstall(dev);
4238         return i915_gem_idle(dev);
4239 }
4240
4241 void
4242 i915_gem_lastclose(struct drm_device *dev)
4243 {
4244         int ret;
4245
4246         if (drm_core_check_feature(dev, DRIVER_MODESET))
4247                 return;
4248
4249         ret = i915_gem_idle(dev);
4250         if (ret)
4251                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4252 }
4253
4254 static void
4255 init_ring_lists(struct intel_ring_buffer *ring)
4256 {
4257         INIT_LIST_HEAD(&ring->active_list);
4258         INIT_LIST_HEAD(&ring->request_list);
4259 }
4260
4261 void
4262 i915_gem_load(struct drm_device *dev)
4263 {
4264         int i;
4265         drm_i915_private_t *dev_priv = dev->dev_private;
4266
4267         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4268         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4269         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4270         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4271         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4272         for (i = 0; i < I915_NUM_RINGS; i++)
4273                 init_ring_lists(&dev_priv->ring[i]);
4274         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4275                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4276         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4277                           i915_gem_retire_work_handler);
4278         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4279
4280         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4281         if (IS_GEN3(dev)) {
4282                 I915_WRITE(MI_ARB_STATE,
4283                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4284         }
4285
4286         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4287
4288         /* Old X drivers will take 0-2 for front, back, depth buffers */
4289         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4290                 dev_priv->fence_reg_start = 3;
4291
4292         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4293                 dev_priv->num_fence_regs = 32;
4294         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4295                 dev_priv->num_fence_regs = 16;
4296         else
4297                 dev_priv->num_fence_regs = 8;
4298
4299         /* Initialize fence registers to zero */
4300         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4301         i915_gem_restore_fences(dev);
4302
4303         i915_gem_detect_bit_6_swizzle(dev);
4304         init_waitqueue_head(&dev_priv->pending_flip_queue);
4305
4306         dev_priv->mm.interruptible = true;
4307
4308 #if 0
4309         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4310         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4311         register_shrinker(&dev_priv->mm.inactive_shrinker);
4312 #else
4313         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4314             i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
4315 #endif
4316 }
4317
4318 /*
4319  * Create a physically contiguous memory object for this object
4320  * e.g. for cursor + overlay regs
4321  */
4322 static int i915_gem_init_phys_object(struct drm_device *dev,
4323                                      int id, int size, int align)
4324 {
4325         drm_i915_private_t *dev_priv = dev->dev_private;
4326         struct drm_i915_gem_phys_object *phys_obj;
4327         int ret;
4328
4329         if (dev_priv->mm.phys_objs[id - 1] || !size)
4330                 return 0;
4331
4332         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4333         if (!phys_obj)
4334                 return -ENOMEM;
4335
4336         phys_obj->id = id;
4337
4338         phys_obj->handle = drm_pci_alloc(dev, size, align);
4339         if (!phys_obj->handle) {
4340                 ret = -ENOMEM;
4341                 goto kfree_obj;
4342         }
4343         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4344             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4345
4346         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4347
4348         return 0;
4349
4350 kfree_obj:
4351         kfree(phys_obj);
4352         return ret;
4353 }
4354
4355 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4356 {
4357         drm_i915_private_t *dev_priv = dev->dev_private;
4358         struct drm_i915_gem_phys_object *phys_obj;
4359
4360         if (!dev_priv->mm.phys_objs[id - 1])
4361                 return;
4362
4363         phys_obj = dev_priv->mm.phys_objs[id - 1];
4364         if (phys_obj->cur_obj) {
4365                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4366         }
4367
4368         drm_pci_free(dev, phys_obj->handle);
4369         kfree(phys_obj);
4370         dev_priv->mm.phys_objs[id - 1] = NULL;
4371 }
4372
4373 void i915_gem_free_all_phys_object(struct drm_device *dev)
4374 {
4375         int i;
4376
4377         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4378                 i915_gem_free_phys_object(dev, i);
4379 }
4380
4381 void i915_gem_detach_phys_object(struct drm_device *dev,
4382                                  struct drm_i915_gem_object *obj)
4383 {
4384         struct vm_object *mapping = obj->base.vm_obj;
4385         char *vaddr;
4386         int i;
4387         int page_count;
4388
4389         if (!obj->phys_obj)
4390                 return;
4391         vaddr = obj->phys_obj->handle->vaddr;
4392
4393         page_count = obj->base.size / PAGE_SIZE;
4394         VM_OBJECT_LOCK(obj->base.vm_obj);
4395         for (i = 0; i < page_count; i++) {
4396                 struct vm_page *page = shmem_read_mapping_page(mapping, i);
4397                 if (!IS_ERR(page)) {
4398                         VM_OBJECT_UNLOCK(obj->base.vm_obj);
4399                         char *dst = kmap_atomic(page);
4400                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4401                         kunmap_atomic(dst);
4402
4403                         drm_clflush_pages(&page, 1);
4404
4405 #if 0
4406                         set_page_dirty(page);
4407                         mark_page_accessed(page);
4408                         page_cache_release(page);
4409 #endif
4410                         VM_OBJECT_LOCK(obj->base.vm_obj);
4411                         vm_page_reference(page);
4412                         vm_page_dirty(page);
4413                         vm_page_busy_wait(page, FALSE, "i915gem");
4414                         vm_page_unwire(page, 0);
4415                         vm_page_wakeup(page);
4416                 }
4417         }
4418         VM_OBJECT_UNLOCK(obj->base.vm_obj);
4419         intel_gtt_chipset_flush();
4420
4421         obj->phys_obj->cur_obj = NULL;
4422         obj->phys_obj = NULL;
4423 }
4424
4425 int
4426 i915_gem_attach_phys_object(struct drm_device *dev,
4427                             struct drm_i915_gem_object *obj,
4428                             int id,
4429                             int align)
4430 {
4431         struct vm_object *mapping = obj->base.vm_obj;
4432         drm_i915_private_t *dev_priv = dev->dev_private;
4433         int ret = 0;
4434         int page_count;
4435         int i;
4436
4437         if (id > I915_MAX_PHYS_OBJECT)
4438                 return -EINVAL;
4439
4440         if (obj->phys_obj) {
4441                 if (obj->phys_obj->id == id)
4442                         return 0;
4443                 i915_gem_detach_phys_object(dev, obj);
4444         }
4445
4446         /* create a new object */
4447         if (!dev_priv->mm.phys_objs[id - 1]) {
4448                 ret = i915_gem_init_phys_object(dev, id,
4449                                                 obj->base.size, align);
4450                 if (ret) {
4451                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4452                                   id, obj->base.size);
4453                         return ret;
4454                 }
4455         }
4456
4457         /* bind to the object */
4458         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4459         obj->phys_obj->cur_obj = obj;
4460
4461         page_count = obj->base.size / PAGE_SIZE;
4462
4463         VM_OBJECT_LOCK(obj->base.vm_obj);
4464         for (i = 0; i < page_count; i++) {
4465                 struct vm_page *page;
4466                 char *dst, *src;
4467
4468                 page = shmem_read_mapping_page(mapping, i);
4469                 VM_OBJECT_UNLOCK(obj->base.vm_obj);
4470                 if (IS_ERR(page))
4471                         return PTR_ERR(page);
4472
4473                 src = kmap_atomic(page);
4474                 dst = (char*)obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4475                 memcpy(dst, src, PAGE_SIZE);
4476                 kunmap_atomic(src);
4477
4478 #if 0
4479                 mark_page_accessed(page);
4480                 page_cache_release(page);
4481 #endif
4482                 VM_OBJECT_LOCK(obj->base.vm_obj);
4483                 vm_page_reference(page);
4484                 vm_page_busy_wait(page, FALSE, "i915gem");
4485                 vm_page_unwire(page, 0);
4486                 vm_page_wakeup(page);
4487         }
4488         VM_OBJECT_UNLOCK(obj->base.vm_obj);
4489
4490         return 0;
4491 }
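
/*
 * Usage sketch (illustrative only; `obj' is a hypothetical cursor object and
 * PAGE_SIZE is an example alignment): hardware that needs physically
 * contiguous cursor memory attaches the object to a phys object slot, which
 * i915_gem_attach_phys_object() creates on demand, before the address is
 * handed to the cursor registers.
 *
 *	ret = i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0,
 *					  PAGE_SIZE);
 *	if (ret == 0)
 *		... point the cursor base register at the phys object ...
 */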
4492
4493 static int
4494 i915_gem_phys_pwrite(struct drm_device *dev,
4495                      struct drm_i915_gem_object *obj,
4496                      struct drm_i915_gem_pwrite *args,
4497                      struct drm_file *file_priv)
4498 {
4499         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4500         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4501
4502         if (copyin_nofault(user_data, vaddr, args->size) != 0) {
4503                 unsigned long unwritten;
4504
4505                 /* The physical object once assigned is fixed for the lifetime
4506                  * of the obj, so we can safely drop the lock and continue
4507                  * to access vaddr.
4508                  */
4509                 mutex_unlock(&dev->struct_mutex);
4510                 unwritten = copy_from_user(vaddr, user_data, args->size);
4511                 mutex_lock(&dev->struct_mutex);
4512                 if (unwritten)
4513                         return -EFAULT;
4514         }
4515
4516         i915_gem_chipset_flush(dev);
4517         return 0;
4518 }
4519
4520 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4521 {
4522         struct drm_i915_file_private *file_priv = file->driver_priv;
4523
4524         /* Clean up our request list when the client is going away, so that
4525          * later retire_requests won't dereference our soon-to-be-gone
4526          * file_priv.
4527          */
4528         spin_lock(&file_priv->mm.lock);
4529         while (!list_empty(&file_priv->mm.request_list)) {
4530                 struct drm_i915_gem_request *request;
4531
4532                 request = list_first_entry(&file_priv->mm.request_list,
4533                                            struct drm_i915_gem_request,
4534                                            client_list);
4535                 list_del(&request->client_list);
4536                 request->file_priv = NULL;
4537         }
4538         spin_unlock(&file_priv->mm.lock);
4539 }
4540
4541 int
4542 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
4543     vm_ooffset_t foff, struct ucred *cred, u_short *color)
4544 {
4545
4546         *color = 0; /* XXXKIB */
4547         return (0);
4548 }
4549
4550 void
4551 i915_gem_pager_dtor(void *handle)
4552 {
4553         struct drm_gem_object *obj;
4554         struct drm_device *dev;
4555
4556         obj = handle;
4557         dev = obj->dev;
4558
4559         mutex_lock(&dev->struct_mutex);
4560         drm_gem_free_mmap_offset(obj);
4561         i915_gem_release_mmap(to_intel_bo(obj));
4562         drm_gem_object_unreference(obj);
4563         mutex_unlock(&dev->struct_mutex);
4564 }
4565
4566 #define GEM_PARANOID_CHECK_GTT 0
4567 #if GEM_PARANOID_CHECK_GTT
4568 static void
4569 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
4570     int page_count)
4571 {
4572         struct drm_i915_private *dev_priv;
4573         vm_paddr_t pa;
4574         unsigned long start, end;
4575         u_int i;
4576         int j;
4577
4578         dev_priv = dev->dev_private;
4579         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
4580         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
4581         for (i = start; i < end; i++) {
4582                 pa = intel_gtt_read_pte_paddr(i);
4583                 for (j = 0; j < page_count; j++) {
4584                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
4585                                 panic("Page %p in GTT pte index %d pte %x",
4586                                     ma[j], i, intel_gtt_read_pte(i));
4587                         }
4588                 }
4589         }
4591 }
4592 #endif
4593
4594 static int
4595 i915_gpu_is_active(struct drm_device *dev)
4596 {
4597         drm_i915_private_t *dev_priv = dev->dev_private;
4598
4599         return !list_empty(&dev_priv->mm.active_list);
4600 }
4601
4602 static void
4603 i915_gem_lowmem(void *arg)
4604 {
4605         struct drm_device *dev;
4606         struct drm_i915_private *dev_priv;
4607         struct drm_i915_gem_object *obj, *next;
4608         int cnt, cnt_fail, cnt_total;
4609
4610         dev = arg;
4611         dev_priv = dev->dev_private;
4612
4613         if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT))
4614                 return;
4615
4616 rescan:
4617         /* first scan for clean buffers */
4618         i915_gem_retire_requests(dev);
4619
4620         cnt_total = cnt_fail = cnt = 0;
4621
4622         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4623             mm_list) {
4624                 if (i915_gem_object_is_purgeable(obj)) {
4625                         if (i915_gem_object_unbind(obj) != 0)
4626                                 cnt_total++;
4627                 } else
4628                         cnt_total++;
4629         }
4630
4631         /* second pass, evict/count anything still on the inactive list */
4632         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4633             mm_list) {
4634                 if (i915_gem_object_unbind(obj) == 0)
4635                         cnt++;
4636                 else
4637                         cnt_fail++;
4638         }
4639
4640         if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
4641                 /*
4642                  * We are desperate for pages, so as a last resort, wait
4643                  * for the GPU to finish and discard whatever we can.
4644                  * This dramatically reduces the number of OOM-killer
4645                  * events whilst running the GPU aggressively.
4646                  */
4647                 if (i915_gpu_idle(dev) == 0)
4648                         goto rescan;
4649         }
4650         mutex_unlock(&dev->struct_mutex);
4651 }