drm: Remove some useless macros
[dragonfly.git] sys/dev/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  *
53  */
54
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
57
58 #include <drm/drmP.h>
59 #include <drm/i915_drm.h>
60 #include "i915_drv.h"
61 #include "intel_drv.h"
62 #include "intel_ringbuffer.h"
63 #include <linux/completion.h>
64 #include <linux/jiffies.h>
65 #include <linux/time.h>
66
67 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
68 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
69 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
70                                                     unsigned alignment,
71                                                     bool map_and_fenceable,
72                                                     bool nonblocking);
73 static int i915_gem_phys_pwrite(struct drm_device *dev,
74                                 struct drm_i915_gem_object *obj,
75                                 struct drm_i915_gem_pwrite *args,
76                                 struct drm_file *file);
77
78 static void i915_gem_write_fence(struct drm_device *dev, int reg,
79                                  struct drm_i915_gem_object *obj);
80 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
81                                          struct drm_i915_fence_reg *fence,
82                                          bool enable);
83
84 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
85     int tiling_mode);
86 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
87     uint32_t size, int tiling_mode);
88 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
89     int flags);
90 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
91 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92
93 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
94 {
95         if (obj->tiling_mode)
96                 i915_gem_release_mmap(obj);
97
98         /* As we do not have an associated fence register, we will force
99          * a tiling change if we ever need to acquire one.
100          */
101         obj->fence_dirty = false;
102         obj->fence_reg = I915_FENCE_REG_NONE;
103 }
104
105 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
106 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
107 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
108 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
109 static void i915_gem_reset_fences(struct drm_device *dev);
110 static void i915_gem_lowmem(void *arg);
111
112 /* some bookkeeping */
113 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
114                                   size_t size)
115 {
116         dev_priv->mm.object_count++;
117         dev_priv->mm.object_memory += size;
118 }
119
120 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
121                                      size_t size)
122 {
123         dev_priv->mm.object_count--;
124         dev_priv->mm.object_memory -= size;
125 }
126
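/*
 * Wait for any in-progress GPU reset to finish before proceeding with a
 * GEM operation.  Returns 0 when the GPU is not wedged or once the error
 * handler signals completion, -EIO if the reset takes longer than 10
 * seconds, or the error from an interrupted wait.
 */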
127 static int
128 i915_gem_wait_for_error(struct drm_device *dev)
129 {
130         struct drm_i915_private *dev_priv = dev->dev_private;
131         struct completion *x = &dev_priv->error_completion;
132         int ret;
133
134         if (!atomic_read(&dev_priv->mm.wedged))
135                 return 0;
136
137         /*
138          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
139          * userspace. If it takes that long something really bad is going on and
140          * we should simply try to bail out and fail as gracefully as possible.
141          */
142         ret = wait_for_completion_interruptible_timeout(x, 10*hz);
143         if (ret == 0) {
144                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
145                 return -EIO;
146         } else if (ret < 0) {
147                 return ret;
148         }
149
150         if (atomic_read(&dev_priv->mm.wedged)) {
151                 /* GPU is hung, bump the completion count to account for
152                  * the token we just consumed so that we never hit zero and
153                  * end up waiting upon a subsequent completion event that
154                  * will never happen.
155                  */
156                 spin_lock(&x->wait.lock);
157                 x->done++;
158                 spin_unlock(&x->wait.lock);
159         }
160         return 0;
161 }
162
163 int i915_mutex_lock_interruptible(struct drm_device *dev)
164 {
165         int ret;
166
167         ret = i915_gem_wait_for_error(dev);
168         if (ret)
169                 return ret;
170
171         ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL);
172         if (ret)
173                 return -EINTR;
174
175         WARN_ON(i915_verify_lists(dev));
176         return 0;
177 }
178
179 static inline bool
180 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
181 {
182         return !obj->active;
183 }
184
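/*
 * Legacy (UMS) initialization of the GTT range.  Rejected for KMS drivers
 * and for gen5+ hardware; the requested GTT range must be page aligned.
 */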
185 int
186 i915_gem_init_ioctl(struct drm_device *dev, void *data,
187                     struct drm_file *file)
188 {
189         struct drm_i915_gem_init *args = data;
190
191         if (drm_core_check_feature(dev, DRIVER_MODESET))
192                 return -ENODEV;
193
194         if (args->gtt_start >= args->gtt_end ||
195             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
196                 return -EINVAL;
197
198         /* GEM with user mode setting was never supported on ilk and later. */
199         if (INTEL_INFO(dev)->gen >= 5)
200                 return -ENODEV;
201
202         lockmgr(&dev->dev_lock, LK_EXCLUSIVE|LK_RETRY|LK_CANRECURSE);
203         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
204         lockmgr(&dev->dev_lock, LK_RELEASE);
205
206         return 0;
207 }
208
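/*
 * Report the total GTT aperture size and the portion not currently
 * consumed by pinned objects.
 */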
209 int
210 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
211                             struct drm_file *file)
212 {
213         struct drm_i915_private *dev_priv = dev->dev_private;
214         struct drm_i915_gem_get_aperture *args = data;
215         struct drm_i915_gem_object *obj;
216         size_t pinned;
217
218         pinned = 0;
219         DRM_LOCK(dev);
220         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
221                 if (obj->pin_count)
222                         pinned += obj->gtt_space->size;
223         DRM_UNLOCK(dev);
224
225         args->aper_size = dev_priv->mm.gtt_total;
226         args->aper_available_size = args->aper_size - pinned;
227
228         return 0;
229 }
230
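/*
 * Common object creation helper: round the size up to a whole page,
 * allocate the GEM object and return a handle for it, dropping the
 * allocation reference once the handle owns the object.
 */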
231 static int
232 i915_gem_create(struct drm_file *file,
233                 struct drm_device *dev,
234                 uint64_t size,
235                 uint32_t *handle_p)
236 {
237         struct drm_i915_gem_object *obj;
238         int ret;
239         u32 handle;
240
241         size = roundup(size, PAGE_SIZE);
242         if (size == 0)
243                 return -EINVAL;
244
245         /* Allocate the new object */
246         obj = i915_gem_alloc_object(dev, size);
247         if (obj == NULL)
248                 return -ENOMEM;
249
250         handle = 0;
251         ret = drm_gem_handle_create(file, &obj->base, &handle);
252         if (ret) {
253                 drm_gem_object_release(&obj->base);
254                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
255                 drm_free(obj, M_DRM);
256                 return (-ret);
257         }
258
259         /* drop reference from allocate - handle holds it now */
260         drm_gem_object_unreference(&obj->base);
261         *handle_p = handle;
262         return 0;
263 }
264
265 int
266 i915_gem_dumb_create(struct drm_file *file,
267                      struct drm_device *dev,
268                      struct drm_mode_create_dumb *args)
269 {
270
271         /* have to work out size/pitch and return them */
272         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
273         args->size = args->pitch * args->height;
274         return i915_gem_create(file, dev,
275                                args->size, &args->handle);
276 }
277
278 int i915_gem_dumb_destroy(struct drm_file *file,
279                           struct drm_device *dev,
280                           uint32_t handle)
281 {
282
283         return drm_gem_handle_delete(file, handle);
284 }
285
286 /**
287  * Creates a new mm object and returns a handle to it.
288  */
289 int
290 i915_gem_create_ioctl(struct drm_device *dev, void *data,
291                       struct drm_file *file)
292 {
293         struct drm_i915_gem_create *args = data;
294
295         return i915_gem_create(file, dev,
296                                args->size, &args->handle);
297 }
298
299 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
300 {
301         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
302
303         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
304                 obj->tiling_mode != I915_TILING_NONE;
305 }
306
307 static inline void vm_page_reference(vm_page_t m)
308 {
309         vm_page_flag_set(m, PG_REFERENCED);
310 }
311
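/*
 * Copy object pages out to user space for a pread request.  Each page is
 * wired and mapped through an sf_buf; when bit-17 swizzling applies, data
 * is copied in 64-byte runs from the swizzled offset.
 */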
312 static int
313 i915_gem_shmem_pread(struct drm_device *dev,
314                      struct drm_i915_gem_object *obj,
315                      struct drm_i915_gem_pread *args,
316                      struct drm_file *file)
317 {
318         vm_object_t vm_obj;
319         vm_page_t m;
320         struct sf_buf *sf;
321         vm_offset_t mkva;
322         vm_pindex_t obj_pi;
323         int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
324
325         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
326
327         obj->dirty = 1;
328         vm_obj = obj->base.vm_obj;
329         ret = 0;
330
331         VM_OBJECT_LOCK(vm_obj);
332         vm_object_pip_add(vm_obj, 1);
333         while (args->size > 0) {
334                 obj_pi = OFF_TO_IDX(args->offset);
335                 obj_po = args->offset & PAGE_MASK;
336
337                 m = i915_gem_wire_page(vm_obj, obj_pi);
338                 VM_OBJECT_UNLOCK(vm_obj);
339
340                 sf = sf_buf_alloc(m);
341                 mkva = sf_buf_kva(sf);
342                 length = min(args->size, PAGE_SIZE - obj_po);
343                 while (length > 0) {
344                         if (do_bit17_swizzling &&
345                             (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
346                                 cnt = roundup2(obj_po + 1, 64);
347                                 cnt = min(cnt - obj_po, length);
348                                 swizzled_po = obj_po ^ 64;
349                         } else {
350                                 cnt = length;
351                                 swizzled_po = obj_po;
352                         }
353                         ret = -copyout_nofault(
354                             (char *)mkva + swizzled_po,
355                             (void *)(uintptr_t)args->data_ptr, cnt);
356                         if (ret != 0)
357                                 break;
358                         args->data_ptr += cnt;
359                         args->size -= cnt;
360                         length -= cnt;
361                         args->offset += cnt;
362                         obj_po += cnt;
363                 }
364                 sf_buf_free(sf);
365                 VM_OBJECT_LOCK(vm_obj);
366                 vm_page_reference(m);
367                 vm_page_busy_wait(m, FALSE, "i915gem");
368                 vm_page_unwire(m, 1);
369                 vm_page_wakeup(m);
370
371                 if (ret != 0)
372                         break;
373         }
374         vm_object_pip_wakeup(vm_obj);
375         VM_OBJECT_UNLOCK(vm_obj);
376
377         return (ret);
378 }
379
380 /**
381  * Reads data from the object referenced by handle.
382  *
383  * On error, the contents of *data are undefined.
384  */
385 int
386 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
387                      struct drm_file *file)
388 {
389         struct drm_i915_gem_pread *args = data;
390         struct drm_i915_gem_object *obj;
391         int ret = 0;
392
393         if (args->size == 0)
394                 return 0;
395
396         ret = i915_mutex_lock_interruptible(dev);
397         if (ret)
398                 return ret;
399
400         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
401         if (&obj->base == NULL) {
402                 ret = -ENOENT;
403                 goto unlock;
404         }
405
406         /* Bounds check source.  */
407         if (args->offset > obj->base.size ||
408             args->size > obj->base.size - args->offset) {
409                 ret = -EINVAL;
410                 goto out;
411         }
412
413         ret = i915_gem_shmem_pread(dev, obj, args, file);
414 out:
415         drm_gem_object_unreference(&obj->base);
416 unlock:
417         DRM_UNLOCK(dev);
418         return ret;
419 }
420
421 /**
422  * This is the fast pwrite path, where we copy the data directly from the
423  * user into the GTT, uncached.
424  */
425 static int
426 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
427                          struct drm_i915_gem_object *obj,
428                          struct drm_i915_gem_pwrite *args,
429                          struct drm_file *file)
430 {
431         vm_offset_t mkva;
432         int ret;
433
434         /*
435          * Pass the unaligned physical address and size to pmap_mapdev_attr()
436          * so it can properly calculate whether an extra page needs to be
437          * mapped or not to cover the requested range.  The function will
438          * add the page offset into the returned mkva for us.
439          */
440         mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
441             args->offset, args->size, PAT_WRITE_COMBINING);
442         ret = -copyin_nofault((void *)(uintptr_t)args->data_ptr, (char *)mkva, args->size);
443         pmap_unmapdev(mkva, args->size);
444
445         return ret;
446 }
447
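/*
 * Slow pwrite path: copy user data into the object's pages one page at a
 * time through wired sf_buf mappings, dirtying each page as it is written.
 */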
448 static int
449 i915_gem_shmem_pwrite(struct drm_device *dev,
450                       struct drm_i915_gem_object *obj,
451                       struct drm_i915_gem_pwrite *args,
452                       struct drm_file *file)
453 {
454         vm_object_t vm_obj;
455         vm_page_t m;
456         struct sf_buf *sf;
457         vm_offset_t mkva;
458         vm_pindex_t obj_pi;
459         int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
460
461         do_bit17_swizzling = 0;
462
463         obj->dirty = 1;
464         vm_obj = obj->base.vm_obj;
465         ret = 0;
466
467         VM_OBJECT_LOCK(vm_obj);
468         vm_object_pip_add(vm_obj, 1);
469         while (args->size > 0) {
470                 obj_pi = OFF_TO_IDX(args->offset);
471                 obj_po = args->offset & PAGE_MASK;
472
473                 m = i915_gem_wire_page(vm_obj, obj_pi);
474                 VM_OBJECT_UNLOCK(vm_obj);
475
476                 sf = sf_buf_alloc(m);
477                 mkva = sf_buf_kva(sf);
478                 length = min(args->size, PAGE_SIZE - obj_po);
479                 while (length > 0) {
480                         if (do_bit17_swizzling &&
481                             (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
482                                 cnt = roundup2(obj_po + 1, 64);
483                                 cnt = min(cnt - obj_po, length);
484                                 swizzled_po = obj_po ^ 64;
485                         } else {
486                                 cnt = length;
487                                 swizzled_po = obj_po;
488                         }
489                         ret = -copyin_nofault(
490                             (void *)(uintptr_t)args->data_ptr,
491                             (char *)mkva + swizzled_po, cnt);
492                         if (ret != 0)
493                                 break;
494                         args->data_ptr += cnt;
495                         args->size -= cnt;
496                         length -= cnt;
497                         args->offset += cnt;
498                         obj_po += cnt;
499                 }
500                 sf_buf_free(sf);
501                 VM_OBJECT_LOCK(vm_obj);
502                 vm_page_dirty(m);
503                 vm_page_reference(m);
504                 vm_page_busy_wait(m, FALSE, "i915gem");
505                 vm_page_unwire(m, 1);
506                 vm_page_wakeup(m);
507
508                 if (ret != 0)
509                         break;
510         }
511         vm_object_pip_wakeup(vm_obj);
512         VM_OBJECT_UNLOCK(vm_obj);
513
514         return (ret);
515 }
516
517 /**
518  * Writes data to the object referenced by handle.
519  *
520  * On error, the contents of the buffer that were to be modified are undefined.
521  */
522 int
523 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
524                       struct drm_file *file)
525 {
526         struct drm_i915_gem_pwrite *args = data;
527         struct drm_i915_gem_object *obj;
528         int ret;
529
530         if (args->size == 0)
531                 return 0;
532
533         ret = i915_mutex_lock_interruptible(dev);
534         if (ret)
535                 return ret;
536
537         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
538         if (&obj->base == NULL) {
539                 ret = -ENOENT;
540                 goto unlock;
541         }
542
543         /* Bounds check destination. */
544         if (args->offset > obj->base.size ||
545             args->size > obj->base.size - args->offset) {
546                 ret = -EINVAL;
547                 goto out;
548         }
549
550         ret = -EFAULT;
551         /* We can only do the GTT pwrite on untiled buffers, as otherwise
552          * it would end up going through the fenced access, and we'll get
553          * different detiling behavior between reading and writing.
554          * pread/pwrite currently are reading and writing from the CPU
555          * perspective, requiring manual detiling by the client.
556          */
557         if (obj->phys_obj) {
558                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
559                 goto out;
560         }
561
562         if (obj->cache_level == I915_CACHE_NONE &&
563             obj->tiling_mode == I915_TILING_NONE &&
564             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
565                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
566                 /* Note that the gtt paths might fail with non-page-backed user
567                  * pointers (e.g. gtt mappings when moving data between
568                  * textures). Fall back to the shmem path in that case. */
569         }
570
571         if (ret == -EFAULT || ret == -ENOSPC)
572                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
573
574 out:
575         drm_gem_object_unreference(&obj->base);
576 unlock:
577         DRM_UNLOCK(dev);
578         return ret;
579 }
580
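/*
 * Check whether the GPU is wedged.  Returns -EAGAIN while error recovery
 * is still running (interruptible callers may retry), -EIO if the caller
 * cannot handle -EAGAIN or if recovery finished with the GPU still
 * wedged, and 0 otherwise.
 */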
581 int
582 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
583                      bool interruptible)
584 {
585         if (atomic_read(&dev_priv->mm.wedged)) {
586                 struct completion *x = &dev_priv->error_completion;
587                 bool recovery_complete;
588
589                 /* Give the error handler a chance to run. */
590                 spin_lock(&x->wait.lock);
591                 recovery_complete = x->done > 0;
592                 spin_unlock(&x->wait.lock);
593
594                 /* Non-interruptible callers can't handle -EAGAIN, hence return
595                  * -EIO unconditionally for these. */
596                 if (!interruptible)
597                         return -EIO;
598
599                 /* Recovery complete, but still wedged means reset failure. */
600                 if (recovery_complete)
601                         return -EIO;
602
603                 return -EAGAIN;
604         }
605
606         return 0;
607 }
608
609 /*
610  * Compare seqno against outstanding lazy request. Emit a request if they are
611  * equal.
612  */
613 static int
614 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
615 {
616         int ret;
617
618         DRM_LOCK_ASSERT(ring->dev);
619
620         ret = 0;
621         if (seqno == ring->outstanding_lazy_request)
622                 ret = i915_add_request(ring, NULL, NULL);
623
624         return ret;
625 }
626
627 /**
628  * __wait_seqno - wait until execution of seqno has finished
629  * @ring: the ring expected to report seqno
630  * @seqno: duh!
631  * @interruptible: do an interruptible wait (normally yes)
632  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
633  *
634  * Returns 0 if the seqno was found within the allotted time. Else returns the
635  * errno with remaining time filled in timeout argument.
636  */
637 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
638                         bool interruptible, struct timespec *timeout)
639 {
640         drm_i915_private_t *dev_priv = ring->dev->dev_private;
641         struct timespec before, now, wait_time={1,0};
642         unsigned long timeout_jiffies;
643         long end;
644         bool wait_forever = true;
645         int ret;
646
647         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
648                 return 0;
649
650         if (timeout != NULL) {
651                 wait_time = *timeout;
652                 wait_forever = false;
653         }
654
655         timeout_jiffies = timespec_to_jiffies(&wait_time);
656
657         if (WARN_ON(!ring->irq_get(ring)))
658                 return -ENODEV;
659
660         /* Record current time in case interrupted by signal, or wedged */
661         getrawmonotonic(&before);
662
663 #define EXIT_COND \
664         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
665         atomic_read(&dev_priv->mm.wedged))
666         do {
667                 if (interruptible)
668                         end = wait_event_interruptible_timeout(ring->irq_queue,
669                                                                EXIT_COND,
670                                                                timeout_jiffies);
671                 else
672                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
673                                                  timeout_jiffies);
674
675                 ret = i915_gem_check_wedge(dev_priv, interruptible);
676                 if (ret)
677                         end = ret;
678         } while (end == 0 && wait_forever);
679
680         getrawmonotonic(&now);
681
682         ring->irq_put(ring);
683 #undef EXIT_COND
684
685         if (timeout) {
686                 struct timespec sleep_time = timespec_sub(now, before);
687                 *timeout = timespec_sub(*timeout, sleep_time);
688         }
689
690         switch (end) {
691         case -EIO:
692         case -EAGAIN: /* Wedged */
693         case -ERESTARTSYS: /* Signal */
694                 return (int)end;
695         case 0: /* Timeout */
696                 if (timeout)
697                         set_normalized_timespec(timeout, 0, 0);
698                 return -ETIMEDOUT;      /* -ETIME on Linux */
699         default: /* Completed */
700                 WARN_ON(end < 0); /* We're not aware of other errors */
701                 return 0;
702         }
703 }
704
705 /**
706  * Waits for a sequence number to be signaled, and cleans up the
707  * request and object lists appropriately for that event.
708  */
709 int
710 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
711 {
712         struct drm_device *dev = ring->dev;
713         struct drm_i915_private *dev_priv = dev->dev_private;
714         int ret = 0;
715
716         DRM_LOCK_ASSERT(dev);
717         BUG_ON(seqno == 0);
718
719         ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
720         if (ret)
721                 return ret;
722
723         ret = i915_gem_check_olr(ring, seqno);
724         if (ret)
725                 return ret;
726
727         ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
728
729         return ret;
730 }
731
732 /**
733  * Ensures that all rendering to the object has completed and the object is
734  * safe to unbind from the GTT or access from the CPU.
735  */
736 static __must_check int
737 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
738                                bool readonly)
739 {
740         struct intel_ring_buffer *ring = obj->ring;
741         u32 seqno;
742         int ret;
743
744         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
745         if (seqno == 0)
746                 return 0;
747
748         ret = i915_wait_seqno(ring, seqno);
749         if (ret)
750                 return ret;
751
752         i915_gem_retire_requests_ring(ring);
753
754         /* Manually manage the write flush as we may not yet have
755          * retired the buffer.
756          */
757         if (obj->last_write_seqno &&
758             i915_seqno_passed(seqno, obj->last_write_seqno)) {
759                 obj->last_write_seqno = 0;
760                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
761         }
762
763         return 0;
764 }
765
766 /* A nonblocking variant of the above wait. This is a highly dangerous routine
767  * as the object state may change during this call.
768  */
769 static __must_check int
770 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
771                                             bool readonly)
772 {
773         struct drm_device *dev = obj->base.dev;
774         struct drm_i915_private *dev_priv = dev->dev_private;
775         struct intel_ring_buffer *ring = obj->ring;
776         u32 seqno;
777         int ret;
778
779         DRM_LOCK_ASSERT(dev);
780         BUG_ON(!dev_priv->mm.interruptible);
781
782         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
783         if (seqno == 0)
784                 return 0;
785
786         ret = i915_gem_check_wedge(dev_priv, true);
787         if (ret)
788                 return ret;
789
790         ret = i915_gem_check_olr(ring, seqno);
791         if (ret)
792                 return ret;
793
794         DRM_UNLOCK(dev);
795         ret = __wait_seqno(ring, seqno, true, NULL);
796         DRM_LOCK(dev);
797
798         i915_gem_retire_requests_ring(ring);
799
800         /* Manually manage the write flush as we may not yet have
801          * retired the buffer.
802          */
803         if (obj->last_write_seqno &&
804             i915_seqno_passed(seqno, obj->last_write_seqno)) {
805                 obj->last_write_seqno = 0;
806                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
807         }
808
809         return ret;
810 }
811
812 /**
813  * Called when user space prepares to use an object with the CPU, either
814  * through the mmap ioctl's mapping or a GTT mapping.
815  */
816 int
817 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
818                           struct drm_file *file)
819 {
820         struct drm_i915_gem_set_domain *args = data;
821         struct drm_i915_gem_object *obj;
822         uint32_t read_domains = args->read_domains;
823         uint32_t write_domain = args->write_domain;
824         int ret;
825
826         /* Only handle setting domains to types used by the CPU. */
827         if (write_domain & I915_GEM_GPU_DOMAINS)
828                 return -EINVAL;
829
830         if (read_domains & I915_GEM_GPU_DOMAINS)
831                 return -EINVAL;
832
833         /* Having something in the write domain implies it's in the read
834          * domain, and only that read domain.  Enforce that in the request.
835          */
836         if (write_domain != 0 && read_domains != write_domain)
837                 return -EINVAL;
838
839         ret = i915_mutex_lock_interruptible(dev);
840         if (ret)
841                 return ret;
842
843         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
844         if (&obj->base == NULL) {
845                 ret = -ENOENT;
846                 goto unlock;
847         }
848
849         /* Try to flush the object off the GPU without holding the lock.
850          * We will repeat the flush holding the lock in the normal manner
851          * to catch cases where we are gazumped.
852          */
853         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
854         if (ret)
855                 goto unref;
856
857         if (read_domains & I915_GEM_DOMAIN_GTT) {
858                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
859
860                 /* Silently promote "you're not bound, there was nothing to do"
861                  * to success, since the client was just asking us to
862                  * make sure everything was done.
863                  */
864                 if (ret == -EINVAL)
865                         ret = 0;
866         } else {
867                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
868         }
869
870 unref:
871         drm_gem_object_unreference(&obj->base);
872 unlock:
873         DRM_UNLOCK(dev);
874         return ret;
875 }
876
877 /**
878  * Called when user space has done writes to this buffer
879  */
880 int
881 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
882                          struct drm_file *file)
883 {
884         struct drm_i915_gem_sw_finish *args = data;
885         struct drm_i915_gem_object *obj;
886         int ret = 0;
887
888         ret = i915_mutex_lock_interruptible(dev);
889         if (ret)
890                 return ret;
891         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
892         if (&obj->base == NULL) {
893                 ret = -ENOENT;
894                 goto unlock;
895         }
896
897         /* Pinned buffers may be scanout, so flush the cache */
898         if (obj->pin_count)
899                 i915_gem_object_flush_cpu_write_domain(obj);
900
901         drm_gem_object_unreference(&obj->base);
902 unlock:
903         DRM_UNLOCK(dev);
904         return ret;
905 }
906
907 /**
908  * Maps the contents of an object, returning the address it is mapped
909  * into.
910  *
911  * While the mapping holds a reference on the contents of the object, it doesn't
912  * imply a ref on the object itself.
913  */
914 int
915 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
916                     struct drm_file *file)
917 {
918         struct drm_i915_gem_mmap *args = data;
919         struct drm_gem_object *obj;
920         struct proc *p = curproc;
921         vm_map_t map = &p->p_vmspace->vm_map;
922         vm_offset_t addr;
923         vm_size_t size;
924         int error = 0, rv;
925
926         obj = drm_gem_object_lookup(dev, file, args->handle);
927         if (obj == NULL)
928                 return -ENOENT;
929
930         if (args->size == 0)
931                 goto out;
932
933         size = round_page(args->size);
934         if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
935                 error = ENOMEM;
936                 goto out;
937         }
938
939         addr = 0;
940         vm_object_hold(obj->vm_obj);
941         vm_object_reference_locked(obj->vm_obj);
942         vm_object_drop(obj->vm_obj);
943         rv = vm_map_find(map, obj->vm_obj, NULL,
944                          args->offset, &addr, args->size,
945                          PAGE_SIZE, /* align */
946                          TRUE, /* fitit */
947                          VM_MAPTYPE_NORMAL, /* maptype */
948                          VM_PROT_READ | VM_PROT_WRITE, /* prot */
949                          VM_PROT_READ | VM_PROT_WRITE, /* max */
950                          MAP_SHARED /* cow */);
951         if (rv != KERN_SUCCESS) {
952                 vm_object_deallocate(obj->vm_obj);
953                 error = -vm_mmap_to_errno(rv);
954         } else {
955                 args->addr_ptr = (uint64_t)addr;
956         }
957 out:
958         drm_gem_object_unreference(obj);
959         return (error);
960 }
961
962 /**
963  * i915_gem_release_mmap - remove physical page mappings
964  * @obj: obj in question
965  *
966  * Preserve the reservation of the mmapping with the DRM core code, but
967  * relinquish ownership of the pages back to the system.
968  *
969  * It is vital that we remove the page mapping if we have mapped a tiled
970  * object through the GTT and then lose the fence register due to
971  * resource pressure. Similarly, if the object has been moved out of the
972  * aperture, then pages mapped into userspace must be revoked. Removing the
973  * mapping will then trigger a page fault on the next user access, allowing
974  * fixup by i915_gem_fault().
975  */
976 void
977 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
978 {
979         vm_object_t devobj;
980         vm_page_t m;
981         int i, page_count;
982
983         if (!obj->fault_mappable)
984                 return;
985
986         devobj = cdev_pager_lookup(obj);
987         if (devobj != NULL) {
988                 page_count = OFF_TO_IDX(obj->base.size);
989
990                 VM_OBJECT_LOCK(devobj);
991                 for (i = 0; i < page_count; i++) {
992                         m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
993                         if (m == NULL)
994                                 continue;
995                         cdev_pager_free_page(devobj, m);
996                 }
997                 VM_OBJECT_UNLOCK(devobj);
998                 vm_object_deallocate(devobj);
999         }
1000
1001         obj->fault_mappable = false;
1002 }
1003
1004 static uint32_t
1005 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1006 {
1007         uint32_t gtt_size;
1008
1009         if (INTEL_INFO(dev)->gen >= 4 ||
1010             tiling_mode == I915_TILING_NONE)
1011                 return size;
1012
1013         /* Previous chips need a power-of-two fence region when tiling */
1014         if (INTEL_INFO(dev)->gen == 3)
1015                 gtt_size = 1024*1024;
1016         else
1017                 gtt_size = 512*1024;
1018
1019         while (gtt_size < size)
1020                 gtt_size <<= 1;
1021
1022         return gtt_size;
1023 }
1024
1025 /**
1026  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1027  * @obj: object to check
1028  *
1029  * Return the required GTT alignment for an object, taking into account
1030  * potential fence register mapping.
1031  */
1032 static uint32_t
1033 i915_gem_get_gtt_alignment(struct drm_device *dev,
1034                            uint32_t size,
1035                            int tiling_mode)
1036 {
1037
1038         /*
1039          * Minimum alignment is 4k (GTT page size), but might be greater
1040          * if a fence register is needed for the object.
1041          */
1042         if (INTEL_INFO(dev)->gen >= 4 ||
1043             tiling_mode == I915_TILING_NONE)
1044                 return 4096;
1045
1046         /*
1047          * Previous chips need to be aligned to the size of the smallest
1048          * fence register that can contain the object.
1049          */
1050         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1051 }
1052
1053 /**
1054  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1055  *                                       unfenced object
1056  * @dev: the device
1057  * @size: size of the object
1058  * @tiling_mode: tiling mode of the object
1059  *
1060  * Return the required GTT alignment for an object, only taking into account
1061  * unfenced tiled surface requirements.
1062  */
1063 uint32_t
1064 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1065                                     uint32_t size,
1066                                     int tiling_mode)
1067 {
1068         /*
1069          * Minimum alignment is 4k (GTT page size) for sane hw.
1070          */
1071         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1072             tiling_mode == I915_TILING_NONE)
1073                 return 4096;
1074
1075         /* Previous hardware, however, needs to be aligned to a power-of-two
1076          * tile height. The simplest method for determining this is to reuse
1077          * the power-of-two tiled object size.
1078          */
1079         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1080 }
1081
1082 int
1083 i915_gem_mmap_gtt(struct drm_file *file,
1084                   struct drm_device *dev,
1085                   uint32_t handle,
1086                   uint64_t *offset)
1087 {
1088         struct drm_i915_private *dev_priv = dev->dev_private;
1089         struct drm_i915_gem_object *obj;
1090         int ret;
1091
1092         ret = i915_mutex_lock_interruptible(dev);
1093         if (ret)
1094                 return ret;
1095
1096         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1097         if (&obj->base == NULL) {
1098                 ret = -ENOENT;
1099                 goto unlock;
1100         }
1101
1102         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1103                 ret = -E2BIG;
1104                 goto out;
1105         }
1106
1107         if (obj->madv != I915_MADV_WILLNEED) {
1108                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1109                 ret = -EINVAL;
1110                 goto out;
1111         }
1112
1113         ret = drm_gem_create_mmap_offset(&obj->base);
1114         if (ret)
1115                 goto out;
1116
1117         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1118             DRM_GEM_MAPPING_KEY;
1119 out:
1120         drm_gem_object_unreference(&obj->base);
1121 unlock:
1122         DRM_UNLOCK(dev);
1123         return ret;
1124 }
1125
1126 /**
1127  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1128  * @dev: DRM device
1129  * @data: GTT mapping ioctl data
1130  * @file: GEM object info
1131  *
1132  * Simply returns the fake offset to userspace so it can mmap it.
1133  * The mmap call will end up in drm_gem_mmap(), which will set things
1134  * up so we can get faults in the handler above.
1135  *
1136  * The fault handler will take care of binding the object into the GTT
1137  * (since it may have been evicted to make room for something), allocating
1138  * a fence register, and mapping the appropriate aperture address into
1139  * userspace.
1140  */
1141 int
1142 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1143                         struct drm_file *file)
1144 {
1145         struct drm_i915_gem_mmap_gtt *args = data;
1146
1147         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1148 }
1149
1150 /* Immediately discard the backing storage */
1151 static void
1152 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1153 {
1154         vm_object_t vm_obj;
1155
1156         vm_obj = obj->base.vm_obj;
1157         VM_OBJECT_LOCK(vm_obj);
1158         vm_object_page_remove(vm_obj, 0, 0, false);
1159         VM_OBJECT_UNLOCK(vm_obj);
1160         obj->madv = __I915_MADV_PURGED;
1161 }
1162
1163 static inline int
1164 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1165 {
1166         return obj->madv == I915_MADV_DONTNEED;
1167 }
1168
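/*
 * Release the object's backing store: save bit-17 swizzle state for tiled
 * objects, mark pages dirty/referenced as appropriate, unwire them and
 * free the page array.
 */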
1169 static void
1170 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1171 {
1172         vm_page_t m;
1173         int page_count, i;
1174
1175         BUG_ON(obj->madv == __I915_MADV_PURGED);
1176
1177         if (obj->tiling_mode != I915_TILING_NONE)
1178                 i915_gem_object_save_bit_17_swizzle(obj);
1179         if (obj->madv == I915_MADV_DONTNEED)
1180                 obj->dirty = 0;
1181         page_count = obj->base.size / PAGE_SIZE;
1182         VM_OBJECT_LOCK(obj->base.vm_obj);
1183 #if GEM_PARANOID_CHECK_GTT
1184         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1185 #endif
1186         for (i = 0; i < page_count; i++) {
1187                 m = obj->pages[i];
1188                 if (obj->dirty)
1189                         vm_page_dirty(m);
1190                 if (obj->madv == I915_MADV_WILLNEED)
1191                         vm_page_reference(m);
1192                 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1193                 vm_page_unwire(obj->pages[i], 1);
1194                 vm_page_wakeup(obj->pages[i]);
1195         }
1196         VM_OBJECT_UNLOCK(obj->base.vm_obj);
1197         obj->dirty = 0;
1198         drm_free(obj->pages, M_DRM);
1199         obj->pages = NULL;
1200 }
1201
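/*
 * Allocate the page array and wire in every backing page of the object,
 * unwinding and returning -EIO if any page cannot be wired.
 */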
1202 static int
1203 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1204     int flags)
1205 {
1206         struct drm_device *dev;
1207         vm_object_t vm_obj;
1208         vm_page_t m;
1209         int page_count, i, j;
1210
1211         dev = obj->base.dev;
1212         KASSERT(obj->pages == NULL, ("Obj already has pages"));
1213         page_count = obj->base.size / PAGE_SIZE;
1214         obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1215             M_WAITOK);
1216         vm_obj = obj->base.vm_obj;
1217         VM_OBJECT_LOCK(vm_obj);
1218         for (i = 0; i < page_count; i++) {
1219                 if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
1220                         goto failed;
1221         }
1222         VM_OBJECT_UNLOCK(vm_obj);
1223         if (i915_gem_object_needs_bit17_swizzle(obj))
1224                 i915_gem_object_do_bit_17_swizzle(obj);
1225         return (0);
1226
1227 failed:
1228         for (j = 0; j < i; j++) {
1229                 m = obj->pages[j];
1230                 vm_page_busy_wait(m, FALSE, "i915gem");
1231                 vm_page_unwire(m, 0);
1232                 vm_page_wakeup(m);
1233         }
1234         VM_OBJECT_UNLOCK(vm_obj);
1235         drm_free(obj->pages, M_DRM);
1236         obj->pages = NULL;
1237         return (-EIO);
1238 }
1239
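/*
 * Mark the object as active on the given ring: take a reference on first
 * activation, move it to the tails of the active lists, and record the
 * seqno of this access and, for fenced GPU access, of the fence.
 */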
1240 void
1241 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1242                                struct intel_ring_buffer *ring)
1243 {
1244         struct drm_device *dev = obj->base.dev;
1245         struct drm_i915_private *dev_priv = dev->dev_private;
1246         u32 seqno = intel_ring_get_seqno(ring);
1247
1248         BUG_ON(ring == NULL);
1249         obj->ring = ring;
1250
1251         /* Add a reference if we're newly entering the active list. */
1252         if (!obj->active) {
1253                 drm_gem_object_reference(&obj->base);
1254                 obj->active = 1;
1255         }
1256
1257         /* Move from whatever list we were on to the tail of execution. */
1258         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1259         list_move_tail(&obj->ring_list, &ring->active_list);
1260
1261         obj->last_read_seqno = seqno;
1262
1263         if (obj->fenced_gpu_access) {
1264                 obj->last_fenced_seqno = seqno;
1265
1266                 /* Bump MRU to take account of the delayed flush */
1267                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1268                         struct drm_i915_fence_reg *reg;
1269
1270                         reg = &dev_priv->fence_regs[obj->fence_reg];
1271                         list_move_tail(&reg->lru_list,
1272                                        &dev_priv->mm.fence_list);
1273                 }
1274         }
1275 }
1276
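/*
 * Retire an object from the active lists once the GPU has finished with
 * it, clearing its request tracking and dropping the active reference.
 */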
1277 static void
1278 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1279 {
1280         struct drm_device *dev = obj->base.dev;
1281         struct drm_i915_private *dev_priv = dev->dev_private;
1282
1283         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1284         BUG_ON(!obj->active);
1285
1286         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1287
1288         list_del_init(&obj->ring_list);
1289         obj->ring = NULL;
1290
1291         obj->last_read_seqno = 0;
1292         obj->last_write_seqno = 0;
1293         obj->base.write_domain = 0;
1294
1295         obj->last_fenced_seqno = 0;
1296         obj->fenced_gpu_access = false;
1297
1298         obj->active = 0;
1299         drm_gem_object_unreference(&obj->base);
1300
1301         WARN_ON(i915_verify_lists(dev));
1302 }
1303
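/*
 * If any inter-ring sync seqno is still live, idle the GPU and retire all
 * outstanding requests so the per-ring sync_seqno counters can safely be
 * reset to zero.
 */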
1304 static int
1305 i915_gem_handle_seqno_wrap(struct drm_device *dev)
1306 {
1307         struct drm_i915_private *dev_priv = dev->dev_private;
1308         struct intel_ring_buffer *ring;
1309         int ret, i, j;
1310
1311         /* The hardware uses various monotonic 32-bit counters; if we
1312          * detect that they are about to wrap around, we need to idle the GPU
1313          * and reset those counters.
1314          */
1315         ret = 0;
1316         for_each_ring(ring, dev_priv, i) {
1317                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1318                         ret |= ring->sync_seqno[j] != 0;
1319         }
1320         if (ret == 0)
1321                 return ret;
1322
1323         ret = i915_gpu_idle(dev);
1324         if (ret)
1325                 return ret;
1326
1327         i915_gem_retire_requests(dev);
1328         for_each_ring(ring, dev_priv, i) {
1329                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1330                         ring->sync_seqno[j] = 0;
1331         }
1332
1333         return 0;
1334 }
1335
1336 int
1337 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1338 {
1339         struct drm_i915_private *dev_priv = dev->dev_private;
1340
1341         /* reserve 0 for non-seqno */
1342         if (dev_priv->next_seqno == 0) {
1343                 int ret = i915_gem_handle_seqno_wrap(dev);
1344                 if (ret)
1345                         return ret;
1346
1347                 dev_priv->next_seqno = 1;
1348         }
1349
1350         *seqno = dev_priv->next_seqno++;
1351         return 0;
1352 }
1353
1354 int
1355 i915_add_request(struct intel_ring_buffer *ring,
1356                  struct drm_file *file,
1357                  u32 *out_seqno)
1358 {
1359         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1360         struct drm_i915_gem_request *request;
1361         u32 request_ring_position;
1362         int was_empty;
1363         int ret;
1364
1365         /*
1366          * Emit any outstanding flushes - execbuf can fail to emit the flush
1367          * after having emitted the batchbuffer command. Hence we need to fix
1368          * things up similar to emitting the lazy request. The difference here
1369          * is that the flush _must_ happen before the next request, no matter
1370          * what.
1371          */
1372         ret = intel_ring_flush_all_caches(ring);
1373         if (ret)
1374                 return ret;
1375
1376         request = kmalloc(sizeof(*request), M_DRM, M_WAITOK | M_ZERO);
1377         if (request == NULL)
1378                 return -ENOMEM;
1379
1380
1381         /* Record the position of the start of the request so that
1382          * should we detect the updated seqno part-way through the
1383          * GPU processing the request, we never over-estimate the
1384          * position of the head.
1385          */
1386         request_ring_position = intel_ring_get_tail(ring);
1387
1388         ret = ring->add_request(ring);
1389         if (ret) {
1390                 kfree(request, M_DRM);
1391                 return ret;
1392         }
1393
1394         request->seqno = intel_ring_get_seqno(ring);
1395         request->ring = ring;
1396         request->tail = request_ring_position;
1397         request->emitted_jiffies = jiffies;
1398         was_empty = list_empty(&ring->request_list);
1399         list_add_tail(&request->list, &ring->request_list);
1400         request->file_priv = NULL;
1401
1402         if (file) {
1403                 struct drm_i915_file_private *file_priv = file->driver_priv;
1404
1405                 spin_lock(&file_priv->mm.lock);
1406                 request->file_priv = file_priv;
1407                 list_add_tail(&request->client_list,
1408                               &file_priv->mm.request_list);
1409                 spin_unlock(&file_priv->mm.lock);
1410         }
1411
1412         ring->outstanding_lazy_request = 0;
1413
1414         if (!dev_priv->mm.suspended) {
1415                 if (i915_enable_hangcheck) {
1416                         mod_timer(&dev_priv->hangcheck_timer,
1417                                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1418                 }
1419                 if (was_empty) {
1420                         queue_delayed_work(dev_priv->wq,
1421                                            &dev_priv->mm.retire_work,
1422                                            round_jiffies_up_relative(hz));
1423                         intel_mark_busy(dev_priv->dev);
1424                 }
1425         }
1426
1427         if (out_seqno)
1428                 *out_seqno = request->seqno;
1429         return 0;
1430 }
1431
1432 static inline void
1433 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1434 {
1435         struct drm_i915_file_private *file_priv = request->file_priv;
1436
1437         if (!file_priv)
1438                 return;
1439
1440         spin_lock(&file_priv->mm.lock);
1441         if (request->file_priv) {
1442                 list_del(&request->client_list);
1443                 request->file_priv = NULL;
1444         }
1445         spin_unlock(&file_priv->mm.lock);
1446 }
1447
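/*
 * Throw away every outstanding request on a ring after a GPU reset and
 * move all objects still on its active list to the inactive list.
 */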
1448 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1449                                       struct intel_ring_buffer *ring)
1450 {
1451         while (!list_empty(&ring->request_list)) {
1452                 struct drm_i915_gem_request *request;
1453
1454                 request = list_first_entry(&ring->request_list,
1455                                            struct drm_i915_gem_request,
1456                                            list);
1457
1458                 list_del(&request->list);
1459                 i915_gem_request_remove_from_client(request);
1460                 drm_free(request, M_DRM);
1461         }
1462
1463         while (!list_empty(&ring->active_list)) {
1464                 struct drm_i915_gem_object *obj;
1465
1466                 obj = list_first_entry(&ring->active_list,
1467                                        struct drm_i915_gem_object,
1468                                        ring_list);
1469
1470                 i915_gem_object_move_to_inactive(obj);
1471         }
1472 }
1473
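/*
 * Clear every fence register and detach any object still bound to one;
 * called when a GPU reset invalidates the fence state.
 */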
1474 static void i915_gem_reset_fences(struct drm_device *dev)
1475 {
1476         struct drm_i915_private *dev_priv = dev->dev_private;
1477         int i;
1478
1479         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1480                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1481
1482                 i915_gem_write_fence(dev, i, NULL);
1483
1484                 if (reg->obj)
1485                         i915_gem_object_fence_lost(reg->obj);
1486
1487                 reg->pin_count = 0;
1488                 reg->obj = NULL;
1489                 INIT_LIST_HEAD(&reg->lru_list);
1490         }
1491
1492         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1493 }
1494
1495 void i915_gem_reset(struct drm_device *dev)
1496 {
1497         struct drm_i915_private *dev_priv = dev->dev_private;
1498         struct drm_i915_gem_object *obj;
1499         struct intel_ring_buffer *ring;
1500         int i;
1501
1502         for_each_ring(ring, dev_priv, i)
1503                 i915_gem_reset_ring_lists(dev_priv, ring);
1504
1505         /* Move everything out of the GPU domains to ensure we do any
1506          * necessary invalidation upon reuse.
1507          */
1508         list_for_each_entry(obj,
1509                             &dev_priv->mm.inactive_list,
1510                             mm_list)
1511         {
1512                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1513         }
1514
1515         /* The fence registers are invalidated so clear them out */
1516         i915_gem_reset_fences(dev);
1517 }
1518
1519 /**
1520  * This function clears the request list as sequence numbers are passed.
1521  */
1522 void
1523 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1524 {
1525         uint32_t seqno;
1526
1527         if (list_empty(&ring->request_list))
1528                 return;
1529
1530         WARN_ON(i915_verify_lists(ring->dev));
1531
1532         seqno = ring->get_seqno(ring, true);
1533
1534         while (!list_empty(&ring->request_list)) {
1535                 struct drm_i915_gem_request *request;
1536
1537                 request = list_first_entry(&ring->request_list,
1538                                            struct drm_i915_gem_request,
1539                                            list);
1540
1541                 if (!i915_seqno_passed(seqno, request->seqno))
1542                         break;
1543
1544                 /* We know the GPU must have read the request to have
1545                  * sent us the seqno + interrupt, so use the position
1546                  * of the tail of the request to update the last known position
1547                  * of the GPU head.
1548                  */
1549                 ring->last_retired_head = request->tail;
1550
1551                 list_del(&request->list);
1552                 i915_gem_request_remove_from_client(request);
1553                 kfree(request, M_DRM);
1554         }
1555
1556         /* Move any buffers on the active list that are no longer referenced
1557          * by the ringbuffer to the flushing/inactive lists as appropriate.
1558          */
1559         while (!list_empty(&ring->active_list)) {
1560                 struct drm_i915_gem_object *obj;
1561
1562                 obj = list_first_entry(&ring->active_list,
1563                                       struct drm_i915_gem_object,
1564                                       ring_list);
1565
1566                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1567                         break;
1568
1569                 i915_gem_object_move_to_inactive(obj);
1570         }
1571
1572         if (unlikely(ring->trace_irq_seqno &&
1573                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1574                 ring->irq_put(ring);
1575                 ring->trace_irq_seqno = 0;
1576         }
1577
1578 }
1579
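/* Retire completed requests on every ring. */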
1580 void
1581 i915_gem_retire_requests(struct drm_device *dev)
1582 {
1583         drm_i915_private_t *dev_priv = dev->dev_private;
1584         struct intel_ring_buffer *ring;
1585         int i;
1586
1587         for_each_ring(ring, dev_priv, i)
1588                 i915_gem_retire_requests_ring(ring);
1589 }
1590
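/*
 * Deferred work handler that retires completed requests, flushes rings with
 * dirty GPU caches and re-arms itself for as long as the device stays busy.
 */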
1591 static void
1592 i915_gem_retire_work_handler(struct work_struct *work)
1593 {
1594         drm_i915_private_t *dev_priv;
1595         struct drm_device *dev;
1596         struct intel_ring_buffer *ring;
1597         bool idle;
1598         int i;
1599
1600         dev_priv = container_of(work, drm_i915_private_t,
1601                                 mm.retire_work.work);
1602         dev = dev_priv->dev;
1603
1604         /* Come back later if the device is busy... */
1605         if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
1606                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1607                                    round_jiffies_up_relative(hz));
1608                 return;
1609         }
1610
1611         i915_gem_retire_requests(dev);
1612
1613         /* Send a periodic flush down the ring so we don't hold onto GEM
1614          * objects indefinitely.
1615          */
1616         idle = true;
1617         for_each_ring(ring, dev_priv, i) {
1618                 if (ring->gpu_caches_dirty)
1619                         i915_add_request(ring, NULL, NULL);
1620
1621                 idle &= list_empty(&ring->request_list);
1622         }
1623
1624         if (!dev_priv->mm.suspended && !idle)
1625                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1626                                    round_jiffies_up_relative(hz));
1627         if (idle)
1628                 intel_mark_idle(dev);
1629
1630         DRM_UNLOCK(dev);
1631 }

1632 /**
1633  * Ensures that an object will eventually get non-busy by flushing any required
1634  * write domains, emitting any outstanding lazy request and retiring any
1635  * completed requests.
1636  */
1637 static int
1638 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1639 {
1640         int ret;
1641
1642         if (obj->active) {
1643                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1644                 if (ret)
1645                         return ret;
1646
1647                 i915_gem_retire_requests_ring(obj->ring);
1648         }
1649
1650         return 0;
1651 }
1652
1653 /**
1654  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
1655  * @DRM_IOCTL_ARGS: standard ioctl arguments
1656  *
1657  * Returns 0 if successful, else an error is returned with the remaining time in
1658  * the timeout parameter.
1659  *  -ETIME: object is still busy after timeout
1660  *  -ERESTARTSYS: signal interrupted the wait
1661  *  -ENOENT: object doesn't exist
1662  * Also possible, but rare:
1663  *  -EAGAIN: GPU wedged
1664  *  -ENOMEM: out of memory
1665  *  -ENODEV: Internal IRQ fail
1666  *  -E?: The add request failed
1667  *
1668  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
1669  * non-zero timeout parameter the wait ioctl will wait for the given number of
1670  * nanoseconds on an object becoming unbusy. Since the wait itself does so
1671  * without holding struct_mutex the object may become re-busied before this
1672  * function completes. A similar but shorter race condition exists in the busy
1673  * ioctl.
1674  */
1675 int
1676 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1677 {
1678         struct drm_i915_gem_wait *args = data;
1679         struct drm_i915_gem_object *obj;
1680         struct intel_ring_buffer *ring = NULL;
1681         struct timespec timeout_stack, *timeout = NULL;
1682         u32 seqno = 0;
1683         int ret = 0;
1684
1685         if (args->timeout_ns >= 0) {
1686                 timeout_stack = ns_to_timespec(args->timeout_ns);
1687                 timeout = &timeout_stack;
1688         }
1689
1690         ret = i915_mutex_lock_interruptible(dev);
1691         if (ret)
1692                 return ret;
1693
1694         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1695         if (&obj->base == NULL) {
1696                 DRM_UNLOCK(dev);
1697                 return -ENOENT;
1698         }
1699
1700         /* Need to make sure the object gets inactive eventually. */
1701         ret = i915_gem_object_flush_active(obj);
1702         if (ret)
1703                 goto out;
1704
1705         if (obj->active) {
1706                 seqno = obj->last_read_seqno;
1707                 ring = obj->ring;
1708         }
1709
1710         if (seqno == 0)
1711                  goto out;
1712
1713         /* Do this after OLR check to make sure we make forward progress polling
1714          * on this IOCTL with a 0 timeout (like busy ioctl)
1715          */
1716         if (!args->timeout_ns) {
1717                 ret = -ETIMEDOUT;
1718                 goto out;
1719         }
1720
1721         drm_gem_object_unreference(&obj->base);
1722         DRM_UNLOCK(dev);
1723
1724         ret = __wait_seqno(ring, seqno, true, timeout);
1725         if (timeout) {
1726                 WARN_ON(!timespec_valid(timeout));
1727                 args->timeout_ns = timespec_to_ns(timeout);
1728         }
1729         return ret;
1730
1731 out:
1732         drm_gem_object_unreference(&obj->base);
1733         DRM_UNLOCK(dev);
1734         return ret;
1735 }
1736
1737 /**
1738  * i915_gem_object_sync - sync an object to a ring.
1739  *
1740  * @obj: object which may be in use on another ring.
1741  * @to: ring we wish to use the object on. May be NULL.
1742  *
1743  * This code is meant to abstract object synchronization with the GPU.
1744  * Calling with NULL implies synchronizing the object with the CPU
1745  * rather than a particular GPU ring.
1746  *
1747  * Returns 0 if successful, else propagates up the lower layer error.
1748  */
1749 int
1750 i915_gem_object_sync(struct drm_i915_gem_object *obj,
1751                      struct intel_ring_buffer *to)
1752 {
1753         struct intel_ring_buffer *from = obj->ring;
1754         u32 seqno;
1755         int ret, idx;
1756
1757         if (from == NULL || to == from)
1758                 return 0;
1759
1760         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
1761                 return i915_gem_object_wait_rendering(obj, false);
1762
1763         idx = intel_ring_sync_index(from, to);
1764
1765         seqno = obj->last_read_seqno;
1766         if (seqno <= from->sync_seqno[idx])
1767                 return 0;
1768
1769         ret = i915_gem_check_olr(obj->ring, seqno);
1770         if (ret)
1771                 return ret;
1772
1773         ret = to->sync_to(to, from, seqno);
1774         if (!ret)
1775                 /* We use last_read_seqno because sync_to()
1776                  * might have just caused seqno wrap under
1777                  * the radar.
1778                  */
1779                 from->sync_seqno[idx] = obj->last_read_seqno;
1780
1781         return ret;
1782 }
1783
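/*
 * Quiesce GTT access to an object: order outstanding accesses, drop any
 * userspace mmaps so the next access faults for domain tracking, and clear
 * the GTT read/write domains.
 */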
1784 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1785 {
1786         u32 old_write_domain, old_read_domains;
1787
1788         /* Act as a barrier for all accesses through the GTT */
1789         cpu_mfence();
1790
1791         /* Force a pagefault for domain tracking on next user access */
1792         i915_gem_release_mmap(obj);
1793
1794         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1795                 return;
1796
1797         old_read_domains = obj->base.read_domains;
1798         old_write_domain = obj->base.write_domain;
1799
1800         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1801         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1802
1803 }
1804
1805 /**
1806  * Unbinds an object from the GTT aperture.
1807  */
1808 int
1809 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
1810 {
1811         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1812         int ret = 0;
1813
1814         if (obj->gtt_space == NULL)
1815                 return 0;
1816
1817         if (obj->pin_count)
1818                 return -EBUSY;
1819
1820         ret = i915_gem_object_finish_gpu(obj);
1821         if (ret)
1822                 return ret;
1823         /* Continue on if we fail due to EIO, the GPU is hung so we
1824          * should be safe and we need to cleanup or else we might
1825          * cause memory corruption through use-after-free.
1826          */
1827
1828         i915_gem_object_finish_gtt(obj);
1829
1830         /* Move the object to the CPU domain to ensure that
1831          * any possible CPU writes while it's not in the GTT
1832          * are flushed when we go to remap it.
1833          */
1834         if (ret == 0)
1835                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1836         if (ret == -ERESTART || ret == -EINTR)
1837                 return ret;
1838         if (ret) {
1839                 /* In the event of a disaster, abandon all caches and
1840                  * hope for the best.
1841                  */
1842                 i915_gem_clflush_object(obj);
1843                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1844         }
1845
1846         /* release the fence reg _after_ flushing */
1847         ret = i915_gem_object_put_fence(obj);
1848         if (ret)
1849                 return ret;
1850
1851         if (obj->has_global_gtt_mapping)
1852                 i915_gem_gtt_unbind_object(obj);
1853         if (obj->has_aliasing_ppgtt_mapping) {
1854                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1855                 obj->has_aliasing_ppgtt_mapping = 0;
1856         }
1857         i915_gem_gtt_finish_object(obj);
1858
1859         i915_gem_object_put_pages_gtt(obj);
1860
1861         list_del_init(&obj->gtt_list);
1862         list_del_init(&obj->mm_list);
1863         /* Avoid an unnecessary call to unbind on rebind. */
1864         obj->map_and_fenceable = true;
1865
1866         drm_mm_put_block(obj->gtt_space);
1867         obj->gtt_space = NULL;
1868         obj->gtt_offset = 0;
1869
1870         if (i915_gem_object_is_purgeable(obj))
1871                 i915_gem_object_truncate(obj);
1872
1873         return ret;
1874 }
1875
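/*
 * Switch every ring back to the default context and wait for its requests to
 * drain, leaving the GPU idle.
 */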
1876 int i915_gpu_idle(struct drm_device *dev)
1877 {
1878         drm_i915_private_t *dev_priv = dev->dev_private;
1879         struct intel_ring_buffer *ring;
1880         int ret, i;
1881
1882         /* Flush everything onto the inactive list. */
1883         for_each_ring(ring, dev_priv, i) {
1884                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
1885                 if (ret)
1886                         return ret;
1887
1888                 ret = intel_ring_idle(ring);
1889                 if (ret)
1890                         return ret;
1891         }
1892
1893         return 0;
1894 }
1895
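/*
 * The per-generation fence register writers below encode an object's GTT
 * range, pitch and tiling mode into a single fence register, or clear the
 * register when called with a NULL object.
 */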
1896 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
1897                                         struct drm_i915_gem_object *obj)
1898 {
1899         drm_i915_private_t *dev_priv = dev->dev_private;
1900         uint64_t val;
1901
1902         if (obj) {
1903                 u32 size = obj->gtt_space->size;
1904
1905                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1906                                  0xfffff000) << 32;
1907                 val |= obj->gtt_offset & 0xfffff000;
1908                 val |= (uint64_t)((obj->stride / 128) - 1) <<
1909                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
1910
1911                 if (obj->tiling_mode == I915_TILING_Y)
1912                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1913                 val |= I965_FENCE_REG_VALID;
1914         } else
1915                 val = 0;
1916
1917         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
1918         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
1919 }
1920
1921 static void i965_write_fence_reg(struct drm_device *dev, int reg,
1922                                  struct drm_i915_gem_object *obj)
1923 {
1924         drm_i915_private_t *dev_priv = dev->dev_private;
1925         uint64_t val;
1926
1927         if (obj) {
1928                 u32 size = obj->gtt_space->size;
1929
1930                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1931                                  0xfffff000) << 32;
1932                 val |= obj->gtt_offset & 0xfffff000;
1933                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1934                 if (obj->tiling_mode == I915_TILING_Y)
1935                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1936                 val |= I965_FENCE_REG_VALID;
1937         } else
1938                 val = 0;
1939
1940         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
1941         POSTING_READ(FENCE_REG_965_0 + reg * 8);
1942 }
1943
1944 static void i915_write_fence_reg(struct drm_device *dev, int reg,
1945                                  struct drm_i915_gem_object *obj)
1946 {
1947         drm_i915_private_t *dev_priv = dev->dev_private;
1948         u32 val;
1949
1950         if (obj) {
1951                 u32 size = obj->gtt_space->size;
1952                 int pitch_val;
1953                 int tile_width;
1954
1955                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
1956                      (size & -size) != size ||
1957                      (obj->gtt_offset & (size - 1)),
1958                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
1959                      obj->gtt_offset, obj->map_and_fenceable, size);
1960
1961                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1962                         tile_width = 128;
1963                 else
1964                         tile_width = 512;
1965
1966                 /* Note: pitch better be a power of two tile widths */
1967                 pitch_val = obj->stride / tile_width;
1968                 pitch_val = ffs(pitch_val) - 1;
1969
1970                 val = obj->gtt_offset;
1971                 if (obj->tiling_mode == I915_TILING_Y)
1972                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1973                 val |= I915_FENCE_SIZE_BITS(size);
1974                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1975                 val |= I830_FENCE_REG_VALID;
1976         } else
1977                 val = 0;
1978
1979         if (reg < 8)
1980                 reg = FENCE_REG_830_0 + reg * 4;
1981         else
1982                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
1983
1984         I915_WRITE(reg, val);
1985         POSTING_READ(reg);
1986 }
1987
1988 static void i830_write_fence_reg(struct drm_device *dev, int reg,
1989                                 struct drm_i915_gem_object *obj)
1990 {
1991         drm_i915_private_t *dev_priv = dev->dev_private;
1992         uint32_t val;
1993
1994         if (obj) {
1995                 u32 size = obj->gtt_space->size;
1996                 uint32_t pitch_val;
1997
1998                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
1999                      (size & -size) != size ||
2000                      (obj->gtt_offset & (size - 1)),
2001                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2002                      obj->gtt_offset, size);
2003
2004                 pitch_val = obj->stride / 128;
2005                 pitch_val = ffs(pitch_val) - 1;
2006
2007                 val = obj->gtt_offset;
2008                 if (obj->tiling_mode == I915_TILING_Y)
2009                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2010                 val |= I830_FENCE_SIZE_BITS(size);
2011                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2012                 val |= I830_FENCE_REG_VALID;
2013         } else
2014                 val = 0;
2015
2016         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2017         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2018 }
2019
2020 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2021                                  struct drm_i915_gem_object *obj)
2022 {
2023         switch (INTEL_INFO(dev)->gen) {
2024         case 7:
2025         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2026         case 5:
2027         case 4: i965_write_fence_reg(dev, reg, obj); break;
2028         case 3: i915_write_fence_reg(dev, reg, obj); break;
2029         case 2: i830_write_fence_reg(dev, reg, obj); break;
2030         default: break;
2031         }
2032 }
2033
2034 static inline int fence_number(struct drm_i915_private *dev_priv,
2035                                struct drm_i915_fence_reg *fence)
2036 {
2037         return fence - dev_priv->fence_regs;
2038 }
2039
2040 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2041                                          struct drm_i915_fence_reg *fence,
2042                                          bool enable)
2043 {
2044         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2045         int reg = fence_number(dev_priv, fence);
2046
2047         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2048
2049         if (enable) {
2050                 obj->fence_reg = reg;
2051                 fence->obj = obj;
2052                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2053         } else {
2054                 obj->fence_reg = I915_FENCE_REG_NONE;
2055                 fence->obj = NULL;
2056                 list_del_init(&fence->lru_list);
2057         }
2058 }
2059
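/*
 * Wait for any GPU access still covered by the object's fence and order CPU
 * accesses so that the fence register can be rewritten or released safely.
 */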
2060 static int
2061 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2062 {
2063         if (obj->last_fenced_seqno) {
2064                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2065                 if (ret)
2066                         return ret;
2067
2068                 obj->last_fenced_seqno = 0;
2069         }
2070
2071         /* Ensure that all CPU reads are completed before installing a fence
2072          * and all writes before removing the fence.
2073          */
2074         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2075                 cpu_mfence();
2076
2077         obj->fenced_gpu_access = false;
2078         return 0;
2079 }
2080
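/* Release the fence register backing an object, if it currently holds one. */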
2081 int
2082 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2083 {
2084         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2085         int ret;
2086
2087         ret = i915_gem_object_flush_fence(obj);
2088         if (ret)
2089                 return ret;
2090
2091         if (obj->fence_reg == I915_FENCE_REG_NONE)
2092                 return 0;
2093
2094         i915_gem_object_update_fence(obj,
2095                                      &dev_priv->fence_regs[obj->fence_reg],
2096                                      false);
2097         i915_gem_object_fence_lost(obj);
2098
2099         return 0;
2100 }
2101
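/*
 * Pick a fence register for reuse: prefer a completely free register,
 * otherwise return the least recently used one that is not pinned, or NULL
 * if every register is pinned.
 */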
2102 static struct drm_i915_fence_reg *
2103 i915_find_fence_reg(struct drm_device *dev)
2104 {
2105         struct drm_i915_private *dev_priv = dev->dev_private;
2106         struct drm_i915_fence_reg *reg, *avail;
2107         int i;
2108
2109         /* First try to find a free reg */
2110         avail = NULL;
2111         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2112                 reg = &dev_priv->fence_regs[i];
2113                 if (!reg->obj)
2114                         return reg;
2115
2116                 if (!reg->pin_count)
2117                         avail = reg;
2118         }
2119
2120         if (avail == NULL)
2121                 return NULL;
2122
2123         /* None available, try to steal one or wait for a user to finish */
2124         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2125                 if (reg->pin_count)
2126                         continue;
2127
2128                 return reg;
2129         }
2130
2131         return NULL;
2132 }
2133
2134 /**
2135  * i915_gem_object_get_fence - set up fencing for an object
2136  * @obj: object to map through a fence reg
2137  *
2138  * When mapping objects through the GTT, userspace wants to be able to write
2139  * to them without having to worry about swizzling if the object is tiled.
2140  * This function walks the fence regs looking for a free one for @obj,
2141  * stealing one if it can't find any.
2142  *
2143  * It then sets up the reg based on the object's properties: address, pitch
2144  * and tiling format.
2145  *
2146  * For an untiled surface, this removes any existing fence.
2147  */
2148 int
2149 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2150 {
2151         struct drm_device *dev = obj->base.dev;
2152         struct drm_i915_private *dev_priv = dev->dev_private;
2153         bool enable = obj->tiling_mode != I915_TILING_NONE;
2154         struct drm_i915_fence_reg *reg;
2155         int ret;
2156
2157         /* Have we updated the tiling parameters upon the object and so
2158          * will need to serialise the write to the associated fence register?
2159          */
2160         if (obj->fence_dirty) {
2161                 ret = i915_gem_object_flush_fence(obj);
2162                 if (ret)
2163                         return ret;
2164         }
2165
2166         /* Just update our place in the LRU if our fence is getting reused. */
2167         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2168                 reg = &dev_priv->fence_regs[obj->fence_reg];
2169                 if (!obj->fence_dirty) {
2170                         list_move_tail(&reg->lru_list,
2171                                        &dev_priv->mm.fence_list);
2172                         return 0;
2173                 }
2174         } else if (enable) {
2175                 reg = i915_find_fence_reg(dev);
2176                 if (reg == NULL)
2177                         return -EDEADLK;
2178
2179                 if (reg->obj) {
2180                         struct drm_i915_gem_object *old = reg->obj;
2181
2182                         ret = i915_gem_object_flush_fence(old);
2183                         if (ret)
2184                                 return ret;
2185
2186                         i915_gem_object_fence_lost(old);
2187                 }
2188         } else
2189                 return 0;
2190
2191         i915_gem_object_update_fence(obj, reg, enable);
2192         obj->fence_dirty = false;
2193
2194         return 0;
2195 }
2196
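/*
 * Decide whether an object with the given cache level may occupy @gtt_space:
 * on non-LLC hardware, neighbours with different caching must be separated
 * by a hole so the prefetcher does not cross into the wrong memory type.
 */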
2197 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2198                                      struct drm_mm_node *gtt_space,
2199                                      unsigned long cache_level)
2200 {
2201         struct drm_mm_node *other;
2202
2203         /* On non-LLC machines we have to be careful when putting differing
2204          * types of snoopable memory together to avoid the prefetcher
2205          * crossing memory domains and dying.
2206          */
2207         if (HAS_LLC(dev))
2208                 return true;
2209
2210         if (gtt_space == NULL)
2211                 return true;
2212
2213         if (list_empty(&gtt_space->node_list))
2214                 return true;
2215
2216         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2217         if (other->allocated && !other->hole_follows && other->color != cache_level)
2218                 return false;
2219
2220         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2221         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2222                 return false;
2223
2224         return true;
2225 }
2226
2227 static void i915_gem_verify_gtt(struct drm_device *dev)
2228 {
2229 #if WATCH_GTT
2230         struct drm_i915_private *dev_priv = dev->dev_private;
2231         struct drm_i915_gem_object *obj;
2232         int err = 0;
2233
2234         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2235                 if (obj->gtt_space == NULL) {
2236                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
2237                         err++;
2238                         continue;
2239                 }
2240
2241                 if (obj->cache_level != obj->gtt_space->color) {
2242                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2243                                obj->gtt_space->start,
2244                                obj->gtt_space->start + obj->gtt_space->size,
2245                                obj->cache_level,
2246                                obj->gtt_space->color);
2247                         err++;
2248                         continue;
2249                 }
2250
2251                 if (!i915_gem_valid_gtt_space(dev,
2252                                               obj->gtt_space,
2253                                               obj->cache_level)) {
2254                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2255                                obj->gtt_space->start,
2256                                obj->gtt_space->start + obj->gtt_space->size,
2257                                obj->cache_level);
2258                         err++;
2259                         continue;
2260                 }
2261         }
2262
2263         WARN_ON(err);
2264 #endif
2265 }
2266
2267 /**
2268  * Finds free space in the GTT aperture and binds the object there.
2269  */
2270 static int
2271 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2272                             unsigned alignment,
2273                             bool map_and_fenceable,
2274                             bool nonblocking)
2275 {
2276         struct drm_device *dev = obj->base.dev;
2277         drm_i915_private_t *dev_priv = dev->dev_private;
2278         struct drm_mm_node *free_space;
2279         uint32_t size, fence_size, fence_alignment, unfenced_alignment;
2280         bool mappable, fenceable;
2281         int ret;
2282
2283         if (obj->madv != I915_MADV_WILLNEED) {
2284                 DRM_ERROR("Attempting to bind a purgeable object\n");
2285                 return -EINVAL;
2286         }
2287
2288         fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
2289             obj->tiling_mode);
2290         fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
2291             obj->tiling_mode);
2292         unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
2293             obj->base.size, obj->tiling_mode);
2294         if (alignment == 0)
2295                 alignment = map_and_fenceable ? fence_alignment :
2296                     unfenced_alignment;
2297         if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
2298                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2299                 return -EINVAL;
2300         }
2301
2302         size = map_and_fenceable ? fence_size : obj->base.size;
2303
2304         /* If the object is bigger than the entire aperture, reject it early
2305          * before evicting everything in a vain attempt to find space.
2306          */
2307         if (obj->base.size > (map_and_fenceable ?
2308             dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2309                 DRM_ERROR(
2310                     "Attempting to bind an object larger than the aperture\n");
2311                 return -E2BIG;
2312         }
2313
2314  search_free:
2315         if (map_and_fenceable)
2316                 free_space =
2317                         drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2318                                                           size, alignment, obj->cache_level,
2319                                                           0, dev_priv->mm.gtt_mappable_end,
2320                                                           false);
2321         else
2322                 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2323                                                       size, alignment, obj->cache_level,
2324                                                       false);
2325
2326         if (free_space != NULL) {
2327                 if (map_and_fenceable)
2328                         obj->gtt_space =
2329                                 drm_mm_get_block_range_generic(free_space,
2330                                                                size, alignment, obj->cache_level,
2331                                                                0, dev_priv->mm.gtt_mappable_end,
2332                                                                false);
2333                 else
2334                         obj->gtt_space =
2335                                 drm_mm_get_block_generic(free_space,
2336                                                          size, alignment, obj->cache_level,
2337                                                          false);
2338         }
2339         if (obj->gtt_space == NULL) {
2340                 ret = i915_gem_evict_something(dev, size, alignment,
2341                                                obj->cache_level,
2342                                                map_and_fenceable,
2343                                                nonblocking);
2344                 if (ret)
2345                         return ret;
2346
2347                 goto search_free;
2348         }
2349
2350         /*
2351          * NOTE: i915_gem_object_get_pages_gtt() cannot
2352          *       return ENOMEM, since we used VM_ALLOC_RETRY.
2353          */
2354         ret = i915_gem_object_get_pages_gtt(obj, 0);
2355         if (ret != 0) {
2356                 drm_mm_put_block(obj->gtt_space);
2357                 obj->gtt_space = NULL;
2358                 return ret;
2359         }
2360
2361         i915_gem_gtt_bind_object(obj, obj->cache_level);
2362         if (ret != 0) {
2363                 i915_gem_object_put_pages_gtt(obj);
2364                 drm_mm_put_block(obj->gtt_space);
2365                 obj->gtt_space = NULL;
2366                 if (i915_gem_evict_everything(dev))
2367                         return (ret);
2368                 goto search_free;
2369         }
2370
2371         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2372         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2373
2374         obj->gtt_offset = obj->gtt_space->start;
2375
2376         fenceable =
2377                 obj->gtt_space->size == fence_size &&
2378                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2379
2380         mappable =
2381                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2382         obj->map_and_fenceable = mappable && fenceable;
2383
2384         i915_gem_verify_gtt(dev);
2385         return 0;
2386 }
2387
2388 void
2389 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2390 {
2391
2392         /* If we don't have a page list set up, then we're not pinned
2393          * to GPU, and we can ignore the cache flush because it'll happen
2394          * again at bind time.
2395          */
2396         if (obj->pages == NULL)
2397                 return;
2398
2399         /* If the GPU is snooping the contents of the CPU cache,
2400          * we do not need to manually clear the CPU cache lines.  However,
2401          * the caches are only snooped when the render cache is
2402          * flushed/invalidated.  As we always have to emit invalidations
2403          * and flushes when moving into and out of the RENDER domain, correct
2404          * snooping behaviour occurs naturally as the result of our domain
2405          * tracking.
2406          */
2407         if (obj->cache_level != I915_CACHE_NONE)
2408                 return;
2409
2410         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2411 }
2412
2413 /** Flushes the GTT write domain for the object if it's dirty. */
2414 static void
2415 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2416 {
2417         uint32_t old_write_domain;
2418
2419         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2420                 return;
2421
2422         /* No actual flushing is required for the GTT write domain.  Writes
2423          * to it immediately go to main memory as far as we know, so there's
2424          * no chipset flush.  It also doesn't land in render cache.
2425          *
2426          * However, we do have to enforce the order so that all writes through
2427          * the GTT land before any writes to the device, such as updates to
2428          * the GATT itself.
2429          */
2430         cpu_sfence();
2431
2432         old_write_domain = obj->base.write_domain;
2433         obj->base.write_domain = 0;
2434 }
2435
2436 /** Flushes the CPU write domain for the object if it's dirty. */
2437 static void
2438 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2439 {
2440         uint32_t old_write_domain;
2441
2442         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2443                 return;
2444
2445         i915_gem_clflush_object(obj);
2446         intel_gtt_chipset_flush();
2447         old_write_domain = obj->base.write_domain;
2448         obj->base.write_domain = 0;
2449 }
2450
2451 /**
2452  * Moves a single object to the GTT read, and possibly write domain.
2453  *
2454  * This function returns when the move is complete, including waiting on
2455  * flushes to occur.
2456  */
2457 int
2458 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2459 {
2460         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2461         uint32_t old_write_domain, old_read_domains;
2462         int ret;
2463
2464         /* Not valid to be called on unbound objects. */
2465         if (obj->gtt_space == NULL)
2466                 return -EINVAL;
2467
2468         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2469                 return 0;
2470
2471         ret = i915_gem_object_wait_rendering(obj, !write);
2472         if (ret)
2473                 return ret;
2474
2475         i915_gem_object_flush_cpu_write_domain(obj);
2476
2477         old_write_domain = obj->base.write_domain;
2478         old_read_domains = obj->base.read_domains;
2479
2480         /* It should now be out of any other write domains, and we can update
2481          * the domain values for our changes.
2482          */
2483         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2484         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2485         if (write) {
2486                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2487                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2488                 obj->dirty = 1;
2489         }
2490
2491         /* And bump the LRU for this access */
2492         if (i915_gem_object_is_inactive(obj))
2493                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2494
2495         return 0;
2496 }
2497
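/*
 * Change an object's caching level.  Bound objects are flushed and rebound
 * with the new PTE attributes; switching to uncached also resets the domain
 * tracking to the CPU domain.
 */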
2498 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2499                                     enum i915_cache_level cache_level)
2500 {
2501         struct drm_device *dev = obj->base.dev;
2502         drm_i915_private_t *dev_priv = dev->dev_private;
2503         int ret;
2504
2505         if (obj->cache_level == cache_level)
2506                 return 0;
2507
2508         if (obj->pin_count) {
2509                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
2510                 return -EBUSY;
2511         }
2512
2513         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2514                 ret = i915_gem_object_unbind(obj);
2515                 if (ret)
2516                         return ret;
2517         }
2518
2519         if (obj->gtt_space) {
2520                 ret = i915_gem_object_finish_gpu(obj);
2521                 if (ret)
2522                         return ret;
2523
2524                 i915_gem_object_finish_gtt(obj);
2525
2526                 /* Before SandyBridge, you could not use tiling or fence
2527                  * registers with snooped memory, so relinquish any fences
2528                  * currently pointing to our region in the aperture.
2529                  */
2530                 if (INTEL_INFO(dev)->gen < 6) {
2531                         ret = i915_gem_object_put_fence(obj);
2532                         if (ret)
2533                                 return ret;
2534                 }
2535
2536                 if (obj->has_global_gtt_mapping)
2537                         i915_gem_gtt_bind_object(obj, cache_level);
2538                 if (obj->has_aliasing_ppgtt_mapping)
2539                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2540                                                obj, cache_level);
2541
2542                 obj->gtt_space->color = cache_level;
2543         }
2544
2545         if (cache_level == I915_CACHE_NONE) {
2546                 u32 old_read_domains, old_write_domain;
2547
2548                 /* If we're coming from LLC cached, then we haven't
2549                  * actually been tracking whether the data is in the
2550                  * CPU cache or not, since we only allow one bit set
2551                  * in obj->write_domain and have been skipping the clflushes.
2552                  * Just set it to the CPU cache for now.
2553                  */
2554                 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
2555                     ("obj %p in CPU write domain", obj));
2556                 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
2557                     ("obj %p in CPU read domain", obj));
2558
2559                 old_read_domains = obj->base.read_domains;
2560                 old_write_domain = obj->base.write_domain;
2561
2562                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2563                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2564
2565         }
2566
2567         obj->cache_level = cache_level;
2568         i915_gem_verify_gtt(dev);
2569         return 0;
2570 }
2571
2572 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2573                                struct drm_file *file)
2574 {
2575         struct drm_i915_gem_caching *args = data;
2576         struct drm_i915_gem_object *obj;
2577         int ret;
2578
2579         ret = i915_mutex_lock_interruptible(dev);
2580         if (ret)
2581                 return ret;
2582
2583         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2584         if (&obj->base == NULL) {
2585                 ret = -ENOENT;
2586                 goto unlock;
2587         }
2588
2589         args->caching = obj->cache_level != I915_CACHE_NONE;
2590
2591         drm_gem_object_unreference(&obj->base);
2592 unlock:
2593         DRM_UNLOCK(dev);
2594         return ret;
2595 }
2596
2597 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2598                                struct drm_file *file)
2599 {
2600         struct drm_i915_gem_caching *args = data;
2601         struct drm_i915_gem_object *obj;
2602         enum i915_cache_level level;
2603         int ret;
2604
2605         switch (args->caching) {
2606         case I915_CACHING_NONE:
2607                 level = I915_CACHE_NONE;
2608                 break;
2609         case I915_CACHING_CACHED:
2610                 level = I915_CACHE_LLC;
2611                 break;
2612         default:
2613                 return -EINVAL;
2614         }
2615
2616         ret = i915_mutex_lock_interruptible(dev);
2617         if (ret)
2618                 return ret;
2619
2620         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2621         if (&obj->base == NULL) {
2622                 ret = -ENOENT;
2623                 goto unlock;
2624         }
2625
2626         ret = i915_gem_object_set_cache_level(obj, level);
2627
2628         drm_gem_object_unreference(&obj->base);
2629 unlock:
2630         DRM_UNLOCK(dev);
2631         return ret;
2632 }
2633
2634 /*
2635  * Prepare buffer for display plane (scanout, cursors, etc).
2636  * Can be called from an uninterruptible phase (modesetting) and allows
2637  * any flushes to be pipelined (for pageflips).
2638  */
2639 int
2640 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2641                                      u32 alignment,
2642                                      struct intel_ring_buffer *pipelined)
2643 {
2644         u32 old_read_domains, old_write_domain;
2645         int ret;
2646
2647         if (pipelined != obj->ring) {
2648                 ret = i915_gem_object_sync(obj, pipelined);
2649                 if (ret)
2650                         return ret;
2651         }
2652
2653         /* The display engine is not coherent with the LLC cache on gen6.  As
2654          * a result, we make sure that the pinning that is about to occur is
2655          * done with uncached PTEs. This is lowest common denominator for all
2656          * chipsets.
2657          *
2658          * However for gen6+, we could do better by using the GFDT bit instead
2659          * of uncaching, which would allow us to flush all the LLC-cached data
2660          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2661          */
2662         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2663         if (ret)
2664                 return ret;
2665
2666         /* As the user may map the buffer once pinned in the display plane
2667          * (e.g. libkms for the bootup splash), we have to ensure that we
2668          * always use map_and_fenceable for all scanout buffers.
2669          */
2670         ret = i915_gem_object_pin(obj, alignment, true, false);
2671         if (ret)
2672                 return ret;
2673
2674         i915_gem_object_flush_cpu_write_domain(obj);
2675
2676         old_write_domain = obj->base.write_domain;
2677         old_read_domains = obj->base.read_domains;
2678
2679         /* It should now be out of any other write domains, and we can update
2680          * the domain values for our changes.
2681          */
2682         obj->base.write_domain = 0;
2683         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2684
2685         return 0;
2686 }
2687
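/*
 * Wait for any outstanding rendering to the object and drop its GPU read
 * domains so that it can safely be unbound or repurposed.
 */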
2688 int
2689 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2690 {
2691         int ret;
2692
2693         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2694                 return 0;
2695
2696         ret = i915_gem_object_wait_rendering(obj, false);
2697         if (ret)
2698                 return ret;
2699
2700         /* Ensure that we invalidate the GPU's caches and TLBs. */
2701         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2702         return 0;
2703 }
2704
2705 /**
2706  * Moves a single object to the CPU read, and possibly write domain.
2707  *
2708  * This function returns when the move is complete, including waiting on
2709  * flushes to occur.
2710  */
2711 int
2712 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2713 {
2714         uint32_t old_write_domain, old_read_domains;
2715         int ret;
2716
2717         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2718                 return 0;
2719
2720         ret = i915_gem_object_wait_rendering(obj, !write);
2721         if (ret)
2722                 return ret;
2723
2724         i915_gem_object_flush_gtt_write_domain(obj);
2725
2726         old_write_domain = obj->base.write_domain;
2727         old_read_domains = obj->base.read_domains;
2728
2729         /* Flush the CPU cache if it's still invalid. */
2730         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2731                 i915_gem_clflush_object(obj);
2732
2733                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2734         }
2735
2736         /* It should now be out of any other write domains, and we can update
2737          * the domain values for our changes.
2738          */
2739         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2740
2741         /* If we're writing through the CPU, then the GPU read domains will
2742          * need to be invalidated at next use.
2743          */
2744         if (write) {
2745                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2746                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2747         }
2748
2749         return 0;
2750 }
2751
2752 /* Throttle our rendering by waiting until the ring has completed our requests
2753  * emitted over 20 msec ago.
2754  *
2755  * Note that if we were to use the current jiffies each time around the loop,
2756  * we wouldn't escape the function with any frames outstanding if the time to
2757  * render a frame was over 20ms.
2758  *
2759  * This should get us reasonable parallelism between CPU and GPU but also
2760  * relatively low latency when blocking on a particular request to finish.
2761  */
2762 static int
2763 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2764 {
2765         struct drm_i915_private *dev_priv = dev->dev_private;
2766         struct drm_i915_file_private *file_priv = file->driver_priv;
2767         unsigned long recent_enough = ticks - (20 * hz / 1000);
2768         struct drm_i915_gem_request *request;
2769         struct intel_ring_buffer *ring = NULL;
2770         u32 seqno = 0;
2771         int ret;
2772
2773         if (atomic_read(&dev_priv->mm.wedged))
2774                 return -EIO;
2775
2776         spin_lock(&file_priv->mm.lock);
2777         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
2778                 if (time_after_eq(request->emitted_jiffies, recent_enough))
2779                         break;
2780
2781                 ring = request->ring;
2782                 seqno = request->seqno;
2783         }
2784         spin_unlock(&file_priv->mm.lock);
2785
2786         if (seqno == 0)
2787                 return 0;
2788
2789         ret = __wait_seqno(ring, seqno, true, NULL);
2790
2791         if (ret == 0)
2792                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
2793
2794         return ret;
2795 }
2796
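/*
 * Pin an object into the GTT, binding it first if necessary and rebinding it
 * when the current placement does not satisfy the requested alignment or
 * map-and-fenceable constraints.
 */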
2797 int
2798 i915_gem_object_pin(struct drm_i915_gem_object *obj,
2799                     uint32_t alignment,
2800                     bool map_and_fenceable,
2801                     bool nonblocking)
2802 {
2803         int ret;
2804
2805         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
2806                 return -EBUSY;
2807
2808         if (obj->gtt_space != NULL) {
2809                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
2810                     (map_and_fenceable && !obj->map_and_fenceable)) {
2811                         WARN(obj->pin_count,
2812                              "bo is already pinned with incorrect alignment:"
2813                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
2814                              " obj->map_and_fenceable=%d\n",
2815                              obj->gtt_offset, alignment,
2816                              map_and_fenceable,
2817                              obj->map_and_fenceable);
2818                         ret = i915_gem_object_unbind(obj);
2819                         if (ret)
2820                                 return ret;
2821                 }
2822         }
2823
2824         if (obj->gtt_space == NULL) {
2825                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2826
2827                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
2828                                                   map_and_fenceable,
2829                                                   nonblocking);
2830                 if (ret)
2831                         return ret;
2832
2833                 if (!dev_priv->mm.aliasing_ppgtt)
2834                         i915_gem_gtt_bind_object(obj, obj->cache_level);
2835         }
2836
2837         if (!obj->has_global_gtt_mapping && map_and_fenceable)
2838                 i915_gem_gtt_bind_object(obj, obj->cache_level);
2839
2840         obj->pin_count++;
2841         obj->pin_mappable |= map_and_fenceable;
2842
2843         return 0;
2844 }
2845
2846 void
2847 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
2848 {
2849         BUG_ON(obj->pin_count == 0);
2850         BUG_ON(obj->gtt_space == NULL);
2851
2852         if (--obj->pin_count == 0)
2853                 obj->pin_mappable = false;
2854 }
2855
2856 int
2857 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2858                    struct drm_file *file)
2859 {
2860         struct drm_i915_gem_pin *args = data;
2861         struct drm_i915_gem_object *obj;
2862         int ret;
2863
2864         ret = i915_mutex_lock_interruptible(dev);
2865         if (ret)
2866                 return ret;
2867
2868         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2869         if (&obj->base == NULL) {
2870                 ret = -ENOENT;
2871                 goto unlock;
2872         }
2873
2874         if (obj->madv != I915_MADV_WILLNEED) {
2875                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
2876                 ret = -EINVAL;
2877                 goto out;
2878         }
2879
2880         if (obj->pin_filp != NULL && obj->pin_filp != file) {
2881                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2882                           args->handle);
2883                 ret = -EINVAL;
2884                 goto out;
2885         }
2886
2887         if (obj->user_pin_count == 0) {
2888                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
2889                 if (ret)
2890                         goto out;
2891         }
2892
2893         obj->user_pin_count++;
2894         obj->pin_filp = file;
2895
2896         /* XXX - flush the CPU caches for pinned objects
2897          * as the X server doesn't manage domains yet
2898          */
2899         i915_gem_object_flush_cpu_write_domain(obj);
2900         args->offset = obj->gtt_offset;
2901 out:
2902         drm_gem_object_unreference(&obj->base);
2903 unlock:
2904         DRM_UNLOCK(dev);
2905         return ret;
2906 }
2907
2908 int
2909 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2910                      struct drm_file *file)
2911 {
2912         struct drm_i915_gem_pin *args = data;
2913         struct drm_i915_gem_object *obj;
2914         int ret;
2915
2916         ret = i915_mutex_lock_interruptible(dev);
2917         if (ret)
2918                 return ret;
2919
2920         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2921         if (&obj->base == NULL) {
2922                 ret = -ENOENT;
2923                 goto unlock;
2924         }
2925
2926         if (obj->pin_filp != file) {
2927                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
2928                           args->handle);
2929                 ret = -EINVAL;
2930                 goto out;
2931         }
2932         obj->user_pin_count--;
2933         if (obj->user_pin_count == 0) {
2934                 obj->pin_filp = NULL;
2935                 i915_gem_object_unpin(obj);
2936         }
2937
2938 out:
2939         drm_gem_object_unreference(&obj->base);
2940 unlock:
2941         DRM_UNLOCK(dev);
2942         return (ret);
2943 }
2944
2945 int
2946 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2947                     struct drm_file *file)
2948 {
2949         struct drm_i915_gem_busy *args = data;
2950         struct drm_i915_gem_object *obj;
2951         int ret;
2952
2953         ret = i915_mutex_lock_interruptible(dev);
2954         if (ret)
2955                 return ret;
2956
2957         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2958         if (&obj->base == NULL) {
2959                 ret = -ENOENT;
2960                 goto unlock;
2961         }
2962
2963         /* Count all active objects as busy, even if they are currently not used
2964          * by the gpu. Users of this interface expect objects to eventually
2965          * become non-busy without any further actions, therefore emit any
2966          * necessary flushes here.
2967          */
2968         ret = i915_gem_object_flush_active(obj);
2969
2970         args->busy = obj->active;
2971         if (obj->ring) {
2972                 args->busy |= intel_ring_flag(obj->ring) << 16;
2973         }
2974
2975         drm_gem_object_unreference(&obj->base);
2976 unlock:
2977         DRM_UNLOCK(dev);
2978         return ret;
2979 }
2980
2981 int
2982 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2983                         struct drm_file *file_priv)
2984 {
2985         return i915_gem_ring_throttle(dev, file_priv);
2986 }
2987
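/*
 * Allow userspace to mark a buffer as expendable (I915_MADV_DONTNEED) or
 * needed again (I915_MADV_WILLNEED); pinned buffers are rejected and
 * args->retained reports whether the backing store is still present.
 */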
2988 int
2989 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2990                        struct drm_file *file_priv)
2991 {
2992         struct drm_i915_gem_madvise *args = data;
2993         struct drm_i915_gem_object *obj;
2994         int ret;
2995
2996         switch (args->madv) {
2997         case I915_MADV_DONTNEED:
2998         case I915_MADV_WILLNEED:
2999             break;
3000         default:
3001             return -EINVAL;
3002         }
3003
3004         ret = i915_mutex_lock_interruptible(dev);
3005         if (ret)
3006                 return ret;
3007
3008         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3009         if (&obj->base == NULL) {
3010                 ret = -ENOENT;
3011                 goto unlock;
3012         }
3013
3014         if (obj->pin_count) {
3015                 ret = -EINVAL;
3016                 goto out;
3017         }
3018
3019         if (obj->madv != __I915_MADV_PURGED)
3020                 obj->madv = args->madv;
3021
3022         /* if the object is no longer attached, discard its backing storage */
3023         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3024                 i915_gem_object_truncate(obj);
3025
3026         args->retained = obj->madv != __I915_MADV_PURGED;
3027
3028 out:
3029         drm_gem_object_unreference(&obj->base);
3030 unlock:
3031         DRM_UNLOCK(dev);
3032         return ret;
3033 }
3034
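/*
 * Allocate and initialise a new GEM object of the given size.  New objects
 * start out in the CPU domain and, on LLC hardware, default to LLC caching.
 */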
3035 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3036                                                   size_t size)
3037 {
3038         struct drm_i915_private *dev_priv;
3039         struct drm_i915_gem_object *obj;
3040
3041         dev_priv = dev->dev_private;
3042
3043         obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
3044
3045         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3046                 drm_free(obj, M_DRM);
3047                 return (NULL);
3048         }
3049
3050         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3051         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3052
3053         if (HAS_LLC(dev)) {
3054                 /* On some devices, we can have the GPU use the LLC (the CPU
3055                  * cache) for about a 10% performance improvement
3056                  * compared to uncached.  Graphics requests other than
3057                  * display scanout are coherent with the CPU in
3058                  * accessing this cache.  This means in this mode we
3059                  * don't need to clflush on the CPU side, and on the
3060                  * GPU side we only need to flush internal caches to
3061                  * get data visible to the CPU.
3062                  *
3063                  * However, we maintain the display planes as UC, and so
3064                  * need to rebind when first used as such.
3065                  */
3066                 obj->cache_level = I915_CACHE_LLC;
3067         } else
3068                 obj->cache_level = I915_CACHE_NONE;
3069         obj->base.driver_private = NULL;
3070         obj->fence_reg = I915_FENCE_REG_NONE;
3071         INIT_LIST_HEAD(&obj->mm_list);
3072         INIT_LIST_HEAD(&obj->gtt_list);
3073         INIT_LIST_HEAD(&obj->ring_list);
3074         INIT_LIST_HEAD(&obj->exec_list);
3075         obj->madv = I915_MADV_WILLNEED;
3076         /* Avoid an unnecessary call to unbind on the first bind. */
3077         obj->map_and_fenceable = true;
3078
3079         i915_gem_info_add_obj(dev_priv, size);
3080
3081         return obj;
3082 }
3083
3084 int i915_gem_init_object(struct drm_gem_object *obj)
3085 {
3086         BUG();
3087
3088         return 0;
3089 }
3090
3091 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3092 {
3093         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3094         struct drm_device *dev = obj->base.dev;
3095         drm_i915_private_t *dev_priv = dev->dev_private;
3096
3097         if (obj->phys_obj)
3098                 i915_gem_detach_phys_object(dev, obj);
3099
3100         obj->pin_count = 0;
3101         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3102                 bool was_interruptible;
3103
3104                 was_interruptible = dev_priv->mm.interruptible;
3105                 dev_priv->mm.interruptible = false;
3106
3107                 WARN_ON(i915_gem_object_unbind(obj));
3108
3109                 dev_priv->mm.interruptible = was_interruptible;
3110         }
3111
3112         drm_gem_free_mmap_offset(&obj->base);
3113
3114         drm_gem_object_release(&obj->base);
3115         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3116
3117         drm_free(obj->bit_17, M_DRM);
3118         drm_free(obj, M_DRM);
3119 }
3120
3121 int
3122 i915_gem_do_init(struct drm_device *dev, unsigned long start,
3123     unsigned long mappable_end, unsigned long end)
3124 {
3125         drm_i915_private_t *dev_priv;
3126         unsigned long mappable;
3127         int error;
3128
3129         dev_priv = dev->dev_private;
3130         mappable = min(end, mappable_end) - start;
3131
3132         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
3133
3134         dev_priv->mm.gtt_start = start;
3135         dev_priv->mm.gtt_mappable_end = mappable_end;
3136         dev_priv->mm.gtt_end = end;
3137         dev_priv->mm.gtt_total = end - start;
3138         dev_priv->mm.mappable_gtt_total = mappable;
3139
3140         /* Take over this portion of the GTT */
3141         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
3142         device_printf(dev->dev,
3143             "taking over the fictitious range 0x%lx-0x%lx\n",
3144             dev->agp->base + start, dev->agp->base + start + mappable);
3145         error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
3146             dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
3147         return (error);
3148 }
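
/*
 * Worked example (numbers purely illustrative): with start = 0,
 * mappable_end = 256 MiB and end = 2 GiB, the drm_mm allocator manages
 * the full 2 GiB of GTT space, mappable_gtt_total is 256 MiB, and the
 * fictitious page range registered above spans dev->agp->base up to
 * dev->agp->base + 256 MiB, i.e. the CPU-visible aperture that the GTT
 * pager further down in this file hands pages out of.
 */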
3149
3150 int
3151 i915_gem_idle(struct drm_device *dev)
3152 {
3153         drm_i915_private_t *dev_priv = dev->dev_private;
3154         int ret;
3155
3156         DRM_LOCK(dev);
3157
3158         if (dev_priv->mm.suspended) {
3159                 DRM_UNLOCK(dev);
3160                 return 0;
3161         }
3162
3163         ret = i915_gpu_idle(dev);
3164         if (ret) {
3165                 DRM_UNLOCK(dev);
3166                 return ret;
3167         }
3168         i915_gem_retire_requests(dev);
3169
3170         /* Under UMS, be paranoid and evict. */
3171         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3172                 i915_gem_evict_everything(dev);
3173
3174         i915_gem_reset_fences(dev);
3175
3176         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3177          * We need to replace this with a semaphore, or something.
3178          * And not confound mm.suspended!
3179          */
3180         dev_priv->mm.suspended = 1;
3181         del_timer_sync(&dev_priv->hangcheck_timer);
3182
3183         i915_kernel_lost_context(dev);
3184         i915_gem_cleanup_ringbuffer(dev);
3185
3186         DRM_UNLOCK(dev);
3187
3188         /* Cancel the retire work handler, which should be idle now. */
3189         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3190
3191         return 0;
3192 }
3193
3194 void i915_gem_l3_remap(struct drm_device *dev)
3195 {
3196         drm_i915_private_t *dev_priv = dev->dev_private;
3197         u32 misccpctl;
3198         int i;
3199
3200         if (!HAS_L3_GPU_CACHE(dev))
3201                 return;
3202
3203         if (!dev_priv->l3_parity.remap_info)
3204                 return;
3205
3206         misccpctl = I915_READ(GEN7_MISCCPCTL);
3207         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3208         POSTING_READ(GEN7_MISCCPCTL);
3209
3210         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3211                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3212                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3213                         DRM_DEBUG("0x%x was already programmed to %x\n",
3214                                   GEN7_L3LOG_BASE + i, remap);
3215                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3216                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
3217                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3218         }
3219
3220         /* Make sure all the writes land before re-enabling dop clock gating */
3221         POSTING_READ(GEN7_L3LOG_BASE);
3222
3223         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3224 }
3225
3226 void i915_gem_init_swizzling(struct drm_device *dev)
3227 {
3228         drm_i915_private_t *dev_priv = dev->dev_private;
3229
3230         if (INTEL_INFO(dev)->gen < 5 ||
3231             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3232                 return;
3233
3234         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3235                                  DISP_TILE_SURFACE_SWIZZLING);
3236
3237         if (IS_GEN5(dev))
3238                 return;
3239
3240         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3241         if (IS_GEN6(dev))
3242                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3243         else
3244                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3245 }
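
/*
 * Note on the _MASKED_BIT_ENABLE() writes above: ARB_MODE is a masked
 * register, so the upper 16 bits of a write select which of the lower
 * bits actually change.  The macro expands roughly to ((bit) << 16) |
 * (bit), so only the named swizzle bit is latched and the rest of the
 * register is left untouched.
 */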
3246
3247 static bool
3248 intel_enable_blt(struct drm_device *dev)
3249 {
3250         int revision;
3251
3252         if (!HAS_BLT(dev))
3253                 return false;
3254
3255         /* The blitter was dysfunctional on early prototypes */
3256         revision = pci_read_config(dev->dev, PCIR_REVID, 1);
3257         if (IS_GEN6(dev) && revision < 8) {
3258                 DRM_INFO("BLT not supported on this pre-production hardware;"
3259                          " graphics performance will be degraded.\n");
3260                 return false;
3261         }
3262
3263         return true;
3264 }
3265
3266 int
3267 i915_gem_init_hw(struct drm_device *dev)
3268 {
3269         drm_i915_private_t *dev_priv = dev->dev_private;
3270         int ret;
3271
3272         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3273                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3274
3275         i915_gem_l3_remap(dev);
3276
3277         i915_gem_init_swizzling(dev);
3278
3279         ret = intel_init_render_ring_buffer(dev);
3280         if (ret)
3281                 return ret;
3282
3283         if (HAS_BSD(dev)) {
3284                 ret = intel_init_bsd_ring_buffer(dev);
3285                 if (ret)
3286                         goto cleanup_render_ring;
3287         }
3288
3289         if (intel_enable_blt(dev)) {
3290                 ret = intel_init_blt_ring_buffer(dev);
3291                 if (ret)
3292                         goto cleanup_bsd_ring;
3293         }
3294
3295         dev_priv->next_seqno = 1;
3296
3297         /*
3298          * XXX: There was some w/a described somewhere suggesting loading
3299          * contexts before PPGTT.
3300          */
3301         i915_gem_context_init(dev);
3302         i915_gem_init_ppgtt(dev);
3303
3304         return 0;
3305
3306 cleanup_bsd_ring:
3307         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3308 cleanup_render_ring:
3309         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3310         return ret;
3311 }
3312
3313 static bool
3314 intel_enable_ppgtt(struct drm_device *dev)
3315 {
3316         if (i915_enable_ppgtt >= 0)
3317                 return i915_enable_ppgtt;
3318
3319         /* Disable ppgtt on SNB if VT-d is on. */
3320         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
3321                 return false;
3322
3323         return true;
3324 }
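
/*
 * i915_enable_ppgtt is the driver tunable consulted above: a negative
 * value (the usual default) means auto-detect, 0 forces aliasing PPGTT
 * off and any positive value forces it on.  The VT-d check reflects the
 * known bad interaction between aliasing PPGTT and the IOMMU on SNB.
 */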
3325
3326 int i915_gem_init(struct drm_device *dev)
3327 {
3328         struct drm_i915_private *dev_priv = dev->dev_private;
3329         unsigned long prealloc_size, gtt_size, mappable_size;
3330         int ret;
3331
3332         prealloc_size = dev_priv->mm.gtt->stolen_size;
3333         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3334         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3335
3336         /* Basic memrange allocator for stolen space */
3337         drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
3338
3339         DRM_LOCK(dev);
3340         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3341                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3342                  * aperture accordingly when using aliasing ppgtt. */
3343                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3344                 /* For paranoia keep the guard page in between. */
3345                 gtt_size -= PAGE_SIZE;
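                /*
                 * Worked example (assuming I915_PPGTT_PD_ENTRIES is 512, as
                 * on gen6/gen7): 512 PDEs * 4 KiB = 2 MiB of global GTT
                 * entries are reserved for the PPGTT page directory, plus
                 * one 4 KiB guard page, so a 2 GiB GTT leaves roughly
                 * 2 GiB - 2 MiB - 4 KiB of aperture for ordinary objects.
                 */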
3346
3347                 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
3348
3349                 ret = i915_gem_init_aliasing_ppgtt(dev);
3350                 if (ret) {
3351                         DRM_UNLOCK(dev);
3352                         return ret;
3353                 }
3354         } else {
3355                 /* Let GEM Manage all of the aperture.
3356                  *
3357                  * However, leave one page at the end still bound to the scratch
3358                  * page.  There are a number of places where the hardware
3359                  * apparently prefetches past the end of the object, and we've
3360                  * seen multiple hangs with the GPU head pointer stuck in a
3361                  * batchbuffer bound at the last page of the aperture.  One page
3362                  * should be enough to keep any prefetching inside of the
3363                  * aperture.
3364                  */
3365                 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
3366         }
3367
3368         ret = i915_gem_init_hw(dev);
3369         DRM_UNLOCK(dev);
3370         if (ret) {
3371                 i915_gem_cleanup_aliasing_ppgtt(dev);
3372                 return ret;
3373         }
3374
3375 #if 0
3376         /* Try to set up FBC with a reasonable compressed buffer size */
3377         if (I915_HAS_FBC(dev) && i915_powersave) {
3378                 int cfb_size;
3379
3380                 /* Leave 1M for line length buffer & misc. */
3381
3382                 /* Try to get a 32M buffer... */
3383                 if (prealloc_size > (36*1024*1024))
3384                         cfb_size = 32*1024*1024;
3385                 else /* fall back to 7/8 of the stolen space */
3386                         cfb_size = prealloc_size * 7 / 8;
3387                 i915_setup_compression(dev, cfb_size);
3388         }
3389 #endif
3390
3391         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3392         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3393                 dev_priv->dri1.allow_batchbuffer = 1;
3394         return 0;
3395 }
3396
3397 void
3398 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3399 {
3400         drm_i915_private_t *dev_priv = dev->dev_private;
3401         struct intel_ring_buffer *ring;
3402         int i;
3403
3404         for_each_ring(ring, dev_priv, i)
3405                 intel_cleanup_ring_buffer(ring);
3406 }
3407
3408 int
3409 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3410                        struct drm_file *file_priv)
3411 {
3412         drm_i915_private_t *dev_priv = dev->dev_private;
3413         int ret;
3414
3415         if (drm_core_check_feature(dev, DRIVER_MODESET))
3416                 return 0;
3417
3418         if (atomic_read(&dev_priv->mm.wedged)) {
3419                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3420                 atomic_set(&dev_priv->mm.wedged, 0);
3421         }
3422
3423         DRM_LOCK(dev);
3424         dev_priv->mm.suspended = 0;
3425
3426         ret = i915_gem_init_hw(dev);
3427         if (ret != 0) {
3428                 DRM_UNLOCK(dev);
3429                 return ret;
3430         }
3431
3432         KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
3433         DRM_UNLOCK(dev);
3434
3435         ret = drm_irq_install(dev);
3436         if (ret)
3437                 goto cleanup_ringbuffer;
3438
3439         return 0;
3440
3441 cleanup_ringbuffer:
3442         DRM_LOCK(dev);
3443         i915_gem_cleanup_ringbuffer(dev);
3444         dev_priv->mm.suspended = 1;
3445         DRM_UNLOCK(dev);
3446
3447         return ret;
3448 }
3449
3450 int
3451 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3452                        struct drm_file *file_priv)
3453 {
3454         if (drm_core_check_feature(dev, DRIVER_MODESET))
3455                 return 0;
3456
3457         drm_irq_uninstall(dev);
3458         return i915_gem_idle(dev);
3459 }
3460
3461 void
3462 i915_gem_lastclose(struct drm_device *dev)
3463 {
3464         int ret;
3465
3466         if (drm_core_check_feature(dev, DRIVER_MODESET))
3467                 return;
3468
3469         ret = i915_gem_idle(dev);
3470         if (ret)
3471                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3472 }
3473
3474 static void
3475 init_ring_lists(struct intel_ring_buffer *ring)
3476 {
3477         INIT_LIST_HEAD(&ring->active_list);
3478         INIT_LIST_HEAD(&ring->request_list);
3479 }
3480
3481 void
3482 i915_gem_load(struct drm_device *dev)
3483 {
3484         int i;
3485         drm_i915_private_t *dev_priv = dev->dev_private;
3486
3487         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3488         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3489         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3490         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3491         for (i = 0; i < I915_NUM_RINGS; i++)
3492                 init_ring_lists(&dev_priv->ring[i]);
3493         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3494                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3495         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3496                           i915_gem_retire_work_handler);
3497         init_completion(&dev_priv->error_completion);
3498
3499         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3500         if (IS_GEN3(dev)) {
3501                 I915_WRITE(MI_ARB_STATE,
3502                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3503         }
3504
3505         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3506
3507         /* Old X drivers will take 0-2 for front, back, depth buffers */
3508         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3509                 dev_priv->fence_reg_start = 3;
3510
3511         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3512                 dev_priv->num_fence_regs = 16;
3513         else
3514                 dev_priv->num_fence_regs = 8;
3515
3516         /* Initialize fence registers to zero */
3517         i915_gem_reset_fences(dev);
3518
3519         i915_gem_detect_bit_6_swizzle(dev);
3520         init_waitqueue_head(&dev_priv->pending_flip_queue);
3521
3522         dev_priv->mm.interruptible = true;
3523
3524 #if 0
3525         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3526         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3527         register_shrinker(&dev_priv->mm.inactive_shrinker);
3528 #else
3529         dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
3530             i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
3531 #endif
3532 }
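
/*
 * Note: where Linux registers an inactive-object shrinker (the code kept
 * under "#if 0" above), this port hooks the vm_lowmem event instead;
 * i915_gem_lowmem() near the end of this file performs the eviction work
 * when the system runs short of pages.
 */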
3533
3534 /*
3535  * Create a physically contiguous memory object for this object
3536  * e.g. for cursor + overlay regs
3537  */
3538 static int i915_gem_init_phys_object(struct drm_device *dev,
3539                                      int id, int size, int align)
3540 {
3541         drm_i915_private_t *dev_priv = dev->dev_private;
3542         struct drm_i915_gem_phys_object *phys_obj;
3543         int ret;
3544
3545         if (dev_priv->mm.phys_objs[id - 1] || !size)
3546                 return 0;
3547
3548         phys_obj = kmalloc(sizeof(struct drm_i915_gem_phys_object), M_DRM,
3549             M_WAITOK | M_ZERO);
3550         if (!phys_obj)
3551                 return -ENOMEM;
3552
3553         phys_obj->id = id;
3554
3555         phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3556         if (!phys_obj->handle) {
3557                 ret = -ENOMEM;
3558                 goto kfree_obj;
3559         }
3560         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3561             size / PAGE_SIZE, PAT_WRITE_COMBINING);
3562
3563         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3564
3565         return 0;
3566
3567 kfree_obj:
3568         drm_free(phys_obj, M_DRM);
3569         return ret;
3570 }
3571
3572 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3573 {
3574         drm_i915_private_t *dev_priv = dev->dev_private;
3575         struct drm_i915_gem_phys_object *phys_obj;
3576
3577         if (!dev_priv->mm.phys_objs[id - 1])
3578                 return;
3579
3580         phys_obj = dev_priv->mm.phys_objs[id - 1];
3581         if (phys_obj->cur_obj) {
3582                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3583         }
3584
3585         drm_pci_free(dev, phys_obj->handle);
3586         drm_free(phys_obj, M_DRM);
3587         dev_priv->mm.phys_objs[id - 1] = NULL;
3588 }
3589
3590 void i915_gem_free_all_phys_object(struct drm_device *dev)
3591 {
3592         int i;
3593
3594         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3595                 i915_gem_free_phys_object(dev, i);
3596 }
3597
3598 void i915_gem_detach_phys_object(struct drm_device *dev,
3599                                  struct drm_i915_gem_object *obj)
3600 {
3601         vm_page_t m;
3602         struct sf_buf *sf;
3603         char *vaddr, *dst;
3604         int i, page_count;
3605
3606         if (!obj->phys_obj)
3607                 return;
3608         vaddr = obj->phys_obj->handle->vaddr;
3609
3610         page_count = obj->base.size / PAGE_SIZE;
3611         VM_OBJECT_LOCK(obj->base.vm_obj);
3612         for (i = 0; i < page_count; i++) {
3613                 m = i915_gem_wire_page(obj->base.vm_obj, i);
3614                 if (m == NULL)
3615                         continue; /* XXX */
3616
3617                 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3618                 sf = sf_buf_alloc(m);
3619                 if (sf != NULL) {
3620                         dst = (char *)sf_buf_kva(sf);
3621                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
3622                         sf_buf_free(sf);
3623                 }
3624                 drm_clflush_pages(&m, 1);
3625
3626                 VM_OBJECT_LOCK(obj->base.vm_obj);
3627                 vm_page_reference(m);
3628                 vm_page_dirty(m);
3629                 vm_page_busy_wait(m, FALSE, "i915gem");
3630                 vm_page_unwire(m, 0);
3631                 vm_page_wakeup(m);
3632         }
3633         VM_OBJECT_UNLOCK(obj->base.vm_obj);
3634         intel_gtt_chipset_flush();
3635
3636         obj->phys_obj->cur_obj = NULL;
3637         obj->phys_obj = NULL;
3638 }
3639
3640 int
3641 i915_gem_attach_phys_object(struct drm_device *dev,
3642                             struct drm_i915_gem_object *obj,
3643                             int id,
3644                             int align)
3645 {
3646         drm_i915_private_t *dev_priv = dev->dev_private;
3647         vm_page_t m;
3648         struct sf_buf *sf;
3649         char *dst, *src;
3650         int i, page_count, ret;
3651
3652         if (id > I915_MAX_PHYS_OBJECT)
3653                 return -EINVAL;
3654
3655         if (obj->phys_obj) {
3656                 if (obj->phys_obj->id == id)
3657                         return 0;
3658                 i915_gem_detach_phys_object(dev, obj);
3659         }
3660
3661         /* create a new object */
3662         if (!dev_priv->mm.phys_objs[id - 1]) {
3663                 ret = i915_gem_init_phys_object(dev, id,
3664                                                 obj->base.size, align);
3665                 if (ret) {
3666                         DRM_ERROR("failed to init phys object %d size: %zu\n",
3667                                   id, obj->base.size);
3668                         return ret;
3669                 }
3670         }
3671
3672         /* bind to the object */
3673         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3674         obj->phys_obj->cur_obj = obj;
3675
3676         page_count = obj->base.size / PAGE_SIZE;
3677
3678         VM_OBJECT_LOCK(obj->base.vm_obj);
3679         ret = 0;
3680         for (i = 0; i < page_count; i++) {
3681                 m = i915_gem_wire_page(obj->base.vm_obj, i);
3682                 if (m == NULL) {
3683                         ret = -EIO;
3684                         break;
3685                 }
3686                 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3687                 sf = sf_buf_alloc(m);
3688                 src = (char *)sf_buf_kva(sf);
3689                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
3690                 memcpy(dst, src, PAGE_SIZE);
3691                 sf_buf_free(sf);
3692
3693                 VM_OBJECT_LOCK(obj->base.vm_obj);
3694
3695                 vm_page_reference(m);
3696                 vm_page_busy_wait(m, FALSE, "i915gem");
3697                 vm_page_unwire(m, 0);
3698                 vm_page_wakeup(m);
3699         }
3700         VM_OBJECT_UNLOCK(obj->base.vm_obj);
3701
3702         return (ret);
3703 }
3704
3705 static int
3706 i915_gem_phys_pwrite(struct drm_device *dev,
3707                      struct drm_i915_gem_object *obj,
3708                      struct drm_i915_gem_pwrite *args,
3709                      struct drm_file *file_priv)
3710 {
3711         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
3712         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
3713
3714         if (copyin_nofault(user_data, vaddr, args->size) != 0) {
3715                 unsigned long unwritten;
3716
3717                 /* The physical object once assigned is fixed for the lifetime
3718                  * of the obj, so we can safely drop the lock and continue
3719                  * to access vaddr.
3720                  */
3721                 DRM_UNLOCK(dev);
3722                 unwritten = copy_from_user(vaddr, user_data, args->size);
3723                 DRM_LOCK(dev);
3724                 if (unwritten)
3725                         return -EFAULT;
3726         }
3727
3728         i915_gem_chipset_flush(dev);
3729         return 0;
3730 }
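
/*
 * Note: copyin_nofault() is the fast path; it refuses to service a hard
 * page fault, so if the user pages are not resident it fails instead of
 * sleeping with the DRM lock held.  Only then do we drop the lock and
 * redo the copy with the faulting copy_from_user(), which is safe since
 * the phys object stays attached for the lifetime of the GEM object.
 */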
3731
3732 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
3733 {
3734         struct drm_i915_file_private *file_priv = file->driver_priv;
3735
3736         /* Clean up our request list when the client is going away, so that
3737          * later retire_requests won't dereference our soon-to-be-gone
3738          * file_priv.
3739          */
3740         spin_lock(&file_priv->mm.lock);
3741         while (!list_empty(&file_priv->mm.request_list)) {
3742                 struct drm_i915_gem_request *request;
3743
3744                 request = list_first_entry(&file_priv->mm.request_list,
3745                                            struct drm_i915_gem_request,
3746                                            client_list);
3747                 list_del(&request->client_list);
3748                 request->file_priv = NULL;
3749         }
3750         spin_unlock(&file_priv->mm.lock);
3751 }
3752
3753 static int
3754 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
3755     vm_ooffset_t foff, struct ucred *cred, u_short *color)
3756 {
3757
3758         *color = 0; /* XXXKIB */
3759         return (0);
3760 }
3761
3762 int i915_intr_pf;
3763
3764 static int
3765 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
3766     vm_page_t *mres)
3767 {
3768         struct drm_gem_object *gem_obj;
3769         struct drm_i915_gem_object *obj;
3770         struct drm_device *dev;
3771         drm_i915_private_t *dev_priv;
3772         vm_page_t m, oldm;
3773         int cause, ret;
3774         bool write;
3775
3776         gem_obj = vm_obj->handle;
3777         obj = to_intel_bo(gem_obj);
3778         dev = obj->base.dev;
3779         dev_priv = dev->dev_private;
3780 #if 0
3781         write = (prot & VM_PROT_WRITE) != 0;
3782 #else
3783         write = true;
3784 #endif
3785         vm_object_pip_add(vm_obj, 1);
3786
3787         /*
3788          * Remove the placeholder page inserted by vm_fault() from the
3789          * object before dropping the object lock.  If
3790          * i915_gem_release_mmap() is running in parallel on this gem
3791          * object, it owns the drm device lock and might already have
3792          * found the placeholder.  In that case, since the page is
3793          * busy, i915_gem_release_mmap() sleeps waiting for the page's
3794          * busy state to clear, and we would not be able to acquire
3795          * the drm device lock until i915_gem_release_mmap() was able
3796          * to make progress.
3797          */
3798         if (*mres != NULL) {
3799                 oldm = *mres;
3800                 vm_page_remove(oldm);
3801                 *mres = NULL;
3802         } else
3803                 oldm = NULL;
3804 retry:
3805         VM_OBJECT_UNLOCK(vm_obj);
3806 unlocked_vmobj:
3807         cause = ret = 0;
3808         m = NULL;
3809
3810         if (i915_intr_pf) {
3811                 ret = i915_mutex_lock_interruptible(dev);
3812                 if (ret != 0) {
3813                         cause = 10;
3814                         goto out;
3815                 }
3816         } else
3817                 DRM_LOCK(dev);
3818
3819         /*
3820          * Since the object lock was dropped, another thread might have
3821          * faulted on the same GTT address and instantiated the
3822          * mapping for the page.  Recheck.
3823          */
3824         VM_OBJECT_LOCK(vm_obj);
3825         m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
3826         if (m != NULL) {
3827                 if ((m->flags & PG_BUSY) != 0) {
3828                         DRM_UNLOCK(dev);
3829 #if 0 /* XXX */
3830                         vm_page_sleep(m, "915pee");
3831 #endif
3832                         goto retry;
3833                 }
3834                 goto have_page;
3835         } else
3836                 VM_OBJECT_UNLOCK(vm_obj);
3837
3838         /* Now bind it into the GTT if needed */
3839         if (!obj->map_and_fenceable) {
3840                 ret = i915_gem_object_unbind(obj);
3841                 if (ret != 0) {
3842                         cause = 20;
3843                         goto unlock;
3844                 }
3845         }
3846         if (!obj->gtt_space) {
3847                 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
3848                 if (ret != 0) {
3849                         cause = 30;
3850                         goto unlock;
3851                 }
3852
3853                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
3854                 if (ret != 0) {
3855                         cause = 40;
3856                         goto unlock;
3857                 }
3858         }
3859
3860         if (obj->tiling_mode == I915_TILING_NONE)
3861                 ret = i915_gem_object_put_fence(obj);
3862         else
3863                 ret = i915_gem_object_get_fence(obj);
3864         if (ret != 0) {
3865                 cause = 50;
3866                 goto unlock;
3867         }
3868
3869         if (i915_gem_object_is_inactive(obj))
3870                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3871
3872         obj->fault_mappable = true;
3873         VM_OBJECT_LOCK(vm_obj);
3874         m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
3875             offset);
3876         if (m == NULL) {
3877                 cause = 60;
3878                 ret = -EFAULT;
3879                 goto unlock;
3880         }
3881         KASSERT((m->flags & PG_FICTITIOUS) != 0,
3882             ("not fictitious %p", m));
3883         KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
3884
3885         if ((m->flags & PG_BUSY) != 0) {
3886                 DRM_UNLOCK(dev);
3887 #if 0 /* XXX */
3888                 vm_page_sleep(m, "915pbs");
3889 #endif
3890                 goto retry;
3891         }
3892         m->valid = VM_PAGE_BITS_ALL;
3893         vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
3894 have_page:
3895         *mres = m;
3896         vm_page_busy_try(m, false);
3897
3898         DRM_UNLOCK(dev);
3899         if (oldm != NULL) {
3900                 vm_page_free(oldm);
3901         }
3902         vm_object_pip_wakeup(vm_obj);
3903         return (VM_PAGER_OK);
3904
3905 unlock:
3906         DRM_UNLOCK(dev);
3907 out:
3908         KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
3909         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
3910                 goto unlocked_vmobj;
3911         }
3912         VM_OBJECT_LOCK(vm_obj);
3913         vm_object_pip_wakeup(vm_obj);
3914         return (VM_PAGER_ERROR);
3915 }
3916
3917 static void
3918 i915_gem_pager_dtor(void *handle)
3919 {
3920         struct drm_gem_object *obj;
3921         struct drm_device *dev;
3922
3923         obj = handle;
3924         dev = obj->dev;
3925
3926         DRM_LOCK(dev);
3927         drm_gem_free_mmap_offset(obj);
3928         i915_gem_release_mmap(to_intel_bo(obj));
3929         drm_gem_object_unreference(obj);
3930         DRM_UNLOCK(dev);
3931 }
3932
3933 struct cdev_pager_ops i915_gem_pager_ops = {
3934         .cdev_pg_fault  = i915_gem_pager_fault,
3935         .cdev_pg_ctor   = i915_gem_pager_ctor,
3936         .cdev_pg_dtor   = i915_gem_pager_dtor
3937 };
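
/*
 * These device-pager hooks back GTT mmaps of GEM objects: a page fault
 * on such a mapping lands in i915_gem_pager_fault() above, which binds
 * the object into the GTT if necessary and returns the matching
 * fictitious aperture page registered in i915_gem_do_init().
 */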
3938
3939 #define GEM_PARANOID_CHECK_GTT 0
3940 #if GEM_PARANOID_CHECK_GTT
3941 static void
3942 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
3943     int page_count)
3944 {
3945         struct drm_i915_private *dev_priv;
3946         vm_paddr_t pa;
3947         unsigned long start, end;
3948         u_int i;
3949         int j;
3950
3951         dev_priv = dev->dev_private;
3952         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
3953         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
3954         for (i = start; i < end; i++) {
3955                 pa = intel_gtt_read_pte_paddr(i);
3956                 for (j = 0; j < page_count; j++) {
3957                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
3958                                 panic("Page %p in GTT pte index %d pte %x",
3959                                     ma[j], i, intel_gtt_read_pte(i));
3960                         }
3961                 }
3962         }
3963 }
3964 #endif
3965
3966 #define VM_OBJECT_LOCK_ASSERT_OWNED(object)
3967
3968 static vm_page_t
3969 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
3970 {
3971         vm_page_t m;
3972         int rv;
3973
3974         VM_OBJECT_LOCK_ASSERT_OWNED(object);
3975         m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3976         if (m->valid != VM_PAGE_BITS_ALL) {
3977                 if (vm_pager_has_page(object, pindex)) {
3978                         rv = vm_pager_get_page(object, &m, 1);
3979                         m = vm_page_lookup(object, pindex);
3980                         if (m == NULL)
3981                                 return (NULL);
3982                         if (rv != VM_PAGER_OK) {
3983                                 vm_page_free(m);
3984                                 return (NULL);
3985                         }
3986                 } else {
3987                         pmap_zero_page(VM_PAGE_TO_PHYS(m));
3988                         m->valid = VM_PAGE_BITS_ALL;
3989                         m->dirty = 0;
3990                 }
3991         }
3992         vm_page_wire(m);
3993         vm_page_wakeup(m);
3994         return (m);
3995 }
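
/*
 * i915_gem_wire_page() is the helper the phys-object attach/detach code
 * above relies on: it returns a wired, valid page of the object's backing
 * VM object (paging it in or zero-filling it as needed) so the caller can
 * copy to or from it without the page being reclaimed mid-copy.  Callers
 * hold the VM object lock and unwire the page again when done.
 */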
3996
3997 static int
3998 i915_gpu_is_active(struct drm_device *dev)
3999 {
4000         drm_i915_private_t *dev_priv = dev->dev_private;
4001
4002         return !list_empty(&dev_priv->mm.active_list);
4003 }
4004
4005 static void
4006 i915_gem_lowmem(void *arg)
4007 {
4008         struct drm_device *dev;
4009         struct drm_i915_private *dev_priv;
4010         struct drm_i915_gem_object *obj, *next;
4011         int cnt, cnt_fail, cnt_total;
4012
4013         dev = arg;
4014         dev_priv = dev->dev_private;
4015
4016         if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT))
4017                 return;
4018
4019 rescan:
4020         /* first scan for clean buffers */
4021         i915_gem_retire_requests(dev);
4022
4023         cnt_total = cnt_fail = cnt = 0;
4024
4025         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4026             mm_list) {
4027                 if (i915_gem_object_is_purgeable(obj)) {
4028                         if (i915_gem_object_unbind(obj) != 0)
4029                                 cnt_total++;
4030                 } else
4031                         cnt_total++;
4032         }
4033
4034         /* second pass, evict/count anything still on the inactive list */
4035         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4036             mm_list) {
4037                 if (i915_gem_object_unbind(obj) == 0)
4038                         cnt++;
4039                 else
4040                         cnt_fail++;
4041         }
4042
4043         if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
4044                 /*
4045                  * We are desperate for pages, so as a last resort, wait
4046                  * for the GPU to finish and discard whatever we can.
4047                  * This has a dramatic impact to reduce the number of
4048                  * OOM-killer events whilst running the GPU aggressively.
4049                  */
4050                 if (i915_gpu_idle(dev) == 0)
4051                         goto rescan;
4052         }
4053         DRM_UNLOCK(dev);
4054 }
4055
4056 void
4057 i915_gem_unload(struct drm_device *dev)
4058 {
4059         struct drm_i915_private *dev_priv;
4060
4061         dev_priv = dev->dev_private;
4062         EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
4063 }