1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  *
53  */
54
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
57
58 #include <drm/drmP.h>
59 #include <drm/i915_drm.h>
60 #include "i915_drv.h"
61 #include "intel_drv.h"
62 #include "intel_ringbuffer.h"
63 #include <linux/completion.h>
64 #include <linux/jiffies.h>
65 #include <linux/time.h>
66
67 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
68 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
69 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
70                                                     unsigned alignment,
71                                                     bool map_and_fenceable,
72                                                     bool nonblocking);
73 static int i915_gem_phys_pwrite(struct drm_device *dev,
74                                 struct drm_i915_gem_object *obj,
75                                 struct drm_i915_gem_pwrite *args,
76                                 struct drm_file *file);
77
78 static void i915_gem_write_fence(struct drm_device *dev, int reg,
79                                  struct drm_i915_gem_object *obj);
80 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
81                                          struct drm_i915_fence_reg *fence,
82                                          bool enable);
83
84 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
85     int tiling_mode);
86 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
87     uint32_t size, int tiling_mode);
88 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
89     int flags);
90 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
91 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92
93 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
94 {
95         if (obj->tiling_mode)
96                 i915_gem_release_mmap(obj);
97
98         /* As we do not have an associated fence register, we will force
99          * a tiling change if we ever need to acquire one.
100          */
101         obj->fence_dirty = false;
102         obj->fence_reg = I915_FENCE_REG_NONE;
103 }
104
105 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
106 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
107 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
108 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
109 static void i915_gem_reset_fences(struct drm_device *dev);
110 static void i915_gem_lowmem(void *arg);
111
112 /* some bookkeeping */
113 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
114                                   size_t size)
115 {
116         dev_priv->mm.object_count++;
117         dev_priv->mm.object_memory += size;
118 }
119
120 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
121                                      size_t size)
122 {
123         dev_priv->mm.object_count--;
124         dev_priv->mm.object_memory -= size;
125 }
126
127 static int
128 i915_gem_wait_for_error(struct drm_device *dev)
129 {
130         struct drm_i915_private *dev_priv = dev->dev_private;
131         struct completion *x = &dev_priv->error_completion;
132         int ret;
133
134         if (!atomic_read(&dev_priv->mm.wedged))
135                 return 0;
136
137         /*
138          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
139          * userspace. If it takes that long something really bad is going on and
140          * we should simply try to bail out and fail as gracefully as possible.
141          */
142         ret = wait_for_completion_interruptible_timeout(x, 10*hz);
143         if (ret == 0) {
144                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
145                 return -EIO;
146         } else if (ret < 0) {
147                 return ret;
148         }
149
150         if (atomic_read(&dev_priv->mm.wedged)) {
151                 /* GPU is hung, bump the completion count to account for
152                  * the token we just consumed so that we never hit zero and
153                  * end up waiting upon a subsequent completion event that
154                  * will never happen.
155                  */
156                 lockmgr(&x->wait.lock, LK_EXCLUSIVE);
157                 x->done++;
158                 lockmgr(&x->wait.lock, LK_RELEASE);
159         }
160         return 0;
161 }
162
163 int i915_mutex_lock_interruptible(struct drm_device *dev)
164 {
165         int ret;
166
167         ret = i915_gem_wait_for_error(dev);
168         if (ret)
169                 return ret;
170
171         ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL);
172         if (ret)
173                 return -EINTR;
174
175         WARN_ON(i915_verify_lists(dev));
176         return 0;
177 }
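
/*
 * Editorial note: the ioctls below all follow the same locking pattern
 * built on i915_mutex_lock_interruptible().  A minimal sketch of that
 * pattern (hypothetical ioctl body, not code from this file):
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 *	if (&obj->base == NULL) {
 *		ret = -ENOENT;
 *		goto unlock;
 *	}
 *	... operate on obj ...
 *	drm_gem_object_unreference(&obj->base);
 * unlock:
 *	DRM_UNLOCK(dev);
 *	return ret;
 *
 * On DragonFly the struct mutex is a lockmgr lock taken with LK_SLEEPFAIL,
 * so a signal received while sleeping surfaces to userspace as -EINTR.
 */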
178
179 static inline bool
180 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
181 {
182         return !obj->active;
183 }
184
185 int
186 i915_gem_init_ioctl(struct drm_device *dev, void *data,
187                     struct drm_file *file)
188 {
189         struct drm_i915_gem_init *args = data;
190
191         if (drm_core_check_feature(dev, DRIVER_MODESET))
192                 return -ENODEV;
193
194         if (args->gtt_start >= args->gtt_end ||
195             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
196                 return -EINVAL;
197
198         /* GEM with user mode setting was never supported on ilk and later. */
199         if (INTEL_INFO(dev)->gen >= 5)
200                 return -ENODEV;
201
202         lockmgr(&dev->dev_lock, LK_EXCLUSIVE|LK_RETRY|LK_CANRECURSE);
203         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
204         lockmgr(&dev->dev_lock, LK_RELEASE);
205
206         return 0;
207 }
208
209 int
210 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
211                             struct drm_file *file)
212 {
213         struct drm_i915_private *dev_priv = dev->dev_private;
214         struct drm_i915_gem_get_aperture *args = data;
215         struct drm_i915_gem_object *obj;
216         size_t pinned;
217
218         pinned = 0;
219         DRM_LOCK(dev);
220         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
221                 if (obj->pin_count)
222                         pinned += obj->gtt_space->size;
223         DRM_UNLOCK(dev);
224
225         args->aper_size = dev_priv->mm.gtt_total;
226         args->aper_available_size = args->aper_size - pinned;
227
228         return 0;
229 }
230
231 static int
232 i915_gem_create(struct drm_file *file,
233                 struct drm_device *dev,
234                 uint64_t size,
235                 uint32_t *handle_p)
236 {
237         struct drm_i915_gem_object *obj;
238         int ret;
239         u32 handle;
240
241         size = roundup(size, PAGE_SIZE);
242         if (size == 0)
243                 return -EINVAL;
244
245         /* Allocate the new object */
246         obj = i915_gem_alloc_object(dev, size);
247         if (obj == NULL)
248                 return -ENOMEM;
249
250         handle = 0;
251         ret = drm_gem_handle_create(file, &obj->base, &handle);
252         if (ret) {
253                 drm_gem_object_release(&obj->base);
254                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
255                 drm_free(obj, M_DRM);
256                 return (-ret);
257         }
258
259         /* drop reference from allocate - handle holds it now */
260         drm_gem_object_unreference(&obj->base);
261         *handle_p = handle;
262         return 0;
263 }
264
265 int
266 i915_gem_dumb_create(struct drm_file *file,
267                      struct drm_device *dev,
268                      struct drm_mode_create_dumb *args)
269 {
270
271         /* have to work out size/pitch and return them */
272         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
273         args->size = args->pitch * args->height;
274         return i915_gem_create(file, dev,
275                                args->size, &args->handle);
276 }
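
/*
 * Editorial note: a worked example of the dumb-buffer size math above,
 * assuming a 1366x768 XRGB8888 surface (values are illustrative only):
 * bpp = 32, so (32 + 7) / 8 = 4 bytes per pixel; 1366 * 4 = 5464 bytes,
 * which roundup2() pads to the next multiple of 64, giving pitch = 5504.
 * size = 5504 * 768 = 4227072 bytes, and i915_gem_create() then rounds
 * the size up to a page boundary (a no-op here, as that is already
 * exactly 1032 pages).
 */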
277
278 int i915_gem_dumb_destroy(struct drm_file *file,
279                           struct drm_device *dev,
280                           uint32_t handle)
281 {
282
283         return drm_gem_handle_delete(file, handle);
284 }
285
286 /**
287  * Creates a new mm object and returns a handle to it.
288  */
289 int
290 i915_gem_create_ioctl(struct drm_device *dev, void *data,
291                       struct drm_file *file)
292 {
293         struct drm_i915_gem_create *args = data;
294
295         return i915_gem_create(file, dev,
296                                args->size, &args->handle);
297 }
298
299 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
300 {
301         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
302
303         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
304                 obj->tiling_mode != I915_TILING_NONE;
305 }
306
307 static inline void vm_page_reference(vm_page_t m)
308 {
309         vm_page_flag_set(m, PG_REFERENCED);
310 }
311
312 static int
313 i915_gem_shmem_pread(struct drm_device *dev,
314                      struct drm_i915_gem_object *obj,
315                      struct drm_i915_gem_pread *args,
316                      struct drm_file *file)
317 {
318         vm_object_t vm_obj;
319         vm_page_t m;
320         struct sf_buf *sf;
321         vm_offset_t mkva;
322         vm_pindex_t obj_pi;
323         int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
324
325         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
326
327         obj->dirty = 1;
328         vm_obj = obj->base.vm_obj;
329         ret = 0;
330
331         VM_OBJECT_LOCK(vm_obj);
332         vm_object_pip_add(vm_obj, 1);
333         while (args->size > 0) {
334                 obj_pi = OFF_TO_IDX(args->offset);
335                 obj_po = args->offset & PAGE_MASK;
336
337                 m = i915_gem_wire_page(vm_obj, obj_pi);
338                 VM_OBJECT_UNLOCK(vm_obj);
339
340                 sf = sf_buf_alloc(m);
341                 mkva = sf_buf_kva(sf);
342                 length = min(args->size, PAGE_SIZE - obj_po);
343                 while (length > 0) {
344                         if (do_bit17_swizzling &&
345                             (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
346                                 cnt = roundup2(obj_po + 1, 64);
347                                 cnt = min(cnt - obj_po, length);
348                                 swizzled_po = obj_po ^ 64;
349                         } else {
350                                 cnt = length;
351                                 swizzled_po = obj_po;
352                         }
353                         ret = -copyout_nofault(
354                             (char *)mkva + swizzled_po,
355                             (void *)(uintptr_t)args->data_ptr, cnt);
356                         if (ret != 0)
357                                 break;
358                         args->data_ptr += cnt;
359                         args->size -= cnt;
360                         length -= cnt;
361                         args->offset += cnt;
362                         obj_po += cnt;
363                 }
364                 sf_buf_free(sf);
365                 VM_OBJECT_LOCK(vm_obj);
366                 vm_page_reference(m);
367                 vm_page_busy_wait(m, FALSE, "i915gem");
368                 vm_page_unwire(m, 1);
369                 vm_page_wakeup(m);
370
371                 if (ret != 0)
372                         break;
373         }
374         vm_object_pip_wakeup(vm_obj);
375         VM_OBJECT_UNLOCK(vm_obj);
376
377         return (ret);
378 }
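
/*
 * Editorial note on the swizzle handling above: when the object is tiled
 * and the platform reports I915_BIT_6_SWIZZLE_9_10_17, pages whose
 * physical address has bit 17 set are laid out with each pair of 64-byte
 * blocks inside a 128-byte aligned region swapped relative to the linear
 * CPU view.  The copy loop compensates by reading from the XOR'd offset
 * (swizzled_po = obj_po ^ 64) and by clamping cnt with
 * roundup2(obj_po + 1, 64) so a single copy never crosses a 64-byte
 * boundary.  For example, with obj_po = 100: roundup2(101, 64) = 128,
 * so cnt <= 28 bytes, taken from kva offset 100 ^ 64 = 36.
 */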
379
380 /**
381  * Reads data from the object referenced by handle.
382  *
383  * On error, the contents of *data are undefined.
384  */
385 int
386 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
387                      struct drm_file *file)
388 {
389         struct drm_i915_gem_pread *args = data;
390         struct drm_i915_gem_object *obj;
391         int ret = 0;
392
393         if (args->size == 0)
394                 return 0;
395
396         ret = i915_mutex_lock_interruptible(dev);
397         if (ret)
398                 return ret;
399
400         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
401         if (&obj->base == NULL) {
402                 ret = -ENOENT;
403                 goto unlock;
404         }
405
406         /* Bounds check source.  */
407         if (args->offset > obj->base.size ||
408             args->size > obj->base.size - args->offset) {
409                 ret = -EINVAL;
410                 goto out;
411         }
412
413         ret = i915_gem_shmem_pread(dev, obj, args, file);
414 out:
415         drm_gem_object_unreference(&obj->base);
416 unlock:
417         DRM_UNLOCK(dev);
418         return ret;
419 }
420
421 static int
422 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
423     uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
424 {
425         vm_offset_t mkva;
426         int ret;
427
428         /*
429          * Pass the unaligned physical address and size to pmap_mapdev_attr()
430          * so it can properly calculate whether an extra page needs to be
431          * mapped or not to cover the requested range.  The function will
432          * add the page offset into the returned mkva for us.
433          */
434         mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
435             offset, size, PAT_WRITE_COMBINING);
436         ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
437         pmap_unmapdev(mkva, size);
438         return ret;
439 }
440
441 static int
442 i915_gem_shmem_pwrite(struct drm_device *dev,
443                       struct drm_i915_gem_object *obj,
444                       struct drm_i915_gem_pwrite *args,
445                       struct drm_file *file)
446 {
447         vm_object_t vm_obj;
448         vm_page_t m;
449         struct sf_buf *sf;
450         vm_offset_t mkva;
451         vm_pindex_t obj_pi;
452         int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
453
454         do_bit17_swizzling = 0;
455
456         obj->dirty = 1;
457         vm_obj = obj->base.vm_obj;
458         ret = 0;
459
460         VM_OBJECT_LOCK(vm_obj);
461         vm_object_pip_add(vm_obj, 1);
462         while (args->size > 0) {
463                 obj_pi = OFF_TO_IDX(args->offset);
464                 obj_po = args->offset & PAGE_MASK;
465
466                 m = i915_gem_wire_page(vm_obj, obj_pi);
467                 VM_OBJECT_UNLOCK(vm_obj);
468
469                 sf = sf_buf_alloc(m);
470                 mkva = sf_buf_kva(sf);
471                 length = min(args->size, PAGE_SIZE - obj_po);
472                 while (length > 0) {
473                         if (do_bit17_swizzling &&
474                             (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
475                                 cnt = roundup2(obj_po + 1, 64);
476                                 cnt = min(cnt - obj_po, length);
477                                 swizzled_po = obj_po ^ 64;
478                         } else {
479                                 cnt = length;
480                                 swizzled_po = obj_po;
481                         }
482                         ret = -copyin_nofault(
483                             (void *)(uintptr_t)args->data_ptr,
484                             (char *)mkva + swizzled_po, cnt);
485                         if (ret != 0)
486                                 break;
487                         args->data_ptr += cnt;
488                         args->size -= cnt;
489                         length -= cnt;
490                         args->offset += cnt;
491                         obj_po += cnt;
492                 }
493                 sf_buf_free(sf);
494                 VM_OBJECT_LOCK(vm_obj);
495                 vm_page_dirty(m);
496                 vm_page_reference(m);
497                 vm_page_busy_wait(m, FALSE, "i915gem");
498                 vm_page_unwire(m, 1);
499                 vm_page_wakeup(m);
500
501                 if (ret != 0)
502                         break;
503         }
504         vm_object_pip_wakeup(vm_obj);
505         VM_OBJECT_UNLOCK(vm_obj);
506
507         return (ret);
508 }
509
510 /**
511  * Writes data to the object referenced by handle.
512  *
513  * On error, the contents of the buffer that were to be modified are undefined.
514  */
515 int
516 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
517                       struct drm_file *file)
518 {
519         struct drm_i915_gem_pwrite *args = data;
520         struct drm_i915_gem_object *obj;
521         vm_page_t *ma;
522         vm_offset_t start, end;
523         int npages, ret;
524
525         if (args->size == 0)
526                 return 0;
527
528         start = trunc_page(args->data_ptr);
529         end = round_page(args->data_ptr + args->size);
530         npages = howmany(end - start, PAGE_SIZE);
531         ma = kmalloc(npages * sizeof(vm_page_t), M_DRM, M_WAITOK |
532             M_ZERO);
533         npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
534             (vm_offset_t)args->data_ptr, args->size,
535             VM_PROT_READ, ma, npages);
536         if (npages == -1) {
537                 ret = -EFAULT;
538                 goto free_ma;
539         }
540
541         ret = i915_mutex_lock_interruptible(dev);
542         if (ret != 0)
543                 goto unlocked;
544
545         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
546         if (&obj->base == NULL) {
547                 ret = -ENOENT;
548                 goto unlock;
549         }
550
551         /* Bounds check destination. */
552         if (args->offset > obj->base.size ||
553             args->size > obj->base.size - args->offset) {
554                 ret = -EINVAL;
555                 goto out;
556         }
557
558         if (obj->phys_obj) {
559                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
560         } else if (obj->gtt_space &&
561                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
562                 ret = i915_gem_object_pin(obj, 0, true, false);
563                 if (ret != 0)
564                         goto out;
565                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
566                 if (ret != 0)
567                         goto out_unpin;
568                 ret = i915_gem_object_put_fence(obj);
569                 if (ret != 0)
570                         goto out_unpin;
571                 ret = i915_gem_gtt_write(dev, obj, args->data_ptr, args->size,
572                     args->offset, file);
573 out_unpin:
574                 i915_gem_object_unpin(obj);
575         } else {
576                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
577                 if (ret != 0)
578                         goto out;
579                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
580         }
581 out:
582         drm_gem_object_unreference(&obj->base);
583 unlock:
584         DRM_UNLOCK(dev);
585 unlocked:
586         vm_page_unhold_pages(ma, npages);
587 free_ma:
588         drm_free(ma, M_DRM);
589         return ret;
590 }
591
592 int
593 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
594                      bool interruptible)
595 {
596         if (atomic_read(&dev_priv->mm.wedged)) {
597                 struct completion *x = &dev_priv->error_completion;
598                 bool recovery_complete;
599
600                 /* Give the error handler a chance to run. */
601                 lockmgr(&x->wait.lock, LK_EXCLUSIVE);
602                 recovery_complete = x->done > 0;
603                 lockmgr(&x->wait.lock, LK_RELEASE);
604
605                 /* Non-interruptible callers can't handle -EAGAIN, hence return
606                  * -EIO unconditionally for these. */
607                 if (!interruptible)
608                         return -EIO;
609
610                 /* Recovery complete, but still wedged means reset failure. */
611                 if (recovery_complete)
612                         return -EIO;
613
614                 return -EAGAIN;
615         }
616
617         return 0;
618 }
619
620 /*
621  * Compare seqno against outstanding lazy request. Emit a request if they are
622  * equal.
623  */
624 static int
625 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
626 {
627         int ret;
628
629         DRM_LOCK_ASSERT(ring->dev);
630
631         ret = 0;
632         if (seqno == ring->outstanding_lazy_request)
633                 ret = i915_add_request(ring, NULL, NULL);
634
635         return ret;
636 }
637
638 /**
639  * __wait_seqno - wait until execution of seqno has finished
640  * @ring: the ring expected to report seqno
641  * @seqno: the sequence number to wait for
642  * @interruptible: do an interruptible wait (normally yes)
643  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
644  *
645  * Returns 0 if the seqno was found within the allotted time. Else returns the
646  * errno with the remaining time filled in the timeout argument.
647  */
648 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
649                         bool interruptible, struct timespec *timeout)
650 {
651         drm_i915_private_t *dev_priv = ring->dev->dev_private;
652         struct timespec before, now, wait_time={1,0};
653         unsigned long timeout_jiffies;
654         long end;
655         bool wait_forever = true;
656         int ret;
657
658         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
659                 return 0;
660
661         if (timeout != NULL) {
662                 wait_time = *timeout;
663                 wait_forever = false;
664         }
665
666         timeout_jiffies = timespec_to_jiffies(&wait_time);
667
668         if (WARN_ON(!ring->irq_get(ring)))
669                 return -ENODEV;
670
671         /* Record current time in case interrupted by signal, or wedged */
672         getrawmonotonic(&before);
673
674 #define EXIT_COND \
675         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
676         atomic_read(&dev_priv->mm.wedged))
677         do {
678                 if (interruptible)
679                         end = wait_event_interruptible_timeout(ring->irq_queue,
680                                                                EXIT_COND,
681                                                                timeout_jiffies);
682                 else
683                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
684                                                  timeout_jiffies);
685
686                 ret = i915_gem_check_wedge(dev_priv, interruptible);
687                 if (ret)
688                         end = ret;
689         } while (end == 0 && wait_forever);
690
691         getrawmonotonic(&now);
692
693         ring->irq_put(ring);
694 #undef EXIT_COND
695
696         if (timeout) {
697                 struct timespec sleep_time = timespec_sub(now, before);
698                 *timeout = timespec_sub(*timeout, sleep_time);
699         }
700
701         switch (end) {
702         case -EIO:
703         case -EAGAIN: /* Wedged */
704         case -ERESTARTSYS: /* Signal */
705                 return (int)end;
706         case 0: /* Timeout */
707                 if (timeout)
708                         set_normalized_timespec(timeout, 0, 0);
709                 return -ETIMEDOUT;      /* -ETIME on Linux */
710         default: /* Completed */
711                 WARN_ON(end < 0); /* We're not aware of other errors */
712                 return 0;
713         }
714 }
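
/*
 * Editorial note: when the caller passes timeout == NULL the loop above
 * re-arms in one-second slices (wait_time = {1, 0}) so that a wedged GPU
 * is still noticed via i915_gem_check_wedge() between slices.  When a
 * timeout is supplied, the elapsed time measured with getrawmonotonic()
 * is subtracted from *timeout before returning, and a plain timeout maps
 * to -ETIMEDOUT here (Linux returns -ETIME for the same case).
 */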
715
716 /**
717  * Waits for a sequence number to be signaled, and cleans up the
718  * request and object lists appropriately for that event.
719  */
720 int
721 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
722 {
723         struct drm_device *dev = ring->dev;
724         struct drm_i915_private *dev_priv = dev->dev_private;
725         int ret = 0;
726
727         DRM_LOCK_ASSERT(dev);
728         BUG_ON(seqno == 0);
729
730         ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
731         if (ret)
732                 return ret;
733
734         ret = i915_gem_check_olr(ring, seqno);
735         if (ret)
736                 return ret;
737
738         ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
739
740         return ret;
741 }
742
743 /**
744  * Ensures that all rendering to the object has completed and the object is
745  * safe to unbind from the GTT or access from the CPU.
746  */
747 static __must_check int
748 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
749                                bool readonly)
750 {
751         struct intel_ring_buffer *ring = obj->ring;
752         u32 seqno;
753         int ret;
754
755         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
756         if (seqno == 0)
757                 return 0;
758
759         ret = i915_wait_seqno(ring, seqno);
760         if (ret)
761                 return ret;
762
763         i915_gem_retire_requests_ring(ring);
764
765         /* Manually manage the write flush as we may have not yet
766          * retired the buffer.
767          */
768         if (obj->last_write_seqno &&
769             i915_seqno_passed(seqno, obj->last_write_seqno)) {
770                 obj->last_write_seqno = 0;
771                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
772         }
773
774         return 0;
775 }
776
777 /* A nonblocking variant of the above wait. This is a highly dangerous routine
778  * as the object state may change during this call.
779  */
780 static __must_check int
781 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
782                                             bool readonly)
783 {
784         struct drm_device *dev = obj->base.dev;
785         struct drm_i915_private *dev_priv = dev->dev_private;
786         struct intel_ring_buffer *ring = obj->ring;
787         u32 seqno;
788         int ret;
789
790         DRM_LOCK_ASSERT(dev);
791         BUG_ON(!dev_priv->mm.interruptible);
792
793         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
794         if (seqno == 0)
795                 return 0;
796
797         ret = i915_gem_check_wedge(dev_priv, true);
798         if (ret)
799                 return ret;
800
801         ret = i915_gem_check_olr(ring, seqno);
802         if (ret)
803                 return ret;
804
805         DRM_UNLOCK(dev);
806         ret = __wait_seqno(ring, seqno, true, NULL);
807         DRM_LOCK(dev);
808
809         i915_gem_retire_requests_ring(ring);
810
811         /* Manually manage the write flush as we may have not yet
812          * retired the buffer.
813          */
814         if (obj->last_write_seqno &&
815             i915_seqno_passed(seqno, obj->last_write_seqno)) {
816                 obj->last_write_seqno = 0;
817                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
818         }
819
820         return ret;
821 }
822
823 /**
824  * Called when user space prepares to use an object with the CPU, either
825  * through the mmap ioctl's mapping or a GTT mapping.
826  */
827 int
828 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
829                           struct drm_file *file)
830 {
831         struct drm_i915_gem_set_domain *args = data;
832         struct drm_i915_gem_object *obj;
833         uint32_t read_domains = args->read_domains;
834         uint32_t write_domain = args->write_domain;
835         int ret;
836
837         /* Only handle setting domains to types used by the CPU. */
838         if (write_domain & I915_GEM_GPU_DOMAINS)
839                 return -EINVAL;
840
841         if (read_domains & I915_GEM_GPU_DOMAINS)
842                 return -EINVAL;
843
844         /* Having something in the write domain implies it's in the read
845          * domain, and only that read domain.  Enforce that in the request.
846          */
847         if (write_domain != 0 && read_domains != write_domain)
848                 return -EINVAL;
849
850         ret = i915_mutex_lock_interruptible(dev);
851         if (ret)
852                 return ret;
853
854         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
855         if (&obj->base == NULL) {
856                 ret = -ENOENT;
857                 goto unlock;
858         }
859
860         /* Try to flush the object off the GPU without holding the lock.
861          * We will repeat the flush holding the lock in the normal manner
862          * to catch cases where we are gazumped.
863          */
864         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
865         if (ret)
866                 goto unref;
867
868         if (read_domains & I915_GEM_DOMAIN_GTT) {
869                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
870
871                 /* Silently promote "you're not bound, there was nothing to do"
872                  * to success, since the client was just asking us to
873                  * make sure everything was done.
874                  */
875                 if (ret == -EINVAL)
876                         ret = 0;
877         } else {
878                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
879         }
880
881 unref:
882         drm_gem_object_unreference(&obj->base);
883 unlock:
884         DRM_UNLOCK(dev);
885         return ret;
886 }
887
888 /**
889  * Called when user space has done writes to this buffer
890  */
891 int
892 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
893                          struct drm_file *file)
894 {
895         struct drm_i915_gem_sw_finish *args = data;
896         struct drm_i915_gem_object *obj;
897         int ret = 0;
898
899         ret = i915_mutex_lock_interruptible(dev);
900         if (ret)
901                 return ret;
902         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
903         if (&obj->base == NULL) {
904                 ret = -ENOENT;
905                 goto unlock;
906         }
907
908         /* Pinned buffers may be scanout, so flush the cache */
909         if (obj->pin_count)
910                 i915_gem_object_flush_cpu_write_domain(obj);
911
912         drm_gem_object_unreference(&obj->base);
913 unlock:
914         DRM_UNLOCK(dev);
915         return ret;
916 }
917
918 /**
919  * Maps the contents of an object, returning the address it is mapped
920  * into.
921  *
922  * While the mapping holds a reference on the contents of the object, it doesn't
923  * imply a ref on the object itself.
924  */
925 int
926 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
927                     struct drm_file *file)
928 {
929         struct drm_i915_gem_mmap *args = data;
930         struct drm_gem_object *obj;
931         struct proc *p = curproc;
932         vm_map_t map = &p->p_vmspace->vm_map;
933         vm_offset_t addr;
934         vm_size_t size;
935         int error = 0, rv;
936
937         obj = drm_gem_object_lookup(dev, file, args->handle);
938         if (obj == NULL)
939                 return -ENOENT;
940
941         if (args->size == 0)
942                 goto out;
943
944         size = round_page(args->size);
945         if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
946                 error = ENOMEM;
947                 goto out;
948         }
949
950         addr = 0;
951         vm_object_hold(obj->vm_obj);
952         vm_object_reference_locked(obj->vm_obj);
953         vm_object_drop(obj->vm_obj);
954         rv = vm_map_find(map, obj->vm_obj, NULL,
955                          args->offset, &addr, args->size,
956                          PAGE_SIZE, /* align */
957                          TRUE, /* fitit */
958                          VM_MAPTYPE_NORMAL, /* maptype */
959                          VM_PROT_READ | VM_PROT_WRITE, /* prot */
960                          VM_PROT_READ | VM_PROT_WRITE, /* max */
961                          MAP_SHARED /* cow */);
962         if (rv != KERN_SUCCESS) {
963                 vm_object_deallocate(obj->vm_obj);
964                 error = -vm_mmap_to_errno(rv);
965         } else {
966                 args->addr_ptr = (uint64_t)addr;
967         }
968 out:
969         drm_gem_object_unreference(obj);
970         return (error);
971 }
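
/*
 * Editorial note: a minimal userspace sketch of driving this ioctl,
 * assuming libdrm's drmIoctl() wrapper (illustrative only):
 *
 *	struct drm_i915_gem_mmap arg = { .handle = handle, .offset = 0,
 *					 .size = size };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * The mapping takes its own reference on the object's backing vm_obj, so
 * the contents stay resident for the life of the mapping, but (as noted
 * above) no reference is held on the GEM object itself.
 */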
972
973 /**
974  * i915_gem_release_mmap - remove physical page mappings
975  * @obj: obj in question
976  *
977  * Preserve the reservation of the mmapping with the DRM core code, but
978  * relinquish ownership of the pages back to the system.
979  *
980  * It is vital that we remove the page mapping if we have mapped a tiled
981  * object through the GTT and then lose the fence register due to
982  * resource pressure. Similarly if the object has been moved out of the
983  * aperture, then pages mapped into userspace must be revoked. Removing the
984  * mapping will then trigger a page fault on the next user access, allowing
985  * fixup by i915_gem_fault().
986  */
987 void
988 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
989 {
990         vm_object_t devobj;
991         vm_page_t m;
992         int i, page_count;
993
994         if (!obj->fault_mappable)
995                 return;
996
997         devobj = cdev_pager_lookup(obj);
998         if (devobj != NULL) {
999                 page_count = OFF_TO_IDX(obj->base.size);
1000
1001                 VM_OBJECT_LOCK(devobj);
1002                 for (i = 0; i < page_count; i++) {
1003                         m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1004                         if (m == NULL)
1005                                 continue;
1006                         cdev_pager_free_page(devobj, m);
1007                 }
1008                 VM_OBJECT_UNLOCK(devobj);
1009                 vm_object_deallocate(devobj);
1010         }
1011
1012         obj->fault_mappable = false;
1013 }
1014
1015 static uint32_t
1016 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1017 {
1018         uint32_t gtt_size;
1019
1020         if (INTEL_INFO(dev)->gen >= 4 ||
1021             tiling_mode == I915_TILING_NONE)
1022                 return size;
1023
1024         /* Previous chips need a power-of-two fence region when tiling */
1025         if (INTEL_INFO(dev)->gen == 3)
1026                 gtt_size = 1024*1024;
1027         else
1028                 gtt_size = 512*1024;
1029
1030         while (gtt_size < size)
1031                 gtt_size <<= 1;
1032
1033         return gtt_size;
1034 }
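
/*
 * Editorial note: a worked example of the fence-size rounding above
 * (illustrative values): a 700 KiB tiled object on gen3 starts from
 * 1 MiB, which already covers 700 KiB, so the fence region is 1 MiB;
 * the same object on gen2 starts from 512 KiB and doubles once to 1 MiB.
 * On gen4+ or for untiled objects the object size is used unchanged.
 */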
1035
1036 /**
1037  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1038  * @obj: object to check
1039  *
1040  * Return the required GTT alignment for an object, taking into account
1041  * potential fence register mapping.
1042  */
1043 static uint32_t
1044 i915_gem_get_gtt_alignment(struct drm_device *dev,
1045                            uint32_t size,
1046                            int tiling_mode)
1047 {
1048
1049         /*
1050          * Minimum alignment is 4k (GTT page size), but might be greater
1051          * if a fence register is needed for the object.
1052          */
1053         if (INTEL_INFO(dev)->gen >= 4 ||
1054             tiling_mode == I915_TILING_NONE)
1055                 return 4096;
1056
1057         /*
1058          * Previous chips need to be aligned to the size of the smallest
1059          * fence register that can contain the object.
1060          */
1061         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1062 }
1063
1064 /**
1065  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1066  *                                       unfenced object
1067  * @dev: the device
1068  * @size: size of the object
1069  * @tiling_mode: tiling mode of the object
1070  *
1071  * Return the required GTT alignment for an object, only taking into account
1072  * unfenced tiled surface requirements.
1073  */
1074 uint32_t
1075 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1076                                     uint32_t size,
1077                                     int tiling_mode)
1078 {
1079         /*
1080          * Minimum alignment is 4k (GTT page size) for sane hw.
1081          */
1082         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1083             tiling_mode == I915_TILING_NONE)
1084                 return 4096;
1085
1086         /* Previous hardware however needs to be aligned to a power-of-two
1087          * tile height. The simplest method for determining this is to reuse
1088  * the power-of-two tiled object size.
1089          */
1090         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1091 }
1092
1093 int
1094 i915_gem_mmap_gtt(struct drm_file *file,
1095                   struct drm_device *dev,
1096                   uint32_t handle,
1097                   uint64_t *offset)
1098 {
1099         struct drm_i915_private *dev_priv = dev->dev_private;
1100         struct drm_i915_gem_object *obj;
1101         int ret;
1102
1103         ret = i915_mutex_lock_interruptible(dev);
1104         if (ret)
1105                 return ret;
1106
1107         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1108         if (&obj->base == NULL) {
1109                 ret = -ENOENT;
1110                 goto unlock;
1111         }
1112
1113         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1114                 ret = -E2BIG;
1115                 goto out;
1116         }
1117
1118         if (obj->madv != I915_MADV_WILLNEED) {
1119                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1120                 ret = -EINVAL;
1121                 goto out;
1122         }
1123
1124         ret = drm_gem_create_mmap_offset(&obj->base);
1125         if (ret)
1126                 goto out;
1127
1128         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1129             DRM_GEM_MAPPING_KEY;
1130 out:
1131         drm_gem_object_unreference(&obj->base);
1132 unlock:
1133         DRM_UNLOCK(dev);
1134         return ret;
1135 }
1136
1137 /**
1138  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1139  * @dev: DRM device
1140  * @data: GTT mapping ioctl data
1141  * @file: GEM object info
1142  *
1143  * Simply returns the fake offset to userspace so it can mmap it.
1144  * The mmap call will end up in drm_gem_mmap(), which will set things
1145  * up so we can get faults in the handler above.
1146  *
1147  * The fault handler will take care of binding the object into the GTT
1148  * (since it may have been evicted to make room for something), allocating
1149  * a fence register, and mapping the appropriate aperture address into
1150  * userspace.
1151  */
1152 int
1153 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1154                         struct drm_file *file)
1155 {
1156         struct drm_i915_gem_mmap_gtt *args = data;
1157
1158         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1159 }
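
/*
 * Editorial note: a minimal userspace sketch of the GTT-mapping path,
 * assuming libdrm's drmIoctl() wrapper (illustrative only):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, arg.offset);
 *
 * The offset handed back by i915_gem_mmap_gtt() above is a fake,
 * per-object token (built here from the DRM map-list key), not a real
 * aperture address; mmap() on the device node resolves it back to the
 * object and faults pages in through the GTT.
 */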
1160
1161 /* Immediately discard the backing storage */
1162 static void
1163 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1164 {
1165         vm_object_t vm_obj;
1166
1167         vm_obj = obj->base.vm_obj;
1168         VM_OBJECT_LOCK(vm_obj);
1169         vm_object_page_remove(vm_obj, 0, 0, false);
1170         VM_OBJECT_UNLOCK(vm_obj);
1171         obj->madv = __I915_MADV_PURGED;
1172 }
1173
1174 static inline int
1175 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1176 {
1177         return obj->madv == I915_MADV_DONTNEED;
1178 }
1179
1180 static void
1181 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1182 {
1183         vm_page_t m;
1184         int page_count, i;
1185
1186         BUG_ON(obj->madv == __I915_MADV_PURGED);
1187
1188         if (obj->tiling_mode != I915_TILING_NONE)
1189                 i915_gem_object_save_bit_17_swizzle(obj);
1190         if (obj->madv == I915_MADV_DONTNEED)
1191                 obj->dirty = 0;
1192         page_count = obj->base.size / PAGE_SIZE;
1193         VM_OBJECT_LOCK(obj->base.vm_obj);
1194 #if GEM_PARANOID_CHECK_GTT
1195         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1196 #endif
1197         for (i = 0; i < page_count; i++) {
1198                 m = obj->pages[i];
1199                 if (obj->dirty)
1200                         vm_page_dirty(m);
1201                 if (obj->madv == I915_MADV_WILLNEED)
1202                         vm_page_reference(m);
1203                 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1204                 vm_page_unwire(obj->pages[i], 1);
1205                 vm_page_wakeup(obj->pages[i]);
1206         }
1207         VM_OBJECT_UNLOCK(obj->base.vm_obj);
1208         obj->dirty = 0;
1209         drm_free(obj->pages, M_DRM);
1210         obj->pages = NULL;
1211 }
1212
1213 static int
1214 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1215     int flags)
1216 {
1217         struct drm_device *dev;
1218         vm_object_t vm_obj;
1219         vm_page_t m;
1220         int page_count, i, j;
1221
1222         dev = obj->base.dev;
1223         KASSERT(obj->pages == NULL, ("Obj already has pages"));
1224         page_count = obj->base.size / PAGE_SIZE;
1225         obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1226             M_WAITOK);
1227         vm_obj = obj->base.vm_obj;
1228         VM_OBJECT_LOCK(vm_obj);
1229         for (i = 0; i < page_count; i++) {
1230                 if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
1231                         goto failed;
1232         }
1233         VM_OBJECT_UNLOCK(vm_obj);
1234         if (i915_gem_object_needs_bit17_swizzle(obj))
1235                 i915_gem_object_do_bit_17_swizzle(obj);
1236         return (0);
1237
1238 failed:
1239         for (j = 0; j < i; j++) {
1240                 m = obj->pages[j];
1241                 vm_page_busy_wait(m, FALSE, "i915gem");
1242                 vm_page_unwire(m, 0);
1243                 vm_page_wakeup(m);
1244         }
1245         VM_OBJECT_UNLOCK(vm_obj);
1246         drm_free(obj->pages, M_DRM);
1247         obj->pages = NULL;
1248         return (-EIO);
1249 }
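
/*
 * Editorial note: get_pages/put_pages above bracket the lifetime of the
 * object's backing store.  i915_gem_wire_page() faults in and wires each
 * backing page into the flat obj->pages array (one vm_page_t per
 * PAGE_SIZE of the object); put_pages dirties the pages when the object
 * is still dirty and not marked DONTNEED, drops the wiring, and frees
 * the array.
 */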
1250
1251 void
1252 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1253                                struct intel_ring_buffer *ring)
1254 {
1255         struct drm_device *dev = obj->base.dev;
1256         struct drm_i915_private *dev_priv = dev->dev_private;
1257         u32 seqno = intel_ring_get_seqno(ring);
1258
1259         BUG_ON(ring == NULL);
1260         obj->ring = ring;
1261
1262         /* Add a reference if we're newly entering the active list. */
1263         if (!obj->active) {
1264                 drm_gem_object_reference(&obj->base);
1265                 obj->active = 1;
1266         }
1267
1268         /* Move from whatever list we were on to the tail of execution. */
1269         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1270         list_move_tail(&obj->ring_list, &ring->active_list);
1271
1272         obj->last_read_seqno = seqno;
1273
1274         if (obj->fenced_gpu_access) {
1275                 obj->last_fenced_seqno = seqno;
1276
1277                 /* Bump MRU to take account of the delayed flush */
1278                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1279                         struct drm_i915_fence_reg *reg;
1280
1281                         reg = &dev_priv->fence_regs[obj->fence_reg];
1282                         list_move_tail(&reg->lru_list,
1283                                        &dev_priv->mm.fence_list);
1284                 }
1285         }
1286 }
1287
1288 static void
1289 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1290 {
1291         struct drm_device *dev = obj->base.dev;
1292         struct drm_i915_private *dev_priv = dev->dev_private;
1293
1294         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1295         BUG_ON(!obj->active);
1296
1297         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1298
1299         list_del_init(&obj->ring_list);
1300         obj->ring = NULL;
1301
1302         obj->last_read_seqno = 0;
1303         obj->last_write_seqno = 0;
1304         obj->base.write_domain = 0;
1305
1306         obj->last_fenced_seqno = 0;
1307         obj->fenced_gpu_access = false;
1308
1309         obj->active = 0;
1310         drm_gem_object_unreference(&obj->base);
1311
1312         WARN_ON(i915_verify_lists(dev));
1313 }
1314
1315 static int
1316 i915_gem_handle_seqno_wrap(struct drm_device *dev)
1317 {
1318         struct drm_i915_private *dev_priv = dev->dev_private;
1319         struct intel_ring_buffer *ring;
1320         int ret, i, j;
1321
1322         /* The hardware uses various monotonic 32-bit counters; if we
1323          * detect that they are about to wrap around, we need to idle the
1324          * GPU and reset those counters.
1325          */
1326         ret = 0;
1327         for_each_ring(ring, dev_priv, i) {
1328                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1329                         ret |= ring->sync_seqno[j] != 0;
1330         }
1331         if (ret == 0)
1332                 return ret;
1333
1334         ret = i915_gpu_idle(dev);
1335         if (ret)
1336                 return ret;
1337
1338         i915_gem_retire_requests(dev);
1339         for_each_ring(ring, dev_priv, i) {
1340                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1341                         ring->sync_seqno[j] = 0;
1342         }
1343
1344         return 0;
1345 }
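
/*
 * Editorial note: the wrap handler above only has to act when some ring
 * has a non-zero sync_seqno, i.e. when inter-ring synchronisation state
 * would otherwise compare pre- and post-wrap values.  Idling the GPU and
 * retiring everything lets those counters be cleared safely before
 * next_seqno is restarted at 1 by i915_gem_get_seqno() below.
 */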
1346
1347 int
1348 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1349 {
1350         struct drm_i915_private *dev_priv = dev->dev_private;
1351
1352         /* reserve 0 for non-seqno */
1353         if (dev_priv->next_seqno == 0) {
1354                 int ret = i915_gem_handle_seqno_wrap(dev);
1355                 if (ret)
1356                         return ret;
1357
1358                 dev_priv->next_seqno = 1;
1359         }
1360
1361         *seqno = dev_priv->next_seqno++;
1362         return 0;
1363 }
1364
1365 int
1366 i915_add_request(struct intel_ring_buffer *ring,
1367                  struct drm_file *file,
1368                  u32 *out_seqno)
1369 {
1370         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1371         struct drm_i915_gem_request *request;
1372         u32 request_ring_position;
1373         int was_empty;
1374         int ret;
1375
1376         /*
1377          * Emit any outstanding flushes - execbuf can fail to emit the flush
1378          * after having emitted the batchbuffer command. Hence we need to fix
1379          * things up similar to emitting the lazy request. The difference here
1380          * is that the flush _must_ happen before the next request, no matter
1381          * what.
1382          */
1383         ret = intel_ring_flush_all_caches(ring);
1384         if (ret)
1385                 return ret;
1386
1387         request = kmalloc(sizeof(*request), M_DRM, M_WAITOK | M_ZERO);
1388         if (request == NULL)
1389                 return -ENOMEM;
1390
1391
1392         /* Record the position of the start of the request so that
1393          * should we detect the updated seqno part-way through the
1394          * GPU processing the request, we never over-estimate the
1395          * position of the head.
1396          */
1397         request_ring_position = intel_ring_get_tail(ring);
1398
1399         ret = ring->add_request(ring);
1400         if (ret) {
1401                 kfree(request, M_DRM);
1402                 return ret;
1403         }
1404
1405         request->seqno = intel_ring_get_seqno(ring);
1406         request->ring = ring;
1407         request->tail = request_ring_position;
1408         request->emitted_jiffies = jiffies;
1409         was_empty = list_empty(&ring->request_list);
1410         list_add_tail(&request->list, &ring->request_list);
1411         request->file_priv = NULL;
1412
1413         if (file) {
1414                 struct drm_i915_file_private *file_priv = file->driver_priv;
1415
1416                 spin_lock(&file_priv->mm.lock);
1417                 request->file_priv = file_priv;
1418                 list_add_tail(&request->client_list,
1419                               &file_priv->mm.request_list);
1420                 spin_unlock(&file_priv->mm.lock);
1421         }
1422
1423         ring->outstanding_lazy_request = 0;
1424
1425         if (!dev_priv->mm.suspended) {
1426                 if (i915_enable_hangcheck) {
1427                         mod_timer(&dev_priv->hangcheck_timer,
1428                                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1429                 }
1430                 if (was_empty) {
1431                         queue_delayed_work(dev_priv->wq,
1432                                            &dev_priv->mm.retire_work,
1433                                            round_jiffies_up_relative(hz));
1434                         intel_mark_busy(dev_priv->dev);
1435                 }
1436         }
1437
1438         if (out_seqno)
1439                 *out_seqno = request->seqno;
1440         return 0;
1441 }
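
/*
 * Editorial note: each request therefore records the seqno it will
 * signal, the ring tail position at which it was emitted (used during
 * retirement to advance last_retired_head), and the jiffies timestamp at
 * emission.  Emitting a request also clears outstanding_lazy_request
 * and, when the ring was previously empty, kicks off the periodic
 * retire_work handled below.
 */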
1442
1443 static inline void
1444 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1445 {
1446         struct drm_i915_file_private *file_priv = request->file_priv;
1447
1448         if (!file_priv)
1449                 return;
1450
1451         spin_lock(&file_priv->mm.lock);
1452         if (request->file_priv) {
1453                 list_del(&request->client_list);
1454                 request->file_priv = NULL;
1455         }
1456         spin_unlock(&file_priv->mm.lock);
1457 }
1458
1459 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1460                                       struct intel_ring_buffer *ring)
1461 {
1462         while (!list_empty(&ring->request_list)) {
1463                 struct drm_i915_gem_request *request;
1464
1465                 request = list_first_entry(&ring->request_list,
1466                                            struct drm_i915_gem_request,
1467                                            list);
1468
1469                 list_del(&request->list);
1470                 i915_gem_request_remove_from_client(request);
1471                 drm_free(request, M_DRM);
1472         }
1473
1474         while (!list_empty(&ring->active_list)) {
1475                 struct drm_i915_gem_object *obj;
1476
1477                 obj = list_first_entry(&ring->active_list,
1478                                        struct drm_i915_gem_object,
1479                                        ring_list);
1480
1481                 i915_gem_object_move_to_inactive(obj);
1482         }
1483 }
1484
1485 static void i915_gem_reset_fences(struct drm_device *dev)
1486 {
1487         struct drm_i915_private *dev_priv = dev->dev_private;
1488         int i;
1489
1490         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1491                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1492
1493                 i915_gem_write_fence(dev, i, NULL);
1494
1495                 if (reg->obj)
1496                         i915_gem_object_fence_lost(reg->obj);
1497
1498                 reg->pin_count = 0;
1499                 reg->obj = NULL;
1500                 INIT_LIST_HEAD(&reg->lru_list);
1501         }
1502
1503         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1504 }
1505
1506 void i915_gem_reset(struct drm_device *dev)
1507 {
1508         struct drm_i915_private *dev_priv = dev->dev_private;
1509         struct drm_i915_gem_object *obj;
1510         struct intel_ring_buffer *ring;
1511         int i;
1512
1513         for_each_ring(ring, dev_priv, i)
1514                 i915_gem_reset_ring_lists(dev_priv, ring);
1515
1516         /* Move everything out of the GPU domains to ensure we do any
1517          * necessary invalidation upon reuse.
1518          */
1519         list_for_each_entry(obj,
1520                             &dev_priv->mm.inactive_list,
1521                             mm_list)
1522         {
1523                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1524         }
1525
1526         /* The fence registers are invalidated so clear them out */
1527         i915_gem_reset_fences(dev);
1528 }
1529
1530 /**
1531  * This function clears the request list as sequence numbers are passed.
1532  */
1533 void
1534 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1535 {
1536         uint32_t seqno;
1537
1538         if (list_empty(&ring->request_list))
1539                 return;
1540
1541         WARN_ON(i915_verify_lists(ring->dev));
1542
1543         seqno = ring->get_seqno(ring, true);
1544
1545         while (!list_empty(&ring->request_list)) {
1546                 struct drm_i915_gem_request *request;
1547
1548                 request = list_first_entry(&ring->request_list,
1549                                            struct drm_i915_gem_request,
1550                                            list);
1551
1552                 if (!i915_seqno_passed(seqno, request->seqno))
1553                         break;
1554
1555                 /* We know the GPU must have read the request to have
1556                  * sent us the seqno + interrupt, so use the position
1557          * of the tail of the request to update the last known position
1558                  * of the GPU head.
1559                  */
1560                 ring->last_retired_head = request->tail;
1561
1562                 list_del(&request->list);
1563                 i915_gem_request_remove_from_client(request);
1564                 kfree(request, M_DRM);
1565         }
1566
1567         /* Move any buffers on the active list that are no longer referenced
1568          * by the ringbuffer to the flushing/inactive lists as appropriate.
1569          */
1570         while (!list_empty(&ring->active_list)) {
1571                 struct drm_i915_gem_object *obj;
1572
1573                 obj = list_first_entry(&ring->active_list,
1574                                       struct drm_i915_gem_object,
1575                                       ring_list);
1576
1577                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1578                         break;
1579
1580                 i915_gem_object_move_to_inactive(obj);
1581         }
1582
1583         if (unlikely(ring->trace_irq_seqno &&
1584                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1585                 ring->irq_put(ring);
1586                 ring->trace_irq_seqno = 0;
1587         }
1588
1589 }
1590
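/*
 * Retirement above relies on i915_seqno_passed(), which compares seqnos
 * with wrap-safe signed arithmetic (essentially (int32_t)(seq1 - seq2) >= 0).
 * A short worked example with hypothetical values:
 *
 *	seqno read from HW	request->seqno	passed?
 *	0x00000010		0x0000000f	yes (0x10 - 0x0f = 1 >= 0)
 *	0x00000002		0xfffffffe	yes (wraps to 4 >= 0)
 *	0xfffffffe		0x00000002	no  (wraps to -4 < 0)
 *
 * so requests keep retiring in order even across a 32-bit seqno wrap.
 */
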
1591 void
1592 i915_gem_retire_requests(struct drm_device *dev)
1593 {
1594         drm_i915_private_t *dev_priv = dev->dev_private;
1595         struct intel_ring_buffer *ring;
1596         int i;
1597
1598         for_each_ring(ring, dev_priv, i)
1599                 i915_gem_retire_requests_ring(ring);
1600 }
1601
1602 static void
1603 i915_gem_retire_work_handler(struct work_struct *work)
1604 {
1605         drm_i915_private_t *dev_priv;
1606         struct drm_device *dev;
1607         struct intel_ring_buffer *ring;
1608         bool idle;
1609         int i;
1610
1611         dev_priv = container_of(work, drm_i915_private_t,
1612                                 mm.retire_work.work);
1613         dev = dev_priv->dev;
1614
1615         /* Come back later if the device is busy... */
1616         if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
1617                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1618                                    round_jiffies_up_relative(hz));
1619                 return;
1620         }
1621
1622         i915_gem_retire_requests(dev);
1623
1624         /* Send a periodic flush down the ring so we don't hold onto GEM
1625          * objects indefinitely.
1626          */
1627         idle = true;
1628         for_each_ring(ring, dev_priv, i) {
1629                 if (ring->gpu_caches_dirty)
1630                         i915_add_request(ring, NULL, NULL);
1631
1632                 idle &= list_empty(&ring->request_list);
1633         }
1634
1635         if (!dev_priv->mm.suspended && !idle)
1636                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1637                                    round_jiffies_up_relative(hz));
1638         if (idle)
1639                 intel_mark_idle(dev);
1640
1641         DRM_UNLOCK(dev);
1642 }
1643 /**
1644  * Ensures that an object will eventually get non-busy by flushing any required
1645  * write domains, emitting any outstanding lazy request and retiring any
1646  * completed requests.
1647  */
1648 static int
1649 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1650 {
1651         int ret;
1652
1653         if (obj->active) {
1654                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1655                 if (ret)
1656                         return ret;
1657
1658                 i915_gem_retire_requests_ring(obj->ring);
1659         }
1660
1661         return 0;
1662 }
1663
1664 /**
1665  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
1666  * @DRM_IOCTL_ARGS: standard ioctl arguments
1667  *
1668  * Returns 0 if successful, else an error is returned with the remaining time in
1669  * the timeout parameter.
1670  *  -ETIMEDOUT: object is still busy after timeout
1671  *  -ERESTARTSYS: signal interrupted the wait
1672  *  -ENOENT: object doesn't exist
1673  * Also possible, but rare:
1674  *  -EAGAIN: GPU wedged
1675  *  -ENOMEM: damn
1676  *  -ENODEV: Internal IRQ fail
1677  *  -E?: The add request failed
1678  *
1679  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
1680  * non-zero timeout parameter the wait ioctl will wait for the given number of
1681  * nanoseconds on an object becoming unbusy. Since the wait itself does so
1682  * without holding struct_mutex the object may become re-busied before this
1683  * function completes. A similar but shorter race condition exists in the busy
1684  * ioctl.
1685  */
1686 int
1687 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1688 {
1689         struct drm_i915_gem_wait *args = data;
1690         struct drm_i915_gem_object *obj;
1691         struct intel_ring_buffer *ring = NULL;
1692         struct timespec timeout_stack, *timeout = NULL;
1693         u32 seqno = 0;
1694         int ret = 0;
1695
1696         if (args->timeout_ns >= 0) {
1697                 timeout_stack = ns_to_timespec(args->timeout_ns);
1698                 timeout = &timeout_stack;
1699         }
1700
1701         ret = i915_mutex_lock_interruptible(dev);
1702         if (ret)
1703                 return ret;
1704
1705         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1706         if (&obj->base == NULL) {
1707                 DRM_UNLOCK(dev);
1708                 return -ENOENT;
1709         }
1710
1711         /* Need to make sure the object gets inactive eventually. */
1712         ret = i915_gem_object_flush_active(obj);
1713         if (ret)
1714                 goto out;
1715
1716         if (obj->active) {
1717                 seqno = obj->last_read_seqno;
1718                 ring = obj->ring;
1719         }
1720
1721         if (seqno == 0)
1722                  goto out;
1723
1724         /* Do this after OLR check to make sure we make forward progress polling
1725          * on this IOCTL with a 0 timeout (like busy ioctl)
1726          */
1727         if (!args->timeout_ns) {
1728                 ret = -ETIMEDOUT;
1729                 goto out;
1730         }
1731
1732         drm_gem_object_unreference(&obj->base);
1733         DRM_UNLOCK(dev);
1734
1735         ret = __wait_seqno(ring, seqno, true, timeout);
1736         if (timeout) {
1737                 WARN_ON(!timespec_valid(timeout));
1738                 args->timeout_ns = timespec_to_ns(timeout);
1739         }
1740         return ret;
1741
1742 out:
1743         drm_gem_object_unreference(&obj->base);
1744         DRM_UNLOCK(dev);
1745         return ret;
1746 }
1747
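/*
 * Userspace sketch (not driver code, assumes libdrm): waiting on a buffer
 * with the ioctl above.  "fd" and "bo_handle" are placeholder names.
 *
 *	struct drm_i915_gem_wait wait;
 *
 *	memset(&wait, 0, sizeof(wait));
 *	wait.bo_handle = bo_handle;
 *	wait.timeout_ns = 1000000;		wait at most 1ms
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *		ret == 0: object idle, wait.timeout_ns holds the time left
 *		otherwise: errno reports the failure (e.g. still busy)
 */
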
1748 /**
1749  * i915_gem_object_sync - sync an object to a ring.
1750  *
1751  * @obj: object which may be in use on another ring.
1752  * @to: ring we wish to use the object on. May be NULL.
1753  *
1754  * This code is meant to abstract object synchronization with the GPU.
1755  * Calling with NULL implies synchronizing the object with the CPU
1756  * rather than a particular GPU ring.
1757  *
1758  * Returns 0 if successful, else propagates up the lower layer error.
1759  */
1760 int
1761 i915_gem_object_sync(struct drm_i915_gem_object *obj,
1762                      struct intel_ring_buffer *to)
1763 {
1764         struct intel_ring_buffer *from = obj->ring;
1765         u32 seqno;
1766         int ret, idx;
1767
1768         if (from == NULL || to == from)
1769                 return 0;
1770
1771         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
1772                 return i915_gem_object_wait_rendering(obj, false);
1773
1774         idx = intel_ring_sync_index(from, to);
1775
1776         seqno = obj->last_read_seqno;
1777         if (seqno <= from->sync_seqno[idx])
1778                 return 0;
1779
1780         ret = i915_gem_check_olr(obj->ring, seqno);
1781         if (ret)
1782                 return ret;
1783
1784         ret = to->sync_to(to, from, seqno);
1785         if (!ret)
1786                 /* We use last_read_seqno because sync_to()
1787                  * might have just caused seqno wrap under
1788                  * the radar.
1789                  */
1790                 from->sync_seqno[idx] = obj->last_read_seqno;
1791
1792         return ret;
1793 }
1794
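/*
 * Example (sketch) of the two paths above: syncing a buffer last written
 * by one ring before using it on another.  "blt_ring" is a placeholder.
 *
 *	ret = i915_gem_object_sync(obj, blt_ring);
 *		with semaphores enabled this queues a GPU-side wait on
 *		obj->last_read_seqno instead of blocking the CPU;
 *	ret = i915_gem_object_sync(obj, NULL);
 *		falls back to i915_gem_object_wait_rendering(), i.e. a
 *		CPU wait, for CPU access.
 */
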
1795 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1796 {
1797         u32 old_write_domain, old_read_domains;
1798
1799         /* Act as a barrier for all accesses through the GTT */
1800         cpu_mfence();
1801
1802         /* Force a pagefault for domain tracking on next user access */
1803         i915_gem_release_mmap(obj);
1804
1805         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1806                 return;
1807
1808         old_read_domains = obj->base.read_domains;
1809         old_write_domain = obj->base.write_domain;
1810
1811         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1812         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1813
1814 }
1815
1816 /**
1817  * Unbinds an object from the GTT aperture.
1818  */
1819 int
1820 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
1821 {
1822         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1823         int ret = 0;
1824
1825         if (obj->gtt_space == NULL)
1826                 return 0;
1827
1828         if (obj->pin_count)
1829                 return -EBUSY;
1830
1831         ret = i915_gem_object_finish_gpu(obj);
1832         if (ret)
1833                 return ret;
1834         /* Continue on if we fail due to EIO, the GPU is hung so we
1835          * should be safe and we need to cleanup or else we might
1836          * cause memory corruption through use-after-free.
1837          */
1838
1839         i915_gem_object_finish_gtt(obj);
1840
1841         /* Move the object to the CPU domain to ensure that
1842          * any possible CPU writes while it's not in the GTT
1843          * are flushed when we go to remap it.
1844          */
1845         if (ret == 0)
1846                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1847         if (ret == -ERESTARTSYS)
1848                 return ret;
1849         if (ret) {
1850                 /* In the event of a disaster, abandon all caches and
1851                  * hope for the best.
1852                  */
1853                 i915_gem_clflush_object(obj);
1854                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1855         }
1856
1857         /* release the fence reg _after_ flushing */
1858         ret = i915_gem_object_put_fence(obj);
1859         if (ret)
1860                 return ret;
1861
1862         if (obj->has_global_gtt_mapping)
1863                 i915_gem_gtt_unbind_object(obj);
1864         if (obj->has_aliasing_ppgtt_mapping) {
1865                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1866                 obj->has_aliasing_ppgtt_mapping = 0;
1867         }
1868         i915_gem_gtt_finish_object(obj);
1869
1870         i915_gem_object_put_pages_gtt(obj);
1871
1872         list_del_init(&obj->gtt_list);
1873         list_del_init(&obj->mm_list);
1874         /* Avoid an unnecessary call to unbind on rebind. */
1875         obj->map_and_fenceable = true;
1876
1877         drm_mm_put_block(obj->gtt_space);
1878         obj->gtt_space = NULL;
1879         obj->gtt_offset = 0;
1880
1881         if (i915_gem_object_is_purgeable(obj))
1882                 i915_gem_object_truncate(obj);
1883
1884         return ret;
1885 }
1886
1887 int i915_gpu_idle(struct drm_device *dev)
1888 {
1889         drm_i915_private_t *dev_priv = dev->dev_private;
1890         struct intel_ring_buffer *ring;
1891         int ret, i;
1892
1893         /* Flush everything onto the inactive list. */
1894         for_each_ring(ring, dev_priv, i) {
1895                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
1896                 if (ret)
1897                         return ret;
1898
1899                 ret = intel_ring_idle(ring);
1900                 if (ret)
1901                         return ret;
1902         }
1903
1904         return 0;
1905 }
1906
1907 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
1908                                         struct drm_i915_gem_object *obj)
1909 {
1910         drm_i915_private_t *dev_priv = dev->dev_private;
1911         uint64_t val;
1912
1913         if (obj) {
1914                 u32 size = obj->gtt_space->size;
1915
1916                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1917                                  0xfffff000) << 32;
1918                 val |= obj->gtt_offset & 0xfffff000;
1919                 val |= (uint64_t)((obj->stride / 128) - 1) <<
1920                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
1921
1922                 if (obj->tiling_mode == I915_TILING_Y)
1923                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1924                 val |= I965_FENCE_REG_VALID;
1925         } else
1926                 val = 0;
1927
1928         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
1929         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
1930 }
1931
1932 static void i965_write_fence_reg(struct drm_device *dev, int reg,
1933                                  struct drm_i915_gem_object *obj)
1934 {
1935         drm_i915_private_t *dev_priv = dev->dev_private;
1936         uint64_t val;
1937
1938         if (obj) {
1939                 u32 size = obj->gtt_space->size;
1940
1941                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1942                                  0xfffff000) << 32;
1943                 val |= obj->gtt_offset & 0xfffff000;
1944                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1945                 if (obj->tiling_mode == I915_TILING_Y)
1946                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1947                 val |= I965_FENCE_REG_VALID;
1948         } else
1949                 val = 0;
1950
1951         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
1952         POSTING_READ(FENCE_REG_965_0 + reg * 8);
1953 }
1954
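/*
 * Worked example for the i965 layout above (hypothetical values): an
 * X-tiled object bound at gtt_offset 0x00100000, size 0x00100000 and
 * stride 512 produces
 *
 *	upper 32 bits:	(0x00100000 + 0x00100000 - 4096) & 0xfffff000
 *			= 0x001ff000			(end of range)
 *	lower 32 bits:	0x00100000			(start of range)
 *			| ((512 / 128) - 1) << I965_FENCE_PITCH_SHIFT
 *			| I965_FENCE_REG_VALID
 *
 * i.e. start, end, pitch in 128-byte units minus one, the tiling mode
 * and a valid bit.  The Sandybridge variant differs only in the pitch
 * shift and register offsets.
 */
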
1955 static void i915_write_fence_reg(struct drm_device *dev, int reg,
1956                                  struct drm_i915_gem_object *obj)
1957 {
1958         drm_i915_private_t *dev_priv = dev->dev_private;
1959         u32 val;
1960
1961         if (obj) {
1962                 u32 size = obj->gtt_space->size;
1963                 int pitch_val;
1964                 int tile_width;
1965
1966                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
1967                      (size & -size) != size ||
1968                      (obj->gtt_offset & (size - 1)),
1969                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
1970                      obj->gtt_offset, obj->map_and_fenceable, size);
1971
1972                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1973                         tile_width = 128;
1974                 else
1975                         tile_width = 512;
1976
1977                 /* Note: pitch better be a power of two tile widths */
1978                 pitch_val = obj->stride / tile_width;
1979                 pitch_val = ffs(pitch_val) - 1;
1980
1981                 val = obj->gtt_offset;
1982                 if (obj->tiling_mode == I915_TILING_Y)
1983                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1984                 val |= I915_FENCE_SIZE_BITS(size);
1985                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1986                 val |= I830_FENCE_REG_VALID;
1987         } else
1988                 val = 0;
1989
1990         if (reg < 8)
1991                 reg = FENCE_REG_830_0 + reg * 4;
1992         else
1993                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
1994
1995         I915_WRITE(reg, val);
1996         POSTING_READ(reg);
1997 }
1998
1999 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2000                                 struct drm_i915_gem_object *obj)
2001 {
2002         drm_i915_private_t *dev_priv = dev->dev_private;
2003         uint32_t val;
2004
2005         if (obj) {
2006                 u32 size = obj->gtt_space->size;
2007                 uint32_t pitch_val;
2008
2009                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2010                      (size & -size) != size ||
2011                      (obj->gtt_offset & (size - 1)),
2012                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2013                      obj->gtt_offset, size);
2014
2015                 pitch_val = obj->stride / 128;
2016                 pitch_val = ffs(pitch_val) - 1;
2017
2018                 val = obj->gtt_offset;
2019                 if (obj->tiling_mode == I915_TILING_Y)
2020                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2021                 val |= I830_FENCE_SIZE_BITS(size);
2022                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2023                 val |= I830_FENCE_REG_VALID;
2024         } else
2025                 val = 0;
2026
2027         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2028         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2029 }
2030
2031 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2032                                  struct drm_i915_gem_object *obj)
2033 {
2034         switch (INTEL_INFO(dev)->gen) {
2035         case 7:
2036         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2037         case 5:
2038         case 4: i965_write_fence_reg(dev, reg, obj); break;
2039         case 3: i915_write_fence_reg(dev, reg, obj); break;
2040         case 2: i830_write_fence_reg(dev, reg, obj); break;
2041         default: break;
2042         }
2043 }
2044
2045 static inline int fence_number(struct drm_i915_private *dev_priv,
2046                                struct drm_i915_fence_reg *fence)
2047 {
2048         return fence - dev_priv->fence_regs;
2049 }
2050
2051 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2052                                          struct drm_i915_fence_reg *fence,
2053                                          bool enable)
2054 {
2055         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2056         int reg = fence_number(dev_priv, fence);
2057
2058         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2059
2060         if (enable) {
2061                 obj->fence_reg = reg;
2062                 fence->obj = obj;
2063                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2064         } else {
2065                 obj->fence_reg = I915_FENCE_REG_NONE;
2066                 fence->obj = NULL;
2067                 list_del_init(&fence->lru_list);
2068         }
2069 }
2070
2071 static int
2072 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2073 {
2074         if (obj->last_fenced_seqno) {
2075                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2076                 if (ret)
2077                         return ret;
2078
2079                 obj->last_fenced_seqno = 0;
2080         }
2081
2082         /* Ensure that all CPU reads are completed before installing a fence
2083          * and all writes before removing the fence.
2084          */
2085         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2086                 cpu_mfence();
2087
2088         obj->fenced_gpu_access = false;
2089         return 0;
2090 }
2091
2092 int
2093 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2094 {
2095         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2096         int ret;
2097
2098         ret = i915_gem_object_flush_fence(obj);
2099         if (ret)
2100                 return ret;
2101
2102         if (obj->fence_reg == I915_FENCE_REG_NONE)
2103                 return 0;
2104
2105         i915_gem_object_update_fence(obj,
2106                                      &dev_priv->fence_regs[obj->fence_reg],
2107                                      false);
2108         i915_gem_object_fence_lost(obj);
2109
2110         return 0;
2111 }
2112
2113 static struct drm_i915_fence_reg *
2114 i915_find_fence_reg(struct drm_device *dev)
2115 {
2116         struct drm_i915_private *dev_priv = dev->dev_private;
2117         struct drm_i915_fence_reg *reg, *avail;
2118         int i;
2119
2120         /* First try to find a free reg */
2121         avail = NULL;
2122         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2123                 reg = &dev_priv->fence_regs[i];
2124                 if (!reg->obj)
2125                         return reg;
2126
2127                 if (!reg->pin_count)
2128                         avail = reg;
2129         }
2130
2131         if (avail == NULL)
2132                 return NULL;
2133
2134         /* None available, try to steal one or wait for a user to finish */
2135         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2136                 if (reg->pin_count)
2137                         continue;
2138
2139                 return reg;
2140         }
2141
2142         return NULL;
2143 }
2144
2145 /**
2146  * i915_gem_object_get_fence - set up fencing for an object
2147  * @obj: object to map through a fence reg
2148  *
2149  * When mapping objects through the GTT, userspace wants to be able to write
2150  * to them without having to worry about swizzling if the object is tiled.
2151  * This function walks the fence regs looking for a free one for @obj,
2152  * stealing one if it can't find any.
2153  *
2154  * It then sets up the reg based on the object's properties: address, pitch
2155  * and tiling format.
2156  *
2157  * For an untiled surface, this removes any existing fence.
2158  */
2159 int
2160 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2161 {
2162         struct drm_device *dev = obj->base.dev;
2163         struct drm_i915_private *dev_priv = dev->dev_private;
2164         bool enable = obj->tiling_mode != I915_TILING_NONE;
2165         struct drm_i915_fence_reg *reg;
2166         int ret;
2167
2168         /* Have we updated the tiling parameters upon the object and so
2169          * will need to serialise the write to the associated fence register?
2170          */
2171         if (obj->fence_dirty) {
2172                 ret = i915_gem_object_flush_fence(obj);
2173                 if (ret)
2174                         return ret;
2175         }
2176
2177         /* Just update our place in the LRU if our fence is getting reused. */
2178         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2179                 reg = &dev_priv->fence_regs[obj->fence_reg];
2180                 if (!obj->fence_dirty) {
2181                         list_move_tail(&reg->lru_list,
2182                                        &dev_priv->mm.fence_list);
2183                         return 0;
2184                 }
2185         } else if (enable) {
2186                 reg = i915_find_fence_reg(dev);
2187                 if (reg == NULL)
2188                         return -EDEADLK;
2189
2190                 if (reg->obj) {
2191                         struct drm_i915_gem_object *old = reg->obj;
2192
2193                         ret = i915_gem_object_flush_fence(old);
2194                         if (ret)
2195                                 return ret;
2196
2197                         i915_gem_object_fence_lost(old);
2198                 }
2199         } else
2200                 return 0;
2201
2202         i915_gem_object_update_fence(obj, reg, enable);
2203         obj->fence_dirty = false;
2204
2205         return 0;
2206 }
2207
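/*
 * Typical caller pattern (sketch): pin a tiled object into the mappable
 * aperture and then take a fence so GTT access is detiled:
 *
 *	ret = i915_gem_object_pin(obj, 0, true, false);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 *
 * For an untiled surface the same call removes any fence still attached,
 * as noted in the comment above.
 */
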
2208 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2209                                      struct drm_mm_node *gtt_space,
2210                                      unsigned long cache_level)
2211 {
2212         struct drm_mm_node *other;
2213
2214         /* On non-LLC machines we have to be careful when putting differing
2215          * types of snoopable memory together to avoid the prefetcher
2216          * crossing memory domains and dieing.
2217          */
2218          * crossing memory domains and dying.
2219                 return true;
2220
2221         if (gtt_space == NULL)
2222                 return true;
2223
2224         if (list_empty(&gtt_space->node_list))
2225                 return true;
2226
2227         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2228         if (other->allocated && !other->hole_follows && other->color != cache_level)
2229                 return false;
2230
2231         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2232         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2233                 return false;
2234
2235         return true;
2236 }
2237
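/*
 * Illustration of the rule above on non-LLC parts: two GTT nodes with
 * different cache levels must not touch; drm_mm's colouring leaves a
 * guard hole between them.
 *
 *	[ snooped object ][ hole ][ uncached object ]	accepted
 *	[ snooped object ][ uncached object ]		rejected
 *
 * On machines with an LLC the check is skipped entirely.
 */
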
2238 static void i915_gem_verify_gtt(struct drm_device *dev)
2239 {
2240 #if WATCH_GTT
2241         struct drm_i915_private *dev_priv = dev->dev_private;
2242         struct drm_i915_gem_object *obj;
2243         int err = 0;
2244
2245         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2246                 if (obj->gtt_space == NULL) {
2247                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
2248                         err++;
2249                         continue;
2250                 }
2251
2252                 if (obj->cache_level != obj->gtt_space->color) {
2253                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2254                                obj->gtt_space->start,
2255                                obj->gtt_space->start + obj->gtt_space->size,
2256                                obj->cache_level,
2257                                obj->gtt_space->color);
2258                         err++;
2259                         continue;
2260                 }
2261
2262                 if (!i915_gem_valid_gtt_space(dev,
2263                                               obj->gtt_space,
2264                                               obj->cache_level)) {
2265                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2266                                obj->gtt_space->start,
2267                                obj->gtt_space->start + obj->gtt_space->size,
2268                                obj->cache_level);
2269                         err++;
2270                         continue;
2271                 }
2272         }
2273
2274         WARN_ON(err);
2275 #endif
2276 }
2277
2278 /**
2279  * Finds free space in the GTT aperture and binds the object there.
2280  */
2281 static int
2282 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2283                             unsigned alignment,
2284                             bool map_and_fenceable,
2285                             bool nonblocking)
2286 {
2287         struct drm_device *dev = obj->base.dev;
2288         drm_i915_private_t *dev_priv = dev->dev_private;
2289         struct drm_mm_node *free_space;
2290         uint32_t size, fence_size, fence_alignment, unfenced_alignment;
2291         bool mappable, fenceable;
2292         int ret;
2293
2294         if (obj->madv != I915_MADV_WILLNEED) {
2295                 DRM_ERROR("Attempting to bind a purgeable object\n");
2296                 return -EINVAL;
2297         }
2298
2299         fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
2300             obj->tiling_mode);
2301         fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
2302             obj->tiling_mode);
2303         unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
2304             obj->base.size, obj->tiling_mode);
2305         if (alignment == 0)
2306                 alignment = map_and_fenceable ? fence_alignment :
2307                     unfenced_alignment;
2308         if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
2309                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2310                 return -EINVAL;
2311         }
2312
2313         size = map_and_fenceable ? fence_size : obj->base.size;
2314
2315         /* If the object is bigger than the entire aperture, reject it early
2316          * before evicting everything in a vain attempt to find space.
2317          */
2318         if (obj->base.size > (map_and_fenceable ?
2319             dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2320                 DRM_ERROR(
2321 "Attempting to bind an object larger than the aperture\n");
2322                 return -E2BIG;
2323         }
2324
2325  search_free:
2326         if (map_and_fenceable)
2327                 free_space =
2328                         drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2329                                                           size, alignment, obj->cache_level,
2330                                                           0, dev_priv->mm.gtt_mappable_end,
2331                                                           false);
2332         else
2333                 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2334                                                       size, alignment, obj->cache_level,
2335                                                       false);
2336
2337         if (free_space != NULL) {
2338                 if (map_and_fenceable)
2339                         obj->gtt_space =
2340                                 drm_mm_get_block_range_generic(free_space,
2341                                                                size, alignment, obj->cache_level,
2342                                                                0, dev_priv->mm.gtt_mappable_end,
2343                                                                false);
2344                 else
2345                         obj->gtt_space =
2346                                 drm_mm_get_block_generic(free_space,
2347                                                          size, alignment, obj->cache_level,
2348                                                          false);
2349         }
2350         if (obj->gtt_space == NULL) {
2351                 ret = i915_gem_evict_something(dev, size, alignment,
2352                                                obj->cache_level,
2353                                                map_and_fenceable,
2354                                                nonblocking);
2355                 if (ret)
2356                         return ret;
2357
2358                 goto search_free;
2359         }
2360
2361         /*
2362          * NOTE: i915_gem_object_get_pages_gtt() cannot
2363          *       return ENOMEM, since we used VM_ALLOC_RETRY.
2364          */
2365         ret = i915_gem_object_get_pages_gtt(obj, 0);
2366         if (ret != 0) {
2367                 drm_mm_put_block(obj->gtt_space);
2368                 obj->gtt_space = NULL;
2369                 return ret;
2370         }
2371
2372         i915_gem_gtt_bind_object(obj, obj->cache_level);
2373         if (ret != 0) {
2374                 i915_gem_object_put_pages_gtt(obj);
2375                 drm_mm_put_block(obj->gtt_space);
2376                 obj->gtt_space = NULL;
2377                 if (i915_gem_evict_everything(dev))
2378                         return (ret);
2379                 goto search_free;
2380         }
2381
2382         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2383         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2384
2385         obj->gtt_offset = obj->gtt_space->start;
2386
2387         fenceable =
2388                 obj->gtt_space->size == fence_size &&
2389                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2390
2391         mappable =
2392                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2393         obj->map_and_fenceable = mappable && fenceable;
2394
2395         i915_gem_verify_gtt(dev);
2396         return 0;
2397 }
2398
2399 void
2400 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2401 {
2402
2403         /* If we don't have a page list set up, then we're not pinned
2404          * to GPU, and we can ignore the cache flush because it'll happen
2405          * again at bind time.
2406          */
2407         if (obj->pages == NULL)
2408                 return;
2409
2410         /* If the GPU is snooping the contents of the CPU cache,
2411          * we do not need to manually clear the CPU cache lines.  However,
2412          * the caches are only snooped when the render cache is
2413          * flushed/invalidated.  As we always have to emit invalidations
2414          * and flushes when moving into and out of the RENDER domain, correct
2415          * snooping behaviour occurs naturally as the result of our domain
2416          * tracking.
2417          */
2418         if (obj->cache_level != I915_CACHE_NONE)
2419                 return;
2420
2421         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2422 }
2423
2424 /** Flushes the GTT write domain for the object if it's dirty. */
2425 static void
2426 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2427 {
2428         uint32_t old_write_domain;
2429
2430         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2431                 return;
2432
2433         /* No actual flushing is required for the GTT write domain.  Writes
2434          * to it immediately go to main memory as far as we know, so there's
2435          * no chipset flush.  It also doesn't land in render cache.
2436          *
2437          * However, we do have to enforce the order so that all writes through
2438          * the GTT land before any writes to the device, such as updates to
2439          * the GATT itself.
2440          */
2441         cpu_sfence();
2442
2443         old_write_domain = obj->base.write_domain;
2444         obj->base.write_domain = 0;
2445 }
2446
2447 /** Flushes the CPU write domain for the object if it's dirty. */
2448 static void
2449 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2450 {
2451         uint32_t old_write_domain;
2452
2453         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2454                 return;
2455
2456         i915_gem_clflush_object(obj);
2457         intel_gtt_chipset_flush();
2458         old_write_domain = obj->base.write_domain;
2459         obj->base.write_domain = 0;
2460 }
2461
2462 /**
2463  * Moves a single object to the GTT read, and possibly write domain.
2464  *
2465  * This function returns when the move is complete, including waiting on
2466  * flushes to occur.
2467  */
2468 int
2469 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2470 {
2471         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2472         uint32_t old_write_domain, old_read_domains;
2473         int ret;
2474
2475         /* Not valid to be called on unbound objects. */
2476         if (obj->gtt_space == NULL)
2477                 return -EINVAL;
2478
2479         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2480                 return 0;
2481
2482         ret = i915_gem_object_wait_rendering(obj, !write);
2483         if (ret)
2484                 return ret;
2485
2486         i915_gem_object_flush_cpu_write_domain(obj);
2487
2488         old_write_domain = obj->base.write_domain;
2489         old_read_domains = obj->base.read_domains;
2490
2491         /* It should now be out of any other write domains, and we can update
2492          * the domain values for our changes.
2493          */
2494         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2495         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2496         if (write) {
2497                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2498                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2499                 obj->dirty = 1;
2500         }
2501
2502         /* And bump the LRU for this access */
2503         if (i915_gem_object_is_inactive(obj))
2504                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2505
2506         return 0;
2507 }
2508
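/*
 * Typical usage (sketch): before accessing an object through a GTT
 * mapping, move it into the GTT domain so stale CPU cache lines are
 * flushed and domain tracking stays correct:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);	for writes
 *	ret = i915_gem_object_set_to_gtt_domain(obj, false);	read-only
 *
 * The write == true case also marks the object dirty so its contents are
 * preserved when it is later unbound.
 */
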
2509 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2510                                     enum i915_cache_level cache_level)
2511 {
2512         struct drm_device *dev = obj->base.dev;
2513         drm_i915_private_t *dev_priv = dev->dev_private;
2514         int ret;
2515
2516         if (obj->cache_level == cache_level)
2517                 return 0;
2518
2519         if (obj->pin_count) {
2520                 DRM_DEBUG("can not change the cache level of pinned objects\n");
2521                 return -EBUSY;
2522         }
2523
2524         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2525                 ret = i915_gem_object_unbind(obj);
2526                 if (ret)
2527                         return ret;
2528         }
2529
2530         if (obj->gtt_space) {
2531                 ret = i915_gem_object_finish_gpu(obj);
2532                 if (ret)
2533                         return ret;
2534
2535                 i915_gem_object_finish_gtt(obj);
2536
2537                 /* Before SandyBridge, you could not use tiling or fence
2538                  * registers with snooped memory, so relinquish any fences
2539                  * currently pointing to our region in the aperture.
2540                  */
2541                 if (INTEL_INFO(dev)->gen < 6) {
2542                         ret = i915_gem_object_put_fence(obj);
2543                         if (ret)
2544                                 return ret;
2545                 }
2546
2547                 if (obj->has_global_gtt_mapping)
2548                         i915_gem_gtt_bind_object(obj, cache_level);
2549                 if (obj->has_aliasing_ppgtt_mapping)
2550                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2551                                                obj, cache_level);
2552
2553                 obj->gtt_space->color = cache_level;
2554         }
2555
2556         if (cache_level == I915_CACHE_NONE) {
2557                 u32 old_read_domains, old_write_domain;
2558
2559                 /* If we're coming from LLC cached, then we haven't
2560                  * actually been tracking whether the data is in the
2561                  * CPU cache or not, since we only allow one bit set
2562                  * in obj->write_domain and have been skipping the clflushes.
2563                  * Just set it to the CPU cache for now.
2564                  */
2565                 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
2566                     ("obj %p in CPU write domain", obj));
2567                 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
2568                     ("obj %p in CPU read domain", obj));
2569
2570                 old_read_domains = obj->base.read_domains;
2571                 old_write_domain = obj->base.write_domain;
2572
2573                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2574                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2575
2576         }
2577
2578         obj->cache_level = cache_level;
2579         i915_gem_verify_gtt(dev);
2580         return 0;
2581 }
2582
2583 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2584                                struct drm_file *file)
2585 {
2586         struct drm_i915_gem_caching *args = data;
2587         struct drm_i915_gem_object *obj;
2588         int ret;
2589
2590         ret = i915_mutex_lock_interruptible(dev);
2591         if (ret)
2592                 return ret;
2593
2594         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2595         if (&obj->base == NULL) {
2596                 ret = -ENOENT;
2597                 goto unlock;
2598         }
2599
2600         args->caching = obj->cache_level != I915_CACHE_NONE;
2601
2602         drm_gem_object_unreference(&obj->base);
2603 unlock:
2604         DRM_UNLOCK(dev);
2605         return ret;
2606 }
2607
2608 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2609                                struct drm_file *file)
2610 {
2611         struct drm_i915_gem_caching *args = data;
2612         struct drm_i915_gem_object *obj;
2613         enum i915_cache_level level;
2614         int ret;
2615
2616         switch (args->caching) {
2617         case I915_CACHING_NONE:
2618                 level = I915_CACHE_NONE;
2619                 break;
2620         case I915_CACHING_CACHED:
2621                 level = I915_CACHE_LLC;
2622                 break;
2623         default:
2624                 return -EINVAL;
2625         }
2626
2627         ret = i915_mutex_lock_interruptible(dev);
2628         if (ret)
2629                 return ret;
2630
2631         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2632         if (&obj->base == NULL) {
2633                 ret = -ENOENT;
2634                 goto unlock;
2635         }
2636
2637         ret = i915_gem_object_set_cache_level(obj, level);
2638
2639         drm_gem_object_unreference(&obj->base);
2640 unlock:
2641         DRM_UNLOCK(dev);
2642         return ret;
2643 }
2644
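/*
 * Userspace sketch (assumes libdrm): requesting LLC caching for a buffer
 * via the ioctl above.  "fd" and "bo_handle" are placeholder names.
 *
 *	struct drm_i915_gem_caching caching;
 *
 *	caching.handle = bo_handle;
 *	caching.caching = I915_CACHING_CACHED;
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching);
 *
 * The request is refused (-EBUSY) while the object is pinned.
 */
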
2645 /*
2646  * Prepare buffer for display plane (scanout, cursors, etc).
2647  * Can be called from an uninterruptible phase (modesetting) and allows
2648  * any flushes to be pipelined (for pageflips).
2649  */
2650 int
2651 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2652                                      u32 alignment,
2653                                      struct intel_ring_buffer *pipelined)
2654 {
2655         u32 old_read_domains, old_write_domain;
2656         int ret;
2657
2658         if (pipelined != obj->ring) {
2659                 ret = i915_gem_object_sync(obj, pipelined);
2660                 if (ret)
2661                         return ret;
2662         }
2663
2664         /* The display engine is not coherent with the LLC cache on gen6.  As
2665          * a result, we make sure that the pinning that is about to occur is
2666          * done with uncached PTEs. This is the lowest common denominator for all
2667          * chipsets.
2668          *
2669          * However for gen6+, we could do better by using the GFDT bit instead
2670          * of uncaching, which would allow us to flush all the LLC-cached data
2671          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2672          */
2673         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2674         if (ret)
2675                 return ret;
2676
2677         /* As the user may map the buffer once pinned in the display plane
2678          * (e.g. libkms for the bootup splash), we have to ensure that we
2679          * always use map_and_fenceable for all scanout buffers.
2680          */
2681         ret = i915_gem_object_pin(obj, alignment, true, false);
2682         if (ret)
2683                 return ret;
2684
2685         i915_gem_object_flush_cpu_write_domain(obj);
2686
2687         old_write_domain = obj->base.write_domain;
2688         old_read_domains = obj->base.read_domains;
2689
2690         /* It should now be out of any other write domains, and we can update
2691          * the domain values for our changes.
2692          */
2693         obj->base.write_domain = 0;
2694         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2695
2696         return 0;
2697 }
2698
2699 int
2700 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2701 {
2702         int ret;
2703
2704         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2705                 return 0;
2706
2707         ret = i915_gem_object_wait_rendering(obj, false);
2708         if (ret)
2709                 return ret;
2710
2711         /* Ensure that we invalidate the GPU's caches and TLBs. */
2712         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2713         return 0;
2714 }
2715
2716 /**
2717  * Moves a single object to the CPU read, and possibly write domain.
2718  *
2719  * This function returns when the move is complete, including waiting on
2720  * flushes to occur.
2721  */
2722 int
2723 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2724 {
2725         uint32_t old_write_domain, old_read_domains;
2726         int ret;
2727
2728         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2729                 return 0;
2730
2731         ret = i915_gem_object_wait_rendering(obj, !write);
2732         if (ret)
2733                 return ret;
2734
2735         i915_gem_object_flush_gtt_write_domain(obj);
2736
2737         old_write_domain = obj->base.write_domain;
2738         old_read_domains = obj->base.read_domains;
2739
2740         /* Flush the CPU cache if it's still invalid. */
2741         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2742                 i915_gem_clflush_object(obj);
2743
2744                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2745         }
2746
2747         /* It should now be out of any other write domains, and we can update
2748          * the domain values for our changes.
2749          */
2750         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2751
2752         /* If we're writing through the CPU, then the GPU read domains will
2753          * need to be invalidated at next use.
2754          */
2755         if (write) {
2756                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2757                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2758         }
2759
2760         return 0;
2761 }
2762
2763 /* Throttle our rendering by waiting until the ring has completed our requests
2764  * emitted over 20 msec ago.
2765  *
2766  * Note that if we were to use the current jiffies each time around the loop,
2767  * we wouldn't escape the function with any frames outstanding if the time to
2768  * render a frame was over 20ms.
2769  *
2770  * This should get us reasonable parallelism between CPU and GPU but also
2771  * relatively low latency when blocking on a particular request to finish.
2772  */
2773 static int
2774 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2775 {
2776         struct drm_i915_private *dev_priv = dev->dev_private;
2777         struct drm_i915_file_private *file_priv = file->driver_priv;
2778         unsigned long recent_enough = ticks - (20 * hz / 1000);
2779         struct drm_i915_gem_request *request;
2780         struct intel_ring_buffer *ring = NULL;
2781         u32 seqno = 0;
2782         int ret;
2783
2784         if (atomic_read(&dev_priv->mm.wedged))
2785                 return -EIO;
2786
2787         spin_lock(&file_priv->mm.lock);
2788         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
2789                 if (time_after_eq(request->emitted_jiffies, recent_enough))
2790                         break;
2791
2792                 ring = request->ring;
2793                 seqno = request->seqno;
2794         }
2795         spin_unlock(&file_priv->mm.lock);
2796
2797         if (seqno == 0)
2798                 return 0;
2799
2800         ret = __wait_seqno(ring, seqno, true, NULL);
2801
2802         if (ret == 0)
2803                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
2804
2805         return ret;
2806 }
2807
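/*
 * Worked example for the 20ms window above: with a hypothetical hz of
 * 100, recent_enough = ticks - (20 * 100 / 1000) = ticks - 2, so only
 * requests emitted more than two ticks ago are waited upon; anything
 * newer ends the scan and the caller is not throttled on it.
 */
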
2808 int
2809 i915_gem_object_pin(struct drm_i915_gem_object *obj,
2810                     uint32_t alignment,
2811                     bool map_and_fenceable,
2812                     bool nonblocking)
2813 {
2814         int ret;
2815
2816         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
2817                 return -EBUSY;
2818
2819         if (obj->gtt_space != NULL) {
2820                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
2821                     (map_and_fenceable && !obj->map_and_fenceable)) {
2822                         WARN(obj->pin_count,
2823                              "bo is already pinned with incorrect alignment:"
2824                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
2825                              " obj->map_and_fenceable=%d\n",
2826                              obj->gtt_offset, alignment,
2827                              map_and_fenceable,
2828                              obj->map_and_fenceable);
2829                         ret = i915_gem_object_unbind(obj);
2830                         if (ret)
2831                                 return ret;
2832                 }
2833         }
2834
2835         if (obj->gtt_space == NULL) {
2836                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2837
2838                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
2839                                                   map_and_fenceable,
2840                                                   nonblocking);
2841                 if (ret)
2842                         return ret;
2843
2844                 if (!dev_priv->mm.aliasing_ppgtt)
2845                         i915_gem_gtt_bind_object(obj, obj->cache_level);
2846         }
2847
2848         if (!obj->has_global_gtt_mapping && map_and_fenceable)
2849                 i915_gem_gtt_bind_object(obj, obj->cache_level);
2850
2851         obj->pin_count++;
2852         obj->pin_mappable |= map_and_fenceable;
2853
2854         return 0;
2855 }
2856
2857 void
2858 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
2859 {
2860         BUG_ON(obj->pin_count == 0);
2861         BUG_ON(obj->gtt_space == NULL);
2862
2863         if (--obj->pin_count == 0)
2864                 obj->pin_mappable = false;
2865 }
2866
2867 int
2868 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2869                    struct drm_file *file)
2870 {
2871         struct drm_i915_gem_pin *args = data;
2872         struct drm_i915_gem_object *obj;
2873         int ret;
2874
2875         ret = i915_mutex_lock_interruptible(dev);
2876         if (ret)
2877                 return ret;
2878
2879         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2880         if (&obj->base == NULL) {
2881                 ret = -ENOENT;
2882                 goto unlock;
2883         }
2884
2885         if (obj->madv != I915_MADV_WILLNEED) {
2886                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
2887                 ret = -EINVAL;
2888                 goto out;
2889         }
2890
2891         if (obj->pin_filp != NULL && obj->pin_filp != file) {
2892                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2893                           args->handle);
2894                 ret = -EINVAL;
2895                 goto out;
2896         }
2897
2898         if (obj->user_pin_count == 0) {
2899                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
2900                 if (ret)
2901                         goto out;
2902         }
2903
2904         obj->user_pin_count++;
2905         obj->pin_filp = file;
2906
2907         /* XXX - flush the CPU caches for pinned objects
2908          * as the X server doesn't manage domains yet
2909          */
2910         i915_gem_object_flush_cpu_write_domain(obj);
2911         args->offset = obj->gtt_offset;
2912 out:
2913         drm_gem_object_unreference(&obj->base);
2914 unlock:
2915         DRM_UNLOCK(dev);
2916         return ret;
2917 }
2918
2919 int
2920 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2921                      struct drm_file *file)
2922 {
2923         struct drm_i915_gem_pin *args = data;
2924         struct drm_i915_gem_object *obj;
2925         int ret;
2926
2927         ret = i915_mutex_lock_interruptible(dev);
2928         if (ret)
2929                 return ret;
2930
2931         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2932         if (&obj->base == NULL) {
2933                 ret = -ENOENT;
2934                 goto unlock;
2935         }
2936
2937         if (obj->pin_filp != file) {
2938                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
2939                           args->handle);
2940                 ret = -EINVAL;
2941                 goto out;
2942         }
2943         obj->user_pin_count--;
2944         if (obj->user_pin_count == 0) {
2945                 obj->pin_filp = NULL;
2946                 i915_gem_object_unpin(obj);
2947         }
2948
2949 out:
2950         drm_gem_object_unreference(&obj->base);
2951 unlock:
2952         DRM_UNLOCK(dev);
2953         return (ret);
2954 }
2955
2956 int
2957 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2958                     struct drm_file *file)
2959 {
2960         struct drm_i915_gem_busy *args = data;
2961         struct drm_i915_gem_object *obj;
2962         int ret;
2963
2964         ret = i915_mutex_lock_interruptible(dev);
2965         if (ret)
2966                 return ret;
2967
2968         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2969         if (&obj->base == NULL) {
2970                 ret = -ENOENT;
2971                 goto unlock;
2972         }
2973
2974         /* Count all active objects as busy, even if they are currently not used
2975          * by the gpu. Users of this interface expect objects to eventually
2976          * become non-busy without any further actions, therefore emit any
2977          * necessary flushes here.
2978          */
2979         ret = i915_gem_object_flush_active(obj);
2980
2981         args->busy = obj->active;
2982         if (obj->ring) {
2983                 args->busy |= intel_ring_flag(obj->ring) << 16;
2984         }
2985
2986         drm_gem_object_unreference(&obj->base);
2987 unlock:
2988         DRM_UNLOCK(dev);
2989         return ret;
2990 }
2991
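/*
 * Userspace view of args->busy above (sketch): bit 0 reports whether the
 * object is still active and, when it is, the upper 16 bits carry
 * intel_ring_flag() of the ring that last used it, e.g.
 *
 *	if (busy & 1)
 *		ring = busy >> 16;
 */
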
2992 int
2993 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2994                         struct drm_file *file_priv)
2995 {
2996         return i915_gem_ring_throttle(dev, file_priv);
2997 }
2998
2999 int
3000 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3001                        struct drm_file *file_priv)
3002 {
3003         struct drm_i915_gem_madvise *args = data;
3004         struct drm_i915_gem_object *obj;
3005         int ret;
3006
3007         switch (args->madv) {
3008         case I915_MADV_DONTNEED:
3009         case I915_MADV_WILLNEED:
3010             break;
3011         default:
3012             return -EINVAL;
3013         }
3014
3015         ret = i915_mutex_lock_interruptible(dev);
3016         if (ret)
3017                 return ret;
3018
3019         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3020         if (&obj->base == NULL) {
3021                 ret = -ENOENT;
3022                 goto unlock;
3023         }
3024
3025         if (obj->pin_count) {
3026                 ret = -EINVAL;
3027                 goto out;
3028         }
3029
3030         if (obj->madv != __I915_MADV_PURGED)
3031                 obj->madv = args->madv;
3032
3033         /* if the object is no longer attached, discard its backing storage */
3034         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3035                 i915_gem_object_truncate(obj);
3036
3037         args->retained = obj->madv != __I915_MADV_PURGED;
3038
3039 out:
3040         drm_gem_object_unreference(&obj->base);
3041 unlock:
3042         DRM_UNLOCK(dev);
3043         return ret;
3044 }
3045
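/*
 * Userspace sketch (assumes libdrm): parking a buffer in a cache as
 * purgeable and reclaiming it later.  Names are placeholders.
 *
 *	struct drm_i915_gem_madvise madv;
 *
 *	madv.handle = bo_handle;
 *	madv.madv = I915_MADV_DONTNEED;		kernel may purge it
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;		take it back
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		the backing pages were purged; repopulate the buffer
 */
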
3046 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3047                                                   size_t size)
3048 {
3049         struct drm_i915_private *dev_priv;
3050         struct drm_i915_gem_object *obj;
3051
3052         dev_priv = dev->dev_private;
3053
3054         obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
3055
3056         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3057                 drm_free(obj, M_DRM);
3058                 return (NULL);
3059         }
3060
3061         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3062         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3063
3064         if (HAS_LLC(dev)) {
3065                 /* On some devices, we can have the GPU use the LLC (the CPU
3066                  * cache) for about a 10% performance improvement
3067                  * compared to uncached.  Graphics requests other than
3068                  * display scanout are coherent with the CPU in
3069                  * accessing this cache.  This means in this mode we
3070                  * don't need to clflush on the CPU side, and on the
3071                  * GPU side we only need to flush internal caches to
3072                  * get data visible to the CPU.
3073                  *
3074                  * However, we maintain the display planes as UC, and so
3075                  * need to rebind when first used as such.
3076                  */
3077                 obj->cache_level = I915_CACHE_LLC;
3078         } else
3079                 obj->cache_level = I915_CACHE_NONE;
3080         obj->base.driver_private = NULL;
3081         obj->fence_reg = I915_FENCE_REG_NONE;
3082         INIT_LIST_HEAD(&obj->mm_list);
3083         INIT_LIST_HEAD(&obj->gtt_list);
3084         INIT_LIST_HEAD(&obj->ring_list);
3085         INIT_LIST_HEAD(&obj->exec_list);
3086         obj->madv = I915_MADV_WILLNEED;
3087         /* Avoid an unnecessary call to unbind on the first bind. */
3088         obj->map_and_fenceable = true;
3089
3090         i915_gem_info_add_obj(dev_priv, size);
3091
3092         return obj;
3093 }
3094
3095 int i915_gem_init_object(struct drm_gem_object *obj)
3096 {
3097         BUG();
3098
3099         return 0;
3100 }
3101
3102 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3103 {
3104         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3105         struct drm_device *dev = obj->base.dev;
3106         drm_i915_private_t *dev_priv = dev->dev_private;
3107
3108         if (obj->phys_obj)
3109                 i915_gem_detach_phys_object(dev, obj);
3110
3111         obj->pin_count = 0;
3112         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3113                 bool was_interruptible;
3114
3115                 was_interruptible = dev_priv->mm.interruptible;
3116                 dev_priv->mm.interruptible = false;
3117
3118                 WARN_ON(i915_gem_object_unbind(obj));
3119
3120                 dev_priv->mm.interruptible = was_interruptible;
3121         }
3122
3123         drm_gem_free_mmap_offset(&obj->base);
3124
3125         drm_gem_object_release(&obj->base);
3126         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3127
3128         drm_free(obj->bit_17, M_DRM);
3129         drm_free(obj, M_DRM);
3130 }
3131
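/*
 * Set up the GTT address space managed by GEM and register the mappable
 * part of the aperture as fictitious pages so the pager fault handler
 * below can hand them out.
 */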
3132 int
3133 i915_gem_do_init(struct drm_device *dev, unsigned long start,
3134     unsigned long mappable_end, unsigned long end)
3135 {
3136         drm_i915_private_t *dev_priv;
3137         unsigned long mappable;
3138         int error;
3139
3140         dev_priv = dev->dev_private;
3141         mappable = min(end, mappable_end) - start;
3142
3143         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
3144
3145         dev_priv->mm.gtt_start = start;
3146         dev_priv->mm.gtt_mappable_end = mappable_end;
3147         dev_priv->mm.gtt_end = end;
3148         dev_priv->mm.gtt_total = end - start;
3149         dev_priv->mm.mappable_gtt_total = mappable;
3150
3151         /* Take over this portion of the GTT */
3152         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
3153         device_printf(dev->dev,
3154             "taking over the fictitious range 0x%lx-0x%lx\n",
3155             dev->agp->base + start, dev->agp->base + start + mappable);
3156         error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
3157             dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
3158         return (error);
3159 }
3160
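/*
 * Quiesce the GPU: wait for outstanding work, retire requests, reset the
 * fences and tear down the rings. Called from i915_gem_lastclose() and
 * the legacy leavevt path.
 */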
3161 int
3162 i915_gem_idle(struct drm_device *dev)
3163 {
3164         drm_i915_private_t *dev_priv = dev->dev_private;
3165         int ret;
3166
3167         DRM_LOCK(dev);
3168
3169         if (dev_priv->mm.suspended) {
3170                 DRM_UNLOCK(dev);
3171                 return 0;
3172         }
3173
3174         ret = i915_gpu_idle(dev);
3175         if (ret) {
3176                 DRM_UNLOCK(dev);
3177                 return ret;
3178         }
3179         i915_gem_retire_requests(dev);
3180
3181         /* Under UMS, be paranoid and evict. */
3182         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3183                 i915_gem_evict_everything(dev);
3184
3185         i915_gem_reset_fences(dev);
3186
3187         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3188          * We need to replace this with a semaphore, or something.
3189          * And not confound mm.suspended!
3190          */
3191         dev_priv->mm.suspended = 1;
3192         del_timer_sync(&dev_priv->hangcheck_timer);
3193
3194         i915_kernel_lost_context(dev);
3195         i915_gem_cleanup_ringbuffer(dev);
3196
3197         DRM_UNLOCK(dev);
3198
3199         /* Cancel the retire work handler, which should be idle now. */
3200         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3201
3202         return 0;
3203 }
3204
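/*
 * Re-apply the saved L3 remapping information on hardware with an L3 GPU
 * cache (gen7), temporarily disabling DOP clock gating while the L3 log
 * registers are rewritten.
 */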
3205 void i915_gem_l3_remap(struct drm_device *dev)
3206 {
3207         drm_i915_private_t *dev_priv = dev->dev_private;
3208         u32 misccpctl;
3209         int i;
3210
3211         if (!HAS_L3_GPU_CACHE(dev))
3212                 return;
3213
3214         if (!dev_priv->l3_parity.remap_info)
3215                 return;
3216
3217         misccpctl = I915_READ(GEN7_MISCCPCTL);
3218         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3219         POSTING_READ(GEN7_MISCCPCTL);
3220
3221         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3222                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3223                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3224                         DRM_DEBUG("0x%x was already programmed to %x\n",
3225                                   GEN7_L3LOG_BASE + i, remap);
3226                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3227                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
3228                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3229         }
3230
3231         /* Make sure all the writes land before disabling dop clock gating */
3232         POSTING_READ(GEN7_L3LOG_BASE);
3233
3234         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3235 }
3236
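/*
 * Program the display and tiling controller swizzle settings required
 * when bit-6 swizzling is in use (gen5 and later only).
 */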
3237 void i915_gem_init_swizzling(struct drm_device *dev)
3238 {
3239         drm_i915_private_t *dev_priv = dev->dev_private;
3240
3241         if (INTEL_INFO(dev)->gen < 5 ||
3242             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3243                 return;
3244
3245         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3246                                  DISP_TILE_SURFACE_SWIZZLING);
3247
3248         if (IS_GEN5(dev))
3249                 return;
3250
3251         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3252         if (IS_GEN6(dev))
3253                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3254         else
3255                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3256 }
3257
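/*
 * Whether the BLT ring may be used: requires BLT hardware and is
 * disabled on early pre-production SNB parts (PCI revision < 8).
 */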
3258 static bool
3259 intel_enable_blt(struct drm_device *dev)
3260 {
3261         int revision;
3262
3263         if (!HAS_BLT(dev))
3264                 return false;
3265
3266         /* The blitter was dysfunctional on early prototypes */
3267         revision = pci_read_config(dev->dev, PCIR_REVID, 1);
3268         if (IS_GEN6(dev) && revision < 8) {
3269                 DRM_INFO("BLT not supported on this pre-production hardware;"
3270                          " graphics performance will be degraded.\n");
3271                 return false;
3272         }
3273
3274         return true;
3275 }
3276
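/*
 * Bring up the GPU: apply the L3 remap and swizzling configuration,
 * initialize the render, BSD and BLT rings, then set up contexts and the
 * aliasing PPGTT.
 */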
3277 int
3278 i915_gem_init_hw(struct drm_device *dev)
3279 {
3280         drm_i915_private_t *dev_priv = dev->dev_private;
3281         int ret;
3282
3283         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3284                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3285
3286         i915_gem_l3_remap(dev);
3287
3288         i915_gem_init_swizzling(dev);
3289
3290         ret = intel_init_render_ring_buffer(dev);
3291         if (ret)
3292                 return ret;
3293
3294         if (HAS_BSD(dev)) {
3295                 ret = intel_init_bsd_ring_buffer(dev);
3296                 if (ret)
3297                         goto cleanup_render_ring;
3298         }
3299
3300         if (intel_enable_blt(dev)) {
3301                 ret = intel_init_blt_ring_buffer(dev);
3302                 if (ret)
3303                         goto cleanup_bsd_ring;
3304         }
3305
3306         dev_priv->next_seqno = 1;
3307
3308         /*
3309          * XXX: There was some w/a described somewhere suggesting loading
3310          * contexts before PPGTT.
3311          */
3312         i915_gem_context_init(dev);
3313         i915_gem_init_ppgtt(dev);
3314
3315         return 0;
3316
3317 cleanup_bsd_ring:
3318         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3319 cleanup_render_ring:
3320         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3321         return ret;
3322 }
3323
3324 static bool
3325 intel_enable_ppgtt(struct drm_device *dev)
3326 {
3327         if (i915_enable_ppgtt >= 0)
3328                 return i915_enable_ppgtt;
3329
3330         /* Disable ppgtt on SNB if VT-d is on. */
3331         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
3332                 return false;
3333
3334         return true;
3335 }
3336
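/*
 * One-time GEM initialization: carve up stolen memory and the GTT
 * aperture (reserving space for the PPGTT page directories or a guard
 * page) and then initialize the hardware.
 */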
3337 int i915_gem_init(struct drm_device *dev)
3338 {
3339         struct drm_i915_private *dev_priv = dev->dev_private;
3340         unsigned long prealloc_size, gtt_size, mappable_size;
3341         int ret;
3342
3343         prealloc_size = dev_priv->mm.gtt->stolen_size;
3344         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3345         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3346
3347         /* Basic memrange allocator for stolen space */
3348         drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
3349
3350         DRM_LOCK(dev);
3351         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3352                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3353                  * aperture accordingly when using aliasing ppgtt. */
3354                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3355                 /* For paranoia keep the guard page in between. */
3356                 gtt_size -= PAGE_SIZE;
3357
3358                 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
3359
3360                 ret = i915_gem_init_aliasing_ppgtt(dev);
3361                 if (ret) {
3362                         DRM_UNLOCK(dev);
3363                         return ret;
3364                 }
3365         } else {
3366                 /* Let GEM Manage all of the aperture.
3367                  *
3368                  * However, leave one page at the end still bound to the scratch
3369                  * page.  There are a number of places where the hardware
3370                  * apparently prefetches past the end of the object, and we've
3371                  * seen multiple hangs with the GPU head pointer stuck in a
3372                  * batchbuffer bound at the last page of the aperture.  One page
3373                  * should be enough to keep any prefetching inside of the
3374                  * aperture.
3375                  */
3376                 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
3377         }
3378
3379         ret = i915_gem_init_hw(dev);
3380         DRM_UNLOCK(dev);
3381         if (ret) {
3382                 i915_gem_cleanup_aliasing_ppgtt(dev);
3383                 return ret;
3384         }
3385
3386 #if 0
3387         /* Try to set up FBC with a reasonable compressed buffer size */
3388         if (I915_HAS_FBC(dev) && i915_powersave) {
3389                 int cfb_size;
3390
3391                 /* Leave 1M for line length buffer & misc. */
3392
3393                 /* Try to get a 32M buffer... */
3394                 if (prealloc_size > (36*1024*1024))
3395                         cfb_size = 32*1024*1024;
3396                 else /* fall back to 7/8 of the stolen space */
3397                         cfb_size = prealloc_size * 7 / 8;
3398                 i915_setup_compression(dev, cfb_size);
3399         }
3400 #endif
3401
3402         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3403         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3404                 dev_priv->dri1.allow_batchbuffer = 1;
3405         return 0;
3406 }
3407
3408 void
3409 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3410 {
3411         drm_i915_private_t *dev_priv = dev->dev_private;
3412         struct intel_ring_buffer *ring;
3413         int i;
3414
3415         for_each_ring(ring, dev_priv, i)
3416                 intel_cleanup_ring_buffer(ring);
3417 }
3418
3419 int
3420 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3421                        struct drm_file *file_priv)
3422 {
3423         drm_i915_private_t *dev_priv = dev->dev_private;
3424         int ret;
3425
3426         if (drm_core_check_feature(dev, DRIVER_MODESET))
3427                 return 0;
3428
3429         if (atomic_read(&dev_priv->mm.wedged)) {
3430                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3431                 atomic_set(&dev_priv->mm.wedged, 0);
3432         }
3433
3434         DRM_LOCK(dev);
3435         dev_priv->mm.suspended = 0;
3436
3437         ret = i915_gem_init_hw(dev);
3438         if (ret != 0) {
3439                 DRM_UNLOCK(dev);
3440                 return ret;
3441         }
3442
3443         KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
3444         DRM_UNLOCK(dev);
3445
3446         ret = drm_irq_install(dev);
3447         if (ret)
3448                 goto cleanup_ringbuffer;
3449
3450         return 0;
3451
3452 cleanup_ringbuffer:
3453         DRM_LOCK(dev);
3454         i915_gem_cleanup_ringbuffer(dev);
3455         dev_priv->mm.suspended = 1;
3456         DRM_UNLOCK(dev);
3457
3458         return ret;
3459 }
3460
3461 int
3462 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3463                        struct drm_file *file_priv)
3464 {
3465         if (drm_core_check_feature(dev, DRIVER_MODESET))
3466                 return 0;
3467
3468         drm_irq_uninstall(dev);
3469         return i915_gem_idle(dev);
3470 }
3471
3472 void
3473 i915_gem_lastclose(struct drm_device *dev)
3474 {
3475         int ret;
3476
3477         if (drm_core_check_feature(dev, DRIVER_MODESET))
3478                 return;
3479
3480         ret = i915_gem_idle(dev);
3481         if (ret)
3482                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3483 }
3484
3485 static void
3486 init_ring_lists(struct intel_ring_buffer *ring)
3487 {
3488         INIT_LIST_HEAD(&ring->active_list);
3489         INIT_LIST_HEAD(&ring->request_list);
3490 }
3491
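/*
 * Driver-load time GEM setup: initialize the object, request and fence
 * lists, the retire work handler, the fence registers, and the vm_lowmem
 * event handler used here in place of a Linux shrinker.
 */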
3492 void
3493 i915_gem_load(struct drm_device *dev)
3494 {
3495         int i;
3496         drm_i915_private_t *dev_priv = dev->dev_private;
3497
3498         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3499         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3500         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3501         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3502         for (i = 0; i < I915_NUM_RINGS; i++)
3503                 init_ring_lists(&dev_priv->ring[i]);
3504         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3505                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3506         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3507                           i915_gem_retire_work_handler);
3508         init_completion(&dev_priv->error_completion);
3509
3510         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3511         if (IS_GEN3(dev)) {
3512                 I915_WRITE(MI_ARB_STATE,
3513                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3514         }
3515
3516         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3517
3518         /* Old X drivers will take 0-2 for front, back, depth buffers */
3519         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3520                 dev_priv->fence_reg_start = 3;
3521
3522         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3523                 dev_priv->num_fence_regs = 16;
3524         else
3525                 dev_priv->num_fence_regs = 8;
3526
3527         /* Initialize fence registers to zero */
3528         i915_gem_reset_fences(dev);
3529
3530         i915_gem_detect_bit_6_swizzle(dev);
3531         init_waitqueue_head(&dev_priv->pending_flip_queue);
3532
3533         dev_priv->mm.interruptible = true;
3534
3535 #if 0
3536         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3537         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3538         register_shrinker(&dev_priv->mm.inactive_shrinker);
3539 #else
3540         dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
3541             i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
3542 #endif
3543 }
3544
3545 /*
3546  * Create a physically contiguous memory object for this object
3547  * e.g. for cursor + overlay regs
3548  */
3549 static int i915_gem_init_phys_object(struct drm_device *dev,
3550                                      int id, int size, int align)
3551 {
3552         drm_i915_private_t *dev_priv = dev->dev_private;
3553         struct drm_i915_gem_phys_object *phys_obj;
3554         int ret;
3555
3556         if (dev_priv->mm.phys_objs[id - 1] || !size)
3557                 return 0;
3558
3559         phys_obj = kmalloc(sizeof(struct drm_i915_gem_phys_object), M_DRM,
3560             M_WAITOK | M_ZERO);
3561         if (!phys_obj)
3562                 return -ENOMEM;
3563
3564         phys_obj->id = id;
3565
3566         phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3567         if (!phys_obj->handle) {
3568                 ret = -ENOMEM;
3569                 goto kfree_obj;
3570         }
3571         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3572             size / PAGE_SIZE, PAT_WRITE_COMBINING);
3573
3574         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3575
3576         return 0;
3577
3578 kfree_obj:
3579         drm_free(phys_obj, M_DRM);
3580         return ret;
3581 }
3582
3583 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3584 {
3585         drm_i915_private_t *dev_priv = dev->dev_private;
3586         struct drm_i915_gem_phys_object *phys_obj;
3587
3588         if (!dev_priv->mm.phys_objs[id - 1])
3589                 return;
3590
3591         phys_obj = dev_priv->mm.phys_objs[id - 1];
3592         if (phys_obj->cur_obj) {
3593                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3594         }
3595
3596         drm_pci_free(dev, phys_obj->handle);
3597         drm_free(phys_obj, M_DRM);
3598         dev_priv->mm.phys_objs[id - 1] = NULL;
3599 }
3600
3601 void i915_gem_free_all_phys_object(struct drm_device *dev)
3602 {
3603         int i;
3604
3605         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3606                 i915_gem_free_phys_object(dev, i);
3607 }
3608
3609 void i915_gem_detach_phys_object(struct drm_device *dev,
3610                                  struct drm_i915_gem_object *obj)
3611 {
3612         vm_page_t m;
3613         struct sf_buf *sf;
3614         char *vaddr, *dst;
3615         int i, page_count;
3616
3617         if (!obj->phys_obj)
3618                 return;
3619         vaddr = obj->phys_obj->handle->vaddr;
3620
3621         page_count = obj->base.size / PAGE_SIZE;
3622         VM_OBJECT_LOCK(obj->base.vm_obj);
3623         for (i = 0; i < page_count; i++) {
3624                 m = i915_gem_wire_page(obj->base.vm_obj, i);
3625                 if (m == NULL)
3626                         continue; /* XXX */
3627
3628                 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3629                 sf = sf_buf_alloc(m);
3630                 if (sf != NULL) {
3631                         dst = (char *)sf_buf_kva(sf);
3632                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
3633                         sf_buf_free(sf);
3634                 }
3635                 drm_clflush_pages(&m, 1);
3636
3637                 VM_OBJECT_LOCK(obj->base.vm_obj);
3638                 vm_page_reference(m);
3639                 vm_page_dirty(m);
3640                 vm_page_busy_wait(m, FALSE, "i915gem");
3641                 vm_page_unwire(m, 0);
3642                 vm_page_wakeup(m);
3643         }
3644         VM_OBJECT_UNLOCK(obj->base.vm_obj);
3645         intel_gtt_chipset_flush();
3646
3647         obj->phys_obj->cur_obj = NULL;
3648         obj->phys_obj = NULL;
3649 }
3650
3651 int
3652 i915_gem_attach_phys_object(struct drm_device *dev,
3653                             struct drm_i915_gem_object *obj,
3654                             int id,
3655                             int align)
3656 {
3657         drm_i915_private_t *dev_priv = dev->dev_private;
3658         vm_page_t m;
3659         struct sf_buf *sf;
3660         char *dst, *src;
3661         int i, page_count, ret;
3662
3663         if (id > I915_MAX_PHYS_OBJECT)
3664                 return -EINVAL;
3665
3666         if (obj->phys_obj) {
3667                 if (obj->phys_obj->id == id)
3668                         return 0;
3669                 i915_gem_detach_phys_object(dev, obj);
3670         }
3671
3672         /* create a new object */
3673         if (!dev_priv->mm.phys_objs[id - 1]) {
3674                 ret = i915_gem_init_phys_object(dev, id,
3675                                                 obj->base.size, align);
3676                 if (ret) {
3677                         DRM_ERROR("failed to init phys object %d size: %zu\n",
3678                                   id, obj->base.size);
3679                         return ret;
3680                 }
3681         }
3682
3683         /* bind to the object */
3684         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3685         obj->phys_obj->cur_obj = obj;
3686
3687         page_count = obj->base.size / PAGE_SIZE;
3688
3689         VM_OBJECT_LOCK(obj->base.vm_obj);
3690         ret = 0;
3691         for (i = 0; i < page_count; i++) {
3692                 m = i915_gem_wire_page(obj->base.vm_obj, i);
3693                 if (m == NULL) {
3694                         ret = -EIO;
3695                         break;
3696                 }
3697                 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3698                 sf = sf_buf_alloc(m);
3699                 src = (char *)sf_buf_kva(sf);
3700                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
3701                 memcpy(dst, src, PAGE_SIZE);
3702                 sf_buf_free(sf);
3703
3704                 VM_OBJECT_LOCK(obj->base.vm_obj);
3705
3706                 vm_page_reference(m);
3707                 vm_page_busy_wait(m, FALSE, "i915gem");
3708                 vm_page_unwire(m, 0);
3709                 vm_page_wakeup(m);
3710         }
3711         VM_OBJECT_UNLOCK(obj->base.vm_obj);
3712
3713         return (ret);
3714 }
3715
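/*
 * Fast path for pwrite into a physically contiguous (phys) object: try a
 * non-faulting copy first and fall back to copy_from_user() with the drm
 * lock temporarily dropped.
 */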
3716 static int
3717 i915_gem_phys_pwrite(struct drm_device *dev,
3718                      struct drm_i915_gem_object *obj,
3719                      struct drm_i915_gem_pwrite *args,
3720                      struct drm_file *file_priv)
3721 {
3722         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
3723         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
3724
3725         if (copyin_nofault(user_data, vaddr, args->size) != 0) {
3726                 unsigned long unwritten;
3727
3728                 /* The physical object once assigned is fixed for the lifetime
3729                  * of the obj, so we can safely drop the lock and continue
3730                  * to access vaddr.
3731                  */
3732                 DRM_UNLOCK(dev);
3733                 unwritten = copy_from_user(vaddr, user_data, args->size);
3734                 DRM_LOCK(dev);
3735                 if (unwritten)
3736                         return -EFAULT;
3737         }
3738
3739         i915_gem_chipset_flush(dev);
3740         return 0;
3741 }
3742
3743 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
3744 {
3745         struct drm_i915_file_private *file_priv = file->driver_priv;
3746
3747         /* Clean up our request list when the client is going away, so that
3748          * later retire_requests won't dereference our soon-to-be-gone
3749          * file_priv.
3750          */
3751         spin_lock(&file_priv->mm.lock);
3752         while (!list_empty(&file_priv->mm.request_list)) {
3753                 struct drm_i915_gem_request *request;
3754
3755                 request = list_first_entry(&file_priv->mm.request_list,
3756                                            struct drm_i915_gem_request,
3757                                            client_list);
3758                 list_del(&request->client_list);
3759                 request->file_priv = NULL;
3760         }
3761         spin_unlock(&file_priv->mm.lock);
3762 }
3763
3764 static int
3765 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
3766     vm_ooffset_t foff, struct ucred *cred, u_short *color)
3767 {
3768
3769         *color = 0; /* XXXKIB */
3770         return (0);
3771 }
3772
3773 int i915_intr_pf;
3774
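/*
 * VM pager fault handler for GTT mappings: bind the object into the
 * mappable GTT if necessary, resolve the fictitious page backing the
 * faulting offset and hand it back to the VM, retrying whenever the drm
 * lock or the page is contended.
 */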
3775 static int
3776 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
3777     vm_page_t *mres)
3778 {
3779         struct drm_gem_object *gem_obj;
3780         struct drm_i915_gem_object *obj;
3781         struct drm_device *dev;
3782         drm_i915_private_t *dev_priv;
3783         vm_page_t m, oldm;
3784         int cause, ret;
3785         bool write;
3786
3787         gem_obj = vm_obj->handle;
3788         obj = to_intel_bo(gem_obj);
3789         dev = obj->base.dev;
3790         dev_priv = dev->dev_private;
3791 #if 0
3792         write = (prot & VM_PROT_WRITE) != 0;
3793 #else
3794         write = true;
3795 #endif
3796         vm_object_pip_add(vm_obj, 1);
3797
3798         /*
3799          * Remove the placeholder page inserted by vm_fault() from the
3800          * object before dropping the object lock. If
3801          * i915_gem_release_mmap() is active in parallel on this gem
3802          * object, then it owns the drm device lock and might find the
3803          * placeholder already. Then, since the page is busy,
3804          * i915_gem_release_mmap() sleeps waiting for the busy state
3805          * of the page to clear. We would then be unable to acquire
3806          * the drm device lock until i915_gem_release_mmap() is able
3807          * to make progress.
3808          */
3809         if (*mres != NULL) {
3810                 oldm = *mres;
3811                 vm_page_remove(oldm);
3812                 *mres = NULL;
3813         } else
3814                 oldm = NULL;
3815 retry:
3816         VM_OBJECT_UNLOCK(vm_obj);
3817 unlocked_vmobj:
3818         cause = ret = 0;
3819         m = NULL;
3820
3821         if (i915_intr_pf) {
3822                 ret = i915_mutex_lock_interruptible(dev);
3823                 if (ret != 0) {
3824                         cause = 10;
3825                         goto out;
3826                 }
3827         } else
3828                 DRM_LOCK(dev);
3829
3830         /*
3831          * Since the object lock was dropped, another thread might have
3832          * faulted on the same GTT address and instantiated the
3833          * mapping for the page.  Recheck.
3834          */
3835         VM_OBJECT_LOCK(vm_obj);
3836         m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
3837         if (m != NULL) {
3838                 if ((m->flags & PG_BUSY) != 0) {
3839                         DRM_UNLOCK(dev);
3840 #if 0 /* XXX */
3841                         vm_page_sleep(m, "915pee");
3842 #endif
3843                         goto retry;
3844                 }
3845                 goto have_page;
3846         } else
3847                 VM_OBJECT_UNLOCK(vm_obj);
3848
3849         /* Now bind it into the GTT if needed */
3850         if (!obj->map_and_fenceable) {
3851                 ret = i915_gem_object_unbind(obj);
3852                 if (ret != 0) {
3853                         cause = 20;
3854                         goto unlock;
3855                 }
3856         }
3857         if (!obj->gtt_space) {
3858                 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
3859                 if (ret != 0) {
3860                         cause = 30;
3861                         goto unlock;
3862                 }
3863
3864                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
3865                 if (ret != 0) {
3866                         cause = 40;
3867                         goto unlock;
3868                 }
3869         }
3870
3871         if (obj->tiling_mode == I915_TILING_NONE)
3872                 ret = i915_gem_object_put_fence(obj);
3873         else
3874                 ret = i915_gem_object_get_fence(obj);
3875         if (ret != 0) {
3876                 cause = 50;
3877                 goto unlock;
3878         }
3879
3880         if (i915_gem_object_is_inactive(obj))
3881                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3882
3883         obj->fault_mappable = true;
3884         VM_OBJECT_LOCK(vm_obj);
3885         m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
3886             offset);
3887         if (m == NULL) {
3888                 cause = 60;
3889                 ret = -EFAULT;
3890                 goto unlock;
3891         }
3892         KASSERT((m->flags & PG_FICTITIOUS) != 0,
3893             ("not fictitious %p", m));
3894         KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
3895
3896         if ((m->flags & PG_BUSY) != 0) {
3897                 DRM_UNLOCK(dev);
3898 #if 0 /* XXX */
3899                 vm_page_sleep(m, "915pbs");
3900 #endif
3901                 goto retry;
3902         }
3903         m->valid = VM_PAGE_BITS_ALL;
3904         vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
3905 have_page:
3906         *mres = m;
3907         vm_page_busy_try(m, false);
3908
3909         DRM_UNLOCK(dev);
3910         if (oldm != NULL) {
3911                 vm_page_free(oldm);
3912         }
3913         vm_object_pip_wakeup(vm_obj);
3914         return (VM_PAGER_OK);
3915
3916 unlock:
3917         DRM_UNLOCK(dev);
3918 out:
3919         KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
3920         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
3921                 goto unlocked_vmobj;
3922         }
3923         VM_OBJECT_LOCK(vm_obj);
3924         vm_object_pip_wakeup(vm_obj);
3925         return (VM_PAGER_ERROR);
3926 }
3927
3928 static void
3929 i915_gem_pager_dtor(void *handle)
3930 {
3931         struct drm_gem_object *obj;
3932         struct drm_device *dev;
3933
3934         obj = handle;
3935         dev = obj->dev;
3936
3937         DRM_LOCK(dev);
3938         drm_gem_free_mmap_offset(obj);
3939         i915_gem_release_mmap(to_intel_bo(obj));
3940         drm_gem_object_unreference(obj);
3941         DRM_UNLOCK(dev);
3942 }
3943
3944 struct cdev_pager_ops i915_gem_pager_ops = {
3945         .cdev_pg_fault  = i915_gem_pager_fault,
3946         .cdev_pg_ctor   = i915_gem_pager_ctor,
3947         .cdev_pg_dtor   = i915_gem_pager_dtor
3948 };
3949
3950 #define GEM_PARANOID_CHECK_GTT 0
3951 #if GEM_PARANOID_CHECK_GTT
3952 static void
3953 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
3954     int page_count)
3955 {
3956         struct drm_i915_private *dev_priv;
3957         vm_paddr_t pa;
3958         unsigned long start, end;
3959         u_int i;
3960         int j;
3961
3962         dev_priv = dev->dev_private;
3963         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
3964         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
3965         for (i = start; i < end; i++) {
3966                 pa = intel_gtt_read_pte_paddr(i);
3967                 for (j = 0; j < page_count; j++) {
3968                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
3969                                 panic("Page %p in GTT pte index %d pte %x",
3970                                     ma[j], i, intel_gtt_read_pte(i));
3971                         }
3972                 }
3973         }
3974 }
3975 #endif
3976
3977 #define VM_OBJECT_LOCK_ASSERT_OWNED(object)
3978
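/*
 * Grab and wire a single page of the object, paging it in from the
 * backing store or zero-filling it if it is not yet valid.
 */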
3979 static vm_page_t
3980 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
3981 {
3982         vm_page_t m;
3983         int rv;
3984
3985         VM_OBJECT_LOCK_ASSERT_OWNED(object);
3986         m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3987         if (m->valid != VM_PAGE_BITS_ALL) {
3988                 if (vm_pager_has_page(object, pindex)) {
3989                         rv = vm_pager_get_page(object, &m, 1);
3990                         m = vm_page_lookup(object, pindex);
3991                         if (m == NULL)
3992                                 return (NULL);
3993                         if (rv != VM_PAGER_OK) {
3994                                 vm_page_free(m);
3995                                 return (NULL);
3996                         }
3997                 } else {
3998                         pmap_zero_page(VM_PAGE_TO_PHYS(m));
3999                         m->valid = VM_PAGE_BITS_ALL;
4000                         m->dirty = 0;
4001                 }
4002         }
4003         vm_page_wire(m);
4004         vm_page_wakeup(m);
4005         return (m);
4006 }
4007
4008 static int
4009 i915_gpu_is_active(struct drm_device *dev)
4010 {
4011         drm_i915_private_t *dev_priv = dev->dev_private;
4012
4013         return !list_empty(&dev_priv->mm.active_list);
4014 }
4015
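/*
 * vm_lowmem event handler: opportunistically unbind purgeable and
 * inactive objects, and as a last resort idle the GPU and rescan, to
 * give memory back to the system.
 */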
4016 static void
4017 i915_gem_lowmem(void *arg)
4018 {
4019         struct drm_device *dev;
4020         struct drm_i915_private *dev_priv;
4021         struct drm_i915_gem_object *obj, *next;
4022         int cnt, cnt_fail, cnt_total;
4023
4024         dev = arg;
4025         dev_priv = dev->dev_private;
4026
4027         if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT))
4028                 return;
4029
4030 rescan:
4031         /* first scan for clean buffers */
4032         i915_gem_retire_requests(dev);
4033
4034         cnt_total = cnt_fail = cnt = 0;
4035
4036         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4037             mm_list) {
4038                 if (i915_gem_object_is_purgeable(obj)) {
4039                         if (i915_gem_object_unbind(obj) != 0)
4040                                 cnt_total++;
4041                 } else
4042                         cnt_total++;
4043         }
4044
4045         /* second pass, evict/count anything still on the inactive list */
4046         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4047             mm_list) {
4048                 if (i915_gem_object_unbind(obj) == 0)
4049                         cnt++;
4050                 else
4051                         cnt_fail++;
4052         }
4053
4054         if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
4055                 /*
4056                  * We are desperate for pages, so as a last resort, wait
4057                  * for the GPU to finish and discard whatever we can.
4058                  * This dramatically reduces the number of OOM-killer
4059                  * events while running the GPU aggressively.
4060                  */
4061                 if (i915_gpu_idle(dev) == 0)
4062                         goto rescan;
4063         }
4064         DRM_UNLOCK(dev);
4065 }
4066
4067 void
4068 i915_gem_unload(struct drm_device *dev)
4069 {
4070         struct drm_i915_private *dev_priv;
4071
4072         dev_priv = dev->dev_private;
4073         EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
4074 }