sys/dev/drm/i915/i915_gem_request.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
        return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
        /* The timeline struct (as part of the ppgtt underneath a context)
         * may be freed when the request is no longer in use by the GPU.
         * We could extend the life of a context to beyond that of all
         * fences, possibly keeping the hw resource around indefinitely,
         * or we just give them a false name. Since
         * dma_fence_ops.get_timeline_name is a debug feature, the occasional
         * lie seems justifiable.
         */
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return "signaled";

        return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

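        /* Arm the breadcrumb interrupt, then re-check: if the request
         * completed while signaling was being enabled, report the fence
         * as already signaled so the core does not wait on a dead fence.
         */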
        intel_engine_enable_signaling(to_request(fence), true);
        return !i915_fence_signaled(fence);
}

static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{
        return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        /* The request is put onto a RCU freelist (i.e. the address
         * is immediately reused), mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&req->submit);

        kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
};

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv;

        file_priv = request->file_priv;
        if (!file_priv)
                return;

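        /* Re-check request->file_priv under the lock: another thread may
         * have detached the request between the unlocked test above and
         * our acquisition of mm.lock.
         */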
        lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE);
        if (request->file_priv) {
                list_del(&request->client_link);
                request->file_priv = NULL;
        }
        lockmgr(&file_priv->mm.lock, LK_RELEASE);
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
        return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
                     struct i915_dependency *dep)
{
        kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
                               struct i915_priotree *signal,
                               struct i915_dependency *dep,
                               unsigned long flags)
{
        INIT_LIST_HEAD(&dep->dfs_link);
        list_add(&dep->wait_link, &signal->waiters_list);
        list_add(&dep->signal_link, &pt->signalers_list);
        dep->signaler = signal;
        dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
                             struct i915_priotree *pt,
                             struct i915_priotree *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc(i915);
        if (!dep)
                return -ENOMEM;

        __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
        return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
        struct i915_dependency *dep, *next;

        GEM_BUG_ON(!list_empty(&pt->link));

        /* Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
        INIT_LIST_HEAD(&pt->signalers_list);
        INIT_LIST_HEAD(&pt->waiters_list);
        INIT_LIST_HEAD(&pt->link);
        pt->priority = I915_PRIORITY_INVALID;
}

static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        ret = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
        if (ret)
                return ret;

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        for_each_engine(engine, i915, id) {
                struct i915_gem_timeline *timeline;
                struct intel_timeline *tl = engine->timeline;

                if (!i915_seqno_passed(seqno, tl->seqno)) {
                        /* spin until threads are complete */
                        while (intel_breadcrumbs_busy(engine))
                                cond_resched();
                }

                /* Check we are idle before we fiddle with hw state! */
                GEM_BUG_ON(!intel_engine_is_idle(engine));
                GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));

                /* Finally reset hw state */
                intel_engine_init_global_seqno(engine, seqno);
                tl->seqno = seqno;

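                /* The cached semaphore sync points refer to the old seqno
                 * space and would be stale after renumbering, so forget
                 * them along with it.
                 */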
                list_for_each_entry(timeline, &i915->gt.timelines, link)
                        memset(timeline->engine[id].global_sync, 0,
                               sizeof(timeline->engine[id].global_sync));
        }

        return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (seqno == 0)
                return -EINVAL;

        /* The seqno in the HWS page needs to be set to one less than the
         * value we will inject into the ring
         */
        return reset_all_global_seqno(dev_priv, seqno - 1);
}

static void mark_busy(struct drm_i915_private *i915)
{
        if (i915->gt.awake)
                return;

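        /* This is the first request after a period of idleness: pin the
         * device awake (runtime pm), restore the powersave tuning and
         * kick off the periodic retire worker.
         */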
        GEM_BUG_ON(!i915->gt.active_requests);

        intel_runtime_pm_get_noresume(i915);
        i915->gt.awake = true;

        intel_enable_gt_powersave(i915);
        i915_update_gfx_val(i915);
        if (INTEL_GEN(i915) >= 6)
                gen6_rps_busy(i915);

        queue_delayed_work(i915->wq,
                           &i915->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}

static int reserve_engine(struct intel_engine_cs *engine)
{
        struct drm_i915_private *i915 = engine->i915;
        u32 active = ++engine->timeline->inflight_seqnos;
        u32 seqno = engine->timeline->seqno;
        int ret;

        /* Reservation is fine until we need to wrap around */
        if (unlikely(add_overflows(seqno, active))) {
                ret = reset_all_global_seqno(i915, 0);
                if (ret) {
                        engine->timeline->inflight_seqnos--;
                        return ret;
                }
        }

        if (!i915->gt.active_requests++)
                mark_busy(i915);

        return 0;
}

static void unreserve_engine(struct intel_engine_cs *engine)
{
        struct drm_i915_private *i915 = engine->i915;

        if (!--i915->gt.active_requests) {
                /* Cancel the mark_busy() from our reserve_engine() */
                GEM_BUG_ON(!i915->gt.awake);
                mod_delayed_work(i915->wq,
                                 &i915->gt.idle_work,
                                 msecs_to_jiffies(100));
        }

        GEM_BUG_ON(!engine->timeline->inflight_seqnos);
        engine->timeline->inflight_seqnos--;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
{
        /* Space left intentionally blank */
}

static void advance_ring(struct drm_i915_gem_request *request)
{
        unsigned int tail;

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        if (list_is_last(&request->ring_link, &request->ring->request_list)) {
                /* We may race here with execlists resubmitting this request
                 * as we retire it. The resubmission will move the ring->tail
                 * forwards (to request->wa_tail). We either read the
                 * current value that was written to hw, or the value that
                 * is just about to be. Either works, if we miss the last two
                 * noops - they are safe to be replayed on a reset.
                 */
                tail = READ_ONCE(request->ring->tail);
        } else {
                tail = request->postfix;
        }
        list_del(&request->ring_link);

        request->ring->head = tail;
}

static void free_capture_list(struct drm_i915_gem_request *request)
{
        struct i915_gem_capture_list *capture;

        capture = request->capture_list;
        while (capture) {
                struct i915_gem_capture_list *next = capture->next;

                kfree(capture);
                capture = next;
        }
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
        GEM_BUG_ON(!i915_gem_request_completed(request));
        GEM_BUG_ON(!request->i915->gt.active_requests);

        trace_i915_gem_request_retire(request);

        spin_lock_irq(&engine->timeline->lock);
        list_del_init(&request->link);
        spin_unlock_irq(&engine->timeline->lock);

        unreserve_engine(request->engine);
        advance_ring(request);

        free_capture_list(request);

        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
                /* In microbenchmarks or focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
                 * cache misses when jumping around the list of i915_gem_active.
                 * So we try to keep this loop as streamlined as possible and
                 * also prefetch the next i915_gem_active to try and hide
                 * the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, request);
        }

        i915_gem_request_remove_from_client(request);

        /* Retirement decays the ban score as it is a sign of ctx progress */
        atomic_dec_if_positive(&request->ctx->ban_score);

        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. However, since we
         * cannot take the required locks at i915_gem_request_submit() we
         * defer the unpinning of the active context to now, retirement of
         * the subsequent request.
         */
        if (engine->last_retired_context)
                engine->context_unpin(engine, engine->last_retired_context);
        engine->last_retired_context = request->ctx;

        spin_lock_irq(&request->lock);
        if (request->waitboost)
                atomic_dec(&request->i915->gt_pm.rps.num_waiters);
        dma_fence_signal_locked(&request->fence);
        spin_unlock_irq(&request->lock);

        i915_priotree_fini(request->i915, &request->priotree);
        i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_request_completed(req));

        if (list_empty(&req->link))
                return;

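        /* Requests on the engine timeline are kept in submission order,
         * so retiring from the head up to and including @req retires
         * everything submitted before it.
         */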
        do {
                tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);
}

static u32 timeline_get_seqno(struct intel_timeline *tl)
{
        return ++tl->seqno;
}

void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;
        u32 seqno;

//      GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->timeline->lock);

        trace_i915_gem_request_execute(request);

        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);

        seqno = timeline_get_seqno(timeline);
        GEM_BUG_ON(!seqno);
        GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

        /* We may be recursing from the signal callback of another i915 fence */
        lockmgr(&request->lock, LK_EXCLUSIVE);
        request->global_seqno = seqno;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                intel_engine_enable_signaling(request, false);
        lockmgr(&request->lock, LK_RELEASE);

        engine->emit_breadcrumb(request,
                                request->ring->vaddr + request->postfix);

        lockmgr(&request->timeline->lock, LK_EXCLUSIVE);
        list_move_tail(&request->link, &timeline->requests);
        lockmgr(&request->timeline->lock, LK_RELEASE);

        wake_up_all(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);

        __i915_gem_request_submit(request);

        spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;

        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->timeline->lock);

        /* Only unwind in reverse order, required so that the per-context list
         * is kept in seqno/ring order.
         */
        GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
        engine->timeline->seqno--;

        /* We may be recursing from the signal callback of another i915 fence */
        lockmgr(&request->lock, LK_EXCLUSIVE);
        request->global_seqno = 0;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                intel_engine_cancel_signaling(request);
        lockmgr(&request->lock, LK_RELEASE);

        /* Transfer back from the global per-engine timeline to per-context */
        timeline = request->timeline;
        GEM_BUG_ON(timeline == engine->timeline);

        lockmgr(&timeline->lock, LK_EXCLUSIVE);
        list_move(&request->link, &timeline->requests);
        lockmgr(&timeline->lock, LK_RELEASE);

        /* We don't need to wake_up any waiters on request->execute, they
         * will get woken by any other event or us re-adding this request
         * to the engine timeline (__i915_gem_request_submit()). The waiters
         * should be quite adept at noticing that the request now has a
         * different global_seqno from the one they went to sleep on.
         */
}

void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);

        __i915_gem_request_unsubmit(request);

        spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);

        switch (state) {
        case FENCE_COMPLETE:
                trace_i915_gem_request_submit(request);
                /*
                 * We need to serialize use of the submit_request() callback with its
                 * hotplugging performed during an emergency i915_gem_set_wedged().
                 * We use the RCU mechanism to mark the critical section in order to
                 * force i915_gem_set_wedged() to wait until the submit_request() is
                 * completed before proceeding.
                 */
                rcu_read_lock();
                request->engine->submit_request(request);
                rcu_read_unlock();
                break;

        case FENCE_FREE:
                i915_gem_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_request *req;
        struct intel_ring *ring;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /*
         * Preempt contexts are reserved for exclusive use to inject a
         * preemption context switch. They are never to be used for any trivial
         * request!
         */
        GEM_BUG_ON(ctx == dev_priv->preempt_context);

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged.
         */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return ERR_PTR(-EIO);

        /* Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
        ring = engine->context_pin(engine, ctx);
        if (IS_ERR(ring))
                return ERR_CAST(ring);
        GEM_BUG_ON(!ring);

        ret = reserve_engine(engine);
        if (ret)
                goto err_unpin;

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        /* Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with dma_fence_init(). This increment is safe for release as we
         * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be, and we restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto err_unreserve;
        }

        req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
        GEM_BUG_ON(req->timeline == engine->timeline);

        lockinit(&req->lock, "i915_rl", 0, 0);
        dma_fence_init(&req->fence,
                       &i915_fence_ops,
                       &req->lock,
                       req->timeline->fence_context,
                       timeline_get_seqno(req->timeline));

        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
        init_waitqueue_head(&req->execute);

        i915_priotree_init(&req->priotree);

        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = ctx;
        req->ring = ring;

        /* No zalloc, must clear what we need by hand */
        req->global_seqno = 0;
        req->file_priv = NULL;
        req->batch = NULL;
        req->capture_list = NULL;
        req->waitboost = false;

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
        GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

        ret = engine->request_alloc(req);
        if (ret)
                goto err_ctx;

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->emit;

        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
        return req;

err_ctx:
        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&req->active_list));
        GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
        GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

        kmem_cache_free(dev_priv->requests, req);
err_unreserve:
        unreserve_engine(engine);
err_unpin:
        engine->context_unpin(engine, ctx);
        return ERR_PTR(ret);
}

static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
{
        int ret;

        GEM_BUG_ON(to == from);
        GEM_BUG_ON(to->timeline == from->timeline);

        if (i915_gem_request_completed(from))
                return 0;

        if (to->engine->schedule) {
                ret = i915_priotree_add_dependency(to->i915,
                                                   &to->priotree,
                                                   &from->priotree);
                if (ret < 0)
                        return ret;
        }

        if (to->engine == from->engine) {
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        if (to->engine->semaphore.sync_to) {
                u32 seqno;

                GEM_BUG_ON(!from->engine->semaphore.signal);

                seqno = i915_gem_request_global_seqno(from);
                if (!seqno)
                        goto await_dma_fence;

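                /* If we have already synced with an equal or later seqno
                 * from this engine on this timeline, the hardware semaphore
                 * wait would be redundant.
                 */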
                if (seqno <= to->timeline->global_sync[from->engine->id])
                        return 0;

                trace_i915_gem_ring_sync_to(to, from);
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
                        return ret;

                to->timeline->global_sync[from->engine->id] = seqno;
                return 0;
        }

await_dma_fence:
        ret = i915_sw_fence_await_dma_fence(&to->submit,
                                            &from->fence, 0,
                                            GFP_KERNEL);
        return ret < 0 ? ret : 0;
}

int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
                                 struct dma_fence *fence)
{
        struct dma_fence **child = &fence;
        unsigned int nchild = 1;
        int ret;

        /* Note that if the fence-array was created in signal-on-any mode,
         * we should *not* decompose it into its individual fences. However,
         * we don't currently store which mode the fence-array is operating
         * in. Fortunately, the only user of signal-on-any is private to
         * amdgpu and we should not see any incoming fence-array from
         * sync-file being in signal-on-any mode.
         */
        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);

                child = array->fences;
                nchild = array->num_fences;
                GEM_BUG_ON(!nchild);
        }

        do {
                fence = *child++;
                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;

                /*
                 * Requests on the same timeline are explicitly ordered, along
                 * with their dependencies, by i915_add_request() which ensures
                 * that requests are submitted in-order through each ring.
                 */
                if (fence->context == req->fence.context)
                        continue;

                /* Squash repeated waits to the same timelines */
                if (fence->context != req->i915->mm.unordered_timeline &&
                    intel_timeline_sync_is_later(req->timeline, fence))
                        continue;

                if (dma_fence_is_i915(fence))
                        ret = i915_gem_request_await_request(req,
                                                             to_request(fence));
                else
                        ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
                                                            I915_FENCE_TIMEOUT,
                                                            GFP_KERNEL);
                if (ret < 0)
                        return ret;

                /* Record the latest fence used against each timeline */
                if (fence->context != req->i915->mm.unordered_timeline)
                        intel_timeline_sync_set(req->timeline, fence);
        } while (--nchild);

        return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write)
{
        struct dma_fence *excl;
        int ret = 0;

        if (write) {
                struct dma_fence **shared;
                unsigned int count, i;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        ret = i915_gem_request_await_dma_fence(to, shared[i]);
                        if (ret)
                                break;

                        dma_fence_put(shared[i]);
                }

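                /* On early exit above, release the references the await
                 * loop did not consume.
                 */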
                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                if (ret == 0)
                        ret = i915_gem_request_await_dma_fence(to, excl);

                dma_fence_put(excl);
        }

        return ret;
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        struct intel_timeline *timeline = request->timeline;
        struct drm_i915_gem_request *prev;
        u32 *cs;
        int err;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        trace_i915_gem_request_add(request);

        /* Make sure that no request gazumped us - if it was allocated after
         * our i915_gem_request_alloc() and called __i915_add_request() before
         * us, the timeline will hold its seqno which is later than ours.
         */
        GEM_BUG_ON(timeline->seqno != request->fence.seqno);

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                err = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(err, "engine->emit_flush() failed: %d!\n", err);
        }

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
        GEM_BUG_ON(IS_ERR(cs));
        request->postfix = intel_ring_offset(request, cs);

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */

        prev = i915_gem_active_raw(&timeline->last_request,
                                   &request->i915->drm.struct_mutex);
        if (prev) {
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
                if (engine->schedule)
                        __i915_priotree_add_dependency(&request->priotree,
                                                       &prev->priotree,
                                                       &request->dep,
                                                       0);
        }

        spin_lock_irq(&timeline->lock);
        list_add_tail(&request->link, &timeline->requests);
        spin_unlock_irq(&timeline->lock);

        GEM_BUG_ON(timeline->seqno != request->fence.seqno);
        i915_gem_active_set(&timeline->last_request, request);

        list_add_tail(&request->ring_link, &ring->request_list);
        request->emitted_jiffies = jiffies;

        /* Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
         * request - i.e. we may want to preempt the current request in order
         * to run a high priority dependency chain *before* we can execute this
         * request.
         *
         * This is called before the request is ready to run so that we can
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
        if (engine->schedule)
                engine->schedule(request, request->ctx->priority);

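        /* Committing under a bh-disabled section holds back any tasklet
         * scheduled by the submit until local_bh_enable(), which then runs
         * it immediately on this cpu rather than deferring it to the next
         * softirq tick.
         */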
        local_bh_disable();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /* Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined with respect to the current
         * CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
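        /* The shift by 10 divides by 1024 rather than 1000; the ~2% error
         * is irrelevant for a busywait heuristic.
         */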
        t = local_clock() >> 10;
        put_cpu();

        return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}

static bool __i915_spin_request(const struct drm_i915_gem_request *req,
                                u32 seqno, int state, unsigned long timeout_us)
{
        struct intel_engine_cs *engine = req->engine;
        unsigned int irq, cpu;

        GEM_BUG_ON(!seqno);

        /*
         * Only wait for the request if we know it is likely to complete.
         *
         * We don't track the timestamps around requests, nor the average
         * request length, so we do not have a good indicator that this
         * request will complete within the timeout. What we do know is the
         * order in which requests are executed by the engine and so we can
         * tell if the request has started. If the request hasn't started yet,
         * it is a fair assumption that it will not complete within our
         * relatively short timeout.
         */
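        /* The engine has started this request once it has passed seqno - 1,
         * i.e. the breadcrumb of the request queued immediately before us.
         */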
        if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
                return false;

        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quick as possible. However,
         * if it is a slow request, we want to sleep as quickly as possible.
         * The tradeoff between waiting and sleeping is roughly the time it
         * takes to sleep on a request, on the order of a microsecond.
         */

        irq = atomic_read(&engine->irq_count);
        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                        return seqno == i915_gem_request_global_seqno(req);

                /* Seqno are meant to be ordered *before* the interrupt. If
                 * we see an interrupt without a corresponding seqno advance,
                 * assume we won't see one in the near future but require
                 * the engine->seqno_barrier() to fixup coherency.
                 */
                if (atomic_read(&engine->irq_count) != irq)
                        break;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax();
        } while (!need_resched());

        return false;
}

static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
        if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
                return false;

        __set_current_state(TASK_RUNNING);
        i915_reset(request->i915, 0);
        return true;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
                       unsigned int flags,
                       long timeout)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
        DEFINE_WAIT_FUNC(reset, default_wake_function);
        DEFINE_WAIT_FUNC(exec, default_wake_function);
        struct intel_wait wait;

        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(debug_locks &&
                   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif
        GEM_BUG_ON(timeout < 0);

        if (i915_gem_request_completed(req))
                return timeout;

        if (!timeout)
                return -ETIME;

        trace_i915_gem_request_wait_begin(req, flags);

        add_wait_queue(&req->execute, &exec);
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(errq, &reset);

        intel_wait_init(&wait, req);

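        /* Phase one: wait for the request to be submitted and assigned a
         * global seqno; only then can we arm a breadcrumb waiter below.
         */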
restart:
        do {
                set_current_state(state);
                if (intel_wait_update_request(&wait, req))
                        break;

                if (flags & I915_WAIT_LOCKED &&
                    __i915_wait_request_check_and_reset(req))
                        continue;

                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        goto complete;
                }

                if (!timeout) {
                        timeout = -ETIME;
                        goto complete;
                }

                timeout = io_schedule_timeout(timeout);
        } while (1);

        GEM_BUG_ON(!intel_wait_has_seqno(&wait));
        GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));

        /* Optimistic short spin before touching IRQs */
        if (__i915_spin_request(req, wait.seqno, state, 5))
                goto complete;

        set_current_state(state);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;

        if (flags & I915_WAIT_LOCKED)
                __i915_wait_request_check_and_reset(req);

        for (;;) {
                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
                }

                if (!timeout) {
                        timeout = -ETIME;
                        break;
                }

                timeout = io_schedule_timeout(timeout);

                if (intel_wait_complete(&wait) &&
                    intel_wait_check_request(&wait, req))
                        break;

                set_current_state(state);

wakeup:
                /* Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
                if (__i915_request_irq_complete(req))
                        break;

                /* If the GPU is hung, and we hold the lock, reset the GPU
                 * and then check for completion. On a full reset, the engine's
                 * HW seqno will be advanced past us and we are complete.
                 * If we do a partial reset, we have to wait for the GPU to
                 * resume and update the breadcrumb.
                 *
                 * If we don't hold the mutex, we can just wait for the worker
                 * to come along and update the breadcrumb (either directly
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
                    __i915_wait_request_check_and_reset(req))
                        continue;

                /* Only spin if we know the GPU is processing this request */
                if (__i915_spin_request(req, wait.seqno, state, 2))
                        break;

                if (!intel_wait_check_request(&wait, req)) {
                        intel_engine_remove_wait(req->engine, &wait);
                        goto restart;
                }
        }

        intel_engine_remove_wait(req->engine, &wait);
complete:
        __set_current_state(TASK_RUNNING);
        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(errq, &reset);
        remove_wait_queue(&req->execute, &exec);
        trace_i915_gem_request_wait_end(req);

        return timeout;
}

static void engine_retire_requests(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *request, *next;
        u32 seqno = intel_engine_get_seqno(engine);
        LINUX_LIST_HEAD(retire);

        spin_lock_irq(&engine->timeline->lock);
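        /* The engine timeline holds requests in submission order, so we
         * can stop at the first request whose global seqno the hardware
         * has not yet passed.
         */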
        list_for_each_entry_safe(request, next,
                                 &engine->timeline->requests, link) {
                if (!i915_seqno_passed(seqno, request->global_seqno))
                        break;

                list_move_tail(&request->link, &retire);
        }
        spin_unlock_irq(&engine->timeline->lock);

        list_for_each_entry_safe(request, next, &retire, link)
                i915_gem_request_retire(request);
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (!dev_priv->gt.active_requests)
                return;

        for_each_engine(engine, dev_priv, id)
                engine_retire_requests(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_gem_request.c"
#endif