/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;
	struct irq_work semaphore_work;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

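/*
 * Example (illustrative sketch, not part of the original header): the
 * lockless RCU lookup described above struct i915_request relies on
 * i915_request_get_rcu() failing gracefully when the request has already
 * been freed (and possibly recycled by the slab). A caller holding only an
 * RCU-protected pointer slot ("active" and its "request" member are
 * hypothetical names) might acquire a reference along these lines:
 *
 *	struct i915_request *rq;
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(active->request);
 *	if (rq && !dma_fence_get_rcu(&rq->fence))
 *		rq = NULL; // refcount already hit zero; request being freed
 *	if (rq && rq != rcu_access_pointer(active->request)) {
 *		// the slab recycled the object behind our back; discard it
 *		i915_request_put(rq);
 *		rq = NULL;
 *	}
 *	rcu_read_unlock();
 *
 * Only after both checks succeed does the caller own a reference that it
 * must later release with i915_request_put().
 */
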
int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */

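/*
 * Example (illustrative sketch, not part of the original header): a typical
 * bounded, interruptible wait on a request, taking a temporary reference for
 * the duration of the wait. The timeout is in jiffies; i915_request_wait()
 * returns the remaining jiffies on success, or a negative error code (for
 * instance -ETIME if the timeout expires, or -EINTR if interrupted while
 * I915_WAIT_INTERRUPTIBLE is set):
 *
 *	long timeout;
 *
 *	rq = i915_request_get(rq);
 *	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				    msecs_to_jiffies(100));
 *	i915_request_put(rq);
 *	if (timeout < 0)
 *		return timeout; // interrupted or timed out
 */
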
static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is equal to or later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

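/*
 * Example (illustrative, not part of the original header): the signed
 * subtraction keeps the comparison correct across 32-bit seqno wraparound,
 * provided the two values are less than 2^31 apart:
 *
 *	i915_seqno_passed(2, 1);           // true:  (s32)1 >= 0
 *	i915_seqno_passed(1, 2);           // false: (s32)-1 < 0
 *	i915_seqno_passed(1, 0xfffffff0);  // true:  17 steps after the wrap
 */
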
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has reached that breadcrumb, or any later value, this request
 * is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}

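/*
 * Example (illustrative sketch, not part of the original header): the
 * predicates above describe successive points in a request's lifetime and
 * can be combined to report its current state, e.g. for debug output
 * (rq_state() is a hypothetical helper):
 *
 *	static const char *rq_state(const struct i915_request *rq)
 *	{
 *		if (i915_request_completed(rq))
 *			return "completed";
 *		if (i915_request_is_running(rq))
 *			return "running";   // on HW, past its start point
 *		if (i915_request_is_active(rq))
 *			return "active";    // submitted, e.g. busywaiting
 *		if (i915_request_is_ready(rq))
 *			return "ready";     // queued with the scheduler
 *		return "waiting";           // dependencies not yet signaled
 *	}
 *
 * Each check is only a snapshot; the request may change state as soon as
 * the predicate returns.
 */
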
static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */