/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef I915_TIMELINE_H
#define I915_TIMELINE_H

#include <linux/list.h>
#include <linux/kref.h>

#include "i915_active.h"
#include "i915_request.h"
#include "i915_syncmap.h"
#include "i915_utils.h"

struct i915_vma;
struct i915_timeline_hwsp;

/*
 * struct i915_timeline - an ordered sequence of requests (one dma-fence
 * context) together with its breadcrumb bookkeeping.
 */
struct i915_timeline {
	/*
	 * NOTE(review): fence_context looks like the dma-fence context id
	 * allocated for this timeline and seqno the most recent breadcrumb
	 * value assigned on it -- confirm against i915_timeline_init() and
	 * request emission.
	 */
	u64 fence_context;
	u32 seqno;

	/* Guards the request list; subclassed for lockdep (see below) */
	spinlock_t lock;
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1

	/* Pin bookkeeping for the HWSP backing store, managed by
	 * i915_timeline_pin()/i915_timeline_unpin().
	 */
	unsigned int pin_count;
	/*
	 * NOTE(review): hwsp_seqno appears to be the CPU pointer to the
	 * seqno slot in the hardware status page, with hwsp_ggtt/hwsp_offset
	 * locating that slot in the GGTT -- verify against i915_timeline_init.
	 */
	const u32 *hwsp_seqno;
	struct i915_vma *hwsp_ggtt;
	u32 hwsp_offset;

	/* Whether requests on this timeline start with a breadcrumb packet */
	bool has_initial_breadcrumb;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/* Contains an RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_active_request_get_request_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_active_request last_request;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and can not rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
	struct i915_syncmap *sync;

	/**
	 * Barrier provides the ability to serialize ordering between different
	 * timelines.
	 *
	 * Users can call i915_timeline_set_barrier which will make all
	 * subsequent submissions to this timeline be executed only after the
	 * barrier has been completed.
	 */
	struct i915_active_request barrier;

	/* NOTE(review): presumably the node on a device-wide timeline list
	 * (see i915_timelines_init/park/fini) -- confirm.
	 */
	struct list_head link;
	const char *name;
	struct drm_i915_private *i915;

	/* Refcount; final put releases via __i915_timeline_free() */
	struct kref kref;
};

int i915_timeline_init(struct drm_i915_private *i915,
		       struct i915_timeline *tl,
		       const char *name,
		       struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);

/**
 * i915_timeline_set_subclass - assign a lockdep subclass to a timeline lock
 * @timeline: timeline whose spinlock is being classified
 * @subclass: lockdep subclass, either TIMELINE_CLIENT or TIMELINE_ENGINE
 *
 * Gives @timeline->lock a distinct lockdep subclass so that the two kinds
 * of timeline locks are tracked as separate classes by lockdep.
 */
static inline void
i915_timeline_set_subclass(struct i915_timeline *timeline,
			   unsigned int subclass)
{
	lockdep_set_subclass(&timeline->lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&timeline->lock.dep_map);
	lock_map_release(&timeline->lock.dep_map);
	local_irq_enable();
#endif
}

struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
		     const char *name,
		     struct i915_vma *global_hwsp);

Chris Wilsona89d1f92018-05-02 17:38:39 +0100125static inline struct i915_timeline *
126i915_timeline_get(struct i915_timeline *timeline)
127{
128 kref_get(&timeline->kref);
129 return timeline;
130}
131
132void __i915_timeline_free(struct kref *kref);
133static inline void i915_timeline_put(struct i915_timeline *timeline)
134{
135 kref_put(&timeline->kref, __i915_timeline_free);
136}
137
138static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
139 u64 context, u32 seqno)
Chris Wilson47979482017-05-03 10:39:21 +0100140{
141 return i915_syncmap_set(&tl->sync, context, seqno);
142}
143
Chris Wilsona89d1f92018-05-02 17:38:39 +0100144static inline int i915_timeline_sync_set(struct i915_timeline *tl,
145 const struct dma_fence *fence)
Chris Wilson47979482017-05-03 10:39:21 +0100146{
Chris Wilsona89d1f92018-05-02 17:38:39 +0100147 return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
Chris Wilson47979482017-05-03 10:39:21 +0100148}
149
Chris Wilsona89d1f92018-05-02 17:38:39 +0100150static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
151 u64 context, u32 seqno)
Chris Wilson47979482017-05-03 10:39:21 +0100152{
153 return i915_syncmap_is_later(&tl->sync, context, seqno);
154}
155
Chris Wilsona89d1f92018-05-02 17:38:39 +0100156static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
157 const struct dma_fence *fence)
Chris Wilson47979482017-05-03 10:39:21 +0100158{
Chris Wilsona89d1f92018-05-02 17:38:39 +0100159 return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
Chris Wilson47979482017-05-03 10:39:21 +0100160}

int i915_timeline_pin(struct i915_timeline *tl);
void i915_timeline_unpin(struct i915_timeline *tl);

void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);

/**
 * i915_timeline_set_barrier - orders submission between different timelines
 * @tl: timeline to set the barrier on
 * @rq: request after which new submissions can proceed
 *
 * Sets the passed in request as the serialization point for all subsequent
 * submissions on @tl. Subsequent requests will not be submitted to GPU
 * until the barrier has been completed.
 *
 * Returns the result of i915_active_request_set().
 */
static inline int
i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
{
	return i915_active_request_set(&tl->barrier, rq);
}
Tvrtko Ursulin78108582019-02-05 09:50:30 +0000183
Chris Wilson73cb9702016-10-28 13:58:46 +0100184#endif