/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

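/*
 * Slab caches for the scheduler's dependency and priority-list nodes,
 * registered with the i915 globals machinery (see the shrink/exit hooks
 * at the bottom of this file) so they can be trimmed and torn down with
 * the rest of the driver's global state.
 */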
static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

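/*
 * Debug-only sanity check: the priolist rbtree must be sorted strictly by
 * descending priority, and every non-empty request list within a priolist
 * must have its corresponding bit set in p->used.
 */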
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}

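/*
 * Find (or create) the request list for @prio on @engine. The external
 * priority value is split in two: the high bits select the coarse level
 * used to index the rbtree of priority lists, while the low bits select
 * one of the I915_PRIORITY_COUNT fifo buckets within that list. Must be
 * called with the engine's timeline lock held; note the GFP_ATOMIC
 * allocation, as this runs in atomic context.
 */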
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->timeline.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/*
			 * To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}

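/*
 * Cache for state that is only valid while an engine's timeline lock is
 * continuously held; sched_lock_engine() resets it whenever it has to
 * drop one engine lock and acquire another.
 */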
struct sched_cache {
	struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct intel_engine_cs *engine;

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (engine = READ_ONCE(rq->engine))) {
		spin_unlock(&locked->timeline.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->timeline.lock);
		locked = engine;
	}

	GEM_BUG_ON(locked != engine);
	return locked;
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority | __NO_PREEMPTION;
}

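/*
 * Kick the execlists tasklet only when it may have useful work to do:
 * either nothing is currently in flight, or the new priority is high
 * enough to preempt the currently executing request.
 */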
static void kick_submission(struct intel_engine_cs *engine, int prio)
{
	const struct i915_request *inflight =
		port_request(engine->execlists.port);

	/*
	 * If we are already the currently executing context, don't
	 * bother evaluating if we should preempt ourselves, or if
	 * we expect nothing to change as a result of running the
	 * tasklet, i.e. we have not changed the priority queue
	 * sufficiently to oust the running context.
	 */
	if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
		return;

	tasklet_hi_schedule(&engine->execlists.tasklet);
}

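/*
 * Apply a priority bump to @node and, transitively, to every request it
 * depends upon (priority inheritance), so that no dependency can stall
 * at a lower priority than the request waiting upon it. Caller must
 * hold schedule_lock.
 */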
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	const int prio = attr->priority;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (prio <= READ_ONCE(node->attr.priority))
		return;

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add each node's own
	 * dependencies to the end of the list (this may include an already
	 * visited request) and continue to walk onward onto the new
	 * dependencies. The end result is a topological list of requests in
	 * reverse order; the last element in the list is the request we must
	 * execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->timeline.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	engine = sched_lock_engine(node, engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->timeline.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine != engine);

		node->attr.priority = prio;
		if (!list_empty(&node->link)) {
			GEM_BUG_ON(intel_engine_is_virtual(engine));
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		} else {
			/*
			 * If the request is not in the priolist queue because
			 * it is not yet runnable, then it doesn't contribute
			 * to our preemption decisions. On the other hand,
			 * if the request is on the HW, it too is not in the
			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
			if (!i915_sw_fence_done(&node_to_request(node)->submit))
				continue;
		}

		if (prio <= engine->execlists.queue_priority_hint)
			continue;

		engine->execlists.queue_priority_hint = prio;

		/* Defer (tasklet) submission until after all of our updates. */
		kick_submission(engine, prio);
	}

	spin_unlock(&engine->timeline.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}

static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
	struct i915_sched_attr attr = node->attr;

	attr.priority |= bump;
	__i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	unsigned long flags;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_irqsave(&schedule_lock, flags);
	__bump_priority(&rq->sched, bump);
	spin_unlock_irqrestore(&schedule_lock, flags);
}

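/*
 * Initialise the scheduling bookkeeping embedded in each request: empty
 * dependency lists and an invalid priority until one is first assigned.
 */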
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}

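/*
 * Link @node to @signal so that @node is not executed before @signal.
 * Returns true if @dep was inserted into the dependency lists; false if
 * @signal has already completed and the dependency was not recorded, in
 * which case the caller retains ownership of @dep.
 */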
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		list_add(&dep->wait_link, &signal->waiters_list);
		list_add(&dep->signal_link, &node->signalers_list);
		dep->signaler = signal;
		dep->flags = flags;

		/* Keep track of whether anyone on this chain has a semaphore */
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
		    !node_started(signal))
			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

		/*
		 * As we do not allow WAIT to preempt inflight requests,
		 * once we have executed a request, along with triggering
		 * any execution callbacks, we must preserve its ordering
		 * within the non-preemptible FIFO.
		 */
		BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
		if (flags & I915_DEPENDENCY_EXTERNAL)
			__bump_priority(signal, __NO_PREEMPTION);

		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      I915_DEPENDENCY_EXTERNAL |
					      I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };

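/*
 * Create the slab caches used above and register the shrink/exit hooks
 * with the i915 globals machinery.
 */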
int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist,
					    SLAB_HWCACHE_ALIGN);
	if (!global.slab_priorities)
		goto err_dependencies;

	i915_global_register(&global.base);
	return 0;

err_dependencies:
	/* Unwind the dependencies cache created above, not the failed one */
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}