/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
}

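/*
 * Dependency edges between sched nodes are drawn from a dedicated slab
 * cache. Only edges carrying I915_DEPENDENCY_ALLOC are returned to the
 * cache in i915_sched_node_fini(); edges embedded in other structures
 * are left to their owners.
 */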
static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}

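/*
 * Record that @node waits upon @signal. The edge is linked into both
 * nodes under schedule_lock; if @signal has already completed there is
 * nothing to wait for, the edge is not installed and false is returned
 * so that the caller may release @dep.
 */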
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                list_add(&dep->wait_link, &signal->waiters_list);
                list_add(&dep->signal_link, &node->signalers_list);
                dep->signaler = signal;
                dep->flags = flags;

                ret = true;
        }

        spin_unlock(&schedule_lock);

        return ret;
}

int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

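/*
 * Sever the node from the dependency graph: drop the edges we waited
 * upon and unhook everyone still waiting upon us, returning any edges
 * marked I915_DEPENDENCY_ALLOC to the slab cache.
 */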
void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        GEM_BUG_ON(!list_empty(&node->link));

        spin_lock(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        spin_unlock(&schedule_lock);
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

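/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) consistency check: the cached
 * rbtree of priolists must be in strictly descending priority order and
 * each priolist's used bitmask must cover every non-empty request list.
 */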
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

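/*
 * Find (or create) the list into which a request of the given priority is
 * queued on this engine. The user priority selects an i915_priolist in the
 * execlists rbtree, while the internal priority bits select a fifo bucket
 * within it. Must be called with the engine timeline lock held; if a new
 * priolist cannot be allocated, scheduling degrades to a single fifo
 * (execlists->no_priolist) rather than losing the request.
 */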
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->timeline.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /*
                         * To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}

struct sched_cache {
        struct list_head *priolist;
};

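/*
 * The dependency walk below may hop between engines: release the timeline
 * lock of the previously locked engine, take the lock of the engine owning
 * @node's request, and invalidate the per-engine cache on every switch.
 */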
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        struct intel_engine_cs *engine = node_to_request(node)->engine;

        GEM_BUG_ON(!locked);

        if (engine != locked) {
                spin_unlock(&locked->timeline.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->timeline.lock);
        }

        return engine;
}

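/*
 * Report whether @rq's context is the one currently executing on @engine,
 * i.e. whether it occupies the first execlists port.
 */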
static bool inflight(const struct i915_request *rq,
                     const struct intel_engine_cs *engine)
{
        const struct i915_request *active;

        if (!i915_request_is_active(rq))
                return false;

        active = port_request(engine->execlists.port);
        return active->hw_context == rq->hw_context;
}

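/*
 * Propagate a priority bump along the dependency graph: every request that
 * @rq depends upon is raised to at least @attr->priority so that it cannot
 * be starved behind us, queued requests are moved to their new priolist,
 * and the submission tasklet is kicked where preemption may now be needed.
 * The caller holds schedule_lock so that the dfs_link embedded in each
 * dependency may be borrowed for the walk.
 */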
static void __i915_schedule(struct i915_request *rq,
                            const struct i915_sched_attr *attr)
{
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        const int prio = attr->priority;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (i915_request_completed(rq))
                return;

        if (prio <= READ_ONCE(rq->sched.attr.priority))
                return;

        stack.signaler = &rq->sched;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signaler, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list of dependencies, we add all of its dependencies
         * to the end of the list (this may include an already visited
         * request) and continue to walk onwards onto the new dependencies. The
         * end result is a topological list of requests in reverse order, the
         * last element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&rq->sched.link));
                rq->sched.attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = rq->engine;
        spin_lock_irq(&engine->timeline.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                INIT_LIST_HEAD(&dep->dfs_link);

                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->timeline.lock);

                /* Recheck after acquiring the engine->timeline.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                node->attr.priority = prio;
                if (!list_empty(&node->link)) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                } else {
                        /*
                         * If the request is not in the priolist queue because
                         * it is not yet runnable, then it doesn't contribute
                         * to our preemption decisions. On the other hand,
                         * if the request is on the HW, it too is not in the
                         * queue; but in that case we may still need to reorder
                         * the inflight requests.
                         */
                        if (!i915_sw_fence_done(&node_to_request(node)->submit))
                                continue;
                }

                if (prio <= engine->execlists.queue_priority_hint)
                        continue;

                engine->execlists.queue_priority_hint = prio;

                /*
                 * If we are already the currently executing context, don't
                 * bother evaluating if we should preempt ourselves.
                 */
                if (inflight(node_to_request(node), engine))
                        continue;

                /* Defer (tasklet) submission until after all of our updates. */
                tasklet_hi_schedule(&engine->execlists.tasklet);
        }

        spin_unlock_irq(&engine->timeline.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock(&schedule_lock);
        __i915_schedule(rq, attr);
        spin_unlock(&schedule_lock);
}

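/*
 * Apply a bump to the internal priority bits of an already-prioritised
 * request (@bump must lie within I915_PRIORITY_MASK); requests that have
 * not yet been assigned a priority are skipped.
 */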
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        struct i915_sched_attr attr;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

        if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
                return;

        spin_lock_bh(&schedule_lock);

        attr = rq->sched.attr;
        attr.priority |= bump;
        __i915_schedule(rq, &attr);

        spin_unlock_bh(&schedule_lock);
}

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}

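/*
 * Lifetime management for the two slab caches above: created once at init,
 * trimmed of unused slabs when asked to shrink, and destroyed on exit.
 */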
int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_dependencies;

        return 0;

err_dependencies:
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}

void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}