/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the timer will wake up
	 * every jiffy and kick the oldest waiter to do the coherent
	 * seqno check; it stops rearming itself as soon as there is
	 * no waiter left to wake.
	 */
	rcu_read_lock();
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	rcu_read_unlock();
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may race with the actual generation of the
	 * interrupt, so we still need to force the barrier before reading
	 * the seqno, just in case.
	 */
	engine->breadcrumbs.irq_posted = true;

	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);

	engine->breadcrumbs.irq_posted = false;
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffy! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
		mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a GPU hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation!
	 */
	i915_queue_hangcheck(i915);
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}

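/* Each waiter embeds a struct intel_wait (declared in intel_ringbuffer.h),
 * and to_wait() recovers it from its rbtree link. For context, the
 * structure is approximately (paraphrased here, not authoritative):
 *
 *	struct intel_wait {
 *		struct rb_node node;
 *		struct task_struct *tsk;
 *		u32 seqno;
 *	};
 */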
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return container_of(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, so
	 * we can reduce some of the sequential wake-up latency by taking
	 * action ourselves and waking up the completed tasks in parallel.
	 * Also, by removing stale elements from the tree, we may be able
	 * to reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !b->irq_seqno_bh);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need the next bottom-half to wake up.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->first_wait = wait;
		smp_store_mb(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!b->irq_seqno_bh);
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->lock);

	return first;
}
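
/* A minimal sketch of the waiter's side (cf. __i915_wait_request), assuming
 * an on-stack struct intel_wait initialised with intel_wait_init() from
 * intel_ringbuffer.h:
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *				      wait.seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 *
 * The real wait loop layers timeouts, signal handling and the bottom-half
 * bookkeeping on top of this skeleton.
 */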

void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
{
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->irq_seqno_bh);
		} else {
			b->first_wait = NULL;
			WRITE_ONCE(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock(&b->lock);
}

static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is complete.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return container_of(rb, struct drm_i915_gem_request, signaling.node);
}

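/* SCHED_FIFO priority 1 is the minimum realtime priority: high enough for
 * the signaler to preempt every normal (SCHED_NORMAL) task, but without
 * competing against other realtime users of the system.
 */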
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaler. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);
			fence_signal(&request->fence);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock(&b->lock);

			i915_gem_request_unreference(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

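/* Register a request with the signaler thread. This is typically reached
 * through the dma-buf fence machinery, i.e. an .enable_signaling callback
 * on the request's fence, the first time a third party asks to be notified
 * of completion (the exact call path lives in the request code, not here).
 */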
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
		return;

	spin_lock(&b->lock);
	if (unlikely(request->signaling.wait.tsk)) {
		wakeup = false;
		goto unlock;
	}

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->fence.seqno;
	i915_gem_request_reference(request);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaler thread. As per usual, only the oldest
	 * waiter (not just the signaler) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->fence.seqno,
				      to_signaler(parent)->fence.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

unlock:
	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	del_timer_sync(&b->fake_irq);
}

unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	/* To avoid the task_struct disappearing beneath us as we wake up
	 * the process, we must first inspect the task_struct->state under the
	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
	 * rcu_read_lock().
	 */
	rcu_read_lock();
	for_each_engine(engine, i915)
		if (unlikely(intel_engine_wakeup(engine)))
			mask |= intel_engine_flag(engine);
	rcu_read_unlock();

	return mask;
}

unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	for_each_engine(engine, i915) {
		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
			wake_up_process(engine->breadcrumbs.signaler);
			mask |= intel_engine_flag(engine);
		}
	}

	return mask;
}
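
/* Both kick helpers wake every sleeping waiter and signaler across the
 * engines so that each redoes its coherent seqno check, and report which
 * engines had anyone to wake. They are intended for callers elsewhere in
 * the driver, typically as part of "missed interrupt" or reset handling
 * (the exact call sites live outside this file).
 */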