/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

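/* Wake the current bottom-half waiter, if any. Returns ENGINE_WAKEUP_WAITER
 * when a waiter is present, OR'd with ENGINE_WAKEUP_ASLEEP if that waiter had
 * to be woken from sleep. Caller must hold b->lock.
 */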
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	wait = b->first_wait;
	if (wait) {
		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk))
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

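/* Locked wrapper around __intel_breadcrumbs_wakeup() for callers that do not
 * already hold b->lock.
 */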
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->lock, flags);

	return result;
}

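/* Interval until the next hangcheck probe: one DRM_I915_HANGCHECK_JIFFIES
 * period from now, rounded up to batch timer wakeups.
 */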
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

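/* Timer callback: while the irq is armed, verify that interrupts are still
 * being delivered; if they have stopped whilst a waiter sleeps, declare a
 * missed interrupt and fall back to the fake-irq timer.
 */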
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g. a low-priority task on a
	 * loaded system) and wait until it sleeps before declaring a missed
	 * interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt, as the GPU has stopped advancing
	 * but we still have a waiter. We assume all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s].
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
		set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */

	spin_lock_irqsave(&b->lock, flags);
	if (!__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irqrestore(&b->lock, flags);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

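/* Disable the user interrupt (if enabled) and mark the breadcrumbs as
 * disarmed. Caller must hold b->lock.
 */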
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	assert_spin_locked(&b->lock);

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	b->irq_armed = false;
}

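/* Disarm the breadcrumb irq once the engine idles, flagging any waiter that
 * is still asleep as having missed its wakeup.
 */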
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	if (!b->irq_armed)
		return;

	spin_lock_irqsave(&b->lock, flags);

	/* We only disarm the irq when we are idle (all requests completed),
	 * so if there remains a sleeping waiter, it missed the request
	 * completion.
	 */
	if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
		set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);

	__intel_engine_disarm_breadcrumbs(engine);

	spin_unlock_irqrestore(&b->lock, flags);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavyweight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

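/* Arm the appropriate watchdog: the fake-irq timer when real interrupts are
 * unavailable or known to go missing, otherwise the hangcheck timer.
 */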
static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->irq_armed)
		return;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;
	GEM_BUG_ON(b->irq_enabled);

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, so we can forgo holding our own wakeref
	 * for the interrupt: before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	enable_fake_irq(b);
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake-up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->first_wait = to_wait(next);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need the next bottom-half to wake up.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (test_bit(ENGINE_IRQ_BREADCRUMB,
				     &engine->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->first_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock_irq(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->lock);

	return first;
}

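/* Should we continue waking waiters along the chain? Only while there is a
 * next waiter and its task priority value does not exceed the given
 * threshold (a smaller prio value denotes a higher priority task).
 */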
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

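/* Remove a waiter from the rbtree and, if it was the bottom-half, promote the
 * next waiter (waking any already-completed waiters along the way). Caller
 * must hold b->lock.
 */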
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	assert_spin_locked(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->first_wait = to_wait(next);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->first_wait->tsk);
		} else {
			b->first_wait = NULL;
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock_irq(&b->lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->lock);
}

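/* A signal remains valid only while its embedded wait still corresponds to
 * the request, i.e. the request has not been recycled under a new seqno.
 */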
static bool signal_valid(const struct drm_i915_gem_request *request)
{
	return intel_wait_check_request(&request->signaling.wait, request);
}

static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is already completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

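/* Run the signaler as a realtime SCHED_FIFO thread so that fence signaling
 * is not starved by ordinary tasks.
 */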
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		rcu_read_lock();
		request = rcu_dereference(b->first_signal);
		if (request)
			request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			spin_lock_irq(&b->lock);

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			__intel_engine_remove_wait(engine,
						   &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			if (request == rcu_access_pointer(b->first_signal)) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				rcu_assign_pointer(b->first_signal,
						   rb ? to_signaler(rb) : NULL);
			}
			rb_erase(&request->signaling.node, &b->signals);
			RB_CLEAR_NODE(&request->signaling.node);

			spin_unlock_irq(&b->lock);

			/* Drop the reference taken when the request was
			 * inserted into the signal tree.
			 */
			i915_gem_request_put(request);
		} else {
			DEFINE_WAIT(exec);

			if (kthread_should_stop()) {
				GEM_BUG_ON(request);
				break;
			}

			if (request)
				add_wait_queue(&request->execute, &exec);

			schedule();

			if (request)
				remove_wait_queue(&request->execute, &exec);

			if (kthread_should_park())
				kthread_parkme();
		}

		/* Drop our temporary reference from the RCU lookup above. */
		i915_gem_request_put(request);
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	assert_spin_locked(&request->lock);

	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(seqno,
				      to_signaler(parent)->signaling.wait.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		rcu_assign_pointer(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

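/* Undo intel_engine_enable_signaling(): remove the request from the signal
 * tree and its wait from the waiter tree, dropping the tree's reference.
 * Called with request->lock held.
 */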
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	assert_spin_locked(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->lock);

	request->signaling.wait.seqno = 0;
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

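/* Stop both watchdog timers and forget any missed-interrupt state. */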
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), so there is no possibility of a
	 * missed interrupt as we enabled the irq, and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->first_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

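/* Report whether any waiter or signaler remains outstanding on this engine,
 * kicking each so that it re-checks its completion status.
 */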
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool busy = false;

	spin_lock_irq(&b->lock);

	if (b->first_wait) {
		wake_up_process(b->first_wait->tsk);
		busy |= intel_engine_flag(engine);
	}

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);
		busy |= intel_engine_flag(engine);
	}

	spin_unlock_irq(&b->lock);

	return busy;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif