// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;
	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
	int irq;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

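/* Create the fence that gets signalled when the job completes on @js_num.
 * Seqnos come from the slot's emit_seqno counter and the fence lives on
 * that slot's fence_context timeline.
 */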
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}

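/* With the jobchain disambiguation feature, jobs queued on a slot
 * alternate between an "even" and an "odd" chain based on the parity of
 * their done-fence seqno; panfrost_job_close() relies on this to
 * hard-stop one specific chain with JS_COMMAND_HARD_STOP_0/1.
 */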
static u32
panfrost_get_job_chain_flag(const struct panfrost_job *job)
{
	struct panfrost_fence *f = to_panfrost_fence(job->done_fence);

	if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
		return 0;

	return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
}

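/* Pop the oldest job from a slot's two-deep software queue, which mirrors
 * the HW current/_NEXT job registers. Called with job_lock held.
 */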
static struct panfrost_job *
panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
{
	struct panfrost_job *job = pfdev->jobs[slot][0];

	WARN_ON(!job);
	pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
	pfdev->jobs[slot][1] = NULL;

	return job;
}

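/* Queue a job on a slot and return the subslot (0 or 1) it landed in.
 * Two queued jobs must carry different chain flags so they can be told
 * apart when one of them has to be stopped. Called with job_lock held.
 */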
static unsigned int
panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
		     struct panfrost_job *job)
{
	if (WARN_ON(!job))
		return 0;

	if (!pfdev->jobs[slot][0]) {
		pfdev->jobs[slot][0] = job;
		return 0;
	}

	WARN_ON(pfdev->jobs[slot][1]);
	pfdev->jobs[slot][1] = job;
	WARN_ON(panfrost_get_job_chain_flag(job) ==
		panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
	return 1;
}

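/* Program the slot's _NEXT registers with the job chain address, affinity
 * and config, then kick JS_COMMAND_START, unless a reset is pending.
 */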
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	unsigned int subslot;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
		return;

	cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
	job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start
	 */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
	       JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
	       JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
	       panfrost_get_job_chain_flag(job);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */

	spin_lock(&pfdev->js->job_lock);
	subslot = panfrost_enqueue_job(pfdev, js, job);
	/* Don't queue the job if a reset is in progress */
	if (!atomic_read(&pfdev->reset.pending)) {
		job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
		dev_dbg(pfdev->dev,
			"JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
			job, js, subslot, jc_head, cfg & 0xf);
	}
	spin_unlock(&pfdev->js->job_lock);
}

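/* Register each BO's current reservation fences as dependencies of the
 * scheduler job, so it won't run before earlier users of the BOs finish.
 */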
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct drm_sched_job *job)
{
	int i, ret;

	for (i = 0; i < bo_count; i++) {
		/* panfrost always uses write mode in its current uapi */
		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
							      true);
		if (ret)
			return ret;
	}

	return 0;
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

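/* Submission entry point: lock all BO reservations, arm the scheduler
 * job, record its implicit dependencies, push it to the scheduler entity
 * and attach the render-done fence to the BOs.
 */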
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret)
		return ret;

	mutex_lock(&pfdev->sched_lock);
	drm_sched_job_arm(&job->base);

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
					     &job->base);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	kref_get(&job->refcount); /* put by scheduler job completion */

	drm_sched_entity_push_job(&job->base);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	/* Nothing to execute: can happen if the job has finished while
	 * we were resetting the GPU.
	 */
	if (!job->jc)
		return NULL;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return fence;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

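/* Decode JS_STATUS for a failed job and decide what to do: resume it if
 * it was merely soft-stopped, mark it canceled if it was hard-stopped, or
 * flag a fault, then kick the scheduler fault path when the exception
 * requires a GPU reset. Called with job_lock held.
 */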
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
				    struct panfrost_job *job,
				    unsigned int js)
{
	u32 js_status = job_read(pfdev, JS_STATUS(js));
	const char *exception_name = panfrost_exception_name(js_status);
	bool signal_fence = true;

	if (!panfrost_exception_is_fault(js_status)) {
		dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
			js, exception_name,
			job_read(pfdev, JS_HEAD_LO(js)),
			job_read(pfdev, JS_TAIL_LO(js)));
	} else {
		dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
			js, exception_name,
			job_read(pfdev, JS_HEAD_LO(js)),
			job_read(pfdev, JS_TAIL_LO(js)));
	}

	if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) {
		/* Update the job head so we can resume */
		job->jc = job_read(pfdev, JS_TAIL_LO(js)) |
			  ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32);

		/* The job will be resumed, don't signal the fence */
		signal_fence = false;
	} else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) {
		/* Job has been hard-stopped, flag it as canceled */
		dma_fence_set_error(job->done_fence, -ECANCELED);
		job->jc = 0;
	} else if (panfrost_exception_is_fault(js_status)) {
		/* We might want to provide finer-grained error code based on
		 * the exception type, but unconditionally setting to EINVAL
		 * is good enough for now.
		 */
		dma_fence_set_error(job->done_fence, -EINVAL);
		job->jc = 0;
	}

	panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
	panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

	if (signal_fence)
		dma_fence_signal_locked(job->done_fence);

	pm_runtime_put_autosuspend(pfdev->dev);

	if (panfrost_exception_needs_reset(pfdev, js_status)) {
		atomic_set(&pfdev->reset.pending, 1);
		drm_sched_fault(&pfdev->js->queue[js].sched);
	}
}

static void panfrost_job_handle_done(struct panfrost_device *pfdev,
				     struct panfrost_job *job)
{
	/* Set ->jc to 0 to avoid re-submitting an already finished job (can
	 * happen when we receive the DONE interrupt while doing a GPU reset).
	 */
	job->jc = 0;
	panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
	panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

	dma_fence_signal_locked(job->done_fence);
	pm_runtime_put_autosuspend(pfdev->dev);
}

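/* Drain one batch of JOB_INT_RAWSTAT bits: collect done/failed jobs,
 * handle them, then restart the second-subslot job that was stopped when
 * the job ahead of it failed. Called with job_lock held.
 */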
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
	struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
	struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
	u32 js_state = 0, js_events = 0;
	unsigned int i, j;

	/* First we collect all failed/done jobs. */
	while (status) {
		u32 js_state_mask = 0;

		for (j = 0; j < NUM_JOB_SLOTS; j++) {
			if (status & MK_JS_MASK(j))
				js_state_mask |= MK_JS_MASK(j);

			if (status & JOB_INT_MASK_DONE(j)) {
				if (done[j][0])
					done[j][1] = panfrost_dequeue_job(pfdev, j);
				else
					done[j][0] = panfrost_dequeue_job(pfdev, j);
			}

			if (status & JOB_INT_MASK_ERR(j)) {
				/* Cancel the next submission. Will be submitted
				 * after we're done handling this failure if
				 * there's no reset pending.
				 */
				job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
				failed[j] = panfrost_dequeue_job(pfdev, j);
			}
		}

		/* JS_STATE is sampled when JOB_INT_CLEAR is written.
		 * For each BIT(slot) or BIT(slot + 16) bit written to
		 * JOB_INT_CLEAR, the corresponding bits in JS_STATE
		 * (BIT(slot) and BIT(slot + 16)) are updated, but this
		 * is racy. If we only have one job done at the time we
		 * read JOB_INT_RAWSTAT but the second job fails before we
		 * clear the status, we end up with a status containing
		 * only the DONE bit and consider both jobs as DONE since
		 * JS_STATE reports both NEXT and CURRENT as inactive.
		 * To prevent that, let's repeat these clear+read steps
		 * until status is 0.
		 */
		job_write(pfdev, JOB_INT_CLEAR, status);
		js_state &= ~js_state_mask;
		js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask;
		js_events |= status;
		status = job_read(pfdev, JOB_INT_RAWSTAT);
	}

	/* Then we handle the dequeued jobs. */
	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		if (!(js_events & MK_JS_MASK(j)))
			continue;

		if (failed[j]) {
			panfrost_job_handle_err(pfdev, failed[j], j);
		} else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {
			/* When the current job doesn't fail, the JM dequeues
			 * the next job without waiting for an ACK; this means
			 * we can have 2 jobs dequeued and only catch the
			 * interrupt when the second one is done. If both slots
			 * are inactive, but one job remains in pfdev->jobs[j],
			 * consider it done. Of course that doesn't apply if a
			 * failure happened since we cancelled execution of the
			 * job in _NEXT (see above).
			 */
			if (WARN_ON(!done[j][0]))
				done[j][0] = panfrost_dequeue_job(pfdev, j);
			else
				done[j][1] = panfrost_dequeue_job(pfdev, j);
		}

		for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
			panfrost_job_handle_done(pfdev, done[j][i]);
	}

	/* And finally we requeue jobs that were waiting in the second slot
	 * and have been stopped if we detected a failure on the first slot.
	 */
	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		if (!(js_events & MK_JS_MASK(j)))
			continue;

		if (!failed[j] || !pfdev->jobs[j][0])
			continue;

		if (pfdev->jobs[j][0]->jc == 0) {
			/* The job was cancelled, signal the fence now */
			struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);

			dma_fence_set_error(canceled->done_fence, -ECANCELED);
			panfrost_job_handle_done(pfdev, canceled);
		} else if (!atomic_read(&pfdev->reset.pending)) {
			/* Requeue the job we removed if no reset is pending */
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
		}
	}
}

static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
{
	u32 status = job_read(pfdev, JOB_INT_RAWSTAT);

	while (status) {
		pm_runtime_mark_last_busy(pfdev->dev);

		spin_lock(&pfdev->js->job_lock);
		panfrost_job_handle_irq(pfdev, status);
		spin_unlock(&pfdev->js->job_lock);
		status = job_read(pfdev, JOB_INT_RAWSTAT);
	}
}

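/* Return the slots that js_state still reports active, pruning slots that
 * have raised an interrupt in the meantime (their state is re-evaluated
 * once the interrupt is handled). Used while polling for soft-stops.
 */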
static u32 panfrost_active_slots(struct panfrost_device *pfdev,
				 u32 *js_state_mask, u32 js_state)
{
	u32 rawstat;

	if (!(js_state & *js_state_mask))
		return 0;

	rawstat = job_read(pfdev, JOB_INT_RAWSTAT);
	if (rawstat) {
		unsigned int i;

		for (i = 0; i < NUM_JOB_SLOTS; i++) {
			if (rawstat & MK_JS_MASK(i))
				*js_state_mask &= ~MK_JS_MASK(i);
		}
	}

	return js_state & *js_state_mask;
}

static void
panfrost_reset(struct panfrost_device *pfdev,
	       struct drm_sched_job *bad)
{
	u32 js_state, js_state_mask = 0xffffffff;
	unsigned int i, j;
	bool cookie;
	int ret;

	if (!atomic_read(&pfdev->reset.pending))
		return;

	/* Stop the schedulers.
	 *
	 * FIXME: We temporarily get out of the dma_fence_signalling section
	 * because the cleanup path generates lockdep splats when taking locks
	 * to release job resources. We should rework the code to follow this
	 * pattern:
	 *
	 *	try_lock
	 *	if (locked)
	 *		release
	 *	else
	 *		schedule_work_to_release_later
	 */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched, bad);

	cookie = dma_fence_begin_signalling();

	if (bad)
		drm_sched_increase_karma(bad);

	/* Mask job interrupts and synchronize to make sure we won't be
	 * interrupted during our reset.
	 */
	job_write(pfdev, JOB_INT_MASK, 0);
	synchronize_irq(pfdev->js->irq);

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* Cancel the next job and soft-stop the running job. */
		job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
		job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP);
	}

	/* Wait at most 10ms for soft-stops to complete */
	ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state,
				 !panfrost_active_slots(pfdev, &js_state_mask, js_state),
				 10, 10000);

	if (ret)
		dev_err(pfdev->dev, "Soft-stop failed\n");

	/* Handle the remaining interrupts before we reset. */
	panfrost_job_handle_irqs(pfdev);

	/* Remaining interrupts have been handled, but we might still have
	 * stuck jobs. Let's make sure the PM counters stay balanced by
	 * manually calling pm_runtime_put_noidle() and
	 * panfrost_devfreq_record_idle() for each stuck job.
	 */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
		}
	}
	memset(pfdev->jobs, 0, sizeof(pfdev->jobs));
	spin_unlock(&pfdev->js->job_lock);

	/* Proceed with reset now. */
	panfrost_device_reset(pfdev);

	/* panfrost_device_reset() unmasks job interrupts, but we want to
	 * keep them masked a bit longer.
	 */
	job_write(pfdev, JOB_INT_MASK, 0);

	/* GPU has been reset, we can clear the reset pending bit. */
	atomic_set(&pfdev->reset.pending, 0);

	/* Now resubmit jobs that were previously queued but didn't have a
	 * chance to finish.
	 * FIXME: We temporarily get out of the DMA fence signalling section
	 * while resubmitting jobs because the job submission logic will
	 * allocate memory with the GFP_KERNEL flag which can trigger memory
	 * reclaim and exposes a lock ordering issue.
	 */
	dma_fence_end_signalling(cookie);
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
	cookie = dma_fence_begin_signalling();

	/* Restart the schedulers */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	/* Re-enable job interrupts now that everything has been restarted. */
	job_write(pfdev, JOB_INT_MASK,
		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
		  GENMASK(NUM_JOB_SLOTS - 1, 0));

	dma_fence_end_signalling(cookie);
}

static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
						     *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	atomic_set(&pfdev->reset.pending, 1);
	panfrost_reset(pfdev, sched_job);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void panfrost_reset_work(struct work_struct *work)
{
	struct panfrost_device *pfdev;

	pfdev = container_of(work, struct panfrost_device, reset.work);
	panfrost_reset(pfdev, NULL);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};

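/* The job IRQ is threaded: the hard handler only masks job interrupts and
 * wakes the thread, which handles the pending events and unmasks.
 */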
static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	panfrost_job_handle_irqs(pfdev);
	job_write(pfdev, JOB_INT_MASK,
		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
		  GENMASK(NUM_JOB_SLOTS - 1, 0));
	return IRQ_HANDLED;
}

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);

	if (!status)
		return IRQ_NONE;

	job_write(pfdev, JOB_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	unsigned int nentries = 2;
	int ret, j;

	/* All GPUs have two entries per queue, but without jobchain
	 * disambiguation stopping the right job in the close path is tricky,
	 * so let's just advertise one entry in that case.
	 */
	if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
		nentries = 1;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
	spin_lock_init(&js->job_lock);

	js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (js->irq <= 0)
		return -ENODEV;

	ret = devm_request_threaded_irq(pfdev->dev, js->irq,
					panfrost_job_irq_handler,
					panfrost_job_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-job",
					pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
	if (!pfdev->reset.wq)
		return -ENOMEM;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     nentries, 0,
				     msecs_to_jiffies(JOB_TIMEOUT_MS),
				     pfdev->reset.wq,
				     NULL, "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	destroy_workqueue(pfdev->reset.wq);
	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);

	cancel_work_sync(&pfdev->reset.work);
	destroy_workqueue(pfdev->reset.wq);
}

int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);

	/* Kill in-flight jobs */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
		int j;

		for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
			struct panfrost_job *job = pfdev->jobs[i][j];
			u32 cmd;

			if (!job || job->base.entity != entity)
				continue;

			if (j == 1) {
				/* Try to cancel the job before it starts */
				job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
				/* Reset the job head so it doesn't get restarted if
				 * the job in the first slot failed.
				 */
				job->jc = 0;
			}

			if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
				cmd = panfrost_get_job_chain_flag(job) ?
				      JS_COMMAND_HARD_STOP_1 :
				      JS_COMMAND_HARD_STOP_0;
			} else {
				cmd = JS_COMMAND_HARD_STOP;
			}

			job_write(pfdev, JS_COMMAND(i), cmd);
		}
	}
	spin_unlock(&pfdev->js->job_lock);
}

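/* Return true if no job is queued on any slot's hardware run queue. */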
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}