// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;
        u64 fence_context;
        u64 emit_seqno;
};

struct panfrost_job_slot {
        struct panfrost_queue_state queue[NUM_JOB_SLOTS];
        spinlock_t job_lock;
        int irq;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
        return container_of(sched_job, struct panfrost_job, base);
}

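/*
 * Hardware "done" fence returned by panfrost_job_run(). Each job slot has
 * its own dma-fence context and a monotonically increasing seqno, so the
 * fences of a given slot form one ordered timeline.
 */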
struct panfrost_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* panfrost seqno for signaled() test */
        u64 seqno;
        int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
        return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
        return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
        struct panfrost_fence *f = to_panfrost_fence(fence);

        switch (f->queue) {
        case 0:
                return "panfrost-js-0";
        case 1:
                return "panfrost-js-1";
        case 2:
                return "panfrost-js-2";
        default:
                return NULL;
        }
}

static const struct dma_fence_ops panfrost_fence_ops = {
        .get_driver_name = panfrost_fence_get_driver_name,
        .get_timeline_name = panfrost_fence_get_timeline_name,
};

static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
        struct panfrost_fence *fence;
        struct panfrost_job_slot *js = pfdev->js;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->dev = pfdev->ddev;
        fence->queue = js_num;
        fence->seqno = ++js->queue[js_num].emit_seqno;
        dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
                       js->queue[js_num].fence_context, fence->seqno);

        return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
        /* JS0: fragment jobs.
         * JS1: vertex/tiler jobs.
         * JS2: compute jobs.
         */
        if (job->requirements & PANFROST_JD_REQ_FS)
                return 0;

        /* Not exposed to userspace yet */
#if 0
        if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
                if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
                    (job->pfdev->features.nr_core_groups == 2))
                        return 2;
                if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
                        return 2;
        }
#endif
        return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
                                        u32 requirements,
                                        int js)
{
        u64 affinity;

        /*
         * Use all cores for now.
         * Eventually we may need to support tiler-only jobs and h/w with
         * multiple (2) coherent core groups.
         */
        affinity = pfdev->features.shader_present;

        job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
        job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

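/*
 * Program a job chain into the "next" registers of a job slot and kick it
 * off: take devfreq-busy and runtime PM references, bind the file's MMU
 * address space, write the job chain head, affinity and config registers,
 * then issue JS_COMMAND_START. Called from the scheduler's run_job path.
 */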
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
        struct panfrost_device *pfdev = job->pfdev;
        u32 cfg;
        u64 jc_head = job->jc;
        int ret;

        panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

        ret = pm_runtime_get_sync(pfdev->dev);
        if (ret < 0)
                return;

        if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
                return;

        cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

        job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
        job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

        panfrost_job_write_affinity(pfdev, job->requirements, js);

        /*
         * Start MMU, medium priority, cache clean/flush on end, clean/flush
         * on start.
         */
        cfg |= JS_CONFIG_THREAD_PRI(8) |
                JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
                JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
                cfg |= JS_CONFIG_START_MMU;

        job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

        /* GO ! */
        dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
                job, js, jc_head);

        job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}

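/*
 * Collect the implicit-sync fences of every BO referenced by the job into
 * the job's dependency xarray; the scheduler drains this array before
 * running the job.
 */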
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct xarray *deps)
{
        int i, ret;

        for (i = 0; i < bo_count; i++) {
                /* panfrost always uses write mode in its current uapi */
                ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
                if (ret)
                        return ret;
        }

        return 0;
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct dma_fence *fence)
{
        int i;

        for (i = 0; i < bo_count; i++)
                dma_resv_add_excl_fence(bos[i]->resv, fence);
}

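/*
 * Queue a job on the DRM GPU scheduler: lock all BO reservations, init the
 * scheduler job, gather implicit dependencies, push the job to the entity
 * of the selected slot, then attach the render-done fence to the BOs as
 * their new exclusive fence. Presumably reached from the submit ioctl path
 * in panfrost_drv.c.
 */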
int panfrost_job_push(struct panfrost_job *job)
{
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        ret = drm_gem_lock_reservations(job->bos, job->bo_count,
                                        &acquire_ctx);
        if (ret)
                return ret;

        mutex_lock(&pfdev->sched_lock);

        ret = drm_sched_job_init(&job->base, entity, NULL);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

        ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
                                             &job->deps);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        kref_get(&job->refcount); /* put by scheduler job completion */

        drm_sched_entity_push_job(&job->base, entity);

        mutex_unlock(&pfdev->sched_lock);

        panfrost_attach_object_fences(job->bos, job->bo_count,
                                      job->render_done_fence);

unlock:
        drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

        return ret;
}

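/*
 * Final release of a panfrost_job once its refcount drops to zero: drop any
 * remaining dependency fences, the done/render-done fences, the GEM mapping
 * and BO references, and finally the job itself.
 */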
static void panfrost_job_cleanup(struct kref *ref)
{
        struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                refcount);
        struct dma_fence *fence;
        unsigned long index;
        unsigned int i;

        xa_for_each(&job->deps, index, fence) {
                dma_fence_put(fence);
        }
        xa_destroy(&job->deps);

        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);

        if (job->mappings) {
                for (i = 0; i < job->bo_count; i++) {
                        if (!job->mappings[i])
                                break;

                        atomic_dec(&job->mappings[i]->obj->gpu_usecount);
                        panfrost_gem_mapping_put(job->mappings[i]);
                }
                kvfree(job->mappings);
        }

        if (job->bos) {
                for (i = 0; i < job->bo_count; i++)
                        drm_gem_object_put(job->bos[i]);

                kvfree(job->bos);
        }

        kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
        kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        drm_sched_job_cleanup(sched_job);

        panfrost_job_put(job);
}

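/*
 * Scheduler .dependency hook: hand back the collected dependency fences one
 * at a time until the xarray is drained, then return NULL so the scheduler
 * knows the job is ready to run.
 */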
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
                                                 struct drm_sched_entity *s_entity)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        if (!xa_empty(&job->deps))
                return xa_erase(&job->deps, job->last_dep++);

        return NULL;
}

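/*
 * Scheduler .run_job hook: create the hardware "done" fence for the slot,
 * remember the job in pfdev->jobs[] and submit it to the hardware. The
 * returned fence is signalled from the job interrupt handler.
 */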
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct dma_fence *fence = NULL;

        if (unlikely(job->base.s_fence->finished.error))
                return NULL;

        pfdev->jobs[slot] = job;

        fence = panfrost_fence_create(pfdev, slot);
        if (IS_ERR(fence))
                return fence;

        if (job->done_fence)
                dma_fence_put(job->done_fence);
        job->done_fence = dma_fence_get(fence);

        panfrost_job_hw_submit(job, slot);

        return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
        int j;
        u32 irq_mask = 0;

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                irq_mask |= MK_JS_MASK(j);
        }

        job_write(pfdev, JOB_INT_CLEAR, irq_mask);
        job_write(pfdev, JOB_INT_MASK, irq_mask);
}

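/*
 * Common GPU reset path, used by both the timeout handler and the deferred
 * reset work: stop the schedulers, mask and synchronize the job interrupt,
 * evict in-flight jobs, reset the GPU, then resubmit pending jobs and
 * restart the schedulers.
 */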
static void panfrost_reset(struct panfrost_device *pfdev,
                           struct drm_sched_job *bad)
{
        unsigned int i;
        bool cookie;

        if (!atomic_read(&pfdev->reset.pending))
                return;

        /* Stop the schedulers.
         *
         * FIXME: We temporarily get out of the dma_fence_signalling section
         * because the cleanup path generates lockdep splats when taking locks
         * to release job resources. We should rework the code to follow this
         * pattern:
         *
         *      try_lock
         *      if (locked)
         *              release
         *      else
         *              schedule_work_to_release_later
         */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_stop(&pfdev->js->queue[i].sched, bad);

        cookie = dma_fence_begin_signalling();

        if (bad)
                drm_sched_increase_karma(bad);

        /* Mask job interrupts and synchronize to make sure we won't be
         * interrupted during our reset.
         */
        job_write(pfdev, JOB_INT_MASK, 0);
        synchronize_irq(pfdev->js->irq);

        /* Schedulers are stopped and interrupts are masked+flushed, we don't
         * need to protect the 'evict unfinished jobs' lock with the job_lock.
         */
        spin_lock(&pfdev->js->job_lock);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                if (pfdev->jobs[i]) {
                        pm_runtime_put_noidle(pfdev->dev);
                        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
                        pfdev->jobs[i] = NULL;
                }
        }
        spin_unlock(&pfdev->js->job_lock);

        panfrost_device_reset(pfdev);

        /* GPU has been reset, we can clear the reset pending bit. */
        atomic_set(&pfdev->reset.pending, 0);

        /* Now resubmit jobs that were previously queued but didn't have a
         * chance to finish.
         * FIXME: We temporarily get out of the DMA fence signalling section
         * while resubmitting jobs because the job submission logic will
         * allocate memory with the GFP_KERNEL flag which can trigger memory
         * reclaim and exposes a lock ordering issue.
         */
        dma_fence_end_signalling(cookie);
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
        cookie = dma_fence_begin_signalling();

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_start(&pfdev->js->queue[i].sched, true);

        dma_fence_end_signalling(cookie);
}

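/*
 * Scheduler timeout handler. A signalled done fence means the job actually
 * completed and the timeout is spurious; otherwise dump the slot state,
 * flag a reset as pending and perform it synchronously.
 */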
static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
                                                     *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int js = panfrost_job_get_slot(job);

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(job->done_fence))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
                job_read(pfdev, JS_STATUS(js)),
                job_read(pfdev, JS_HEAD_LO(js)),
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);

        atomic_set(&pfdev->reset.pending, 1);
        panfrost_reset(pfdev, sched_job);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
        .dependency = panfrost_job_dependency,
        .run_job = panfrost_job_run,
        .timedout_job = panfrost_job_timedout,
        .free_job = panfrost_job_free
};

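/*
 * Process the job interrupt status: for each slot that raised an interrupt,
 * report faults to the scheduler and, on completion, release the MMU
 * address space, signal the done fence and drop the runtime PM reference.
 * Called with js->job_lock held from the threaded IRQ handler.
 */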
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
        int j;

        dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

        for (j = 0; status; j++) {
                u32 mask = MK_JS_MASK(j);

                if (!(status & mask))
                        continue;

                job_write(pfdev, JOB_INT_CLEAR, mask);

                if (status & JOB_INT_MASK_ERR(j)) {
                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

                        dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
                                j,
                                panfrost_exception_name(job_read(pfdev, JS_STATUS(j))),
                                job_read(pfdev, JS_HEAD_LO(j)),
                                job_read(pfdev, JS_TAIL_LO(j)));
                        drm_sched_fault(&pfdev->js->queue[j].sched);
                }

                if (status & JOB_INT_MASK_DONE(j)) {
                        struct panfrost_job *job;

                        job = pfdev->jobs[j];
                        /* The only reason this job could be NULL is if the
                         * job IRQ handler is called just after the
                         * in-flight job eviction in the reset path, and
                         * this shouldn't happen because the job IRQ has
                         * been masked and synchronized when this eviction
                         * happens.
                         */
                        WARN_ON(!job);
                        if (job) {
                                pfdev->jobs[j] = NULL;

                                panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
                                panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

                                dma_fence_signal_locked(job->done_fence);
                                pm_runtime_put_autosuspend(pfdev->dev);
                        }
                }

                status &= ~mask;
        }
}

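/*
 * The job interrupt is split in two: a hard handler that only masks the
 * interrupt, and a threaded handler that drains JOB_INT_RAWSTAT in a loop
 * and unmasks the interrupt once everything has been processed.
 */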
static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_RAWSTAT);

        while (status) {
                pm_runtime_mark_last_busy(pfdev->dev);

                spin_lock(&pfdev->js->job_lock);
                panfrost_job_handle_irq(pfdev, status);
                spin_unlock(&pfdev->js->job_lock);
                status = job_read(pfdev, JOB_INT_RAWSTAT);
        }

        job_write(pfdev, JOB_INT_MASK,
                  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
                  GENMASK(NUM_JOB_SLOTS - 1, 0));
        return IRQ_HANDLED;
}

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_STAT);

        if (!status)
                return IRQ_NONE;

        job_write(pfdev, JOB_INT_MASK, 0);
        return IRQ_WAKE_THREAD;
}

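/* Deferred reset handler, run from the ordered "panfrost-reset" workqueue. */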
static void panfrost_reset_work(struct work_struct *work)
{
        struct panfrost_device *pfdev = container_of(work,
                                                     struct panfrost_device,
                                                     reset.work);

        panfrost_reset(pfdev, NULL);
}

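/*
 * One-time job subsystem setup: request the threaded "job" interrupt,
 * allocate the ordered reset workqueue and create one DRM GPU scheduler per
 * job slot, each with a JOB_TIMEOUT_MS (500 ms) job timeout.
 */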
int panfrost_job_init(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js;
        int ret, j;

        INIT_WORK(&pfdev->reset.work, panfrost_reset_work);

        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
                return -ENOMEM;

        spin_lock_init(&js->job_lock);

        js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
        if (js->irq <= 0)
                return -ENODEV;

        ret = devm_request_threaded_irq(pfdev->dev, js->irq,
                                        panfrost_job_irq_handler,
                                        panfrost_job_irq_handler_thread,
                                        IRQF_SHARED, KBUILD_MODNAME "-job",
                                        pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
        }

        pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
        if (!pfdev->reset.wq)
                return -ENOMEM;

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                js->queue[j].fence_context = dma_fence_context_alloc(1);

                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     1, 0,
                                     msecs_to_jiffies(JOB_TIMEOUT_MS),
                                     pfdev->reset.wq,
                                     NULL, "pan_js");
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
                }
        }

        panfrost_job_enable_interrupts(pfdev);

        return 0;

err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);

        destroy_workqueue(pfdev->reset.wq);
        return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int j;

        job_write(pfdev, JOB_INT_MASK, 0);

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                drm_sched_fini(&js->queue[j].sched);

        cancel_work_sync(&pfdev->reset.work);
        destroy_workqueue(pfdev->reset.wq);
}

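/*
 * Per-file setup/teardown: each open DRM file gets one scheduler entity per
 * job slot, so submissions from different clients are queued independently.
 */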
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        struct panfrost_job_slot *js = pfdev->js;
        struct drm_gpu_scheduler *sched;
        int ret, i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                sched = &js->queue[i].sched;
                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
                                            DRM_SCHED_PRIORITY_NORMAL, &sched,
                                            1, NULL);
                if (WARN_ON(ret))
                        return ret;
        }
        return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* If there are any jobs in the HW queue, we're not idle */
                if (atomic_read(&js->queue[i].sched.hw_rq_count))
                        return false;
        }

        return true;
}