// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}
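
/*
 * A minimal sketch of how the PM helpers above are typically wired into
 * runtime PM (illustrative only; the real callbacks live in the adreno
 * device code, and dev_to_gpu() is assumed from msm_gpu.h):
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		return msm_gpu_pm_resume(dev_to_gpu(dev));
 *	}
 *
 * The pm_runtime_get_sync()/pm_runtime_put_*() calls on gpu->pdev->dev
 * used throughout this file funnel into callbacks of this shape.
 */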

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

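	/*
	 * Keep the GPU interrupt masked while funcs->hw_init() reprograms
	 * the hardware, so the IRQ handler never observes a partially
	 * initialized GPU (a descriptive note on the code below, not an
	 * authoritative statement of the original author's intent).
	 */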
	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
		dma_fence_signal(submit->fence);
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);
}
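
/*
 * A note on the walk above: submits are appended to ring->submits in
 * submission order and submit->seqno increases monotonically per ring
 * (see msm_gpu_submit() below), so stopping at the first seqno greater
 * than the completed fence signals exactly the finished submits and
 * nothing after them.
 */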

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}
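
/*
 * Once dev_coredumpm() has been called (see msm_gpu_crashstate_capture()
 * below), the devcoredump core exposes the dump to userspace, typically
 * as /sys/class/devcoredump/devcd<N>/data: reading that file ends up in
 * msm_gpu_devcoredump_read(), and writing to it discards the dump. The
 * exact sysfs layout is a property of the devcoredump framework rather
 * than of this driver.
 */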

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write-only objects */
	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store data for non-imported buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		msm_gem_lock(&obj->base);
		ptr = msm_gem_get_vaddr_active(&obj->base);
		msm_gem_unlock(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}
out:
	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	state->fault_info = gpu->fault_info;

	if (submit) {
		int i, nr = 0;

		/* count # of buffers to dump: */
		for (i = 0; i < submit->nr_bos; i++)
			if (should_dump(submit, i))
				nr++;
		/* always dump cmd bo's, but don't double count them: */
		for (i = 0; i < submit->nr_cmds; i++)
			if (!should_dump(submit, submit->cmd[i].idx))
				nr++;

		state->bos = kcalloc(nr,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; i < submit->nr_bos; i++) {
			if (should_dump(submit, i)) {
				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
					submit->bos[i].iova, submit->bos[i].flags);
			}
		}

		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
			int idx = submit->cmd[i].idx;

			if (!should_dump(submit, submit->cmd[i].idx)) {
				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
					submit->bos[idx].iova, submit->bos[idx].flags);
			}
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		/* Increment the fault counts */
		gpu->global_faults++;
		submit->queue->faults++;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		/* msm_rd_dump_submit() needs bo locked to dump: */
		for (i = 0; i < submit->nr_bos; i++)
			msm_gem_lock(&submit->bos[i].obj->base);

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}

		for (i = 0; i < submit->nr_bos; i++)
			msm_gem_unlock(&submit->bos[i].obj->base);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit);
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void fault_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&dev->struct_mutex);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		struct task_struct *task;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

	mutex_unlock(&dev->struct_mutex);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
			gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
			gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
			gpu->name, ring->seqno);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}
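
/*
 * Timer-driven state machine: each firing compares the completed fence
 * against the last observation. Progress rearms the timer (if work is
 * still pending), equality with ring->seqno means the ring went idle,
 * and no progress with work outstanding queues recover_work. The period
 * comes from priv->hangcheck_period, which is assumed to be seeded by
 * the core driver (msm_drv.c) with a default on the order of hundreds
 * of milliseconds.
 */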

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
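
/*
 * A minimal sketch of how a consumer might drive the perfcntr API
 * (illustrative only; the in-tree consumer is the msm_perf debugfs
 * code, and the counter count here is a made-up example):
 *
 *	uint32_t active, total, cntrs[4];
 *	int n;
 *
 *	msm_gpu_perfcntr_start(gpu);
 *	// ... let some workload run ...
 *	n = msm_gpu_perfcntr_sample(gpu, &active, &total, 4, cntrs);
 *	if (n >= 0)
 *		pr_info("busy %u/%u us, %d counters\n", active, total, n);
 *	msm_gpu_perfcntr_stop(gpu);
 *
 * Sampling resets activetime/totaltime, so each call returns the busy
 * time (in microseconds) accumulated since the previous sample.
 */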

/*
 * Cmdstream submission/retirement:
 */

static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0;
	unsigned long flags;
	int i;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);
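	/*
	 * ticks * 10000 / 192 == ticks * 52.083..., i.e. the ~52.083 ns
	 * period of one 19.2 MHz always-on counter tick, computed in
	 * integer math.
	 */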

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
		do_div(clock, elapsed);
	}

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		msm_gem_active_put(obj);
		msm_gem_unpin_iova_locked(obj, submit->aspace);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
				struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done. If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		struct drm_gem_object *drm_obj = &msm_obj->base;
		uint64_t iova;

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_shared_fence(drm_obj->resv, submit->fence);

		msm_gem_active_get(drm_obj, gpu);
	}

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	gpu->funcs->submit(gpu, submit);
	priv->lastctx = submit->queue->ctx;

	hangcheck_timer_reset(gpu);
}
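
/*
 * A sketch of the caller's side of msm_gpu_submit() (the real caller is
 * the GEM submit ioctl path; this condensed sequence is illustrative):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	msm_gpu_submit(gpu, submit);
 *	mutex_unlock(&dev->struct_mutex);
 *
 * Because the hardware can complete the submit at any point after
 * funcs->submit(), the extra reference taken via msm_gem_submit_get()
 * above is what keeps the submit alive until retire_submit() drops it.
 */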

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

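	/*
	 * devm_clk_bulk_get_all() returns the number of clocks found or a
	 * negative errno; "ret < 1" treats zero clocks like an error for
	 * the purpose of bailing out early, while still returning 0
	 * (success) to the caller in that case.
	 */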
	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
	struct msm_gem_address_space *aspace = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_address_space) {
		aspace = gpu->funcs->create_private_address_space(gpu);
		if (!IS_ERR(aspace))
			aspace->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(aspace))
		aspace = msm_gem_address_space_get(gpu->aspace);

	return aspace;
}

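/*
 * This is assumed to be called from the per-file-open path (each DRM
 * file gets its own context), so on targets with per-process pagetables
 * every process ends up with a private GPU address space, while all
 * others share gpu->aspace. A condensed, illustrative caller:
 *
 *	ctx->aspace = msm_gpu_create_private_address_space(gpu, current);
 */
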
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	INIT_LIST_HEAD(&gpu->active_list);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);
	kthread_init_work(&gpu->fault_work, fault_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
		msm_gem_address_space_put(gpu->aspace);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	msm_devfreq_cleanup(gpu);
}