/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>

/*
 * Power Management:
 */

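/*
 * devfreq ->target callback: the governor hands us a requested frequency;
 * clamp it to a valid OPP from the device tree and program the core clock.
 */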
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	clk_set_rate(gpu->core_clk, *freq);
	dev_pm_opp_put(opp);

	return 0;
}

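/*
 * devfreq ->get_dev_status callback.  busy_time is derived from the GPU's
 * busy-cycle counter: dividing the cycle delta by the frequency in MHz
 * yields busy time in microseconds, the same unit as total_time (e.g. a
 * delta of 192,000,000 cycles at 192 MHz works out to 1,000,000 us).
 */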
static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	u64 cycles;
	u32 freq = ((u32) status->current_frequency) / 1000000;
	ktime_t time;

	status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
	gpu->funcs->gpu_busy(gpu, &cycles);

	status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;

	gpu->devfreq.busy_cycles = cycles;

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

	*freq = (unsigned long) clk_get_rate(gpu->core_clk);

	return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

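/*
 * Register with devfreq using the simple_ondemand governor.  Scaling is
 * only possible if the target provides a gpu_busy() callback to measure
 * utilization; otherwise devfreq is skipped entirely.
 */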
static void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need target support to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the table
	 * from OPP
	 */

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, "simple_ondemand", NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
	}
}

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

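/*
 * Clocks are prepared and enabled in two separate passes (and torn down in
 * the reverse order in disable_clk()) so that the sleepable prepare step
 * stays apart from the atomic enable step.
 */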
static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	int i;

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

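/*
 * Power up in dependency order: rails first, then core clocks, then the
 * AXI bus clock.  msm_gpu_pm_suspend() below unwinds in the reverse order.
 */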
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	if (gpu->devfreq.devfreq) {
		gpu->devfreq.busy_cycles = 0;
		gpu->devfreq.time = ktime_get();

		devfreq_resume_device(gpu->devfreq.devfreq);
	}

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	if (gpu->devfreq.devfreq)
		devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

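/*
 * (Re)initialize the hardware after resume or recovery.  The IRQ is
 * disabled around ->hw_init(), presumably so a half-initialized GPU
 * doesn't service interrupts mid-init.
 */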
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

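/*
 * Crash state handling: when a hang is detected, a snapshot of the GPU
 * state (plus the offending command stream, if any) is captured and
 * exposed to userspace via the devcoredump facility.
 */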
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write only objects */

	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store the data for buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ)) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			return;

		ptr = msm_gem_get_vaddr_active(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			return;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}

	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_bos; i++)
			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
				submit->bos[i].iova, submit->bos[i].flags);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

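/* Signal fences for all submits on @ring up to and including @fence. */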
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

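/*
 * Recovery sequence: identify the offending submit (the one after the last
 * completed fence), log and capture crash state, fast-forward the fences so
 * completed and hung submits can retire, then reset the GPU and replay the
 * submits still pending on each ring.
 */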
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_ATOMIC);

			/*
			 * So slightly annoying, in other paths like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
			mutex_lock(&dev->struct_mutex);
		}
		rcu_read_unlock();

		if (comm && cmd) {
			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

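/*
 * Timer callback, re-armed after each submit: if the completed fence hasn't
 * advanced since the last check and work is still outstanding, assume the
 * GPU is hung and kick off the recover worker.
 */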
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

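/*
 * Drop the references (bo, iova and pm_runtime) that were taken in
 * msm_gpu_submit() once the submit's fence has signaled.
 */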
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_put(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
			submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;
	}

	gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
		GFP_KERNEL);
	if (!gpu->grp_clks) {
		gpu->nr_clocks = 0;
		return -ENOMEM;
	}

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		gpu->grp_clks[i] = get_clock(dev, name);

		/* Remember the key clocks that we need to control later */
		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
			gpu->core_clk = gpu->grp_clks[i];
		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
			gpu->rbbmtimer_clk = gpu->grp_clks[i];

		++i;
	}

	return 0;
}

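/*
 * Returns NULL when no IOMMU is available (the caller then falls back to a
 * VRAM carveout), an ERR_PTR on failure, or the attached address space.
 */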
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

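/*
 * Common GPU init: maps registers, wires up the IRQ, acquires clocks and
 * regulators, sets up devfreq and the address space, and allocates one
 * shared memptrs buffer that is sliced per ringbuffer.
 */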
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	msm_devfreq_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			dev_err(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_put_unlocked(gpu->memptrs_bo);
	}

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_put_unlocked(gpu->memptrs_bo);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
			NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}