/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>

/*
 * Power Management:
 */

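/*
 * devfreq callback: the governor requests a new core clock rate.  The
 * requested frequency is first rounded to a valid OPP, then applied via
 * the target-specific gpu_set_freq() hook if the GPU provides one, or
 * directly on the core clock otherwise.
 */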
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	ktime_t time;

	if (gpu->funcs->gpu_get_freq)
		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
	else
		status->current_frequency = clk_get_rate(gpu->core_clk);

	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

	if (gpu->funcs->gpu_get_freq)
		*freq = gpu->funcs->gpu_get_freq(gpu);
	else
		*freq = clk_get_rate(gpu->core_clk);

	return 0;
}

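/*
 * With polling_ms = 10, devfreq samples get_dev_status() (and therefore
 * gpu_busy()) every 10ms while the device is active, which is what drives
 * the simple_ondemand governor's scaling decisions.
 */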
static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

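/*
 * msm_devfreq_init() leaves the devfreq device suspended;
 * msm_gpu_pm_resume() brings it up via msm_gpu_resume_devfreq() once the
 * GPU is actually powered on.
 */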
static void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need target support to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the table
	 * from OPP
	 */

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, "simple_ondemand", NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
	}

	devfreq_suspend_device(gpu->devfreq.devfreq);
}

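/*
 * Both rails are optional: "vdd" (gpu_reg) and "vddcx" (gpu_cx) are only
 * enabled when the platform provides them, and they come up before any
 * clocks are switched on.
 */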
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}

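/*
 * Power-up order is rail -> clk -> axi, with devfreq resumed last;
 * suspend tears things down in the reverse order.
 */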
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

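/*
 * Crash state is handed to userspace through the devcoredump facility:
 * dev_coredumpm() registers the read/free callbacks below, and the dump
 * stays available under /sys/class/devcoredump until it is read or
 * times out.
 */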
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write-only objects */

	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store the data for buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ)) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			return;

		ptr = msm_gem_get_vaddr_active(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			return;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}

	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_bos; i++)
			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
				submit->bos[i].iova, submit->bos[i].flags);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

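/*
 * Each ring carries a monotonically increasing seqno, and memptrs->fence
 * is the seqno of the last submit the GPU completed on that ring;
 * update_fences() signals the fence of every submit up to that point.
 */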
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

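/*
 * Recovery: dump the offending submit while its bo's are still pinned,
 * capture the crash state, advance the fences past the hung submit,
 * reset the GPU via the target's recover() hook, and then replay the
 * submits that were still pending on each ring.
 */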
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_ATOMIC);
			/*
			 * Slightly annoying: in other paths, like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
			mutex_lock(&dev->struct_mutex);
		}
		rcu_read_unlock();

		if (comm && cmd) {
			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

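/*
 * Periodic hangcheck, re-armed from msm_gpu_submit(): if the completed
 * fence has not advanced since the last check while submits are still
 * outstanding, the ring is considered hung and recover_work is queued.
 */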
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

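/*
 * Software counters accumulate wall time vs. time the GPU had work
 * queued, sampled whenever activity changes; consumers can derive a
 * utilization ratio as activetime / totaltime.
 */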
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

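/*
 * Rough usage sketch (illustrative only; names like busy_pct are
 * hypothetical, not part of this file):
 *
 *	uint32_t active, total, cntrs[4];
 *	int n;
 *
 *	msm_gpu_perfcntr_start(gpu);
 *	...
 *	n = msm_gpu_perfcntr_sample(gpu, &active, &total,
 *			ARRAY_SIZE(cntrs), cntrs);
 *	if (n >= 0)
 *		busy_pct = active * 100 / total;
 *	msm_gpu_perfcntr_stop(gpu);
 */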
/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

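/*
 * Retirement undoes the per-bo bookkeeping done at submit time: each bo
 * moves back to the inactive list and drops its iova and object
 * references, then the runtime-PM reference taken in msm_gpu_submit()
 * is released.
 */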
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_put(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

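/*
 * Submission path: the caller holds struct_mutex and the submit already
 * carries its ring and fence, so this accounts the submit, pins its
 * bo's, and hands it to the target's submit() hook.
 */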
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

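/*
 * The clock list comes from the device tree (assumption: msm_clk_bulk_get()
 * enumerates the node's "clock-names" entries); "core" and "rbbmtimer"
 * are additionally looked up by name so they can be rate-controlled
 * individually.
 */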
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

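/*
 * A single IOMMU address space spanning [va_start, va_end] is shared by
 * all contexts for now (see the comment below).  Returning NULL (no
 * IOMMU) is not fatal: msm_gpu_init() falls back to the VRAM carveout.
 */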
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

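/*
 * msm_gpu_init() is called from the target (e.g. adreno) probe path with
 * an msm_gpu_config describing the register block, irq name, va range
 * and ring count; everything acquired here is either devm-managed or
 * released in the fail path / msm_gpu_cleanup().
 */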
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	msm_devfreq_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			dev_err(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_put_unlocked(gpu->memptrs_bo);
	}

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_put_unlocked(gpu->memptrs_bo);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
				NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}