/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>

/*
 * Power Management:
 */

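/*
 * The devfreq callbacks below let the generic devfreq governor drive the
 * GPU core clock: target() applies a recommended OPP, get_dev_status()
 * reports busy vs. total time since the last sample, and get_cur_freq()
 * reads back the current rate.
 */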
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	ktime_t time;

	if (gpu->funcs->gpu_get_freq)
		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
	else
		status->current_frequency = clk_get_rate(gpu->core_clk);

	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

	if (gpu->funcs->gpu_get_freq)
		*freq = gpu->funcs->gpu_get_freq(gpu);
	else
		*freq = clk_get_rate(gpu->core_clk);

	return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

static void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need target support to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the table
	 * from OPP
	 */

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, "simple_ondemand", NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
	}

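	/*
	 * Keep devfreq suspended until the GPU is actually powered on;
	 * msm_gpu_resume_devfreq() restarts it from msm_gpu_pm_resume().
	 */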
	devfreq_suspend_device(gpu->devfreq.devfreq);
}

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}

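/*
 * Resume brings things up in dependency order: power rails first, then
 * clocks, then the AXI/bus clock, and finally devfreq sampling; suspend
 * tears the same pieces down in reverse.
 */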
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

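/*
 * Hardware re-init is deferred until first use after a resume (via the
 * needs_hw_init flag set above); the IRQ is masked across hw_init() so
 * a partially initialized GPU can't raise interrupts.
 */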
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

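/*
 * Crash state capture: when CONFIG_DEV_COREDUMP is enabled, a GPU hang
 * snapshots the registers and any readable buffer objects and exposes
 * them to userspace via devcoredump; otherwise capture compiles down to
 * a no-op stub (see the #else branch below).
 */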
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write-only objects */
	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store data for non-imported buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		ptr = msm_gem_get_vaddr_active(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}
out:
	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_cmds,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
			int idx = submit->cmd[i].idx;

			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
				submit->bos[idx].iova, submit->bos[idx].flags);
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

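/*
 * Each ring carries a CPU-side seqno for the last queued submit and a
 * GPU-written fence value for the last completed one; the hangcheck
 * timer declares a lockup when the fence stops advancing while submits
 * are still outstanding.
 */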
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_ATOMIC);

			/*
			 * So slightly annoying, in other paths like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
			mutex_lock(&dev->struct_mutex);
		}
		rcu_read_unlock();

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

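/*
 * Hardware counters are sampled as deltas against a shadow copy of the
 * last read, so callers get increments since the previous sample rather
 * than raw register values; the software counters track active vs.
 * total wall-clock time under perf_lock.
 */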
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

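/*
 * Retirement walks each ring's in-order submit list and frees anything
 * whose fence has signalled, dropping the bo references and runtime-PM
 * vote taken at submit time.
 */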
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0;
	int i;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
		do_div(clock, elapsed);
	}

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_put(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the submits starting with the highest priority ring */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, ring, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_and_pin_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}
792/*
793 * Init/Cleanup:
794 */
795
796static irqreturn_t irq_handler(int irq, void *data)
797{
798 struct msm_gpu *gpu = data;
799 return gpu->funcs->irq(gpu);
800}
801
Jordan Crouse98db8032017-03-07 10:02:56 -0700802static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
803{
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600804 int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
Jordan Crouse98db8032017-03-07 10:02:56 -0700805
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600806 if (ret < 1) {
Jordan Crouse98db8032017-03-07 10:02:56 -0700807 gpu->nr_clocks = 0;
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600808 return ret;
Jordan Crouse98db8032017-03-07 10:02:56 -0700809 }
810
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600811 gpu->nr_clocks = ret;
Jordan Crouse98db8032017-03-07 10:02:56 -0700812
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600813 gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
814 gpu->nr_clocks, "core");
Jordan Crouse98db8032017-03-07 10:02:56 -0700815
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600816 gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
817 gpu->nr_clocks, "rbbmtimer");
Jordan Crouse98db8032017-03-07 10:02:56 -0700818
819 return 0;
820}
Rob Clark7198e6b2013-07-19 12:59:32 -0400821
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
		struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
		if (!iommu)
			return NULL;

		iommu->geometry.aperture_start = va_start;
		iommu->geometry.aperture_end = va_end;

		DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

		aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
		if (IS_ERR(aspace))
			iommu_domain_free(iommu);
	} else {
		aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
			va_start, va_end);
	}

	if (IS_ERR(aspace)) {
		DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
			PTR_ERR(aspace));
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

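/*
 * msm_gpu_init() wires up everything common across GPU generations:
 * MMIO, IRQ, clocks, regulators, devfreq, the GPU address space, and a
 * single shared bo that backs each ring's memptrs (fences and stats).
 */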
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	msm_devfreq_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
			NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}