/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

#include <linux/string_helpers.h>

/*
 * Power Management:
 */

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

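/*
 * Two (optional) regulators: 'vdd' (gpu_reg) for the core rail and
 * 'vddcx' (gpu_cx) for the CX domain.  Rails come up before clocks on
 * resume and go down after clocks on suspend.
 */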
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

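/*
 * Bring clocks up in two passes: clk_prepare() everything first (it
 * may sleep), then clk_enable() everything (it must not).  disable_clk()
 * below unwinds with matching clk_disable()/clk_unprepare() passes.
 */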
static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	int i;

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}

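/*
 * Power up in rail -> clock -> bus order, and flag that the hardware
 * needs re-init; the actual re-init is deferred to msm_gpu_hw_init(),
 * called on the next submit.  Suspend (below) tears down in reverse.
 */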
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

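/*
 * (Re)initialize the hardware after a resume; the IRQ is kept disabled
 * across funcs->hw_init() so the interrupt handler cannot run while
 * the GPU is half-initialized.
 */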
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

/*
 * Hangcheck detection for locked gpu:
 */

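/*
 * Each ring has a GPU-written completed-fence value in its memptrs and
 * a CPU-side seqno for the last submitted fence.  The hangcheck timer
 * compares the two on each tick: if the completed fence stopped
 * advancing while work is still outstanding, the GPU is presumed hung
 * and recover_work is queued.
 */
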
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

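/*
 * Recovery: dump the submit that likely faulted (the one just past the
 * last completed fence), fast-forward each ring's fence to skip the
 * wedged submit, let the backend reset the hardware, then replay all
 * still-pending submits, highest-priority ring first.
 */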
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	int i;

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			char *cmd;

			/*
			 * So slightly annoying, in other paths like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			mutex_lock(&dev->struct_mutex);

			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, task->comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", task->comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}
		rcu_read_unlock();
	}

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

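/*
 * Hardware counters are sampled as deltas against the previous read;
 * the software counters track total vs. active (GPU busy) time between
 * samples.
 */
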
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

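/*
 * A submit holds a reference on each bo (and its iova) plus a
 * pm_runtime reference on the GPU; retiring the submit, once its fence
 * has signaled, drops them all again.
 */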
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_unreference(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the submits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

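/*
 * Called from the gem submit ioctl path with dev->struct_mutex held;
 * the pm_runtime reference taken here is dropped when the submit is
 * retired in retire_submit().
 */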
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}

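/*
 * The clock list comes from the "clock-names" DT property.  An
 * illustrative (not authoritative) fragment of a GPU node:
 *
 *	clocks = <&mmcc GPU_GX_GFX3D_CLK>,
 *		 <&mmcc GPU_AHB_CLK>,
 *		 <&mmcc GPU_RBBMTIMER_CLK>;
 *	clock-names = "core", "iface", "rbbmtimer";
 *
 * Only "core" and "rbbmtimer" (with or without the legacy "_clk"
 * suffix) are remembered individually below; the rest are only
 * bulk-prepared/enabled.
 */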
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;
	}

	gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
		GFP_KERNEL);
	if (!gpu->grp_clks)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		gpu->grp_clks[i] = get_clock(dev, name);

		/* Remember the key clocks that we need to control later */
		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
			gpu->core_clk = gpu->grp_clks[i];
		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
			gpu->rbbmtimer_clk = gpu->grp_clks[i];

		++i;
	}

	return 0;
}

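/*
 * The address space spans the GPU virtual range [va_start, va_end]
 * chosen by the backend.  Returning NULL (no IOMMU available) is not
 * an error; the caller falls back to the VRAM carveout instead.
 */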
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

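/*
 * Rough sketch of how a backend chains up to this function (names
 * follow the adreno backend; treat the values as illustrative):
 *
 *	struct msm_gpu_config config = {
 *		.ioname = "kgsl_3d0_reg_memory",
 *		.irqname = "kgsl_3d0_irq",
 *		.va_start = SZ_16M,
 *		.va_end = 0xffffffff,
 *		.nr_rings = nr_rings,
 *	};
 *
 *	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs,
 *			adreno_gpu->info->name, &config);
 */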
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	bs_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	/* Allocate one memptrs block per ring, written by the GPU: */
	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			dev_err(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
			NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}