blob: 3d6e3b7a13e26fe2b9a04ad4f6a30dcd89438f4e [file] [log] [blame]
Rob Clark7198e6b2013-07-19 12:59:32 -04001/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_gpu.h"
19#include "msm_gem.h"
Rob Clark871d8122013-11-16 12:56:06 -050020#include "msm_mmu.h"
Rob Clarkfde5de62016-03-15 15:35:08 -040021#include "msm_fence.h"
Rob Clark7198e6b2013-07-19 12:59:32 -040022
23
24/*
25 * Power Management:
26 */
27
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
/* Register a bus-scaling client using the table provided by the platform
 * (downstream/legacy kernels only).  No-op if no table was provided.
 */
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

/* Unregister the bus-scaling client (if one was registered) and clear the
 * handle so a later bs_set() becomes a no-op.
 */
static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

/* Vote for bus bandwidth level 'idx' (0 == release the vote). */
static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
/* Upstream kernels have no msm_bus_scale API: stub everything out. */
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
58
59static int enable_pwrrail(struct msm_gpu *gpu)
60{
61 struct drm_device *dev = gpu->dev;
62 int ret = 0;
63
64 if (gpu->gpu_reg) {
65 ret = regulator_enable(gpu->gpu_reg);
66 if (ret) {
67 dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
68 return ret;
69 }
70 }
71
72 if (gpu->gpu_cx) {
73 ret = regulator_enable(gpu->gpu_cx);
74 if (ret) {
75 dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
76 return ret;
77 }
78 }
79
80 return 0;
81}
82
83static int disable_pwrrail(struct msm_gpu *gpu)
84{
85 if (gpu->gpu_cx)
86 regulator_disable(gpu->gpu_cx);
87 if (gpu->gpu_reg)
88 regulator_disable(gpu->gpu_reg);
89 return 0;
90}
91
92static int enable_clk(struct msm_gpu *gpu)
93{
Rob Clark7198e6b2013-07-19 12:59:32 -040094 int i;
95
Jordan Crouse89d777a2016-11-28 12:28:31 -070096 if (gpu->grp_clks[0] && gpu->fast_rate)
97 clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
98
99 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
100 if (gpu->grp_clks[i])
Rob Clark7198e6b2013-07-19 12:59:32 -0400101 clk_prepare(gpu->grp_clks[i]);
Rob Clark7198e6b2013-07-19 12:59:32 -0400102
Jordan Crouse89d777a2016-11-28 12:28:31 -0700103 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
Rob Clark7198e6b2013-07-19 12:59:32 -0400104 if (gpu->grp_clks[i])
105 clk_enable(gpu->grp_clks[i]);
106
107 return 0;
108}
109
110static int disable_clk(struct msm_gpu *gpu)
111{
Rob Clark7198e6b2013-07-19 12:59:32 -0400112 int i;
113
Jordan Crouse89d777a2016-11-28 12:28:31 -0700114 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
115 if (gpu->grp_clks[i])
Rob Clark7198e6b2013-07-19 12:59:32 -0400116 clk_disable(gpu->grp_clks[i]);
Rob Clark7198e6b2013-07-19 12:59:32 -0400117
Jordan Crouse89d777a2016-11-28 12:28:31 -0700118 for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
Rob Clark7198e6b2013-07-19 12:59:32 -0400119 if (gpu->grp_clks[i])
120 clk_unprepare(gpu->grp_clks[i]);
121
Jordan Crouse89d777a2016-11-28 12:28:31 -0700122 if (gpu->grp_clks[0] && gpu->slow_rate)
123 clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
124
Rob Clark7198e6b2013-07-19 12:59:32 -0400125 return 0;
126}
127
/* Turn on the AXI/bus path: the ebi1 bus clock (if present) plus a
 * bus-scaling vote at the configured frequency level.  Always returns 0.
 */
static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}
136
/* Turn off the AXI/bus path: stop the ebi1 bus clock and drop the
 * bus-scaling vote (bs_set(.., 0)).  Always returns 0.
 */
static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}
145
/* Power up the GPU.  Refcounted via active_cnt: only the 0->1 transition
 * actually touches hardware.  Caller must hold dev->struct_mutex, which
 * is what serializes active_cnt.
 */
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* already powered: just take another reference */
	if (gpu->active_cnt++ > 0)
		return 0;

	/* sanity: counter must be positive after the increment above */
	if (WARN_ON(gpu->active_cnt <= 0))
		return -EINVAL;

	/* power-up order: rails first, then core clocks, then the bus */
	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	return 0;
}
175
/* Power down the GPU.  Drops one active_cnt reference; only the 1->0
 * transition touches hardware, in the reverse order of resume (bus,
 * clocks, rails).  Caller must hold dev->struct_mutex.
 */
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* other users still hold a reference: keep hardware on */
	if (--gpu->active_cnt > 0)
		return 0;

	/* sanity: more suspends than resumes would go negative */
	if (WARN_ON(gpu->active_cnt < 0))
		return -EINVAL;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}
205
206/*
Rob Clark37d77c32014-01-11 16:25:08 -0500207 * Inactivity detection (for suspend):
208 */
209
/* Workqueue body for the inactivity timer: if the GPU is still idle once
 * we can take struct_mutex, drop the bus and core clocks (but not the
 * power rails) and mark the GPU inactive.
 */
static void inactive_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
	struct drm_device *dev = gpu->dev;

	/* cheap unlocked early-out; rechecked under the mutex below */
	if (gpu->inactive)
		return;

	DBG("%s: inactive!\n", gpu->name);
	mutex_lock(&dev->struct_mutex);
	/* re-check under the lock: a submit may have raced in meanwhile */
	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
		disable_axi(gpu);
		disable_clk(gpu);
		gpu->inactive = true;
	}
	mutex_unlock(&dev->struct_mutex);
}
227
/* Inactivity timer callback (timer/atomic context): we cannot take
 * struct_mutex or touch clocks here, so punt to inactive_worker on the
 * driver workqueue.
 */
static void inactive_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct msm_drm_private *priv = gpu->dev->dev_private;

	queue_work(priv->wq, &gpu->inactive_work);
}
235
/* cancel inactive timer and make sure we are awake: */
static void inactive_cancel(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	del_timer(&gpu->inactive_timer);
	/* if inactive_worker already powered us down, undo that (clocks and
	 * bus only -- the rails were left on) in enable order: clk then axi
	 */
	if (gpu->inactive) {
		enable_clk(gpu);
		enable_axi(gpu);
		gpu->inactive = false;
	}
}
247
/* (Re)arm the inactivity timer; called when the GPU goes idle so that
 * inactive_worker can power it down after DRM_MSM_INACTIVE_JIFFIES.
 */
static void inactive_start(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->inactive_timer,
			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
}
254
255/*
Rob Clarkbd6f82d2013-08-24 14:20:38 -0400256 * Hangcheck detection for locked gpu:
257 */
258
Rob Clarkb6295f92016-03-15 18:26:28 -0400259static void retire_submits(struct msm_gpu *gpu);
Rob Clark1a370be2015-06-07 13:46:04 -0400260
/* Workqueue body for GPU hang recovery (queued by hangcheck_handler):
 * signal the hung submit's fence as an error, name the offending task,
 * reset the GPU and replay the not-yet-retired submits.
 */
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	/* fence+1 is the submit that hung; force-complete it so waiters
	 * don't block forever
	 */
	msm_update_fence(gpu->fctx, fence + 1);

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
	/* find the hung submit and report which task queued it */
	list_for_each_entry(submit, &gpu->submit_list, node) {
		if (submit->fence->seqno == (fence + 1)) {
			struct task_struct *task;

			/* pid_task() requires RCU read-side protection */
			rcu_read_lock();
			task = pid_task(submit->pid, PIDTYPE_PID);
			if (task) {
				dev_err(dev->dev, "%s: offending task: %s\n",
						gpu->name, task->comm);
			}
			rcu_read_unlock();
			break;
		}
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		/* make sure clocks are on before poking the hardware */
		inactive_cancel(gpu);
		gpu->funcs->recover(gpu);

		/* replay the remaining submits after the one that hung: */
		list_for_each_entry(submit, &gpu->submit_list, node) {
			gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}
305
/* (Re)arm the hangcheck timer to fire DRM_MSM_HANGCHECK_JIFFIES from now;
 * called on submit and from hangcheck_handler while work is pending.
 */
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
312
/* Hangcheck timer callback: compare the hardware's last completed fence
 * against the value seen on the previous tick.  No progress while work is
 * still outstanding means the GPU is hung -> queue recover_worker.
 */
static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	if (fence != gpu->hangcheck_fence) {
		/* some progress has been made.. ya! */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->fctx->last_fence) {
		/* no progress and not done.. hung! */
		gpu->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
				gpu->name);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, gpu->fctx->last_fence);
		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}
342
343/*
Rob Clark70c70f02014-05-30 14:49:43 -0400344 * Performance Counters:
345 */
346
347/* called under perf_lock */
348static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
349{
350 uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
351 int i, n = min(ncntrs, gpu->num_perfcntrs);
352
353 /* read current values: */
354 for (i = 0; i < gpu->num_perfcntrs; i++)
355 current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
356
357 /* update cntrs: */
358 for (i = 0; i < n; i++)
359 cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
360
361 /* save current values: */
362 for (i = 0; i < gpu->num_perfcntrs; i++)
363 gpu->last_cntrs[i] = current_cntrs[i];
364
365 return n;
366}
367
/* Accumulate software busy/total time counters.  Called on submit and
 * retire; no-op unless profiling was started via msm_gpu_perfcntr_start().
 */
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	/* irqsave: also called from the retire path kicked off by the irq */
	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	/* time since last sample counts as "active" only if the GPU was
	 * busy at the last sample point
	 */
	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
391
/* Begin a profiling session: zero the sw time accumulators, take an
 * initial activity/time sample, and re-baseline the hw counters (the
 * update_hw_cntrs(.., 0, NULL) call reads-and-saves without reporting).
 */
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
405
/* End a profiling session: update_sw_cntrs()/msm_gpu_perfcntr_sample()
 * become no-ops / -EINVAL.
 * NOTE(review): this store is not done under perf_lock, unlike every
 * other access to perfcntr_active -- confirm a torn/late final sample is
 * acceptable here.
 */
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
}
410
/* returns -errno or # of cntrs sampled */
/* Atomically read-and-reset the sw active/total time counters and sample
 * up to 'ncntrs' hw counter deltas into 'cntrs'.  Fails with -EINVAL if
 * profiling is not active (see msm_gpu_perfcntr_start/_stop).
 */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	/* sampling is destructive: the next sample starts from zero */
	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
437
438/*
Rob Clark7198e6b2013-07-19 12:59:32 -0400439 * Cmdstream submission/retirement:
440 */
441
/* Release one completed submit: drop the per-bo iova and GEM references
 * taken in msm_gpu_submit(), move the bo's back to the inactive list,
 * then free the submit (which also unlinks it from gpu->submit_list).
 */
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->id);
		drm_gem_object_unreference(&msm_obj->base);
	}

	msm_gem_submit_free(submit);
}
456
Rob Clarkb6295f92016-03-15 18:26:28 -0400457static void retire_submits(struct msm_gpu *gpu)
Rob Clark1a370be2015-06-07 13:46:04 -0400458{
459 struct drm_device *dev = gpu->dev;
460
461 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
462
463 while (!list_empty(&gpu->submit_list)) {
464 struct msm_gem_submit *submit;
465
466 submit = list_first_entry(&gpu->submit_list,
467 struct msm_gem_submit, node);
468
Chris Wilsonf54d1862016-10-25 13:00:45 +0100469 if (dma_fence_is_signaled(submit->fence)) {
Rob Clark7d12a272016-03-16 16:07:38 -0400470 retire_submit(gpu, submit);
Rob Clark1a370be2015-06-07 13:46:04 -0400471 } else {
472 break;
473 }
474 }
475}
476
/* Workqueue body for retirement (queued from irq via msm_gpu_retire()):
 * propagate the hw's last completed fence to the fence context, retire
 * finished submits, and arm the inactivity timer if the GPU went idle.
 */
static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->fctx, fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);

	if (!msm_gpu_active(gpu))
		inactive_start(gpu);
}
492
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	/* actual retirement needs struct_mutex, so defer to retire_worker */
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}
500
/* add bo's to gpu's ring, and kick gpu: */
/* Queue 'submit' for execution: wake the GPU if the inactivity path had
 * powered it down, pin and reference every bo for the lifetime of the
 * submit, hand the cmdstream to the backend, and arm hangcheck.
 * Caller must hold dev->struct_mutex.
 */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* make sure clocks/bus are on before touching hw */
	inactive_cancel(gpu);

	/* tracked until retire_submits() sees the fence signaled */
	list_add_tail(&submit->node, &gpu->submit_list);

	msm_rd_dump_submit(submit);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		/* attach the submit fence to each bo, read vs write access */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}
544
545/*
546 * Init/Cleanup:
547 */
548
/* Shared irq trampoline: dispatch to the per-chip irq handler. */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}
554
/* Clock names passed to devm_clk_get() in msm_gpu_init(); must have
 * exactly ARRAY_SIZE(gpu->grp_clks) entries (BUG_ON checks this).
 * Index 0 ("core_clk") is the clock whose rate is scaled by
 * enable_clk()/disable_clk().
 */
static const char *clk_names[] = {
		"core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
		"alt_mem_iface_clk",
};
559
/* One-time init of the common GPU state: fence context, work/timers,
 * mmio + irq, clocks, regulators, IOMMU address space and ringbuffer.
 * Resources are devm-/caller-managed: the 'fail' path only returns the
 * error; msm_gpu_cleanup() handles teardown of what was created.
 */
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	struct iommu_domain *iommu;
	int i, ret;

	/* clamp so update_hw_cntrs() can't overrun last_cntrs[] */
	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;
	gpu->inactive = true;    /* powered up lazily on first submit */
	gpu->fctx = msm_fence_context_alloc(drm, name);
	if (IS_ERR(gpu->fctx)) {
		ret = PTR_ERR(gpu->fctx);
		gpu->fctx = NULL;
		goto fail;
	}

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->inactive_work, inactive_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	INIT_LIST_HEAD(&gpu->submit_list);

	setup_timer(&gpu->inactive_timer, inactive_handler,
			(unsigned long)gpu);
	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks: (missing clocks are optional; NULL means absent) */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: (also optional) */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (iommu) {
		/* TODO 32b vs 64b address space.. */
		iommu->geometry.aperture_start = 0x1000;
		iommu->geometry.aperture_end = 0xffffffff;

		dev_info(drm->dev, "%s: using IOMMU\n", name);
		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
				iommu, "gpu");
		if (IS_ERR(gpu->aspace)) {
			ret = PTR_ERR(gpu->aspace);
			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
			gpu->aspace = NULL;
			/* the aspace did not take ownership; free it here */
			iommu_domain_free(iommu);
			goto fail;
		}

	} else {
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	}
	/* registered even with aspace == NULL (VRAM carveout case) */
	gpu->id = msm_register_address_space(drm, gpu->aspace);


	/* Create ringbuffer: */
	mutex_lock(&drm->struct_mutex);
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	bs_init(gpu);

	return 0;

fail:
	return ret;
}
688
/* Tear down what msm_gpu_init() created: bus-scaling client, ringbuffer
 * (and its pinned iova), address space and fence context.  mmio/irq/
 * clocks/regulators are devm-managed and released automatically.
 */
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	/* all submits should have been retired by now */
	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->aspace)
		msm_gem_address_space_destroy(gpu->aspace);

	if (gpu->fctx)
		msm_fence_context_free(gpu->fctx);
}