/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *  + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
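
/*
 * Illustrative sketch only (all myxx_* names are hypothetical): a
 * generation-specific backend provides its own table of these hooks,
 * which msm_gpu_init() receives via the funcs argument:
 *
 *	static const struct msm_gpu_funcs myxx_gpu_funcs = {
 *		.get_param = myxx_get_param,
 *		.hw_init = myxx_hw_init,
 *		.pm_suspend = myxx_pm_suspend,
 *		.pm_resume = myxx_pm_resume,
 *		.submit = myxx_submit,
 *		.flush = myxx_flush,
 *		.irq = myxx_irq,
 *		.active_ring = myxx_active_ring,
 *		.recover = myxx_recover,
 *		.destroy = myxx_destroy,
 *	};
 *
 * See the a*xx_gpu.c files for the real adreno examples.
 */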

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * busy_cycles:
	 *
	 * Used by implementation of gpu->gpu_busy() to track the last
	 * busy counter value, for calculating elapsed busy cycles since
	 * last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle: */
	ktime_t idle_time;

	/**
	 * idle_freq:
	 *
	 * Shadow frequency used while the GPU is idle.  From the PoV of
	 * the devfreq governor, we are continuing to sample busyness and
	 * adjust frequency while the GPU is idle, but we use this shadow
	 * value as the GPU is actually clamped to minimum frequency while
	 * it is inactive.
	 */
	unsigned long idle_freq;
};
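
/*
 * Illustrative only (field names from the struct above): the devfreq glue
 * samples the gpu_busy() hook, which returns an accumulated busy-cycle
 * counter, and derives the delta for the elapsed sample period roughly as:
 *
 *	u64 cycles = gpu->funcs->gpu_busy(gpu);
 *
 *	busy_cycles = cycles - gpu->devfreq.busy_cycles;
 *	gpu->devfreq.busy_cycles = cycles;
 *
 * before converting cycles to busy time for the governor.
 */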

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock.
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;
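
	/*
	 * Illustrative note: the submit and retire paths are expected to
	 * adjust active_submits under active_lock, with the 0 -> 1
	 * transition triggering msm_devfreq_active() and the 1 -> 0
	 * transition msm_devfreq_idle() (see msm_gpu.c for the actual
	 * sequencing).
	 */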
158
Rob Clarkeeb75472017-02-10 15:36:33 -0500159 /* does gpu need hw_init? */
160 bool needs_hw_init;
Rob Clark37d77c32014-01-11 16:25:08 -0500161
Rob Clark48dc4242019-04-16 16:13:28 -0700162 /* number of GPU hangs (for all contexts) */
163 int global_faults;
164
Rob Clark7198e6b2013-07-19 12:59:32 -0400165 void __iomem *mmio;
166 int irq;
167
Rob Clark667ce332016-09-28 19:58:32 -0400168 struct msm_gem_address_space *aspace;
Rob Clark7198e6b2013-07-19 12:59:32 -0400169
170 /* Power Control: */
171 struct regulator *gpu_reg, *gpu_cx;
Jordan Crouse8e54eea2018-08-06 11:33:21 -0600172 struct clk_bulk_data *grp_clks;
Jordan Crouse98db8032017-03-07 10:02:56 -0700173 int nr_clocks;
174 struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
Jordan Crouse1babd702017-11-21 12:40:53 -0700175 uint32_t fast_rate;
Rob Clarkbd6f82d2013-08-24 14:20:38 -0400176
Rob Clark37d77c32014-01-11 16:25:08 -0500177 /* Hang and Inactivity Detection:
178 */
179#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
Rob Clarkeeb75472017-02-10 15:36:33 -0500180
Samuel Iglesias Gonsalvez1d2fa582021-06-07 12:44:41 +0200181#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
Rob Clarkbd6f82d2013-08-24 14:20:38 -0400182 struct timer_list hangcheck_timer;
Rob Clark7e688292020-10-19 14:10:51 -0700183
Rob Clarke25e92e2021-06-10 14:44:13 -0700184 /* Fault info for most recent iova fault: */
185 struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
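
/*
 * Illustrative only (myxx_* is hypothetical): this assumes the GPU platform
 * device's drvdata points at the embedded adreno_smmu (set up during
 * msm_gpu_init()), so callbacks that only get a struct device can do:
 *
 *	static int myxx_runtime_suspend(struct device *dev)
 *	{
 *		struct msm_gpu *gpu = dev_to_gpu(dev);
 *
 *		return gpu->funcs->pm_suspend(gpu);
 *	}
 */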

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
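
/*
 * Worked example (pure arithmetic from the macro above): with SZ_32K,
 * AXXX_CP_RB_CNTL_BUFSZ gets ilog2(32768 / 8) == 12, and with a 32 byte
 * block size AXXX_CP_RB_CNTL_BLKSZ gets ilog2(32 / 8) == 2, i.e. both
 * fields encode log2 of the size in 8 byte units.
 */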

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (ring->seqno > ring->memptrs->fence)
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter..  but msm_gpu base class
 * will handle sampling/displaying the counters.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
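
/*
 * Illustrative sketch only (register and selector names are made up): a
 * child class describes its counters with a table like the following and
 * points gpu->perfcntrs / gpu->num_perfcntrs at it, after which the base
 * class handles sampling:
 *
 *	static const struct msm_gpu_perfcntr myxx_perfcntrs[] = {
 *		{ REG_MYXX_PERFCNT0_SEL, REG_MYXX_PERFCNT0_VAL,
 *			MYXX_SEL_ALU_ACTIVE, "alu_active" },
 *	};
 */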

struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 prio;
	int faults;
	struct msm_file_private *ctx;
	struct list_head node;
	struct kref ref;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
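
/*
 * Note: the reg arguments to these accessors are 32-bit word indices rather
 * than byte offsets, hence the (reg << 2) when forming the mmio address.
 */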

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned. We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
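
/*
 * Illustrative only (register names hypothetical): a 64-bit counter exposed
 * as a lo/hi pair would be sampled with:
 *
 *	u64 cycles = gpu_read64(gpu, REG_MYXX_PERFCNT_LO, REG_MYXX_PERFCNT_HI);
 */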

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->dev->struct_mutex);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->dev->struct_mutex);
}
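
/*
 * Illustrative note: recovery captures a state via gpu->funcs->gpu_state_get()
 * and parks it in gpu->crashstate; a consumer (e.g. the devcoredump read
 * path) then borrows it with a reference:
 *
 *	struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);
 *
 *	if (state) {
 *		... format the state ...
 *		msm_gpu_crashstate_put(gpu);
 *	}
 */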

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
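
/*
 * Illustrative only: kernel-owned allocations (ringbuffers, memptrs, etc.)
 * wrap their flags with check_apriv() so the privileged mapping is only
 * requested on hardware that supports it, along the lines of:
 *
 *	msm_gem_kernel_new(gpu->dev, size, check_apriv(gpu, MSM_BO_WC),
 *			gpu->aspace, &bo, &iova);
 */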

#endif /* __MSM_GPU_H__ */