/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *  + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
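
/*
 * An illustrative sketch (not part of the driver): a GPU backend fills
 * in this vtable and hands it to msm_gpu_init().  The foo_* names below
 * are hypothetical; the real implementations live in the adreno_* files.
 *
 *	static const struct msm_gpu_funcs foo_gpu_funcs = {
 *		.get_param = foo_get_param,
 *		.hw_init = foo_hw_init,
 *		.pm_suspend = foo_pm_suspend,
 *		.pm_resume = foo_pm_resume,
 *		.submit = foo_submit,
 *		.flush = foo_flush,
 *		.irq = foo_irq,
 *		.active_ring = foo_active_ring,
 *		.recover = foo_recover,
 *		.destroy = foo_destroy,
 *	};
 */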

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * idle_freq:
	 *
	 * A PM QoS constraint to limit max freq while the GPU is idle.
	 */
	struct dev_pm_qos_request idle_freq;

	/**
	 * boost_freq:
	 *
	 * A PM QoS constraint to boost min freq for a period of time
	 * until the boost expires.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles:
	 *
	 * Used by the implementation of gpu->gpu_busy() to track the last
	 * busy counter value, for calculating elapsed busy cycles since
	 * the last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq constraint after the boost period
	 * has elapsed.
	 */
	struct msm_hrtimer_work boost_work;
};

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* number of GPU hangs (for all contexts) */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/** retire_event: notified when submits are retired: */
	wait_queue_head_t retire_event;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* Enable clamping to idle freq when inactive: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
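
/*
 * A minimal usage sketch: this works because the GPU device's drvdata is
 * set to &gpu->adreno_smmu during probe, so container_of() can walk back
 * to the enclosing msm_gpu:
 *
 *	struct msm_gpu *gpu = dev_to_gpu(&pdev->dev);
 */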

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
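
/*
 * Worked out, for reference: both sizes are apparently expressed to the
 * hardware in 8-byte units, as log2 values:
 *   BUFSZ = ilog2(SZ_32K / 8) = ilog2(4096) = 12
 *   BLKSZ = ilog2(32 / 8)     = ilog2(4)    = 2
 */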

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->seqno, ring->memptrs->fence))
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the
 * child class that actually enables the perf counter, but the msm_gpu
 * base class will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
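
/*
 * A hypothetical sketch of how a backend might describe one counter
 * (register names and select value made up for illustration): select_val
 * is written to select_reg to enable the counter, and the base class then
 * samples sample_reg:
 *
 *	static const struct msm_gpu_perfcntr foo_perfcntrs[] = {
 *		{ REG_FOO_PERFCTR_SEL, REG_FOO_PERFCTR_LO, 0x01, "alu-active" },
 *	};
 */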

/*
 * The number of priority levels provided by the drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
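
/*
 * For reference: assuming the drm_sched enum of this era, where
 * DRM_SCHED_PRIORITY_MIN == 0 and DRM_SCHED_PRIORITY_HIGH == 2, this
 * works out to 3 usable priority levels per ring.
 */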

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock:    synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid:      counter incremented each time a submitqueue is created,
 *                used to assign &msm_gpu_submitqueue.id
 * @aspace:       the per-process GPU address-space
 * @ref:          reference count
 * @seqno:        unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};
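
/*
 * A sketch of how the entities table above would be indexed, assuming a
 * (ring_nr * NR_SCHED_PRIORITIES) + sched_prio layout with entries
 * created lazily per (ring, priority) pair:
 *
 *	struct drm_sched_entity *entity =
 *		ctx->entities[(ring_nr * NR_SCHED_PRIORITIES) + sched_prio];
 */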

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
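
/*
 * A worked example of the mapping, assuming NR_SCHED_PRIORITIES == 3 and
 * a GPU with nr_rings == 4 (giving 12 userspace levels, 0 being highest):
 *
 *	userspace prio 0  -> ring_nr 0, sched_prio 2 (highest)
 *	userspace prio 2  -> ring_nr 0, sched_prio 0
 *	userspace prio 5  -> ring_nr 1, sched_prio 0
 *	userspace prio 12 -> -EINVAL (ring_nr 4 >= nr_rings)
 */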

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:         userspace id for the submitqueue, unique within the drm_file
 * @flags:      userspace flags for the submitqueue, specified at creation
 *              (currently unused)
 * @ring_nr:    the ringbuffer used by this submitqueue, which is determined
 *              by the submitqueue's priority
 * @faults:     the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *              checking)
 * @ctx:        the per-drm_file context associated with the submitqueue (ie.
 *              which set of pgtables the jobs submitted to this submitqueue
 *              use)
 * @node:       node in the context's list of submitqueues
 * @fence_idr:  maps fence-id to dma_fence for userspace visible fence
 *              seqno, protected by submitqueue lock
 * @lock:       submitqueue lock
 * @ref:        reference count
 * @entity:     the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here?  Two reasons: 1) many of the LO registers
	 * are not quad word aligned and 2) the GPU hardware designers have
	 * a bit of a history of putting registers where they fit, especially
	 * in spins.  The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really, is a readq() that much better?  Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first
	 * to trigger that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here?  Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
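
/*
 * Usage sketch (register names hypothetical): registers are addressed as
 * dword offsets, hence the (reg << 2) byte-offset conversion in the
 * accessors above:
 *
 *	gpu_write(gpu, REG_FOO_CTL, 1);
 *	u64 cycles = gpu_read64(gpu, REG_FOO_CNTR_LO, REG_FOO_CNTR_HI);
 */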

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
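
/*
 * Usage sketch: wrap BO flags at allocation time so privileged mappings
 * are only requested on hardware that supports them, along the lines of
 * (allocation helper shown for illustration):
 *
 *	msm_gem_kernel_new(gpu->dev, size, check_apriv(gpu, MSM_BO_WC),
 *			   gpu->aspace, &bo, &iova);
 */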

#endif /* __MSM_GPU_H__ */