// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <uapi/drm/v3d_drm.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type. If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

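/* Asks the GMP to stop issuing new AXI transactions and waits for
 * any outstanding reads/writes to drain, so that what follows (e.g.
 * a powerdown) can't be hit by in-flight bus traffic.
 */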
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

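/* Requests a safe shutdown of the GCA (which sits in front of V3D on
 * 3.x hardware and owns the L3 cache) and waits for the ack. V3D
 * 4.1+ has no GCA, so this is a no-op there.
 */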
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

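/* Resets V3D by pulsing the SW_INIT line in the GR bridge, used as a
 * fallback when no reset controller was found at probe time. The
 * register layout differs between bridge major revisions 2 and 7.
 */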
static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

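/* Resets the GPU, preferring the shared reset controller when one is
 * present and falling back to the GR bridge otherwise, then
 * reprograms the invariant HW state.
 */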
static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

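/* Full GPU reset, used to recover from a hang. Callers in the
 * scheduler's timeout handling are expected to serialize this with
 * v3d->reset_lock.
 */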
void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	v3d_perfmon_stop(v3d, v3d->active_perfmon, false);

	trace_v3d_reset_end(dev);
}

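/* Flushes the L3 cache in front of the GPU through the GCA, which
 * only exists on V3D 3.x. On 3.2 and older the FLUSH bit also has to
 * be cleared again by hand.
 */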
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2C cache. This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);
}

/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion. So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;
	int core = 0;

	trace_v3d_cache_clean_begin(dev);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_TMUWCF), 100)) {
		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
	}

	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T clean\n");
	}

	mutex_unlock(&v3d->cache_clean_lock);

	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

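/* Invalidates all of V3D's read-path caches before a job runs, so
 * the GPU can't see stale data left over from a previous job or from
 * before the CPU wrote the BOs.
 */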
void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in. That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on job->unref_list). They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_job_free() time.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	job->bo = kvmalloc_array(job->bo_count,
				 sizeof(struct drm_gem_cma_object *),
				 GFP_KERNEL | __GFP_ZERO);
	if (!job->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)bo_handles,
			   job->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < job->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->bo[i] = bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

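/* Common job release path, called through kref_put(): drops the BO,
 * fence, and perfmon references taken during setup, releases the
 * runtime PM reference held for the job, and frees the job struct.
 */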
static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	int i;

	for (i = 0; i < job->bo_count; i++) {
		if (job->bo[i])
			drm_gem_object_put(job->bo[i]);
	}
	kvfree(job->bo);

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	pm_runtime_mark_last_busy(job->v3d->drm.dev);
	pm_runtime_put_autosuspend(job->v3d->drm.dev);

	if (job->perfmon)
		v3d_perfmon_put(job->perfmon);

	kfree(job);
}

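/* Render jobs additionally own the BOs the kernel allocated on their
 * behalf (tile alloc/state and overflow memory); release those
 * before the common free.
 */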
static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

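/* Unwinds a job that was initialized with v3d_job_init() but is not
 * going to be pushed to the scheduler after all.
 */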
void v3d_job_cleanup(struct v3d_job *job)
{
	drm_sched_job_cleanup(&job->base);
	v3d_job_put(job);
}

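/* Drops the submitter's reference on the job; the job is freed
 * through its type-specific free callback once the scheduler's
 * reference is gone as well.
 */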
void v3d_job_put(struct v3d_job *job)
{
	kref_put(&job->refcount, job->free);
}

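/**
 * v3d_wait_bo_ioctl() - Waits for completion of the fences on a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Blocks, up to the user-supplied timeout, until all rendering
 * attached to the BO's reservation object has finished, updating
 * args->timeout_ns with the time remaining.
 */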
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffy/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}

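/* Resolves a syncobj (and optional timeline point) to a dma_fence
 * and records it as a dependency of the scheduler job. A zero
 * in_sync handle yields no fence, which the scheduler then ignores.
 */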
static int
v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
		 u32 in_sync, u32 point)
{
	struct dma_fence *in_fence = NULL;
	int ret;

	ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
	if (ret == -EINVAL)
		return ret;

	return drm_sched_job_add_dependency(&job->base, in_fence);
}

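/* Common initialization for every job type: takes a runtime PM
 * reference so the GPU stays powered for the job's lifetime, attaches
 * the job to the file's scheduler entity for the target queue,
 * resolves the user's in_sync dependency, and initializes the job's
 * refcount.
 */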
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync, enum v3d_queue queue)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	int ret;

	job->v3d = v3d;
	job->free = free;

	ret = pm_runtime_get_sync(v3d->drm.dev);
	if (ret < 0)
		return ret;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 v3d_priv);
	if (ret)
		goto fail;

	ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
	if (ret)
		goto fail_job;

	kref_init(&job->refcount);

	return 0;
fail_job:
	drm_sched_job_cleanup(&job->base);
fail:
	pm_runtime_put_autosuspend(v3d->drm.dev);
	return ret;
}

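/* Hands the job to the scheduler: arms the job (allocating its
 * fences), keeps a reference to the done fence for out-syncs and job
 * chaining, and takes a job reference on behalf of the scheduler,
 * which drops it on completion. Callers serialize push order with
 * v3d->sched_lock.
 */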
static void
v3d_push_job(struct v3d_job *job)
{
	drm_sched_job_arm(&job->base);

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);
}

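/* Publishes the job's done fence on every BO it references (so that
 * later submits and other drivers wait on it), drops the reservation
 * locks, and propagates the fence to the user's out-sync syncobj if
 * one was given.
 */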
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_excl_fence(job->bo[i]->resv,
					job->done_fence);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	sync_out = drm_syncobj_find(file_priv, out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, done_fence);
		drm_syncobj_put(sync_out);
	}
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad != 0)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	render = kcalloc(1, sizeof(*render), GFP_KERNEL);
	if (!render)
		return -ENOMEM;

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
	if (ret) {
		kfree(render);
		return ret;
	}

	if (args->bcl_start != args->bcl_end) {
		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
		if (!bin) {
			v3d_job_cleanup(&render->base);
			return -ENOMEM;
		}

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl, V3D_BIN);
		if (ret) {
			v3d_job_cleanup(&render->base);
			kfree(bin);
			return ret;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
		if (!clean_job) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
		if (ret) {
			kfree(clean_job);
			clean_job = NULL;
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		v3d_push_job(&bin->base);

		ret = drm_sched_job_add_dependency(&render->base.base,
						   dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	v3d_push_job(&render->base);

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_sched_job_add_dependency(&clean_job->base,
						   render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		v3d_push_job(clean_job);
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 last_job->done_fence);

	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	if (bin)
		v3d_job_cleanup(&bin->base);
	v3d_job_cleanup(&render->base);
	if (clean_job)
		v3d_job_cleanup(clean_job);

	return ret;
}

/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_tfu_job *job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, V3D_TFU);
	if (ret) {
		kfree(job);
		return ret;
	}

	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		v3d_job_cleanup(&job->base);
		return -ENOMEM;
	}

	job->args = *args;

	spin_lock(&file_priv->table_lock);
	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = idr_find(&file_priv->object_idr,
			      args->bo_handles[job->base.bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  job->base.bo_count,
				  args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->base.bo[job->base.bo_count] = bo;
	}
	spin_unlock(&file_priv->table_lock);

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail:
	v3d_job_cleanup(&job->base);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_csd_job *job;
	struct v3d_job *clean_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (!v3d_has_csd(v3d)) {
		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, V3D_CSD);
	if (ret) {
		kfree(job);
		return ret;
	}

	clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
	if (!clean_job) {
		v3d_job_cleanup(&job->base);
		return -ENOMEM;
	}

	ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
	if (ret) {
		v3d_job_cleanup(&job->base);
		kfree(clean_job);
		return ret;
	}

	job->args = *args;

	ret = v3d_lookup_bos(dev, file_priv, clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);

	ret = drm_sched_job_add_dependency(&clean_job->base,
					   dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	v3d_push_job(clean_job);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_cleanup(&job->base);
	v3d_job_cleanup(clean_job);

	return ret;
}

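/* One-time GEM/scheduler setup at driver load: per-queue fence
 * contexts, locks, the drm_mm managing the GPU's MMU address space,
 * the (CMA-backed) page table, and the scheduler instances.
 */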
int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);
	mutex_init(&v3d->cache_clean_lock);

	/* Note: We don't allocate address 0. Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

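/* Tears down what v3d_gem_init() set up. The scheduler is stopped
 * first so no jobs can still be running when the page table and
 * address space go away.
 */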
void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);
}