// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <uapi/drm/v3d_drm.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type. If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	v3d_perfmon_stop(v3d, v3d->active_perfmon, false);

	trace_v3d_reset_end(dev);
}

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}
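
/* An editor's note on the version checks above (a reading of this code,
 * not new behavior): only pre-3.3 parts clear the FLUSH bit by hand,
 * which suggests the bit self-clears on 3.3 and 4.0 parts, and 4.1+
 * parts have no GCA unit to flush at all (compare the early return in
 * v3d_idle_gca()).
 */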

/* Invalidates the (read-only) L2C cache. This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);
}

/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion. So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;
	int core = 0;

	trace_v3d_cache_clean_begin(dev);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_TMUWCF), 100)) {
		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
	}

	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T clean\n");
	}

	mutex_unlock(&v3d->cache_clean_lock);

	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in. That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on the render job's unref_list). They're entirely
 * private to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    acquire_ctx);
			return ret;
		}
	}

	return 0;
}
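
/* A minimal sketch (an editor's illustration, not driver code) of the
 * submission order the helper above is designed for, as used by the
 * submit ioctls later in this file:
 *
 *	v3d_lookup_bos(dev, file_priv, job, args->bo_handles, count);
 *	v3d_lock_bo_reservations(job, &acquire_ctx);
 *	mutex_lock(&v3d->sched_lock);
 *	v3d_push_job(job);
 *	mutex_unlock(&v3d->sched_lock);
 *	v3d_attach_fences_and_unlock_reservation(file_priv, job,
 *						 &acquire_ctx,
 *						 args->out_sync,
 *						 job->done_fence);
 *	v3d_job_put(job);
 *
 * The reservations are held across the push so the job's done_fence can
 * be attached to every BO before anything else can take the locks.
 */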

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_job_free() time.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	job->bo = kvmalloc_array(job->bo_count,
				 sizeof(struct drm_gem_cma_object *),
				 GFP_KERNEL | __GFP_ZERO);
	if (!job->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)bo_handles,
			   job->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < job->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->bo[i] = bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	int i;

	for (i = 0; i < job->bo_count; i++) {
		if (job->bo[i])
			drm_gem_object_put(job->bo[i]);
	}
	kvfree(job->bo);

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	pm_runtime_mark_last_busy(job->v3d->drm.dev);
	pm_runtime_put_autosuspend(job->v3d->drm.dev);

	if (job->perfmon)
		v3d_perfmon_put(job->perfmon);

	kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

void v3d_job_cleanup(struct v3d_job *job)
{
	drm_sched_job_cleanup(&job->base);
	v3d_job_put(job);
}

void v3d_job_put(struct v3d_job *job)
{
	kref_put(&job->refcount, job->free);
}

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}
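
/* A userspace-side sketch (an editor's illustration using libdrm's
 * drmIoctl() and the DRM_IOCTL_V3D_WAIT_BO wrapper, not part of this
 * driver) of why the timeout write-back above matters:
 *
 *	struct drm_v3d_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = 10ULL * 1000 * 1000 * 1000,
 *	};
 *	int ret;
 *
 *	do {
 *		ret = drmIoctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 *	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
 *
 * Because the kernel writes the remaining time back into timeout_ns, a
 * restarted ioctl resumes the wait rather than rearming the full
 * timeout on every iteration.
 */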

static int
v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
		 u32 in_sync, u32 point)
{
	struct dma_fence *in_fence = NULL;
	int ret;

	ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
	if (ret == -EINVAL)
		return ret;

	return drm_sched_job_add_dependency(&job->base, in_fence);
}

static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync, enum v3d_queue queue)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	int ret;

	job->v3d = v3d;
	job->free = free;

	ret = pm_runtime_get_sync(v3d->drm.dev);
	if (ret < 0)
		return ret;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 v3d_priv);
	if (ret)
		goto fail;

	ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
	if (ret)
		goto fail_job;

	kref_init(&job->refcount);

	return 0;
fail_job:
	drm_sched_job_cleanup(&job->base);
fail:
	pm_runtime_put_autosuspend(v3d->drm.dev);
	return ret;
}

static void
v3d_push_job(struct v3d_job *job)
{
	drm_sched_job_arm(&job->base);

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);
}

static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_excl_fence(job->bo[i]->resv,
					job->done_fence);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	sync_out = drm_syncobj_find(file_priv, out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, done_fence);
		drm_syncobj_put(sync_out);
	}
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad != 0)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	render = kcalloc(1, sizeof(*render), GFP_KERNEL);
	if (!render)
		return -ENOMEM;

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
	if (ret) {
		kfree(render);
		return ret;
	}

	if (args->bcl_start != args->bcl_end) {
		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
		if (!bin) {
			v3d_job_cleanup(&render->base);
			return -ENOMEM;
		}

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl, V3D_BIN);
		if (ret) {
			v3d_job_cleanup(&render->base);
			kfree(bin);
			return ret;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
		if (!clean_job) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
		if (ret) {
			kfree(clean_job);
			clean_job = NULL;
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		v3d_push_job(&bin->base);

		ret = drm_sched_job_add_dependency(&render->base.base,
						   dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	v3d_push_job(&render->base);

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_sched_job_add_dependency(&clean_job->base,
						   render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		v3d_push_job(clean_job);
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 last_job->done_fence);

	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	if (bin)
		v3d_job_cleanup(&bin->base);
	v3d_job_cleanup(&render->base);
	if (clean_job)
		v3d_job_cleanup(clean_job);

	return ret;
}
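
/* For reference, the job graph built by the ioctl above (the bin job
 * exists only when a binner command list was supplied, the clean job
 * only when DRM_V3D_SUBMIT_CL_FLUSH_CACHE was requested):
 *
 *	bin --done_fence--> render --done_fence--> clean
 *
 * Each arrow is a drm_sched_job dependency, so a stage is not handed
 * to the hardware until the previous stage's done_fence has signaled.
 */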

/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_tfu_job *job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, V3D_TFU);
	if (ret) {
		kfree(job);
		return ret;
	}

	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		v3d_job_cleanup(&job->base);
		return -ENOMEM;
	}

	job->args = *args;

	spin_lock(&file_priv->table_lock);
	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = idr_find(&file_priv->object_idr,
			      args->bo_handles[job->base.bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  job->base.bo_count,
				  args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->base.bo[job->base.bo_count] = bo;
	}
	spin_unlock(&file_priv->table_lock);

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail:
	v3d_job_cleanup(&job->base);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_csd_job *job;
	struct v3d_job *clean_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (!v3d_has_csd(v3d)) {
		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, V3D_CSD);
	if (ret) {
		kfree(job);
		return ret;
	}

	clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
	if (!clean_job) {
		v3d_job_cleanup(&job->base);
		return -ENOMEM;
	}

	ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
	if (ret) {
		v3d_job_cleanup(&job->base);
		kfree(clean_job);
		return ret;
	}

	job->args = *args;

	ret = v3d_lookup_bos(dev, file_priv, clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);

	ret = drm_sched_job_add_dependency(&clean_job->base,
					   dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	v3d_push_job(clean_job);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_cleanup(&job->base);
	v3d_job_cleanup(clean_job);

	return ret;
}

int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);
	mutex_init(&v3d->cache_clean_lock);

	/* Note: We don't allocate address 0. Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
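
	/* Sizing note (editor's arithmetic): a 4MB page table of 32-bit
	 * entries holds (4096 * 1024) / 4 = ~1M PTEs, i.e. a 4GB GPU
	 * virtual address space at 4KB pages, minus the one page
	 * reserved at address 0 above.
	 */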

	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);
}