/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/irq_work.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,

        DRM_SCHED_PRIORITY_COUNT,
        DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
        /**
         * @list:
         *
         * Used to append this struct to the list of entities in the runqueue
         * @rq under &drm_sched_rq.entities.
         *
         * Protected by &drm_sched_rq.lock of @rq.
         */
        struct list_head list;

        /**
         * @rq:
         *
         * Runqueue on which this entity is currently scheduled.
         *
         * FIXME: Locking is very unclear for this. Writers are protected by
         * @rq_lock, but readers are generally lockless and seem to just race
         * with not even a READ_ONCE.
         */
        struct drm_sched_rq *rq;

        /**
         * @sched_list:
         *
         * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
         * be scheduled on any scheduler on this list.
         *
         * This can be modified by calling drm_sched_entity_modify_sched().
         * Locking is entirely up to the driver, see the above function for more
         * details.
         *
         * This will be set to NULL if &num_sched_list equals 1 and @rq has been
         * set already.
         *
         * FIXME: This means priority changes through
         * drm_sched_entity_set_priority() will be lost henceforth in this case.
         */
        struct drm_gpu_scheduler **sched_list;

        /**
         * @num_sched_list:
         *
         * Number of drm_gpu_schedulers in the @sched_list.
         */
        unsigned int num_sched_list;

        /**
         * @priority:
         *
         * Priority of the entity. This can be modified by calling
         * drm_sched_entity_set_priority(). Protected by &rq_lock.
         */
        enum drm_sched_priority priority;

        /**
         * @rq_lock:
         *
         * Lock to modify the runqueue to which this entity belongs.
         */
        spinlock_t rq_lock;

        /**
         * @job_queue: the list of jobs of this entity.
         */
        struct spsc_queue job_queue;

        /**
         * @fence_seq:
         *
         * A linearly increasing seqno incremented with each new
         * &drm_sched_fence which is part of the entity.
         *
         * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
         * this doesn't need to be atomic.
         */
        atomic_t fence_seq;

        /**
         * @fence_context:
         *
         * A unique context for all the fences which belong to this entity. The
         * &drm_sched_fence.scheduled uses the fence_context but
         * &drm_sched_fence.finished uses fence_context + 1.
         */
        uint64_t fence_context;

        /**
         * @dependency:
         *
         * The dependency fence of the job which is on the top of the job queue.
         */
        struct dma_fence *dependency;

        /**
         * @cb:
         *
         * Callback for the dependency fence above.
         */
        struct dma_fence_cb cb;

        /**
         * @guilty:
         *
         * Points to the entity's guilty flag, which is set when a job from
         * this entity exceeds the scheduler's hang limit.
         */
        atomic_t *guilty;

        /**
         * @last_scheduled:
         *
         * Points to the finished fence of the last scheduled job. Only written
         * by the scheduler thread, can be accessed locklessly from
         * drm_sched_job_arm() iff the queue is empty.
         */
        struct dma_fence *last_scheduled;

        /**
         * @last_user: last group leader pushing a job into the entity.
         */
        struct task_struct *last_user;

        /**
         * @stopped:
         *
         * Marks the entity as removed from the rq and destined for
         * termination. This is set by calling drm_sched_entity_flush() and by
         * drm_sched_fini().
         */
        bool stopped;

        /**
         * @entity_idle:
         *
         * Signals when the entity is not in use, used to sequence entity
         * cleanup in drm_sched_entity_fini().
         */
        struct completion entity_idle;
};

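/*
 * Illustrative usage sketch (not part of this header): a driver typically
 * embeds one entity per context and initializes it against one or more
 * schedulers; the my_ctx/my_sched names below are hypothetical.
 *
 *      struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *      int ret;
 *
 *      ret = drm_sched_entity_init(&my_ctx->entity,
 *                                  DRM_SCHED_PRIORITY_NORMAL,
 *                                  sched_list, ARRAY_SIZE(sched_list),
 *                                  NULL);
 *      ...
 *      drm_sched_entity_destroy(&my_ctx->entity);
 */
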
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t lock;
        struct drm_gpu_scheduler *sched;
        struct list_head entities;
        struct drm_sched_entity *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
        struct dma_fence scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * been resolved.
         */
        struct dma_fence finished;

        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
        struct dma_fence *parent;
        /**
         * @sched: the scheduler instance to which the job having this struct
         * belongs.
         */
        struct drm_gpu_scheduler *sched;
        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
        spinlock_t lock;
        /**
         * @owner: job owner for debugging
         */
        void *owner;
};

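/*
 * Illustrative sketch (not part of this header): once a job has been
 * initialized and armed, &drm_sched_fence.finished can serve as the job's
 * out-fence even before run_job() has produced a hardware fence, e.g.:
 *
 *      struct dma_fence *out_fence =
 *              dma_fence_get(&job->s_fence->finished);
 */
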
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
        struct spsc_node queue_node;
        struct list_head list;
        struct drm_gpu_scheduler *sched;
        struct drm_sched_fence *s_fence;

        /*
         * work is used only after finish_cb has been used and will not be
         * accessed anymore.
         */
        union {
                struct dma_fence_cb finish_cb;
                struct irq_work work;
        };

        uint64_t id;
        atomic_t karma;
        enum drm_sched_priority s_priority;
        struct drm_sched_entity *entity;
        struct dma_fence_cb cb;
        /**
         * @dependencies:
         *
         * Contains the dependencies as struct dma_fence for this job, see
         * drm_sched_job_add_dependency() and
         * drm_sched_job_add_implicit_dependencies().
         */
        struct xarray dependencies;

        /** @last_dependency: tracks @dependencies as they signal */
        unsigned long last_dependency;
};

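/*
 * Illustrative submission flow (not part of this header); error handling is
 * omitted and my_job/my_entity/my_bo/my_owner are hypothetical:
 *
 *      ret = drm_sched_job_init(&my_job->base, &my_entity, my_owner);
 *      ret = drm_sched_job_add_implicit_dependencies(&my_job->base,
 *                                                    my_bo, true);
 *      drm_sched_job_arm(&my_job->base);
 *      drm_sched_entity_push_job(&my_job->base);
 *
 * A job that was initialized but never pushed is released again with
 * drm_sched_job_cleanup().
 */
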
/* Increment the job's karma and return true if it now exceeds @threshold. */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
        return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
        DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
        DRM_GPU_SCHED_STAT_NOMINAL,
        DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler; these functions
 * should be implemented by the driver.
 */
struct drm_sched_backend_ops {
        /**
         * @dependency:
         *
         * Called when the scheduler is considering scheduling this job next, to
         * get another struct dma_fence for this job to block on. Once it
         * returns NULL, run_job() may be called.
         *
         * If a driver exclusively uses drm_sched_job_add_dependency() and
         * drm_sched_job_add_implicit_dependencies() this can be omitted and
         * left as NULL.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);

        /**
         * @run_job: Called to execute the job once all of the dependencies
         * have been resolved. This may be called multiple times, if
         * timedout_job() has happened and drm_sched_resubmit_jobs()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

        /**
         * @timedout_job: Called when a job has taken too long to execute,
         * to trigger GPU recovery.
         *
         * This method is called in a workqueue context.
         *
         * Drivers typically issue a reset to recover from GPU hangs, and this
         * procedure usually follows this workflow (an illustrative sketch is
         * given after this struct definition):
         *
         * 1. Stop the scheduler using drm_sched_stop(). This will park the
         *    scheduler thread and cancel the timeout work, guaranteeing that
         *    nothing is queued while we reset the hardware queue
         * 2. Try to gracefully stop non-faulty jobs (optional)
         * 3. Issue a GPU reset (driver-specific)
         * 4. Re-submit jobs using drm_sched_resubmit_jobs()
         * 5. Restart the scheduler using drm_sched_start(). At that point, new
         *    jobs can be queued, and the scheduler thread is unblocked
         *
         * Note that some GPUs have distinct hardware queues but need to reset
         * the GPU globally, which requires extra synchronization between the
         * timeout handlers of the different &drm_gpu_scheduler instances. One
         * way to achieve this synchronization is to create an ordered workqueue
         * (using alloc_ordered_workqueue()) at the driver level, and pass this
         * queue to drm_sched_init(), to guarantee that timeout handlers are
         * executed sequentially. The above workflow needs to be slightly
         * adjusted in that case:
         *
         * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
         * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
         *    the reset (optional)
         * 3. Issue a GPU reset on all faulty queues (driver-specific)
         * 4. Re-submit jobs on all schedulers impacted by the reset using
         *    drm_sched_resubmit_jobs()
         * 5. Restart all schedulers that were stopped in step #1 using
         *    drm_sched_start()
         *
         * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
         * and the underlying driver has started or completed recovery.
         *
         * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
         * available, i.e. has been unplugged.
         */
        enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

        /**
         * @free_job: Called once the job's finished fence has been signaled
         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
};

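/*
 * Illustrative driver-side implementation sketch (not part of this header).
 * The my_* helpers are hypothetical and error handling is elided; the
 * timedout_job() body follows the single-queue recovery workflow described
 * in &drm_sched_backend_ops.timedout_job above.
 *
 *      static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *      {
 *              return my_hw_submit(to_my_job(sched_job)); // hardware fence
 *      }
 *
 *      static enum drm_gpu_sched_stat
 *      my_timedout_job(struct drm_sched_job *sched_job)
 *      {
 *              struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *              drm_sched_stop(sched, sched_job);
 *              my_hw_reset();                          // driver-specific
 *              drm_sched_resubmit_jobs(sched);
 *              drm_sched_start(sched, true);
 *              return DRM_GPU_SCHED_STAT_NOMINAL;
 *      }
 *
 *      static void my_free_job(struct drm_sched_job *sched_job)
 *      {
 *              drm_sched_job_cleanup(sched_job);
 *              kfree(to_my_job(sched_job));
 *      }
 *
 *      static const struct drm_sched_backend_ops my_sched_ops = {
 *              .run_job      = my_run_job,
 *              .timedout_job = my_timedout_job,
 *              .free_job     = my_free_job,
 *      };
 */
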
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops *ops;
        uint32_t hw_submission_limit;
        long timeout;
        const char *name;
        struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
        wait_queue_head_t wake_up_worker;
        wait_queue_head_t job_scheduled;
        atomic_t hw_rq_count;
        atomic64_t job_id_count;
        struct workqueue_struct *timeout_wq;
        struct delayed_work work_tdr;
        struct task_struct *thread;
        struct list_head pending_list;
        spinlock_t job_list_lock;
        int hang_limit;
        atomic_t *score;
        atomic_t _score;
        bool ready;
        bool free_guilty;
};

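/*
 * Illustrative sketch (not part of this header): one scheduler is usually
 * created per hardware ring at device-init time; my_dev/my_sched_ops are
 * hypothetical and the numeric values are arbitrary examples.
 *
 *      ret = drm_sched_init(&my_dev->sched, &my_sched_ops,
 *                           64,                        // hw_submission
 *                           0,                         // hang_limit
 *                           msecs_to_jiffies(500),     // timeout
 *                           NULL,                      // default timeout_wq
 *                           NULL,                      // use internal score
 *                           "my-ring");
 *      ...
 *      drm_sched_fini(&my_dev->sched);
 */
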
int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit,
                   long timeout, struct workqueue_struct *timeout_wq,
                   atomic_t *score, const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
                                 struct dma_fence *fence);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
                                            struct drm_gem_object *obj,
                                            bool write);


void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
                          struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                              unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
                    unsigned int num_sched_list);
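/*
 * Illustrative sketch (not part of this header): when several schedulers can
 * run a job, drm_sched_pick_best() returns the least-loaded one according to
 * its score, e.g.:
 *
 *      struct drm_gpu_scheduler *sched =
 *              drm_sched_pick_best(sched_list, num_sched_list);
 */
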

#endif