/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity, used to select the run queue.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for
 *           termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head list;
	struct drm_sched_rq *rq;
	struct drm_gpu_scheduler **sched_list;
	unsigned int num_sched_list;
	enum drm_sched_priority priority;
	spinlock_t rq_lock;

	struct spsc_queue job_queue;

	atomic_t fence_seq;
	uint64_t fence_context;

	struct dma_fence *dependency;
	struct dma_fence_cb cb;
	atomic_t *guilty;
	struct dma_fence *last_scheduled;
	struct task_struct *last_user;
	bool stopped;
	struct completion entity_idle;
};
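
/*
 * Example: minimal entity setup (an illustrative sketch, not part of
 * this header; "my_sched" stands in for a driver-owned scheduler
 * instance, and error handling is elided):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	struct drm_sched_entity entity;
 *
 *	int ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *					sched_list, ARRAY_SIZE(sched_list),
 *					NULL);
 */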

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct drm_gpu_scheduler *sched;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence *parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;
	/**
	 * @owner: job owner for debugging.
	 */
	void *owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the
 *        &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job; see the example below the struct.
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;
	struct dma_fence_cb finish_cb;
	struct list_head node;
	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
	struct dma_fence_cb cb;
};
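
/*
 * Example: typical job lifecycle (an illustrative sketch, not part of
 * this header; "job" is a hypothetical driver structure embedding a
 * struct drm_sched_job as "base"):
 *
 *	ret = drm_sched_job_init(&job->base, entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	// the finished fence is the job's out fence, available already
 *	out_fence = dma_fence_get(&job->base.s_fence->finished);
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */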

/*
 * drm_sched_invalidate_job - bump @s_job's karma and report whether it has
 * crossed @threshold, i.e. whether the job should be treated as guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on. Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and the recovery path
	 * (see drm_sched_resubmit_jobs()) decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
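
/*
 * Example: a minimal backend (an illustrative sketch; the "foo_*"
 * callbacks are hypothetical driver functions, not part of this header):
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency = foo_job_dependency,
 *		.run_job = foo_job_run,
 *		.timedout_job = foo_job_timedout,
 *		.free_job = foo_job_free,
 *	};
 */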

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct delayed_work work_tdr;
	struct task_struct *thread;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t score;
	bool ready;
	bool free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
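
/*
 * Example: bringing up one scheduler instance (an illustrative sketch;
 * the queue depth, hang limit and timeout below are arbitrary values,
 * and "foo_sched_ops" is the hypothetical backend from above):
 *
 *	ret = drm_sched_init(&foo->sched, &foo_sched_ops,
 *			     64, 2, msecs_to_jiffies(500), "foo");
 */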

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif