/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 * runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 * Jobs from this entity can be scheduled on any scheduler
 * on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 * new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 * to this entity.
 * The &drm_sched_fence.scheduled fence uses the
 * fence_context, but &drm_sched_fence.finished uses
 * fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 * of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head list;
	struct drm_sched_rq *rq;
	struct drm_gpu_scheduler **sched_list;
	unsigned int num_sched_list;
	enum drm_sched_priority priority;
	spinlock_t rq_lock;

	struct spsc_queue job_queue;

	atomic_t fence_seq;
	uint64_t fence_context;

	struct dma_fence *dependency;
	struct dma_fence_cb cb;
	atomic_t *guilty;
	struct dma_fence *last_scheduled;
	struct task_struct *last_user;
	bool stopped;
	struct completion entity_idle;
};
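
/*
 * Example (illustrative sketch, not part of this header): a driver would
 * typically embed an entity in its per-file context and initialize it
 * against one or more schedulers. The foo_* names are hypothetical.
 *
 *	struct foo_file_priv {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	static int foo_open_entity(struct foo_file_priv *priv,
 *				   struct drm_gpu_scheduler **sched_list,
 *				   unsigned int num_sched_list)
 *	{
 *		// One entity per file, scheduled at normal priority; no
 *		// guilty tracking in this sketch, hence the NULL.
 *		return drm_sched_entity_init(&priv->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list, num_sched_list,
 *					     NULL);
 *	}
 */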

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct drm_gpu_scheduler *sched;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signaled.
	 */
	struct dma_fence *parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;
	/**
	 * @owner: job owner for debugging
	 */
	void *owner;
};
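
/*
 * Example (illustrative sketch): because &drm_sched_fence.finished exists
 * as soon as drm_sched_job_init() has run, it is the natural fence to hand
 * back to userspace as an out-fence. The foo_* name is hypothetical.
 *
 *	static struct dma_fence *foo_job_out_fence(struct drm_sched_job *job)
 *	{
 *		// Valid right after drm_sched_job_init(), long before
 *		// run_job() has produced a hardware fence.
 *		return &job->s_fence->finished;
 *	}
 */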

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 * limit of the scheduler then the job is marked guilty and will not
 * be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;
	struct dma_fence_cb finish_cb;
	struct list_head node;
	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
	struct dma_fence_cb cb;
};

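/*
 * Example (illustrative sketch): the usual submission flow pairs
 * drm_sched_job_init() with drm_sched_entity_push_job(). The foo_* names
 * are hypothetical.
 *
 *	struct foo_job {
 *		struct drm_sched_job base;
 *	};
 *
 *	static int foo_submit(struct foo_job *fjob,
 *			      struct drm_sched_entity *entity, void *owner)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&fjob->base, entity, owner);
 *		if (ret)
 *			return ret;
 *
 *		// Hand the job to the scheduler; it runs once its
 *		// dependencies have signaled.
 *		drm_sched_entity_push_job(&fjob->base, entity);
 *		return 0;
 *	}
 */
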
/*
 * Bump @s_job's karma and report whether it has now crossed @threshold,
 * i.e. whether the job should be considered guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
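
/*
 * Example (illustrative sketch): a driver's timeout handler might use
 * drm_sched_invalidate_job() to decide whether a repeatedly hanging job
 * should be skipped on resubmission. foo_timedout_job() and the threshold
 * of 3 are hypothetical.
 *
 *	static void foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		if (drm_sched_invalidate_job(sched_job, 3)) {
 *			// Crossed the hang limit: treat the job as guilty
 *			// and do not run it again after recovery.
 *		}
 *	}
 */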

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on. Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
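
/*
 * Example (illustrative sketch): a minimal driver-side ops table. The
 * foo_* callbacks are hypothetical; a real driver would return the
 * hardware fence from its run_job() and kick off recovery from
 * timedout_job().
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		// Push the job's commands to the hardware ring and return
 *		// a fence that signals when the hardware is done.
 *		return foo_hw_submit(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency	= foo_job_dependency,
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */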

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 * is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_do_release() is called the scheduler
 * waits on this wait queue until all the scheduled jobs are
 * finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 * timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 * guilty and will not be considered for scheduling further.
 * @score: score to help the load balancer pick an idle scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct delayed_work work_tdr;
	struct task_struct *thread;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t score;
	bool ready;
	bool free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
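
/*
 * Example (illustrative sketch): bringing up one scheduler per hardware
 * ring. The values and foo_* names are hypothetical.
 *
 *	static int foo_sched_init(struct drm_gpu_scheduler *sched)
 *	{
 *		// Up to 64 jobs in flight on the hardware queue, jobs time
 *		// out after 500 ms, a job is marked guilty after 3 hangs.
 *		return drm_sched_init(sched, &foo_sched_ops, 64, 3,
 *				      msecs_to_jiffies(500), "foo-ring0");
 *	}
 */
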
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif