/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

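/*
 * RT overload tracking: a runqueue is "overloaded" when it has more
 * than one runnable RT task, i.e. at least one queued RT task that
 * could be pushed to another CPU.  rt_overload_mask records which
 * CPUs are in that state and rto_count caches how many, so that the
 * pull side can bail out with a single atomic_read() in the common,
 * non-overloaded case and only touch the shared mask when the count
 * is nonzero.
 */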
#ifdef CONFIG_SMP
static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
	return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

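/*
 * inc/dec_rt_tasks do the bookkeeping the push/pull logic depends on:
 * they keep rq->rt.rt_nr_running and rq->rt.highest_prio in sync with
 * the priority array, and mark the runqueue overloaded as soon as a
 * second RT task is queued (clearing it again when we drop back to at
 * most one).
 */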
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (rq->rt.rt_nr_running > 1)
		rt_set_overload(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			/* recalculate */
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->rt.highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (rq->rt.rt_nr_running < 2)
		rt_clear_overload(rq);
#endif /* CONFIG_SMP */
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);

	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

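/*
 * Picking the next RT task is O(1): sched_find_first_bit() locates
 * the highest-priority non-empty queue in the bitmap and we take the
 * task at its head.
 */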
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

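/*
 * A task is considered for migration when it is not currently running
 * and is allowed on the target CPU.  A cpu argument of -1 means "any
 * CPU" and skips the affinity check; the push side uses this because
 * find_lock_lowest_rq() applies the affinity mask itself when it
 * picks the target runqueue.
 */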
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)))
		return 1;
	return 0;
}

/*
 * Return the highest-priority queued RT task that can be migrated to
 * @cpu (typically the second highest, since the top task is usually
 * running), or NULL if there is none.
 */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
						     int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);

	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next,
				  struct task_struct, run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

	goto retry;

out:
	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

/*
 * Find the runqueue with the lowest RT priority that 'task' may run
 * on, and lock it.  Taking a second rq lock can force us to drop
 * this_rq->lock, so the choice is revalidated after locking and the
 * whole search is retried up to RT_MAX_TRIES times.
 */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *this_rq)
{
	struct rq *lowest_rq = NULL;
	int cpu;
	int tries;
	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);

	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		/*
		 * Scan each rq for the lowest prio.
		 */
		for_each_cpu_mask(cpu, *cpu_mask) {
			struct rq *rq = &per_cpu(runqueues, cpu);

			if (cpu == this_rq->cpu)
				continue;

			/* We look for lowest RT prio or non-rt CPU */
			if (rq->rt.highest_prio >= MAX_RT_PRIO) {
				lowest_rq = rq;
				break;
			}

			/* no locking for now */
			if (rq->rt.highest_prio > task->prio &&
			    (!lowest_rq ||
			     rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
				lowest_rq = rq;
			}
		}

		if (!lowest_rq)
			break;

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(this_rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue.  In the
			 * meantime, task could have migrated already
			 * or had its affinity changed.  Also make
			 * sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != this_rq ||
				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
				     task_running(this_rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *this_rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&this_rq->lock);

	next_task = pick_next_highest_task_rt(this_rq, -1);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == this_rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that next_task slipped in with a higher
	 * priority than current.  If that's the case just reschedule
	 * current.
	 */
	if (unlikely(next_task->prio < this_rq->curr->prio)) {
		resched_task(this_rq->curr);
		return 0;
	}

	/* We might release this_rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, this_rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases this_rq->lock, so it
		 * is possible that next_task has changed.  If it has,
		 * then try again.
		 */
		task = pick_next_highest_task_rt(this_rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(this_rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second-highest prio task on
 * the queue, and stop when it can't migrate (or there's
 * no more RT tasks).  There may be a case where a lower
 * priority RT task has a different affinity than the
 * higher RT task.  In this case the lower RT task could
 * possibly be able to migrate whereas the higher priority
 * RT task could not.  We currently ignore this issue.
 * Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

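/*
 * pull_rt_task() is the mirror image of push: scan the overloaded
 * CPUs and pull over any queued RT task that would preempt whatever
 * this runqueue would otherwise run next.  Returns nonzero when the
 * caller's notion of the next task may have changed, either because
 * we pulled something or because this_rq->lock was dropped along the
 * way.
 */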
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	cpumask_t *rto_cpumask;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay on trying to keep
	 * dirtying caches down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);

	rto_cpumask = rt_overload();

	for_each_cpu_mask(cpu, *rto_cpumask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non-overloaded runqueue.
			 * Clear it now.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;
				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;
			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p has a higher
			 * priority than what's currently running on
			 * its cpu - typically p is just waking up and
			 * hasn't had a chance to schedule yet.  Only
			 * pull p if it would not preempt the task
			 * currently running on its runqueue (and our
			 * own next task would not either).
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto bail;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */

			/*
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;

		}
bail:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}
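
/*
 * Called with the runqueue lock held, before we pick prev's
 * successor: if prev is an RT task of higher priority than anything
 * still queued here, we are about to lower this runqueue's RT
 * priority, so try to pull a runnable RT task over from an overloaded
 * CPU first.
 */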
static void schedule_balance_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.rt_nr_running > 1)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;

	return p;
}

static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back to the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;

		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_curr = curr;

	return p;
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/*
	 * Pass 'busiest' rq argument into
	 * load_balance_[start|next]_rt iterators.
	 */
	rt_rq_iterator.arg = busiest;

	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
			     idle, all_pinned, this_best_prio, &rt_rq_iterator);
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	rt_rq_iterator.arg = busiest;

	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				  &rt_rq_iterator);
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)	do { } while (0)
# define schedule_balance_rt(rq, prev)	do { } while (0)
#endif /* CONFIG_SMP */

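/*
 * Scheduler tick for RT tasks: account runtime and, for SCHED_RR,
 * round-robin the task to the tail of its priority queue once its
 * timeslice runs out.  SCHED_FIFO tasks have no timeslice and run
 * until they block, yield or are preempted.
 */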
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,
};