// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */
#include "sched.h"

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);
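
/*
 * Illustrative usage sketch (names are hypothetical): queue heads are
 * normally set up through the <linux/swait.h> wrappers, which supply the
 * lockdep class and name, either statically or at runtime:
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *
 *	struct swait_queue_head dyn_wq;
 *
 *	init_swait_queue_head(&dyn_wq);
 */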

/*
 * About the wake_up_state() return value: we can ignore it. If it
 * returns 0, the previously waiting task is already running, so it
 * will observe the condition as true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);
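
/*
 * Illustrative sketch: swake_up_locked() is for callers that already hold
 * q->lock, e.g. to batch a state update with the wakeup ('wq', 'done' and
 * 'flags' are hypothetical):
 *
 *	raw_spin_lock_irqsave(&wq.lock, flags);
 *	done = true;
 *	swake_up_locked(&wq);
 *	raw_spin_unlock_irqrestore(&wq.lock, flags);
 */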

void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);
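
/*
 * Illustrative waker-side sketch ('wq' and 'done' are hypothetical):
 * make the condition true before waking, so the woken task observes it:
 *
 *	WRITE_ONCE(done, true);
 *	swake_up_one(&wq);
 */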

/*
 * Must not be called with IRQs disabled, since we must be able to
 * release IRQs to guarantee a bounded lock hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		/* Drop the lock between wakeups to bound hold time. */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);
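
/*
 * Illustrative sketch: a broadcast wakeup from a context with IRQs
 * enabled ('wq' and 'shutting_down' are hypothetical):
 *
 *	WRITE_ONCE(shutting_down, true);
 *	swake_up_all(&wq);
 */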

static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add_tail(&wait->task_list, &q->task_list);
}

void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);
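
/*
 * Illustrative sketch of the canonical open-coded wait loop built on
 * prepare_to_swait_exclusive()/finish_swait() ('wq' and 'cond' are
 * hypothetical):
 *
 *	DECLARE_SWAITQUEUE(wait);
 *
 *	for (;;) {
 *		prepare_to_swait_exclusive(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	finish_swait(&wq, &wait);
 */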

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;
	long ret = 0;

	raw_spin_lock_irqsave(&q->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
		 * must not see us.
		 */
		list_del_init(&wait->task_list);
		ret = -ERESTARTSYS;
	} else {
		__prepare_to_swait(q, wait);
		set_current_state(state);
	}
	raw_spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
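
/*
 * prepare_to_swait_event() is the helper behind the swait_event_*()
 * macros in <linux/swait.h>. Illustrative macro-level use ('wq' and
 * 'cond' are hypothetical):
 *
 *	ret = swait_event_interruptible_exclusive(wq, cond);
 *	if (ret == -ERESTARTSYS)
 *		return ret;
 */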

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	/*
	 * list_empty_careful() allows a lockless check; the lock is only
	 * taken if we might still be queued and must unlink ourselves.
	 */
	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
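
/*
 * Putting it together, an illustrative waiter/waker pairing (all names
 * hypothetical):
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(event_wq);
 *	static bool event_done;
 *
 *	waiter:
 *		swait_event_exclusive(event_wq, READ_ONCE(event_done));
 *
 *	waker:
 *		WRITE_ONCE(event_done, true);
 *		swake_up_one(&event_wq);
 */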