#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *rwsem_key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
	__init_rwsem(&sem->rw_sem, name, rwsem_key);
	rcuwait_init(&sem->writer);
	sem->readers_block = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
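
/*
 * Typical usage (an illustrative sketch; "foo_sem" is a made-up name).
 * Callers normally go through the percpu_init_rwsem() wrapper, which
 * supplies the lockdep key, or use DEFINE_STATIC_PERCPU_RWSEM() for a
 * static instance:
 *
 *	static struct percpu_rw_semaphore foo_sem;
 *
 *	if (percpu_init_rwsem(&foo_sem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&foo_sem);
 */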

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
{
	/*
	 * Due to having preemption disabled, the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of readers_block, then
	 * the writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the readers_block value,
	 * which in turn means that they are guaranteed to immediately
	 * decrement their sem->read_count, so that it doesn't matter that the
	 * writer missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !readers_block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!smp_load_acquire(&sem->readers_block)))
		return 1;

	/*
	 * Per the above comment, we still have preemption disabled and
	 * will thus decrement on the same CPU as we incremented.
	 */
	__percpu_up_read(sem);

	if (try)
		return 0;

	/*
	 * We either call schedule() in the wait, or we'll fall through
	 * and reschedule on the preempt_enable() in percpu_down_read().
	 */
	preempt_enable_no_resched();

	/*
	 * Avoid lockdep for the down/up_read() here; percpu_down_read()
	 * already took the lockdep annotation for this lock.
	 */
	__down_read(&sem->rw_sem);
	this_cpu_inc(*sem->read_count);
	__up_read(&sem->rw_sem);

	preempt_disable();
	return 1;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
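
/*
 * For reference, the reader fast path lives in the percpu-rwsem.h header;
 * roughly (a paraphrase, not the verbatim header code):
 *
 *	preempt_disable();
 *	__this_cpu_inc(*sem->read_count);
 *	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 *		__percpu_down_read(sem, false);
 *	preempt_enable();
 *
 * So __percpu_down_read() above only runs while a writer has knocked the
 * rcu_sync state out of idle, and always with preemption disabled.
 */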

void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
	smp_mb(); /* B matches C */
	/*
	 * In other words, if they see our decrement (presumably to aggregate
	 * zero, as that is the only time it matters) they will also see our
	 * critical section.
	 */
	__this_cpu_dec(*sem->read_count);

	/* Prod the writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);
}
EXPORT_SYMBOL_GPL(__percpu_up_read);
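
/*
 * The matching reader unlock fast path, again roughly paraphrased from
 * the header:
 *
 *	preempt_disable();
 *	if (likely(rcu_sync_is_idle(&sem->rss)))
 *		__this_cpu_dec(*sem->read_count);
 *	else
 *		__percpu_up_read(sem);
 *	preempt_enable();
 *
 * Only the slow path pays for the smp_mb() and the writer wakeup.
 */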

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})
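
/*
 * Note that individual slots of sem->read_count may well be negative: a
 * slow-path reader increments on the CPU it acquired the rwsem on but,
 * having had preemption enabled in between, may decrement on another.
 * Only the sum over all possible CPUs, as computed by per_cpu_sum()
 * above, equals the number of active readers.
 */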

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable: any newly arriving readers
 * that increment a given counter will immediately decrement that same
 * counter.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}
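
/*
 * Pairing sketch for B/C (illustrative):
 *
 *	reader (__percpu_up_read)	writer (readers_active_check)
 *	-------------------------	-----------------------------
 *	[critical section]		sum = per_cpu_sum(...)
 *	smp_mb();	(B)		smp_mb();	(C)
 *	__this_cpu_dec(...)		[write-side section begins]
 *
 * If the writer's sum observes the final decrement, barriers B and C
 * together guarantee it also observes everything the reader did before
 * that decrement.
 */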

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	down_write(&sem->rw_sem);

	/*
	 * Notify new readers to block; up until now, and thus throughout the
	 * longish rcu_sync_enter() above, new readers could still come in.
	 */
	WRITE_ONCE(sem->readers_block, 1);

	smp_mb(); /* D matches A */

	/*
	 * If the readers don't see our store to readers_block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all now active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);
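
/*
 * Write-side usage sketch (illustrative; "foo_sem" as above):
 *
 *	percpu_down_write(&foo_sem);
 *	... exclusive section ...
 *	percpu_up_write(&foo_sem);
 *
 * The cost is front-loaded into rcu_sync_enter(), which can wait for an
 * RCU-sched grace period; this lock is intended for read-mostly workloads
 * where writers are rare.
 */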

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	/*
	 * Signal that the writer is done; no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path, which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	smp_store_release(&sem->readers_block, 0);

	/*
	 * Release the write lock; this will allow readers back in the game.
	 */
	up_write(&sem->rw_sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);