#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
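
/*
 * Example (illustrative sketch, not part of the original file): the
 * typical lifetime of a percpu_rw_semaphore. The names "my_rwsem",
 * "my_seq", my_init() and my_exit() are hypothetical; percpu_init_rwsem()
 * is the usual wrapper that passes a static lock_class_key to
 * __percpu_init_rwsem().
 */
#if 0
static struct percpu_rw_semaphore my_rwsem;
static int my_seq;	/* the state the sketches below protect */

static int __init my_init(void)
{
	/* allocates ->fast_read_ctr; can fail with -ENOMEM */
	return percpu_init_rwsem(&my_rwsem);
}

static void my_exit(void)
{
	/* safe even right after a plain kzalloc(), see the kludge above */
	percpu_free_rwsem(&my_rwsem);
}
#endif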

/*
 * This is the fast-path for down_read/up_read. If it succeeds we rely
 * on the barriers provided by rcu_sync_enter/exit; see the comments in
 * percpu_down_write() and percpu_up_write().
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read(), this is not recursive: a writer can
 * come in after the first percpu_down_read() and a nested read would
 * then deadlock.
 *
 * Note: this returns with lock_is_held(brw->rw_sem) == T for lockdep;
 * percpu_up_read() does the rwsem_release(). This pairs with the use
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;

	/* Avoid rwsem_acquire_read() and rwsem_release() */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);
Oleg Nesterova1fd3e22012-12-17 16:01:32 -080085
Oleg Nesterov9287f692015-07-21 17:45:57 +020086int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
87{
88 if (unlikely(!update_fast_ctr(brw, +1))) {
89 if (!__down_read_trylock(&brw->rw_sem))
90 return 0;
91 atomic_inc(&brw->slow_read_ctr);
92 __up_read(&brw->rw_sem);
93 }
94
95 rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
96 return 1;
97}
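
/*
 * Example (illustrative sketch, not part of the original file): the
 * trylock form suits contexts that must not block behind a writer.
 * "my_rwsem" and "my_seq" are the hypothetical names sketched above.
 */
#if 0
static int my_read_opportunistic(int *val)
{
	if (!percpu_down_read_trylock(&my_rwsem))
		return -EBUSY;	/* a writer holds, or is taking, the lock */
	*val = my_seq;
	percpu_up_read(&my_rwsem);
	return 0;
}
#endif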

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);
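
/*
 * Example (illustrative sketch, not part of the original file): a plain
 * reader brackets its critical section with percpu_down_read() and
 * percpu_up_read(); with no writer around, both calls only bump the
 * per-cpu fast counter. Names continue the hypothetical sketch above.
 */
#if 0
static int my_read(void)
{
	int val;

	percpu_down_read(&my_rwsem);	/* usually just the fast path */
	val = my_seq;			/* stable while we hold the lock */
	percpu_up_read(&my_rwsem);

	return val;
}
#endif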

/*
 * Sum up and reset the per-cpu fast counters. A single per-cpu value
 * can be negative (a reader may lock on one CPU and unlock on another);
 * only the sum is meaningful, and summing in unsigned arithmetic makes
 * the intermediate wrap-around harmless. Called by the writer once the
 * fast-path is disabled, so the counters are stable.
 */
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for a gp pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);
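
/*
 * Example (illustrative sketch, not part of the original file): the
 * writer side of the hypothetical sketch above. percpu_down_write() can
 * block for at least one RCU-sched grace period and then waits for all
 * readers to drain, so this API heavily favours readers.
 */
#if 0
static void my_write(int new_seq)
{
	percpu_down_write(&my_rwsem);	/* readers are fully excluded now */
	my_seq = new_seq;		/* exclusive access to the state */
	percpu_up_write(&my_rwsem);
}
#endif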

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
	 * but only after another gp pass; this adds the necessary barrier
	 * to ensure the reader can't miss the changes done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);