// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name = "percpu_counter",
	.fixup_free = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disabling. The latter is guaranteed by the fact that the slow
 * path is explicitly protected by an irq-safe spinlock whereas the fast path
 * uses this_cpu_add(), which is irq-safe by definition. Hence there is no
 * need to muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
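
/*
 * Illustrative usage sketch, not part of this file: a caller tracking, say,
 * in-flight requests can call percpu_counter_add_batch() directly when it
 * wants a batch other than the global percpu_counter_batch.  The counter
 * name "nr_requests" and the batch of 64 below are made up for the example.
 *
 *	static struct percpu_counter nr_requests;
 *
 *	static void request_started(void)
 *	{
 *		percpu_counter_add_batch(&nr_requests, 1, 64);
 *	}
 *
 *	static void request_finished(void)
 *	{
 *		percpu_counter_add_batch(&nr_requests, -1, 64);
 *	}
 */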

/*
 * For a percpu_counter with a big batch, the deviation of its count could
 * be big, and there may be a need to reduce that deviation, e.g. when the
 * counter's batch is decreased at runtime to get better accuracy.  This can
 * be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
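
/*
 * Illustrative usage sketch, not part of this file: percpu_counter_sync()
 * only folds in the local CPU's slot, so a caller that has just lowered its
 * batch and wants the deviation reduced everywhere would typically run it on
 * every CPU, e.g. via on_each_cpu().  The helper and counter names are made
 * up for the example.
 *
 *	static void sync_one(void *arg)
 *	{
 *		percpu_counter_sync(arg);
 *	}
 *
 *	static void sync_all_cpus(struct percpu_counter *fbc)
 *	{
 *		on_each_cpu(sync_one, fbc, 1);
 *	}
 */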

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
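
/*
 * Illustrative usage sketch, not part of this file: percpu_counter_read() is
 * a cheap, possibly stale snapshot of fbc->count, while percpu_counter_sum()
 * takes the lock and walks every online CPU.  A common pattern is to use the
 * cheap read for rough statistics and the sum only where accuracy matters;
 * "nr_requests" is made up for the example.
 *
 *	s64 approx = percpu_counter_read(&nr_requests);
 *	s64 exact = percpu_counter_sum(&nr_requests);
 */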

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
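
/*
 * Illustrative usage sketch, not part of this file: the usual lifecycle is
 * percpu_counter_init() (which can fail, since it allocates the per-cpu
 * slots), a stream of percpu_counter_add()/percpu_counter_read() calls, and
 * a final percpu_counter_destroy().  "nr_requests" and the helpers are made
 * up for the example.
 *
 *	static struct percpu_counter nr_requests;
 *
 *	static int example_setup(void)
 *	{
 *		return percpu_counter_init(&nr_requests, 0, GFP_KERNEL);
 *	}
 *
 *	static void example_teardown(void)
 *	{
 *		percpu_counter_destroy(&nr_requests);
 *	}
 */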

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
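
/*
 * Illustrative usage sketch, not part of this file: percpu_counter_compare()
 * (which passes percpu_counter_batch as the batch) is the usual way to test
 * a limit without paying for a full sum on every call; only when the rough
 * count is within batch * num_online_cpus() of the limit does it fall back
 * to the precise sum.  The helper name below is made up for the example.
 *
 *	static bool over_limit(struct percpu_counter *fbc, s64 limit)
 *	{
 *		return percpu_counter_compare(fbc, limit) > 0;
 *	}
 */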

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);