/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <asm/sections.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
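
/*
 * Sorting by key groups all entries of a given static_key
 * contiguously, so __jump_label_update() below can patch a key's
 * sites with one linear scan that stops at the first entry belonging
 * to a different key.
 */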

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (count)
		static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
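
/*
 * Usage sketch (illustrative, not part of this file; 'debug_key',
 * my_fast_path() and do_debug_accounting() are made-up names):
 *
 *	static DEFINE_STATIC_KEY_FALSE(debug_key);
 *
 *	void my_fast_path(void)
 *	{
 *		if (static_branch_unlikely(&debug_key))
 *			do_debug_accounting();
 *	}
 *
 * static_key_enable(&debug_key.key) patches every branch site so the
 * debug path is taken; static_key_disable(&debug_key.key) restores
 * the NOP on the fast path.
 */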

void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update(). Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
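
/*
 * Illustrative interleaving (not from this file) of the negative
 * ->enabled handshake above, for the very first increment:
 *
 *	CPU0				CPU1
 *	----				----
 *	reads enabled == 0,
 *	takes the slow path
 *	jump_label_lock()
 *	enabled = -1			reads enabled == -1,
 *	jump_label_update()		takes the slow path,
 *	enabled = 1			blocks on jump_label_lock()
 *	jump_label_unlock()
 *					enabled == 1 != 0, so
 *					atomic_inc() -> 2
 *
 * CPU1 can therefore never observe a half-finished
 * jump_label_update().
 */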

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
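
/*
 * Deferred-key usage sketch (illustrative; 'my_events_key' is a
 * made-up name): batch frequent enable/disable transitions so text
 * patching is rate-limited.
 *
 *	static struct static_key_deferred my_events_key;
 *
 *	jump_label_rate_limit(&my_events_key, HZ / 4);	// once, at init
 *	...
 *	static_key_slow_inc(&my_events_key.key);	// event created
 *	...
 *	static_key_slow_dec_deferred(&my_events_key);	// event destroyed
 *
 * The final decrement is routed through the delayed work set up
 * above, so a quick dec/inc pair never patches the kernel text at
 * all.
 */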

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
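
/*
 * Note: this is a plain interval-overlap test between the
 * JUMP_LABEL_NOP_SIZE bytes at entry->code and [start, end]; @end is
 * treated as inclusive, so callers may pass start == end to check a
 * single address.
 */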

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
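
/*
 * Truth table for the XOR above (JUMP_LABEL_NOP == 0,
 * JUMP_LABEL_JMP == 1):
 *
 *	enabled  branch	 | resulting instruction
 *	-----------------+----------------------
 *	   0	    0	 |	NOP
 *	   0	    1	 |	JMP
 *	   1	    0	 |	JMP
 *	   1	    1	 |	NOP
 *
 * i.e. an inverted ("branch") entry wants a NOP when its key is
 * enabled.
 */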

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (entry->code) {
			if (kernel_text_address(entry->code))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve the type bits
		 * (JUMP_TYPE_MASK) stored in the low bits.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}

/* Disable any jump label entries in __init/__exit code */
void __init jump_label_invalidate_initmem(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (init_section_contains((void *)(unsigned long)iter->code, 1))
			iter->code = 0;
	}
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

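/*
 * For a static_key defined in one place but used from modules, each
 * module's jump_entry range is tracked by one of these, chained off
 * key->next. jump_label_update() then walks the chain to patch every
 * range that references the key.
 */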
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = key->next; mod; mod = mod->next) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with ideal nops
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch its entries via arch_jump_label_transform_static(), so
 * the arch specific jump label code can pick the best nop encoding.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			/*
			 * Set key->entries to iter, but preserve the type
			 * bits (JUMP_TYPE_MASK) stored in the low bits.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end (inclusive)
 * overlaps with any of the jump label patch addresses. Code that wants
 * to modify kernel text should first verify that it does not overlap
 * with any of the jump label addresses. Caller must hold
 * jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
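
/*
 * Typical caller (illustrative sketch): kprobes rejects a probe whose
 * address is reserved here, roughly
 *
 *	if (jump_label_text_reserved(p->addr, p->addr))
 *		return -EBUSY;
 *
 * since patching the same bytes from two facilities would corrupt the
 * instruction stream.
 */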

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
	struct module *mod;

	__jump_label_mod_update(key);

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */