/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
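
/*
 * Illustrative only (not part of the API above): a driver embedding the same
 * lock type in a parent and a child object that nest inside each other could
 * give one of them its own class to avoid a false self-deadlock report. All
 * 'foo_*' names below are hypothetical.
 *
 *	static struct lock_class_key foo_parent_key;
 *
 *	static void foo_parent_init(struct foo_dev *parent)
 *	{
 *		spin_lock_init(&parent->lock);
 *		lockdep_set_class(&parent->lock, &foo_parent_key);
 *	}
 */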

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

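/*
 * Sketch of how a lock primitive typically wires the two hooks above into its
 * fast path (illustrative only; 'my_lock'/'arch_do_*' are hypothetical, real
 * primitives use the spin_acquire()/mutex_acquire()/... wrappers further down):
 *
 *	static inline void my_lock(struct my_lock *l)
 *	{
 *		// subclass 0, not a trylock, exclusive, full validation
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_do_lock(l);
 *	}
 *
 *	static inline void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		arch_do_unlock(l);
 *	}
 */
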
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
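
/*
 * Illustrative use of the assertions above (the 'foo' names are hypothetical):
 * helpers can document and verify their locking contract at run time.
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);	// caller must hold f->lock
 *		f->nr_updates++;
 *	}
 */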

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
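
/*
 * For illustration only (the 'foo_*' names are hypothetical): a subsystem can
 * declare a static pseudo-lock purely for dependency tracking, conventionally
 * keyed on the map's own address, e.g.
 *
 *	static struct lockdep_map foo_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo_dep_map", &foo_dep_map);
 *
 * and then annotate the region of interest with lock_map_acquire() and
 * lock_map_release() (defined further below).
 */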

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
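
/*
 * Illustrative sketch of how a sleeping lock's slow path typically uses the
 * macros above (the 'my_mutex*' names are hypothetical; real users are e.g.
 * the mutex and rwsem implementations):
 *
 *	void my_mutex_lock(struct my_mutex *m)
 *	{
 *		mutex_acquire(&m->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(m, my_mutex_trylock, __my_mutex_lock_slowpath);
 *	}
 *
 * i.e. the trylock fast path is attempted first; only if it fails is a
 * contention event recorded before calling the real (possibly sleeping)
 * lock routine, and lock_acquired() fires once the lock is finally held.
 */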

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
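
/*
 * Illustrative use of might_lock() (all 'foo' names are hypothetical): a
 * function that only takes a lock on a rarely exercised path can still teach
 * lockdep about the potential dependency on every invocation:
 *
 *	static void foo_maybe_flush(struct foo *f)
 *	{
 *		might_lock(&f->flush_mutex);
 *		if (atomic_read(&f->dirty)) {
 *			mutex_lock(&f->flush_mutex);
 *			foo_flush(f);
 *			mutex_unlock(&f->flush_mutex);
 *		}
 *	}
 */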

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */