/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it at single depth, and that lock is highly
 * contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace {
	unsigned int		nr_entries;
	unsigned int		offset;
};

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	struct lock_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);
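
/*
 * Illustrative sketch (not part of this API's definition): an object that
 * embeds a dynamically allocated key must register it before first use and
 * unregister it before the key memory is freed. "struct foo" and its
 * members below are hypothetical.
 *
 *	struct foo {
 *		struct lock_class_key	key;
 *		spinlock_t		lock;
 *	};
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (!f)
 *			return NULL;
 *		lockdep_register_key(&f->key);
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &f->key);
 *		return f;
 *	}
 *
 *	void foo_free(struct foo *f)
 *	{
 *		lockdep_unregister_key(&f->key);
 *		kfree(f);
 *	}
 */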

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
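
/*
 * Usage sketch: a subsystem whose locks are initialized by a shared helper
 * (and therefore all land in one class) can split its instances off into a
 * dedicated class after init. The "bar" names below are hypothetical.
 *
 *	static struct lock_class_key bar_lock_key;
 *
 *	spin_lock_init(&bar->lock);			// helper's class
 *	lockdep_set_class(&bar->lock, &bar_lock_key);	// dedicated class
 */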

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
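
/*
 * Sketch of how a locking primitive typically forwards these events,
 * mirroring the spin_acquire()/spin_release() wrappers further below;
 * my_lock_t and the arch_my_*() helpers are hypothetical:
 *
 *	static inline void my_lock(struct my_lock_t *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_my_lock(l);		// the actual acquire
 *	}
 *
 *	static inline void my_unlock(struct my_lock_t *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_my_unlock(l);
 *	}
 */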

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
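
/*
 * Pinning sketch: a caller that must guarantee a lock is not dropped and
 * re-acquired across a call chain can pin it and later unpin it with the
 * returned cookie (the scheduler uses this pattern for rq->lock; the "q"
 * object below is hypothetical):
 *
 *	struct pin_cookie cookie = lock_pin_lock(&q->lock.dep_map);
 *	...				// q->lock must remain held here
 *	lock_unpin_lock(&q->lock.dep_map, cookie);
 */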

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
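
/*
 * Typical use: document and verify a locking precondition at the top of a
 * helper (the function and structure below are hypothetical):
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->nr_updates++;
 *	}
 */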

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call out themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
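
/*
 * For example, a global annotation object can be defined as follows
 * (illustrative names; RCU uses this pattern for rcu_lock_map):
 *
 *	static struct lock_class_key foo_key;
 *	struct lockdep_map foo_map = STATIC_LOCKDEP_MAP_INIT("foo", &foo_key);
 */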

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
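
/*
 * Sketch: a sleeping-lock implementation would wrap its trylock fastpath
 * and blocking slowpath roughly like this (names are hypothetical):
 *
 *	void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		might_sleep();
 *		LOCK_CONTENDED(lock, my_mutex_trylock, __my_mutex_lock_slowpath);
 *	}
 */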

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
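
/*
 * E.g. when taking a parent and then one child of the same lock class,
 * the inner acquisition can be annotated via the _nested() API:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */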

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
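
/*
 * The lock_map_*() variants annotate pseudo-lock constructs that embed a
 * bare lockdep_map rather than a real lock; e.g. the workqueue code
 * brackets work-item execution roughly like this (sketch):
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	...				// run the work item's callback
 *	lock_map_release(&work->lockdep_map);
 */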

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
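
/*
 * Sketch: annotate a path that may take a lock without always doing so,
 * so the dependency is recorded even when the fast path wins:
 *
 *	might_lock_read(&mm->mmap_sem);	// fault handling may take mmap_sem
 */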

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

#define lockdep_assert_in_irq() do {					\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirq_context,			\
			  "Not in hardirq as expected\n");		\
	} while (0)
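
/*
 * Typical use: state an interrupt-state precondition explicitly (the
 * function and counter below are hypothetical):
 *
 *	static void foo_account_event(void)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		__this_cpu_inc(foo_nr_events);
 *	}
 */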

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */