/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in the lockdep_map. This optimization mainly targets
 * rq->lock: double_rq_lock() acquires it at single subclass depth, and
 * the lock is highly contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace {
	unsigned int		nr_entries;
	unsigned int		offset;
};

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	struct lock_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

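/*
 * Example (an illustrative sketch, not an API defined here): an object
 * carrying a dynamically allocated key must register the key before its
 * first use and unregister it before the key's memory is freed. The
 * struct foo below is hypothetical:
 *
 *	struct foo {
 *		struct mutex		mtx;
 *		struct lock_class_key	key;
 *	};
 *
 *	lockdep_register_key(&foo->key);
 *	mutex_init(&foo->mtx);
 *	lockdep_set_class(&foo->mtx, &foo->key);
 *	...
 *	mutex_destroy(&foo->mtx);
 *	lockdep_unregister_key(&foo->key);
 *	kfree(foo);
 */
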
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
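
/*
 * Example (an illustrative sketch): a subsystem that embeds a spinlock
 * in a structure used by otherwise unrelated callers can give those
 * locks their own class so lockdep does not conflate them and report
 * false deadlocks. The names below are hypothetical:
 *
 *	static struct lock_class_key foo_lock_key;
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &foo_lock_key);
 *	}
 */
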
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
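
/*
 * For example (a sketch mirroring the wrapper macros at the bottom of
 * this file): a plain write lock is an exclusive, fully checked acquire,
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *
 * while a recursive read lock (cf. rwlock_acquire_read() below) passes
 * read == 2:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, _RET_IP_);
 */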

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
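
/*
 * Sketch of the pin/unpin protocol (illustrative; the scheduler uses
 * this on rq->lock): pinning a held lock asserts that it is not dropped
 * within the pinned region, and unpinning requires the cookie returned
 * by the matching pin:
 *
 *	struct pin_cookie cookie = lock_pin_lock(&lock->dep_map);
 *	... the lock must remain held here ...
 *	lock_unpin_lock(&lock->dep_map, cookie);
 */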

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
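
/*
 * Example (an illustrative sketch): a helper whose contract requires the
 * caller to hold a lock can document and runtime-check that requirement.
 * The function below is hypothetical:
 *
 *	static void foo_update(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		...
 *	}
 */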

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and callers should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
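
/*
 * For example (this pattern appears in the RCU code; sketched here): a
 * statically declared map typically uses its own address as the key:
 *
 *	struct lockdep_map rcu_lock_map =
 *		STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_map);
 */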

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
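
/*
 * Example (a sketch): a lock implementation wraps its slowpath so that
 * contention is recorded only when the fast-path trylock fails, as the
 * spinlock code does:
 *
 *	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 */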

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) \
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
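
/*
 * Example (an illustrative sketch): a function that only takes a lock on
 * some paths can still declare the potential dependency unconditionally,
 * so lockdep sees it even on runs that skip the locked path. The
 * function below is hypothetical:
 *
 *	static void *foo_reserve(struct foo *f, bool blocking)
 *	{
 *		might_lock(&f->mtx);
 *		if (blocking)
 *			mutex_lock(&f->mtx);
 *		...
 *	}
 */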

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */