// SPDX-License-Identifier: GPL-2.0
/*
 * lib/locking-selftest.c
 *
 * Testsuite for various locking APIs: spinlocks, rwlocks,
 * mutexes and rw-semaphores.
 *
 * It checks both false positives and false negatives.
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/rtmutex.h>
#include <linux/local_lock.h>

#ifdef CONFIG_PREEMPT_RT
# define NON_RT(...)
#else
# define NON_RT(...)	__VA_ARGS__
#endif

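/*
 * NON_RT(x) compiles its argument out on PREEMPT_RT. It is used below to
 * skip the softirq-context test variants, since softirqs are handled in
 * preemptible task context on PREEMPT_RT and the softirq-safe assumptions
 * behind those tests do not apply there.
 */
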
/*
 * Change this to a LOCKTYPE_* bitmask (also settable via the
 * debug_locks_verbose= boot parameter) if you want to see the failure
 * printouts:
 */
static unsigned int debug_locks_verbose;
unsigned int force_read_lock_recursive;

static DEFINE_WD_CLASS(ww_lockdep);

static int __init setup_debug_locks_verbose(char *str)
{
	get_option(&str, &debug_locks_verbose);

	return 1;
}

__setup("debug_locks_verbose=", setup_debug_locks_verbose);

#define FAILURE		0
#define SUCCESS		1

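/*
 * Expected outcome of a testcase: SUCCESS means lockdep must not complain
 * (debug_locks stays set), FAILURE means the testcase is expected to
 * trigger a lockdep report (debug_locks gets cleared). dotest() compares
 * debug_locks against this expectation after running each testcase.
 */
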
#define LOCKTYPE_SPIN	0x1
#define LOCKTYPE_RWLOCK	0x2
#define LOCKTYPE_MUTEX	0x4
#define LOCKTYPE_RWSEM	0x8
#define LOCKTYPE_WW	0x10
#define LOCKTYPE_RTMUTEX	0x20
#define LOCKTYPE_LL	0x40
#define LOCKTYPE_SPECIAL	0x80

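/*
 * Each testcase is run with one of the LOCKTYPE_* bits as its lockclass
 * mask; the mask is matched against debug_locks_verbose so the boot
 * parameter can select which lock types print their (expected) lockdep
 * reports verbosely.
 */
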
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;

/*
 * Normal standalone locks, for the circular and irq-context
 * dependency tests:
 */
static DEFINE_SPINLOCK(lock_A);
static DEFINE_SPINLOCK(lock_B);
static DEFINE_SPINLOCK(lock_C);
static DEFINE_SPINLOCK(lock_D);

static DEFINE_RAW_SPINLOCK(raw_lock_A);
static DEFINE_RAW_SPINLOCK(raw_lock_B);

static DEFINE_RWLOCK(rwlock_A);
static DEFINE_RWLOCK(rwlock_B);
static DEFINE_RWLOCK(rwlock_C);
static DEFINE_RWLOCK(rwlock_D);

static DEFINE_MUTEX(mutex_A);
static DEFINE_MUTEX(mutex_B);
static DEFINE_MUTEX(mutex_C);
static DEFINE_MUTEX(mutex_D);

static DECLARE_RWSEM(rwsem_A);
static DECLARE_RWSEM(rwsem_B);
static DECLARE_RWSEM(rwsem_C);
static DECLARE_RWSEM(rwsem_D);

#ifdef CONFIG_RT_MUTEXES

static DEFINE_RT_MUTEX(rtmutex_A);
static DEFINE_RT_MUTEX(rtmutex_B);
static DEFINE_RT_MUTEX(rtmutex_C);
static DEFINE_RT_MUTEX(rtmutex_D);

#endif

/*
 * Locks that we initialize dynamically as well so that
 * e.g. X1 and X2 become two instances of the same class,
 * but X* and Y* are different classes. We do this so that
 * we do not trigger a real lockup:
 */
static DEFINE_SPINLOCK(lock_X1);
static DEFINE_SPINLOCK(lock_X2);
static DEFINE_SPINLOCK(lock_Y1);
static DEFINE_SPINLOCK(lock_Y2);
static DEFINE_SPINLOCK(lock_Z1);
static DEFINE_SPINLOCK(lock_Z2);

static DEFINE_RWLOCK(rwlock_X1);
static DEFINE_RWLOCK(rwlock_X2);
static DEFINE_RWLOCK(rwlock_Y1);
static DEFINE_RWLOCK(rwlock_Y2);
static DEFINE_RWLOCK(rwlock_Z1);
static DEFINE_RWLOCK(rwlock_Z2);

static DEFINE_MUTEX(mutex_X1);
static DEFINE_MUTEX(mutex_X2);
static DEFINE_MUTEX(mutex_Y1);
static DEFINE_MUTEX(mutex_Y2);
static DEFINE_MUTEX(mutex_Z1);
static DEFINE_MUTEX(mutex_Z2);

static DECLARE_RWSEM(rwsem_X1);
static DECLARE_RWSEM(rwsem_X2);
static DECLARE_RWSEM(rwsem_Y1);
static DECLARE_RWSEM(rwsem_Y2);
static DECLARE_RWSEM(rwsem_Z1);
static DECLARE_RWSEM(rwsem_Z2);

#ifdef CONFIG_RT_MUTEXES

static DEFINE_RT_MUTEX(rtmutex_X1);
static DEFINE_RT_MUTEX(rtmutex_X2);
static DEFINE_RT_MUTEX(rtmutex_Y1);
static DEFINE_RT_MUTEX(rtmutex_Y2);
static DEFINE_RT_MUTEX(rtmutex_Z1);
static DEFINE_RT_MUTEX(rtmutex_Z2);

#endif

static DEFINE_PER_CPU(local_lock_t, local_A);

/*
 * non-inlined runtime initializers, to let separate locks share
 * the same lock-class:
 */
#define INIT_CLASS_FUNC(class)				\
static noinline void					\
init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
		 struct mutex *mutex, struct rw_semaphore *rwsem)\
{							\
	spin_lock_init(lock);				\
	rwlock_init(rwlock);				\
	mutex_init(mutex);				\
	init_rwsem(rwsem);				\
}

INIT_CLASS_FUNC(X)
INIT_CLASS_FUNC(Y)
INIT_CLASS_FUNC(Z)

static void init_shared_classes(void)
{
#ifdef CONFIG_RT_MUTEXES
	static struct lock_class_key rt_X, rt_Y, rt_Z;

	__rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
	__rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
	__rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
	__rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
	__rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
	__rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
#endif

	init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
	init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);

	init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
	init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);

	init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
	init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
}

/*
 * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
 * The following functions use a lock from a simulated hardirq/softirq
 * context, causing the locks to be marked as hardirq-safe/softirq-safe:
 */

#define HARDIRQ_DISABLE		local_irq_disable
#define HARDIRQ_ENABLE		local_irq_enable

#define HARDIRQ_ENTER()				\
	local_irq_disable();			\
	__irq_enter();				\
	lockdep_hardirq_threaded();		\
	WARN_ON(!in_irq());

#define HARDIRQ_EXIT()				\
	__irq_exit();				\
	local_irq_enable();

#define SOFTIRQ_DISABLE		local_bh_disable
#define SOFTIRQ_ENABLE		local_bh_enable

#define SOFTIRQ_ENTER()				\
	local_bh_disable();			\
	local_irq_disable();			\
	lockdep_softirq_enter();		\
	WARN_ON(!in_softirq());

#define SOFTIRQ_EXIT()				\
	lockdep_softirq_exit();			\
	local_irq_enable();			\
	local_bh_enable();

/*
 * Shortcuts for lock/unlock API variants, to keep
 * the testcases compact:
 */
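/*
 * Naming scheme of the shortcuts: L/U are spinlock lock/unlock, WL/WU and
 * RL/RU are rwlock write/read lock/unlock, ML/MU are mutex lock/unlock,
 * RTL/RTU are rtmutex lock/unlock, WSL/WSU and RSL/RSU are rw-semaphore
 * write/read down/up, and the WW* helpers wrap the ww_mutex/ww_acquire
 * API. The *I variants (re)initialize the corresponding lock.
 */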
#define L(x)			spin_lock(&lock_##x)
#define U(x)			spin_unlock(&lock_##x)
#define LU(x)			L(x); U(x)
#define SI(x)			spin_lock_init(&lock_##x)

#define WL(x)			write_lock(&rwlock_##x)
#define WU(x)			write_unlock(&rwlock_##x)
#define WLU(x)			WL(x); WU(x)

#define RL(x)			read_lock(&rwlock_##x)
#define RU(x)			read_unlock(&rwlock_##x)
#define RLU(x)			RL(x); RU(x)
#define RWI(x)			rwlock_init(&rwlock_##x)

#define ML(x)			mutex_lock(&mutex_##x)
#define MU(x)			mutex_unlock(&mutex_##x)
#define MI(x)			mutex_init(&mutex_##x)

#define RTL(x)			rt_mutex_lock(&rtmutex_##x)
#define RTU(x)			rt_mutex_unlock(&rtmutex_##x)
#define RTI(x)			rt_mutex_init(&rtmutex_##x)

#define WSL(x)			down_write(&rwsem_##x)
#define WSU(x)			up_write(&rwsem_##x)

#define RSL(x)			down_read(&rwsem_##x)
#define RSU(x)			up_read(&rwsem_##x)
#define RWSI(x)			init_rwsem(&rwsem_##x)

#ifndef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define WWAI(x)			ww_acquire_init(x, &ww_lockdep)
#else
#define WWAI(x)			do { ww_acquire_init(x, &ww_lockdep); (x)->deadlock_inject_countdown = ~0U; } while (0)
#endif
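/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH the ww_mutex code randomly injects
 * -EDEADLK returns; setting deadlock_inject_countdown to ~0U here is
 * presumably meant to keep that artificial injection from firing in the
 * middle of these selftests.
 */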
#define WWAD(x)			ww_acquire_done(x)
#define WWAF(x)			ww_acquire_fini(x)

#define WWL(x, c)		ww_mutex_lock(x, c)
#define WWT(x)			ww_mutex_trylock(x, NULL)
#define WWL1(x)			ww_mutex_lock(x, NULL)
#define WWU(x)			ww_mutex_unlock(x)


#define LOCK_UNLOCK_2(x,y)	LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)

/*
 * Generate different permutations of the same testcase, using
 * the same basic lock-dependency/state events:
 */

#define GENERATE_TESTCASE(name)			\
						\
static void name(void) { E(); }

#define GENERATE_PERMUTATIONS_2_EVENTS(name)	\
						\
static void name##_12(void) { E1(); E2(); }	\
static void name##_21(void) { E2(); E1(); }

#define GENERATE_PERMUTATIONS_3_EVENTS(name)		\
							\
static void name##_123(void) { E1(); E2(); E3(); }	\
static void name##_132(void) { E1(); E3(); E2(); }	\
static void name##_213(void) { E2(); E1(); E3(); }	\
static void name##_231(void) { E2(); E3(); E1(); }	\
static void name##_312(void) { E3(); E1(); E2(); }	\
static void name##_321(void) { E3(); E2(); E1(); }

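/*
 * For example, GENERATE_PERMUTATIONS_2_EVENTS(foo) expands to:
 *
 *	static void foo_12(void) { E1(); E2(); }
 *	static void foo_21(void) { E2(); E1(); }
 *
 * so every ordering of the same events becomes its own testcase function.
 */
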
/*
 * AA deadlock:
 */

#define E()					\
						\
	LOCK(X1);				\
	LOCK(X2); /* this one should fail */

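/*
 * E() above is written in terms of the generic LOCK()/UNLOCK() operations;
 * each locking-selftest-*.h header included below essentially remaps those
 * onto one concrete primitive (spinlock, rwlock read/write, mutex, rwsem,
 * rtmutex) before GENERATE_TESTCASE() is invoked, so the same scenario is
 * instantiated once per lock type (AA_spin, AA_rlock, ...).
 */
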
/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(AA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(AA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(AA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(AA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(AA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(AA_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(AA_rtmutex);
#endif

#undef E

/*
 * Special case for read-locking: read-locks are
 * allowed to recurse on the same lock class:
 */
static void rlock_AA1(void)
{
	RL(X1);
	RL(X1); // this one should NOT fail
}

static void rlock_AA1B(void)
{
	RL(X1);
	RL(X2); // this one should NOT fail
}

static void rsem_AA1(void)
{
	RSL(X1);
	RSL(X1); // this one should fail
}

static void rsem_AA1B(void)
{
	RSL(X1);
	RSL(X2); // this one should fail
}
/*
 * The mixing of read and write locks is not allowed:
 */
static void rlock_AA2(void)
{
	RL(X1);
	WL(X2); // this one should fail
}

static void rsem_AA2(void)
{
	RSL(X1);
	WSL(X2); // this one should fail
}

static void rlock_AA3(void)
{
	WL(X1);
	RL(X2); // this one should fail
}

static void rsem_AA3(void)
{
	WSL(X1);
	RSL(X2); // this one should fail
}

/*
 * read_lock(A)
 * spin_lock(B)
 *		spin_lock(B)
 *		write_lock(A)
 */
static void rlock_ABBA1(void)
{
	RL(X1);
	L(Y1);
	U(Y1);
	RU(X1);

	L(Y1);
	WL(X1);
	WU(X1);
	U(Y1); // should fail
}

static void rwsem_ABBA1(void)
{
	RSL(X1);
	ML(Y1);
	MU(Y1);
	RSU(X1);

	ML(Y1);
	WSL(X1);
	WSU(X1);
	MU(Y1); // should fail
}

/*
 * read_lock(A)
 * spin_lock(B)
 *		spin_lock(B)
 *		write_lock(A)
 *
 * This test case checks whether the chain cache prevents us from detecting
 * a read-lock/write-lock deadlock: if the chain cache doesn't distinguish
 * read locks from write locks, the following case may happen
 *
 *	{ read_lock(A)->lock(B) dependency exists }
 *
 *	P0:
 *	lock(B);
 *	read_lock(A);
 *
 *	{ Not a deadlock, B -> A is added in the chain cache }
 *
 *	P1:
 *	lock(B);
 *	write_lock(A);
 *
 *	{ B->A found in chain cache, not reported as a deadlock }
 *
 */
static void rlock_chaincache_ABBA1(void)
{
	RL(X1);
	L(Y1);
	U(Y1);
	RU(X1);

	L(Y1);
	RL(X1);
	RU(X1);
	U(Y1);

	L(Y1);
	WL(X1);
	WU(X1);
	U(Y1); // should fail
}

/*
 * read_lock(A)
 * spin_lock(B)
 *		spin_lock(B)
 *		read_lock(A)
 */
static void rlock_ABBA2(void)
{
	RL(X1);
	L(Y1);
	U(Y1);
	RU(X1);

	L(Y1);
	RL(X1);
	RU(X1);
	U(Y1); // should NOT fail
}

static void rwsem_ABBA2(void)
{
	RSL(X1);
	ML(Y1);
	MU(Y1);
	RSU(X1);

	ML(Y1);
	RSL(X1);
	RSU(X1);
	MU(Y1); // should fail
}


/*
 * write_lock(A)
 * spin_lock(B)
 *		spin_lock(B)
 *		write_lock(A)
 */
static void rlock_ABBA3(void)
{
	WL(X1);
	L(Y1);
	U(Y1);
	WU(X1);

	L(Y1);
	WL(X1);
	WU(X1);
	U(Y1); // should fail
}

static void rwsem_ABBA3(void)
{
	WSL(X1);
	ML(Y1);
	MU(Y1);
	WSU(X1);

	ML(Y1);
	WSL(X1);
	WSU(X1);
	MU(Y1); // should fail
}

/*
 * ABBA deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(B, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABBA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABBA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABBA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABBA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABBA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBA_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBA_rtmutex);
#endif

#undef E

/*
 * AB BC CA deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(B, C);			\
	LOCK_UNLOCK_2(C, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABBCCA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABBCCA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABBCCA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABBCCA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABBCCA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCA_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBCCA_rtmutex);
#endif

#undef E

/*
 * AB CA BC deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(C, A);			\
	LOCK_UNLOCK_2(B, C); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABCABC_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABCABC_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABCABC_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABCABC_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABCABC_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCABC_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCABC_rtmutex);
#endif

#undef E

/*
 * AB BC CD DA deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(B, C);			\
	LOCK_UNLOCK_2(C, D);			\
	LOCK_UNLOCK_2(D, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABBCCDDA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABBCCDDA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABBCCDDA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABBCCDDA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABBCCDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCDDA_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBCCDDA_rtmutex);
#endif

#undef E

/*
 * AB CD BD DA deadlock:
 */
#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(C, D);			\
	LOCK_UNLOCK_2(B, D);			\
	LOCK_UNLOCK_2(D, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABCDBDDA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABCDBDDA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABCDBDDA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABCDBDDA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABCDBDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBDDA_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCDBDDA_rtmutex);
#endif

#undef E

/*
 * AB CD BC DA deadlock:
 */
#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(C, D);			\
	LOCK_UNLOCK_2(B, C);			\
	LOCK_UNLOCK_2(D, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABCDBCDA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABCDBCDA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABCDBCDA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABCDBCDA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABCDBCDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBCDA_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCDBCDA_rtmutex);
#endif

#undef E

#ifdef CONFIG_PREEMPT_RT
# define RT_PREPARE_DBL_UNLOCK()	{ migrate_disable(); rcu_read_lock(); }
#else
# define RT_PREPARE_DBL_UNLOCK()
#endif
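/*
 * On PREEMPT_RT, locking a spinlock_t also enters an RCU read-side
 * critical section and disables migration, and unlocking drops both
 * again. The extra migrate_disable()/rcu_read_lock() taken here
 * compensates for the deliberate second unlock in the testcase below,
 * so those counts don't underflow; dotest() restores them to the saved
 * values afterwards anyway.
 */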
/*
 * Double unlock:
 */
#define E()					\
						\
	LOCK(A);				\
	RT_PREPARE_DBL_UNLOCK();		\
	UNLOCK(A);				\
	UNLOCK(A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(double_unlock_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(double_unlock_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(double_unlock_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(double_unlock_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(double_unlock_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(double_unlock_rsem)

#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(double_unlock_rtmutex);
#endif

#undef E
758
759/*
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700760 * initializing a held lock:
761 */
762#define E() \
763 \
764 LOCK(A); \
765 INIT(A); /* fail */
766
767/*
768 * 6 testcases:
769 */
770#include "locking-selftest-spin.h"
771GENERATE_TESTCASE(init_held_spin)
772#include "locking-selftest-wlock.h"
773GENERATE_TESTCASE(init_held_wlock)
774#include "locking-selftest-rlock.h"
775GENERATE_TESTCASE(init_held_rlock)
776#include "locking-selftest-mutex.h"
777GENERATE_TESTCASE(init_held_mutex)
778#include "locking-selftest-wsem.h"
779GENERATE_TESTCASE(init_held_wsem)
780#include "locking-selftest-rsem.h"
781GENERATE_TESTCASE(init_held_rsem)
782
Peter Zijlstra018956d2017-05-31 17:11:49 +0200783#ifdef CONFIG_RT_MUTEXES
784#include "locking-selftest-rtmutex.h"
785GENERATE_TESTCASE(init_held_rtmutex);
786#endif
787
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700788#undef E
789
790/*
791 * locking an irq-safe lock with irqs enabled:
792 */
793#define E1() \
794 \
795 IRQ_ENTER(); \
796 LOCK(A); \
797 UNLOCK(A); \
798 IRQ_EXIT();
799
800#define E2() \
801 \
802 LOCK(A); \
803 UNLOCK(A);
804
805/*
806 * Generate 24 testcases:
807 */
808#include "locking-selftest-spin-hardirq.h"
809GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
810
811#include "locking-selftest-rlock-hardirq.h"
812GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
813
814#include "locking-selftest-wlock-hardirq.h"
815GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
816
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100817#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700818#include "locking-selftest-spin-softirq.h"
819GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
820
821#include "locking-selftest-rlock-softirq.h"
822GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
823
824#include "locking-selftest-wlock-softirq.h"
825GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100826#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700827
828#undef E1
829#undef E2
830
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100831#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700832/*
833 * Enabling hardirqs with a softirq-safe lock held:
834 */
835#define E1() \
836 \
837 SOFTIRQ_ENTER(); \
838 LOCK(A); \
839 UNLOCK(A); \
840 SOFTIRQ_EXIT();
841
842#define E2() \
843 \
844 HARDIRQ_DISABLE(); \
845 LOCK(A); \
846 HARDIRQ_ENABLE(); \
847 UNLOCK(A);
848
849/*
850 * Generate 12 testcases:
851 */
852#include "locking-selftest-spin.h"
853GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
854
855#include "locking-selftest-wlock.h"
856GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
857
858#include "locking-selftest-rlock.h"
859GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
860
861#undef E1
862#undef E2
863
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100864#endif
865
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700866/*
867 * Enabling irqs with an irq-safe lock held:
868 */
869#define E1() \
870 \
871 IRQ_ENTER(); \
872 LOCK(A); \
873 UNLOCK(A); \
874 IRQ_EXIT();
875
876#define E2() \
877 \
878 IRQ_DISABLE(); \
879 LOCK(A); \
880 IRQ_ENABLE(); \
881 UNLOCK(A);
882
883/*
884 * Generate 24 testcases:
885 */
886#include "locking-selftest-spin-hardirq.h"
887GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
888
889#include "locking-selftest-rlock-hardirq.h"
890GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
891
892#include "locking-selftest-wlock-hardirq.h"
893GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
894
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100895#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700896#include "locking-selftest-spin-softirq.h"
897GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
898
899#include "locking-selftest-rlock-softirq.h"
900GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
901
902#include "locking-selftest-wlock-softirq.h"
903GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100904#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700905
906#undef E1
907#undef E2
908
/*
 * Acquiring an irq-unsafe lock while holding an irq-safe lock:
 */
912#define E1() \
913 \
914 LOCK(A); \
915 LOCK(B); \
916 UNLOCK(B); \
917 UNLOCK(A); \
918
919#define E2() \
920 \
921 LOCK(B); \
922 UNLOCK(B);
923
924#define E3() \
925 \
926 IRQ_ENTER(); \
927 LOCK(A); \
928 UNLOCK(A); \
929 IRQ_EXIT();
930
931/*
932 * Generate 36 testcases:
933 */
934#include "locking-selftest-spin-hardirq.h"
935GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
936
937#include "locking-selftest-rlock-hardirq.h"
938GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
939
940#include "locking-selftest-wlock-hardirq.h"
941GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
942
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100943#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700944#include "locking-selftest-spin-softirq.h"
945GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
946
947#include "locking-selftest-rlock-softirq.h"
948GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
949
950#include "locking-selftest-wlock-softirq.h"
951GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100952#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700953
954#undef E1
955#undef E2
956#undef E3
957
958/*
959 * If a lock turns into softirq-safe, but earlier it took
960 * a softirq-unsafe lock:
961 */
962
963#define E1() \
964 IRQ_DISABLE(); \
965 LOCK(A); \
966 LOCK(B); \
967 UNLOCK(B); \
968 UNLOCK(A); \
969 IRQ_ENABLE();
970
971#define E2() \
972 LOCK(B); \
973 UNLOCK(B);
974
975#define E3() \
976 IRQ_ENTER(); \
977 LOCK(A); \
978 UNLOCK(A); \
979 IRQ_EXIT();
980
981/*
982 * Generate 36 testcases:
983 */
984#include "locking-selftest-spin-hardirq.h"
985GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
986
987#include "locking-selftest-rlock-hardirq.h"
988GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
989
990#include "locking-selftest-wlock-hardirq.h"
991GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
992
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +0100993#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -0700994#include "locking-selftest-spin-softirq.h"
995GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
996
997#include "locking-selftest-rlock-softirq.h"
998GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
999
1000#include "locking-selftest-wlock-softirq.h"
1001GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001002#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001003
1004#undef E1
1005#undef E2
1006#undef E3
1007
/*
 * read-lock / write-lock irq inversion.
 *
 * Deadlock scenario:
 *
 * CPU#1 is at #1, i.e. it has write-locked A, but has not
 * taken B yet.
 *
 * CPU#2 is at #2, i.e. it has locked B.
 *
 * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
 *
 * The deadlock occurs because CPU#1 will spin on B, and CPU#2
 * will spin on A.
 */
1023
1024#define E1() \
1025 \
1026 IRQ_DISABLE(); \
1027 WL(A); \
1028 LOCK(B); \
1029 UNLOCK(B); \
1030 WU(A); \
1031 IRQ_ENABLE();
1032
1033#define E2() \
1034 \
1035 LOCK(B); \
1036 UNLOCK(B);
1037
1038#define E3() \
1039 \
1040 IRQ_ENTER(); \
1041 RL(A); \
1042 RU(A); \
1043 IRQ_EXIT();
1044
1045/*
1046 * Generate 36 testcases:
1047 */
1048#include "locking-selftest-spin-hardirq.h"
1049GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
1050
1051#include "locking-selftest-rlock-hardirq.h"
1052GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
1053
1054#include "locking-selftest-wlock-hardirq.h"
1055GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
1056
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001057#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001058#include "locking-selftest-spin-softirq.h"
1059GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
1060
1061#include "locking-selftest-rlock-softirq.h"
1062GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
1063
1064#include "locking-selftest-wlock-softirq.h"
1065GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001066#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001067
1068#undef E1
1069#undef E2
1070#undef E3
1071
/*
 * write-read / write-read / write-read deadlock even if read is recursive
 */
1075
1076#define E1() \
1077 \
1078 WL(X1); \
1079 RL(Y1); \
1080 RU(Y1); \
1081 WU(X1);
1082
1083#define E2() \
1084 \
1085 WL(Y1); \
1086 RL(Z1); \
1087 RU(Z1); \
1088 WU(Y1);
1089
1090#define E3() \
1091 \
1092 WL(Z1); \
1093 RL(X1); \
1094 RU(X1); \
1095 WU(Z1);
1096
1097#include "locking-selftest-rlock.h"
1098GENERATE_PERMUTATIONS_3_EVENTS(W1R2_W2R3_W3R1)
1099
1100#undef E1
1101#undef E2
1102#undef E3
1103
/*
 * write-write / read-read / write-read deadlock even if read is recursive
 */
1107
1108#define E1() \
1109 \
1110 WL(X1); \
1111 WL(Y1); \
1112 WU(Y1); \
1113 WU(X1);
1114
1115#define E2() \
1116 \
1117 RL(Y1); \
1118 RL(Z1); \
1119 RU(Z1); \
1120 RU(Y1);
1121
1122#define E3() \
1123 \
1124 WL(Z1); \
1125 RL(X1); \
1126 RU(X1); \
1127 WU(Z1);
1128
1129#include "locking-selftest-rlock.h"
1130GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_W3R1)
1131
1132#undef E1
1133#undef E2
1134#undef E3
1135
/*
 * write-write / read-read / read-write is not deadlock when read is recursive
 */
1139
1140#define E1() \
1141 \
1142 WL(X1); \
1143 WL(Y1); \
1144 WU(Y1); \
1145 WU(X1);
1146
1147#define E2() \
1148 \
1149 RL(Y1); \
1150 RL(Z1); \
1151 RU(Z1); \
1152 RU(Y1);
1153
1154#define E3() \
1155 \
1156 RL(Z1); \
1157 WL(X1); \
1158 WU(X1); \
1159 RU(Z1);
1160
1161#include "locking-selftest-rlock.h"
1162GENERATE_PERMUTATIONS_3_EVENTS(W1R2_R2R3_W3W1)
1163
1164#undef E1
1165#undef E2
1166#undef E3
1167
/*
 * write-read / read-read / write-write is not deadlock when read is recursive
 */
1171
1172#define E1() \
1173 \
1174 WL(X1); \
1175 RL(Y1); \
1176 RU(Y1); \
1177 WU(X1);
1178
1179#define E2() \
1180 \
1181 RL(Y1); \
1182 RL(Z1); \
1183 RU(Z1); \
1184 RU(Y1);
1185
1186#define E3() \
1187 \
1188 WL(Z1); \
1189 WL(X1); \
1190 WU(X1); \
1191 WU(Z1);
1192
1193#include "locking-selftest-rlock.h"
1194GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
1195
1196#undef E1
1197#undef E2
1198#undef E3
/*
 * read-lock / write-lock recursion that is actually safe.
 */
1202
1203#define E1() \
1204 \
1205 IRQ_DISABLE(); \
1206 WL(A); \
1207 WU(A); \
1208 IRQ_ENABLE();
1209
1210#define E2() \
1211 \
1212 RL(A); \
1213 RU(A); \
1214
1215#define E3() \
1216 \
1217 IRQ_ENTER(); \
Boqun Feng31e0d742020-08-07 15:42:34 +08001218 LOCK(A); \
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001219 L(B); \
1220 U(B); \
Boqun Feng31e0d742020-08-07 15:42:34 +08001221 UNLOCK(A); \
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001222 IRQ_EXIT();
1223
1224/*
Boqun Feng31e0d742020-08-07 15:42:34 +08001225 * Generate 24 testcases:
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001226 */
1227#include "locking-selftest-hardirq.h"
Boqun Feng31e0d742020-08-07 15:42:34 +08001228#include "locking-selftest-rlock.h"
1229GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
1230
1231#include "locking-selftest-wlock.h"
1232GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001233
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001234#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001235#include "locking-selftest-softirq.h"
Boqun Feng31e0d742020-08-07 15:42:34 +08001236#include "locking-selftest-rlock.h"
1237GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
1238
1239#include "locking-selftest-wlock.h"
1240GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001241#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001242
1243#undef E1
1244#undef E2
1245#undef E3
1246
/*
 * read-lock / write-lock recursion that is unsafe.
 */
1250
1251#define E1() \
1252 \
1253 IRQ_DISABLE(); \
1254 L(B); \
Boqun Feng31e0d742020-08-07 15:42:34 +08001255 LOCK(A); \
1256 UNLOCK(A); \
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001257 U(B); \
1258 IRQ_ENABLE();
1259
1260#define E2() \
1261 \
1262 RL(A); \
1263 RU(A); \
1264
1265#define E3() \
1266 \
1267 IRQ_ENTER(); \
1268 L(B); \
1269 U(B); \
1270 IRQ_EXIT();
1271
1272/*
Boqun Feng31e0d742020-08-07 15:42:34 +08001273 * Generate 24 testcases:
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001274 */
1275#include "locking-selftest-hardirq.h"
Boqun Feng31e0d742020-08-07 15:42:34 +08001276#include "locking-selftest-rlock.h"
1277GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
1278
1279#include "locking-selftest-wlock.h"
1280GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001281
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001282#ifndef CONFIG_PREEMPT_RT
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001283#include "locking-selftest-softirq.h"
Boqun Feng31e0d742020-08-07 15:42:34 +08001284#include "locking-selftest-rlock.h"
1285GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
1286
1287#include "locking-selftest-wlock.h"
1288GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001289#endif
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001290
Boqun Feng96a16f42020-08-07 15:42:38 +08001291#undef E1
1292#undef E2
1293#undef E3
/*
 * read-lock / write-lock recursion that is unsafe.
 *
 * A is an ENABLED_*_READ lock
 * B is a USED_IN_*_READ lock
 *
 * read_lock(A);
 * write_lock(B);
 * <interrupt>
 * read_lock(B);
 * write_lock(A); // if this one is read_lock(), no deadlock
 */
1306
1307#define E1() \
1308 \
1309 IRQ_DISABLE(); \
1310 WL(B); \
1311 LOCK(A); \
1312 UNLOCK(A); \
1313 WU(B); \
1314 IRQ_ENABLE();
1315
1316#define E2() \
1317 \
1318 RL(A); \
1319 RU(A); \
1320
1321#define E3() \
1322 \
1323 IRQ_ENTER(); \
1324 RL(B); \
1325 RU(B); \
1326 IRQ_EXIT();
1327
1328/*
1329 * Generate 24 testcases:
1330 */
1331#include "locking-selftest-hardirq.h"
1332#include "locking-selftest-rlock.h"
1333GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
1334
1335#include "locking-selftest-wlock.h"
1336GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
1337
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001338#ifndef CONFIG_PREEMPT_RT
Boqun Feng96a16f42020-08-07 15:42:38 +08001339#include "locking-selftest-softirq.h"
1340#include "locking-selftest-rlock.h"
1341GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
1342
1343#include "locking-selftest-wlock.h"
1344GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001345#endif
Boqun Feng96a16f42020-08-07 15:42:38 +08001346
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
# define I_RAW_SPINLOCK(x)	lockdep_reset_lock(&raw_lock_##x.dep_map)
# define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
# define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x)	lockdep_reset_lock(&x.dep_map)
# define I_LOCAL_LOCK(x)	lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
#else
# define I_SPINLOCK(x)
# define I_RAW_SPINLOCK(x)
# define I_RWLOCK(x)
# define I_MUTEX(x)
# define I_RWSEM(x)
# define I_WW(x)
# define I_LOCAL_LOCK(x)
#endif

#ifndef I_RTMUTEX
# define I_RTMUTEX(x)
#endif

#ifdef CONFIG_RT_MUTEXES
#define I2_RTMUTEX(x)	rt_mutex_init(&rtmutex_##x)
#else
#define I2_RTMUTEX(x)
#endif

#define I1(x)					\
	do {					\
		I_SPINLOCK(x);			\
		I_RWLOCK(x);			\
		I_MUTEX(x);			\
		I_RWSEM(x);			\
		I_RTMUTEX(x);			\
	} while (0)

#define I2(x)					\
	do {					\
		spin_lock_init(&lock_##x);	\
		rwlock_init(&rwlock_##x);	\
		mutex_init(&mutex_##x);		\
		init_rwsem(&rwsem_##x);		\
		I2_RTMUTEX(x);			\
	} while (0)

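/*
 * I1() drops the lockdep state attached to each named lock via
 * lockdep_reset_lock(), while I2() reinitializes the lock objects
 * themselves, so every testcase starts from a clean slate.
 */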
static void reset_locks(void)
{
	local_irq_disable();
	lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
	lockdep_free_key_range(&ww_lockdep.mutex_key, 1);

	I1(A); I1(B); I1(C); I1(D);
	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
	I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
	I_LOCAL_LOCK(A);

	lockdep_reset();

	I2(A); I2(B); I2(C); I2(D);
	init_shared_classes();
	raw_spin_lock_init(&raw_lock_A);
	raw_spin_lock_init(&raw_lock_B);
	local_lock_init(this_cpu_ptr(&local_A));

	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
	memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
	memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
	local_irq_enable();
}

#undef I

static int testcase_total;
static int testcase_successes;
static int expected_testcase_failures;
static int unexpected_testcase_failures;

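/*
 * Run a single testcase: silence lockdep output unless the testcase's
 * LOCKTYPE_* bit is set in debug_locks_verbose, invoke the testcase and
 * check whether lockdep tripping (debug_locks getting cleared) matches
 * what was expected. Afterwards repair any preemption/migration/RCU
 * counts the testcase deliberately corrupted and reset all locks for
 * the next run.
 */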
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
{
	int saved_preempt_count = preempt_count();
#ifdef CONFIG_PREEMPT_RT
#ifdef CONFIG_SMP
	int saved_mgd_count = current->migration_disabled;
#endif
	int saved_rcu_count = current->rcu_read_lock_nesting;
#endif

	WARN_ON(irqs_disabled());

	debug_locks_silent = !(debug_locks_verbose & lockclass_mask);

	testcase_fn();
	/*
	 * Filter out expected failures:
	 */
#ifndef CONFIG_PROVE_LOCKING
	if (expected == FAILURE && debug_locks) {
		expected_testcase_failures++;
		pr_cont("failed|");
	}
	else
#endif
	if (debug_locks != expected) {
		unexpected_testcase_failures++;
		pr_cont("FAILED|");
	} else {
		testcase_successes++;
		pr_cont("  ok  |");
	}
	testcase_total++;

	if (debug_locks_verbose & lockclass_mask)
		pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
			lockclass_mask, debug_locks, expected);
	/*
	 * Some tests (e.g. double-unlock) might corrupt the preemption
	 * count, so restore it:
	 */
	preempt_count_set(saved_preempt_count);

#ifdef CONFIG_PREEMPT_RT
#ifdef CONFIG_SMP
	while (current->migration_disabled > saved_mgd_count)
		migrate_enable();
#endif

	while (current->rcu_read_lock_nesting > saved_rcu_count)
		rcu_read_unlock();
	WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	if (softirq_count())
		current->softirqs_enabled = 0;
	else
		current->softirqs_enabled = 1;
#endif

	reset_locks();
}

#ifdef CONFIG_RT_MUTEXES
#define dotest_rt(fn, e, m)	dotest((fn), (e), (m))
#else
#define dotest_rt(fn, e, m)
#endif

Ingo Molnarcae2ed92006-07-03 00:24:48 -07001500static inline void print_testname(const char *testname)
1501{
1502 printk("%33s:", testname);
1503}
1504
1505#define DO_TESTCASE_1(desc, name, nr) \
1506 print_testname(desc"/"#nr); \
1507 dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
Michael Ellerman25139402016-11-25 09:45:28 +11001508 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001509
1510#define DO_TESTCASE_1B(desc, name, nr) \
1511 print_testname(desc"/"#nr); \
1512 dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
Michael Ellerman25139402016-11-25 09:45:28 +11001513 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001514
Boqun Feng8ef7ca72020-08-07 15:42:35 +08001515#define DO_TESTCASE_1RR(desc, name, nr) \
1516 print_testname(desc"/"#nr); \
1517 pr_cont(" |"); \
1518 dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1519 pr_cont("\n");
1520
1521#define DO_TESTCASE_1RRB(desc, name, nr) \
1522 print_testname(desc"/"#nr); \
1523 pr_cont(" |"); \
1524 dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1525 pr_cont("\n");
1526
1527
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001528#define DO_TESTCASE_3(desc, name, nr) \
1529 print_testname(desc"/"#nr); \
1530 dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
1531 dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1532 dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
Michael Ellerman25139402016-11-25 09:45:28 +11001533 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001534
1535#define DO_TESTCASE_3RW(desc, name, nr) \
1536 print_testname(desc"/"#nr); \
1537 dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
1538 dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1539 dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
Michael Ellerman25139402016-11-25 09:45:28 +11001540 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001541
Boqun Feng31e0d742020-08-07 15:42:34 +08001542#define DO_TESTCASE_2RW(desc, name, nr) \
1543 print_testname(desc"/"#nr); \
1544 pr_cont(" |"); \
1545 dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1546 dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1547 pr_cont("\n");
1548
1549#define DO_TESTCASE_2x2RW(desc, name, nr) \
1550 DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001551 NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
Boqun Feng31e0d742020-08-07 15:42:34 +08001552
1553#define DO_TESTCASE_6x2x2RW(desc, name) \
1554 DO_TESTCASE_2x2RW(desc, name, 123); \
1555 DO_TESTCASE_2x2RW(desc, name, 132); \
1556 DO_TESTCASE_2x2RW(desc, name, 213); \
1557 DO_TESTCASE_2x2RW(desc, name, 231); \
1558 DO_TESTCASE_2x2RW(desc, name, 312); \
1559 DO_TESTCASE_2x2RW(desc, name, 321);
1560
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001561#define DO_TESTCASE_6(desc, name) \
1562 print_testname(desc); \
1563 dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
1564 dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
1565 dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \
1566 dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
1567 dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
1568 dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
Peter Zijlstra018956d2017-05-31 17:11:49 +02001569 dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
Michael Ellerman25139402016-11-25 09:45:28 +11001570 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001571
1572#define DO_TESTCASE_6_SUCCESS(desc, name) \
1573 print_testname(desc); \
1574 dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \
1575 dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \
1576 dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
1577 dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
1578 dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
1579 dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
Peter Zijlstra018956d2017-05-31 17:11:49 +02001580 dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \
Michael Ellerman25139402016-11-25 09:45:28 +11001581 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001582
1583/*
1584 * 'read' variant: rlocks must not trigger.
1585 */
1586#define DO_TESTCASE_6R(desc, name) \
1587 print_testname(desc); \
1588 dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
1589 dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
1590 dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
1591 dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
1592 dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
1593 dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
Peter Zijlstra018956d2017-05-31 17:11:49 +02001594 dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
Michael Ellerman25139402016-11-25 09:45:28 +11001595 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001596
1597#define DO_TESTCASE_2I(desc, name, nr) \
1598 DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001599 NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001600
1601#define DO_TESTCASE_2IB(desc, name, nr) \
1602 DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001603 NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001604
1605#define DO_TESTCASE_6I(desc, name, nr) \
1606 DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001607 NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001608
1609#define DO_TESTCASE_6IRW(desc, name, nr) \
1610 DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01001611 NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001612
1613#define DO_TESTCASE_2x3(desc, name) \
1614 DO_TESTCASE_3(desc, name, 12); \
1615 DO_TESTCASE_3(desc, name, 21);
1616
1617#define DO_TESTCASE_2x6(desc, name) \
1618 DO_TESTCASE_6I(desc, name, 12); \
1619 DO_TESTCASE_6I(desc, name, 21);
1620
1621#define DO_TESTCASE_6x2(desc, name) \
1622 DO_TESTCASE_2I(desc, name, 123); \
1623 DO_TESTCASE_2I(desc, name, 132); \
1624 DO_TESTCASE_2I(desc, name, 213); \
1625 DO_TESTCASE_2I(desc, name, 231); \
1626 DO_TESTCASE_2I(desc, name, 312); \
1627 DO_TESTCASE_2I(desc, name, 321);
1628
1629#define DO_TESTCASE_6x2B(desc, name) \
1630 DO_TESTCASE_2IB(desc, name, 123); \
1631 DO_TESTCASE_2IB(desc, name, 132); \
1632 DO_TESTCASE_2IB(desc, name, 213); \
1633 DO_TESTCASE_2IB(desc, name, 231); \
1634 DO_TESTCASE_2IB(desc, name, 312); \
1635 DO_TESTCASE_2IB(desc, name, 321);
1636
Boqun Feng8ef7ca72020-08-07 15:42:35 +08001637#define DO_TESTCASE_6x1RR(desc, name) \
1638 DO_TESTCASE_1RR(desc, name, 123); \
1639 DO_TESTCASE_1RR(desc, name, 132); \
1640 DO_TESTCASE_1RR(desc, name, 213); \
1641 DO_TESTCASE_1RR(desc, name, 231); \
1642 DO_TESTCASE_1RR(desc, name, 312); \
1643 DO_TESTCASE_1RR(desc, name, 321);
1644
1645#define DO_TESTCASE_6x1RRB(desc, name) \
1646 DO_TESTCASE_1RRB(desc, name, 123); \
1647 DO_TESTCASE_1RRB(desc, name, 132); \
1648 DO_TESTCASE_1RRB(desc, name, 213); \
1649 DO_TESTCASE_1RRB(desc, name, 231); \
1650 DO_TESTCASE_1RRB(desc, name, 312); \
1651 DO_TESTCASE_1RRB(desc, name, 321);
1652
Ingo Molnarcae2ed92006-07-03 00:24:48 -07001653#define DO_TESTCASE_6x6(desc, name) \
1654 DO_TESTCASE_6I(desc, name, 123); \
1655 DO_TESTCASE_6I(desc, name, 132); \
1656 DO_TESTCASE_6I(desc, name, 213); \
1657 DO_TESTCASE_6I(desc, name, 231); \
1658 DO_TESTCASE_6I(desc, name, 312); \
1659 DO_TESTCASE_6I(desc, name, 321);
1660
1661#define DO_TESTCASE_6x6RW(desc, name) \
1662 DO_TESTCASE_6IRW(desc, name, 123); \
1663 DO_TESTCASE_6IRW(desc, name, 132); \
1664 DO_TESTCASE_6IRW(desc, name, 213); \
1665 DO_TESTCASE_6IRW(desc, name, 231); \
1666 DO_TESTCASE_6IRW(desc, name, 312); \
1667 DO_TESTCASE_6IRW(desc, name, 321);
1668
Maarten Lankhorst1de99442013-06-20 13:31:24 +02001669static void ww_test_fail_acquire(void)
1670{
1671 int ret;
1672
1673 WWAI(&t);
1674 t.stamp++;
1675
1676 ret = WWL(&o, &t);
1677
1678 if (WARN_ON(!o.ctx) ||
1679 WARN_ON(ret))
1680 return;
1681
1682 /* No lockdep test, pure API */
1683 ret = WWL(&o, &t);
1684 WARN_ON(ret != -EALREADY);
1685
1686 ret = WWT(&o);
1687 WARN_ON(ret);
1688
1689 t2 = t;
1690 t2.stamp++;
1691 ret = WWL(&o, &t2);
1692 WARN_ON(ret != -EDEADLK);
1693 WWU(&o);
1694
1695 if (WWT(&o))
1696 WWU(&o);
1697#ifdef CONFIG_DEBUG_LOCK_ALLOC
1698 else
1699 DEBUG_LOCKS_WARN_ON(1);
1700#endif
1701}
1702
#ifdef CONFIG_PREEMPT_RT
#define ww_mutex_base_lock(b)			rt_mutex_lock(b)
#define ww_mutex_base_trylock(b)		rt_mutex_trylock(b)
#define ww_mutex_base_lock_nest_lock(b, b2)	rt_mutex_lock_nest_lock(b, b2)
#define ww_mutex_base_lock_interruptible(b)	rt_mutex_lock_interruptible(b)
#define ww_mutex_base_lock_killable(b)		rt_mutex_lock_killable(b)
#define ww_mutex_base_unlock(b)			rt_mutex_unlock(b)
#else
#define ww_mutex_base_lock(b)			mutex_lock(b)
#define ww_mutex_base_trylock(b)		mutex_trylock(b)
#define ww_mutex_base_lock_nest_lock(b, b2)	mutex_lock_nest_lock(b, b2)
#define ww_mutex_base_lock_interruptible(b)	mutex_lock_interruptible(b)
#define ww_mutex_base_lock_killable(b)		mutex_lock_killable(b)
#define ww_mutex_base_unlock(b)			mutex_unlock(b)
#endif

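/*
 * On PREEMPT_RT the ww_mutex implementation is built on top of rt_mutex,
 * so the "base" lock operations used by the ww_mutex tests below map onto
 * the rt_mutex API there, and onto plain mutex calls otherwise.
 */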
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001719static void ww_test_normal(void)
1720{
1721 int ret;
1722
1723 WWAI(&t);
1724
	/*
	 * None of the ww_mutex codepaths should be taken in the 'normal'
	 * mutex calls. The easiest way to verify this is by using the
	 * normal mutex calls, and making sure o.ctx is unmodified.
	 */
1730
1731 /* mutex_lock (and indirectly, mutex_lock_nested) */
1732 o.ctx = (void *)~0UL;
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001733 ww_mutex_base_lock(&o.base);
1734 ww_mutex_base_unlock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001735 WARN_ON(o.ctx != (void *)~0UL);
1736
1737 /* mutex_lock_interruptible (and *_nested) */
1738 o.ctx = (void *)~0UL;
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001739 ret = ww_mutex_base_lock_interruptible(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001740 if (!ret)
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001741 ww_mutex_base_unlock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001742 else
1743 WARN_ON(1);
1744 WARN_ON(o.ctx != (void *)~0UL);
1745
1746 /* mutex_lock_killable (and *_nested) */
1747 o.ctx = (void *)~0UL;
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001748 ret = ww_mutex_base_lock_killable(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001749 if (!ret)
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001750 ww_mutex_base_unlock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001751 else
1752 WARN_ON(1);
1753 WARN_ON(o.ctx != (void *)~0UL);
1754
1755 /* trylock, succeeding */
1756 o.ctx = (void *)~0UL;
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001757 ret = ww_mutex_base_trylock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001758 WARN_ON(!ret);
1759 if (ret)
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001760 ww_mutex_base_unlock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001761 else
1762 WARN_ON(1);
1763 WARN_ON(o.ctx != (void *)~0UL);
1764
1765 /* trylock, failing */
1766 o.ctx = (void *)~0UL;
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001767 ww_mutex_base_lock(&o.base);
1768 ret = ww_mutex_base_trylock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001769 WARN_ON(ret);
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001770 ww_mutex_base_unlock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001771 WARN_ON(o.ctx != (void *)~0UL);
1772
1773 /* nest_lock */
1774 o.ctx = (void *)~0UL;
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001775 ww_mutex_base_lock_nest_lock(&o.base, &t);
1776 ww_mutex_base_unlock(&o.base);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02001777 WARN_ON(o.ctx != (void *)~0UL);
1778}
1779
Maarten Lankhorst1de99442013-06-20 13:31:24 +02001780static void ww_test_two_contexts(void)
1781{
1782 WWAI(&t);
1783 WWAI(&t2);
1784}
1785
1786static void ww_test_diff_class(void)
1787{
1788 WWAI(&t);
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001789#ifdef DEBUG_WW_MUTEXES
Maarten Lankhorst1de99442013-06-20 13:31:24 +02001790 t.ww_class = NULL;
1791#endif
1792 WWL(&o, &t);
1793}
1794
1795static void ww_test_context_done_twice(void)
1796{
1797 WWAI(&t);
1798 WWAD(&t);
1799 WWAD(&t);
1800 WWAF(&t);
1801}
1802
1803static void ww_test_context_unlock_twice(void)
1804{
1805 WWAI(&t);
1806 WWAD(&t);
1807 WWAF(&t);
1808 WWAF(&t);
1809}
1810
1811static void ww_test_context_fini_early(void)
1812{
1813 WWAI(&t);
1814 WWL(&o, &t);
1815 WWAD(&t);
1816 WWAF(&t);
1817}
1818
1819static void ww_test_context_lock_after_done(void)
1820{
1821 WWAI(&t);
1822 WWAD(&t);
1823 WWL(&o, &t);
1824}
1825
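/*
 * Lock/unlock balance tests: a double unlock, an unlock with the
 * context's acquired count forced to zero, and locking an object whose
 * ->ctx already points at a different (stale) context are all expected
 * FAILUREs.
 */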
1826static void ww_test_object_unlock_twice(void)
1827{
1828 WWL1(&o);
1829 WWU(&o);
1830 WWU(&o);
1831}
1832
1833static void ww_test_object_lock_unbalanced(void)
1834{
1835 WWAI(&t);
1836 WWL(&o, &t);
1837 t.acquired = 0;
1838 WWU(&o);
1839 WWAF(&t);
1840}
1841
1842static void ww_test_object_lock_stale_context(void)
1843{
1844 WWAI(&t);
1845 o.ctx = &t2;
1846 WWL(&o, &t);
1847}
1848
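/*
 * The EDEADLK tests below fake contention from an older context: o2 is
 * locked, its lockdep ownership is dropped via mutex_release(), and
 * o2.ctx is pointed at t2, a copy of t with an older stamp (t2.stamp--).
 * A subsequent WWL(&o2, &t) therefore returns -EDEADLK, as if another
 * task that acquired first were holding o2.
 *
 * For reference, a sketch of the backoff a caller is expected to perform
 * on -EDEADLK (illustrative only):
 *
 *	ret = ww_mutex_lock(&o2, &t);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&o);		// drop all held ww_mutexes first
 *		ww_mutex_lock_slow(&o2, &t);	// then block on the contended one
 *	}
 *
 * Tests that skip the unlock step, or that grab yet another mutex instead
 * of backing off, are expected FAILUREs in ww_tests().
 */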
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001849static void ww_test_edeadlk_normal(void)
1850{
1851 int ret;
1852
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001853 ww_mutex_base_lock(&o2.base);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001854 o2.ctx = &t2;
Qian Cai5facae42019-09-19 12:09:40 -04001855 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001856
1857 WWAI(&t);
1858 t2 = t;
1859 t2.stamp--;
1860
1861 ret = WWL(&o, &t);
1862 WARN_ON(ret);
1863
1864 ret = WWL(&o2, &t);
1865 WARN_ON(ret != -EDEADLK);
1866
1867 o2.ctx = NULL;
1868 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001869 ww_mutex_base_unlock(&o2.base);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001870 WWU(&o);
1871
1872 WWL(&o2, &t);
1873}
1874
1875static void ww_test_edeadlk_normal_slow(void)
1876{
1877 int ret;
1878
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001879 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04001880 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001881 o2.ctx = &t2;
1882
1883 WWAI(&t);
1884 t2 = t;
1885 t2.stamp--;
1886
1887 ret = WWL(&o, &t);
1888 WARN_ON(ret);
1889
1890 ret = WWL(&o2, &t);
1891 WARN_ON(ret != -EDEADLK);
1892
1893 o2.ctx = NULL;
1894 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001895 ww_mutex_base_unlock(&o2.base);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001896 WWU(&o);
1897
1898 ww_mutex_lock_slow(&o2, &t);
1899}
1900
1901static void ww_test_edeadlk_no_unlock(void)
1902{
1903 int ret;
1904
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001905 ww_mutex_base_lock(&o2.base);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001906 o2.ctx = &t2;
Qian Cai5facae42019-09-19 12:09:40 -04001907 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001908
1909 WWAI(&t);
1910 t2 = t;
1911 t2.stamp--;
1912
1913 ret = WWL(&o, &t);
1914 WARN_ON(ret);
1915
1916 ret = WWL(&o2, &t);
1917 WARN_ON(ret != -EDEADLK);
1918
1919 o2.ctx = NULL;
1920 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001921 ww_mutex_base_unlock(&o2.base);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001922
1923 WWL(&o2, &t);
1924}
1925
1926static void ww_test_edeadlk_no_unlock_slow(void)
1927{
1928 int ret;
1929
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001930 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04001931 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001932 o2.ctx = &t2;
1933
1934 WWAI(&t);
1935 t2 = t;
1936 t2.stamp--;
1937
1938 ret = WWL(&o, &t);
1939 WARN_ON(ret);
1940
1941 ret = WWL(&o2, &t);
1942 WARN_ON(ret != -EDEADLK);
1943
1944 o2.ctx = NULL;
1945 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001946 ww_mutex_base_unlock(&o2.base);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001947
1948 ww_mutex_lock_slow(&o2, &t);
1949}
1950
1951static void ww_test_edeadlk_acquire_more(void)
1952{
1953 int ret;
1954
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001955 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04001956 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001957 o2.ctx = &t2;
1958
1959 WWAI(&t);
1960 t2 = t;
1961 t2.stamp--;
1962
1963 ret = WWL(&o, &t);
1964 WARN_ON(ret);
1965
1966 ret = WWL(&o2, &t);
1967 WARN_ON(ret != -EDEADLK);
1968
1969 ret = WWL(&o3, &t);
1970}
1971
1972static void ww_test_edeadlk_acquire_more_slow(void)
1973{
1974 int ret;
1975
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001976 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04001977 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001978 o2.ctx = &t2;
1979
1980 WWAI(&t);
1981 t2 = t;
1982 t2.stamp--;
1983
1984 ret = WWL(&o, &t);
1985 WARN_ON(ret);
1986
1987 ret = WWL(&o2, &t);
1988 WARN_ON(ret != -EDEADLK);
1989
1990 ww_mutex_lock_slow(&o3, &t);
1991}
1992
1993static void ww_test_edeadlk_acquire_more_edeadlk(void)
1994{
1995 int ret;
1996
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01001997 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04001998 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02001999 o2.ctx = &t2;
2000
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01002001 ww_mutex_base_lock(&o3.base);
Qian Cai5facae42019-09-19 12:09:40 -04002002 mutex_release(&o3.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002003 o3.ctx = &t2;
2004
2005 WWAI(&t);
2006 t2 = t;
2007 t2.stamp--;
2008
2009 ret = WWL(&o, &t);
2010 WARN_ON(ret);
2011
2012 ret = WWL(&o2, &t);
2013 WARN_ON(ret != -EDEADLK);
2014
2015 ret = WWL(&o3, &t);
2016 WARN_ON(ret != -EDEADLK);
2017}
2018
2019static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
2020{
2021 int ret;
2022
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01002023 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04002024 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002025 o2.ctx = &t2;
2026
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01002027 ww_mutex_base_lock(&o3.base);
Qian Cai5facae42019-09-19 12:09:40 -04002028 mutex_release(&o3.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002029 o3.ctx = &t2;
2030
2031 WWAI(&t);
2032 t2 = t;
2033 t2.stamp--;
2034
2035 ret = WWL(&o, &t);
2036 WARN_ON(ret);
2037
2038 ret = WWL(&o2, &t);
2039 WARN_ON(ret != -EDEADLK);
2040
2041 ww_mutex_lock_slow(&o3, &t);
2042}
2043
2044static void ww_test_edeadlk_acquire_wrong(void)
2045{
2046 int ret;
2047
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01002048 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04002049 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002050 o2.ctx = &t2;
2051
2052 WWAI(&t);
2053 t2 = t;
2054 t2.stamp--;
2055
2056 ret = WWL(&o, &t);
2057 WARN_ON(ret);
2058
2059 ret = WWL(&o2, &t);
2060 WARN_ON(ret != -EDEADLK);
2061 if (!ret)
2062 WWU(&o2);
2063
2064 WWU(&o);
2065
2066 ret = WWL(&o3, &t);
2067}
2068
2069static void ww_test_edeadlk_acquire_wrong_slow(void)
2070{
2071 int ret;
2072
Sebastian Andrzej Siewior9a75bd02021-11-29 18:46:52 +01002073 ww_mutex_base_lock(&o2.base);
Qian Cai5facae42019-09-19 12:09:40 -04002074 mutex_release(&o2.base.dep_map, _THIS_IP_);
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002075 o2.ctx = &t2;
2076
2077 WWAI(&t);
2078 t2 = t;
2079 t2.stamp--;
2080
2081 ret = WWL(&o, &t);
2082 WARN_ON(ret);
2083
2084 ret = WWL(&o2, &t);
2085 WARN_ON(ret != -EDEADLK);
2086 if (!ret)
2087 WWU(&o2);
2088
2089 WWU(&o);
2090
2091 ww_mutex_lock_slow(&o3, &t);
2092}
2093
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002094static void ww_test_spin_nest_unlocked(void)
2095{
Peter Zijlstraa2e9ae52020-10-30 12:37:43 +01002096 spin_lock_nest_lock(&lock_A, &o.base);
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002097 U(A);
2098}
2099
Boqun Fenge04ce672020-11-02 13:37:42 +08002100/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
2101static void ww_test_spin_nest_lock(void)
2102{
2103 spin_lock(&lock_X1);
2104 spin_lock_nest_lock(&lock_Y1, &lock_X1);
2105 spin_lock(&lock_A);
2106 spin_lock_nest_lock(&lock_Y2, &lock_X1);
2107 spin_unlock(&lock_A);
2108 spin_unlock(&lock_Y2);
2109 spin_unlock(&lock_Y1);
2110 spin_unlock(&lock_X1);
2111}
2112
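/*
 * ww_mutex_lock_slow() is only meant to be called after a previous
 * ww_mutex_lock() in the same acquire context returned -EDEADLK; using it
 * without that is a misuse this test expects to be flagged.
 */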
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002113static void ww_test_unneeded_slow(void)
2114{
2115 WWAI(&t);
2116
2117 ww_mutex_lock_slow(&o, &t);
2118}
2119
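/*
 * The ww_test_<first>_<second> functions below exercise pairs of
 * acquisition styles, matching the result table printed by ww_tests():
 *
 *	"block"   - the ww_mutex is taken blocking, without an acquire context
 *	"try"     - the ww_mutex is taken with a trylock
 *	"context" - the ww_mutex is taken under an acquire context
 *	"spin"    - a plain spinlock is held around the ww_mutex operation
 *
 * The FAILURE cases cover taking a second blocking ww_mutex without a
 * shared acquire context, and taking a blocking ww_mutex while holding a
 * spinlock; trylocks as the second acquisition are always fine.
 */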
2120static void ww_test_context_block(void)
2121{
2122 int ret;
2123
2124 WWAI(&t);
2125
2126 ret = WWL(&o, &t);
2127 WARN_ON(ret);
2128 WWL1(&o2);
2129}
2130
2131static void ww_test_context_try(void)
2132{
2133 int ret;
2134
2135 WWAI(&t);
2136
2137 ret = WWL(&o, &t);
2138 WARN_ON(ret);
2139
2140 ret = WWT(&o2);
2141 WARN_ON(!ret);
2142 WWU(&o2);
2143 WWU(&o);
2144}
2145
2146static void ww_test_context_context(void)
2147{
2148 int ret;
2149
2150 WWAI(&t);
2151
2152 ret = WWL(&o, &t);
2153 WARN_ON(ret);
2154
2155 ret = WWL(&o2, &t);
2156 WARN_ON(ret);
2157
2158 WWU(&o2);
2159 WWU(&o);
2160}
2161
2162static void ww_test_try_block(void)
2163{
2164 bool ret;
2165
2166 ret = WWT(&o);
2167 WARN_ON(!ret);
2168
2169 WWL1(&o2);
2170 WWU(&o2);
2171 WWU(&o);
2172}
2173
2174static void ww_test_try_try(void)
2175{
2176 bool ret;
2177
2178 ret = WWT(&o);
2179 WARN_ON(!ret);
2180 ret = WWT(&o2);
2181 WARN_ON(!ret);
2182 WWU(&o2);
2183 WWU(&o);
2184}
2185
2186static void ww_test_try_context(void)
2187{
2188 int ret;
2189
2190 ret = WWT(&o);
2191 WARN_ON(!ret);
2192
2193 WWAI(&t);
2194
2195 ret = WWL(&o2, &t);
2196 WARN_ON(ret);
2197}
2198
2199static void ww_test_block_block(void)
2200{
2201 WWL1(&o);
2202 WWL1(&o2);
2203}
2204
2205static void ww_test_block_try(void)
2206{
2207 bool ret;
2208
2209 WWL1(&o);
2210 ret = WWT(&o2);
2211 WARN_ON(!ret);
2212}
2213
2214static void ww_test_block_context(void)
2215{
2216 int ret;
2217
2218 WWL1(&o);
2219 WWAI(&t);
2220
2221 ret = WWL(&o2, &t);
2222 WARN_ON(ret);
2223}
2224
2225static void ww_test_spin_block(void)
2226{
2227 L(A);
2228 U(A);
2229
2230 WWL1(&o);
2231 L(A);
2232 U(A);
2233 WWU(&o);
2234
2235 L(A);
2236 WWL1(&o);
2237 WWU(&o);
2238 U(A);
2239}
2240
2241static void ww_test_spin_try(void)
2242{
2243 bool ret;
2244
2245 L(A);
2246 U(A);
2247
2248 ret = WWT(&o);
2249 WARN_ON(!ret);
2250 L(A);
2251 U(A);
2252 WWU(&o);
2253
2254 L(A);
2255 ret = WWT(&o);
2256 WARN_ON(!ret);
2257 WWU(&o);
2258 U(A);
2259}
2260
2261static void ww_test_spin_context(void)
2262{
2263 int ret;
2264
2265 L(A);
2266 U(A);
2267
2268 WWAI(&t);
2269
2270 ret = WWL(&o, &t);
2271 WARN_ON(ret);
2272 L(A);
2273 U(A);
2274 WWU(&o);
2275
2276 L(A);
2277 ret = WWL(&o, &t);
2278 WARN_ON(ret);
2279 WWU(&o);
2280 U(A);
2281}
2282
2283static void ww_tests(void)
2284{
2285 printk(" --------------------------------------------------------------------------\n");
2286 printk(" | Wound/wait tests |\n");
2287 printk(" ---------------------\n");
2288
2289 print_testname("ww api failures");
2290 dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
Maarten Lankhorst2fe3d4b2013-06-20 13:31:30 +02002291 dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002292 dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002293 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002294
2295 print_testname("ww contexts mixing");
2296 dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
2297 dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002298 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002299
2300 print_testname("finishing ww context");
2301 dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
2302 dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
2303 dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
2304 dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002305 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002306
2307 print_testname("locking mismatches");
2308 dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
2309 dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
2310 dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002311 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002312
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002313 print_testname("EDEADLK handling");
2314 dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
2315 dotest(ww_test_edeadlk_normal_slow, SUCCESS, LOCKTYPE_WW);
2316 dotest(ww_test_edeadlk_no_unlock, FAILURE, LOCKTYPE_WW);
2317 dotest(ww_test_edeadlk_no_unlock_slow, FAILURE, LOCKTYPE_WW);
2318 dotest(ww_test_edeadlk_acquire_more, FAILURE, LOCKTYPE_WW);
2319 dotest(ww_test_edeadlk_acquire_more_slow, FAILURE, LOCKTYPE_WW);
2320 dotest(ww_test_edeadlk_acquire_more_edeadlk, FAILURE, LOCKTYPE_WW);
2321 dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
2322 dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
2323 dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002324 pr_cont("\n");
Maarten Lankhorstf3cf1392013-06-20 13:31:42 +02002325
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002326 print_testname("spinlock nest unlocked");
2327 dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002328 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002329
Boqun Fenge04ce672020-11-02 13:37:42 +08002330 print_testname("spinlock nest test");
2331 dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
2332 pr_cont("\n");
2333
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002334 printk(" -----------------------------------------------------\n");
2335 printk(" |block | try |context|\n");
2336 printk(" -----------------------------------------------------\n");
2337
2338 print_testname("context");
2339 dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
2340 dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
2341 dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002342 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002343
2344 print_testname("try");
2345 dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
2346 dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
2347 dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002348 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002349
2350 print_testname("block");
2351 dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
2352 dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
2353 dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002354 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002355
2356 print_testname("spinlock");
2357 dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
2358 dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
2359 dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
Michael Ellerman25139402016-11-25 09:45:28 +11002360 pr_cont("\n");
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002361}
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002362
Boqun Fengad564502020-08-07 15:42:37 +08002363
2364/*
2365 * <in hardirq handler>
2366 * read_lock(&A);
2367 * <hardirq disable>
2368 * spin_lock(&B);
2369 * spin_lock(&B);
2370 * read_lock(&A);
2371 *
2372 * is a deadlock.
2373 */
2374static void queued_read_lock_hardirq_RE_Er(void)
2375{
2376 HARDIRQ_ENTER();
2377 read_lock(&rwlock_A);
2378 LOCK(B);
2379 UNLOCK(B);
2380 read_unlock(&rwlock_A);
2381 HARDIRQ_EXIT();
2382
2383 HARDIRQ_DISABLE();
2384 LOCK(B);
2385 read_lock(&rwlock_A);
2386 read_unlock(&rwlock_A);
2387 UNLOCK(B);
2388 HARDIRQ_ENABLE();
2389}
2390
2391/*
2392 * <in hardirq handler>
2393 * spin_lock(&B);
2394 * <hardirq disable>
2395 * read_lock(&A);
2396 * read_lock(&A);
2397 * spin_lock(&B);
2398 *
2399 * is not a deadlock.
2400 */
2401static void queued_read_lock_hardirq_ER_rE(void)
2402{
2403 HARDIRQ_ENTER();
2404 LOCK(B);
2405 read_lock(&rwlock_A);
2406 read_unlock(&rwlock_A);
2407 UNLOCK(B);
2408 HARDIRQ_EXIT();
2409
2410 HARDIRQ_DISABLE();
2411 read_lock(&rwlock_A);
2412 LOCK(B);
2413 UNLOCK(B);
2414 read_unlock(&rwlock_A);
2415 HARDIRQ_ENABLE();
2416}
2417
2418/*
2419 * <hardirq disable>
2420 * spin_lock(&B);
2421 * read_lock(&A);
2422 * <in hardirq handler>
2423 * spin_lock(&B);
2424 * read_lock(&A);
2425 *
 2426 * is a deadlock, because the two read_lock()s are both non-recursive readers.
2427 */
2428static void queued_read_lock_hardirq_inversion(void)
2429{
2430
2431 HARDIRQ_ENTER();
2432 LOCK(B);
2433 UNLOCK(B);
2434 HARDIRQ_EXIT();
2435
2436 HARDIRQ_DISABLE();
2437 LOCK(B);
2438 read_lock(&rwlock_A);
2439 read_unlock(&rwlock_A);
2440 UNLOCK(B);
2441 HARDIRQ_ENABLE();
2442
2443 read_lock(&rwlock_A);
2444 read_unlock(&rwlock_A);
2445}
2446
2447static void queued_read_lock_tests(void)
2448{
2449 printk(" --------------------------------------------------------------------------\n");
2450 printk(" | queued read lock tests |\n");
2451 printk(" ---------------------------\n");
2452 print_testname("hardirq read-lock/lock-read");
2453 dotest(queued_read_lock_hardirq_RE_Er, FAILURE, LOCKTYPE_RWLOCK);
2454 pr_cont("\n");
2455
2456 print_testname("hardirq lock-read/read-lock");
2457 dotest(queued_read_lock_hardirq_ER_rE, SUCCESS, LOCKTYPE_RWLOCK);
2458 pr_cont("\n");
2459
2460 print_testname("hardirq inversion");
2461 dotest(queued_read_lock_hardirq_inversion, FAILURE, LOCKTYPE_RWLOCK);
2462 pr_cont("\n");
2463}
2464
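/*
 * fs_reclaim tests: fs_reclaim_acquire(GFP_KERNEL) marks the section as
 * if it were running inside filesystem reclaim.  A nested
 * might_alloc(GFP_KERNEL) could recurse into FS reclaim and must be
 * reported, while might_alloc(GFP_NOFS), or a GFP_KERNEL allocation
 * inside a memalloc_nofs_save()/restore() scope, is fine.
 */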
Daniel Vetterd5037d12020-12-14 19:08:38 -08002465static void fs_reclaim_correct_nesting(void)
2466{
2467 fs_reclaim_acquire(GFP_KERNEL);
2468 might_alloc(GFP_NOFS);
2469 fs_reclaim_release(GFP_KERNEL);
2470}
2471
2472static void fs_reclaim_wrong_nesting(void)
2473{
2474 fs_reclaim_acquire(GFP_KERNEL);
2475 might_alloc(GFP_KERNEL);
2476 fs_reclaim_release(GFP_KERNEL);
2477}
2478
2479static void fs_reclaim_protected_nesting(void)
2480{
2481 unsigned int flags;
2482
2483 fs_reclaim_acquire(GFP_KERNEL);
2484 flags = memalloc_nofs_save();
2485 might_alloc(GFP_KERNEL);
2486 memalloc_nofs_restore(flags);
2487 fs_reclaim_release(GFP_KERNEL);
2488}
2489
2490static void fs_reclaim_tests(void)
2491{
2492 printk(" --------------------\n");
2493 printk(" | fs_reclaim tests |\n");
2494 printk(" --------------------\n");
2495
2496 print_testname("correct nesting");
2497 dotest(fs_reclaim_correct_nesting, SUCCESS, 0);
2498 pr_cont("\n");
2499
2500 print_testname("wrong nesting");
2501 dotest(fs_reclaim_wrong_nesting, FAILURE, 0);
2502 pr_cont("\n");
2503
2504 print_testname("protected nesting");
2505 dotest(fs_reclaim_protected_nesting, SUCCESS, 0);
2506 pr_cont("\n");
2507}
2508
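/*
 * The *_CONTEXT() macros below rely on the compiler's cleanup attribute:
 * the named exit function runs automatically when the guard variable goes
 * out of scope, e.g. HARDIRQ_CONTEXT() enters hardirq context and
 * arranges for HARDIRQ_EXIT() to run at the end of the enclosing block.
 * This lets GENERATE_2_CONTEXT_TESTCASE() nest arbitrary outer/inner
 * contexts without writing explicit unwind code.
 */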
Boqun Feng9271a402020-12-08 18:31:12 +08002509#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
2510
2511static void hardirq_exit(int *_)
2512{
2513 HARDIRQ_EXIT();
2514}
2515
2516#define HARDIRQ_CONTEXT(name, ...) \
2517 int hardirq_guard_##name __guard(hardirq_exit); \
2518 HARDIRQ_ENTER();
2519
2520#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
2521 int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
2522 local_irq_disable(); \
2523 __irq_enter(); \
2524 WARN_ON(!in_irq());
2525
2526static void softirq_exit(int *_)
2527{
2528 SOFTIRQ_EXIT();
2529}
2530
2531#define SOFTIRQ_CONTEXT(name, ...) \
2532 int softirq_guard_##name __guard(softirq_exit); \
2533 SOFTIRQ_ENTER();
2534
2535static void rcu_exit(int *_)
2536{
2537 rcu_read_unlock();
2538}
2539
2540#define RCU_CONTEXT(name, ...) \
2541 int rcu_guard_##name __guard(rcu_exit); \
2542 rcu_read_lock();
2543
2544static void rcu_bh_exit(int *_)
2545{
2546 rcu_read_unlock_bh();
2547}
2548
2549#define RCU_BH_CONTEXT(name, ...) \
2550 int rcu_bh_guard_##name __guard(rcu_bh_exit); \
2551 rcu_read_lock_bh();
2552
2553static void rcu_sched_exit(int *_)
2554{
2555 rcu_read_unlock_sched();
2556}
2557
2558#define RCU_SCHED_CONTEXT(name, ...) \
2559 int rcu_sched_guard_##name __guard(rcu_sched_exit); \
2560 rcu_read_lock_sched();
2561
Boqun Feng9271a402020-12-08 18:31:12 +08002562static void raw_spinlock_exit(raw_spinlock_t **lock)
2563{
2564 raw_spin_unlock(*lock);
2565}
2566
2567#define RAW_SPINLOCK_CONTEXT(name, lock) \
2568 raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
2569 raw_spin_lock(&(lock));
2570
2571static void spinlock_exit(spinlock_t **lock)
2572{
2573 spin_unlock(*lock);
2574}
2575
2576#define SPINLOCK_CONTEXT(name, lock) \
2577 spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
2578 spin_lock(&(lock));
2579
2580static void mutex_exit(struct mutex **lock)
2581{
2582 mutex_unlock(*lock);
2583}
2584
2585#define MUTEX_CONTEXT(name, lock) \
2586 struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
2587 mutex_lock(&(lock));
2588
2589#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
2590 \
2591static void __maybe_unused inner##_in_##outer(void) \
2592{ \
2593 outer##_CONTEXT(_, outer_lock); \
2594 { \
2595 inner##_CONTEXT(_, inner_lock); \
2596 } \
2597}
2598
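/*
 * For illustration (approximate expansion), GENERATE_2_CONTEXT_TESTCASE
 * with outer=SPINLOCK/lock_A and inner=MUTEX/mutex_B produces:
 *
 *	static void MUTEX_in_SPINLOCK(void)
 *	{
 *		spinlock_t *spinlock_guard__ __guard(spinlock_exit) = &lock_A;
 *		spin_lock(&lock_A);
 *		{
 *			struct mutex *mutex_guard__ __guard(mutex_exit) = &mutex_B;
 *			mutex_lock(&mutex_B);
 *		}
 *	}
 *
 * i.e. a mutex is acquired inside a spinlock-held region; whether that is
 * a valid wait context is what the table below describes.
 */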
2599/*
2600 * wait contexts (considering PREEMPT_RT)
2601 *
2602 * o: inner is allowed in outer
2603 * x: inner is disallowed in outer
2604 *
2605 * \ inner | RCU | RAW_SPIN | SPIN | MUTEX
2606 * outer \ | | | |
2607 * ---------------+-------+----------+------+-------
2608 * HARDIRQ | o | o | o | x
2609 * ---------------+-------+----------+------+-------
2610 * NOTTHREADED_IRQ| o | o | x | x
2611 * ---------------+-------+----------+------+-------
2612 * SOFTIRQ | o | o | o | x
2613 * ---------------+-------+----------+------+-------
2614 * RCU | o | o | o | x
2615 * ---------------+-------+----------+------+-------
2616 * RCU_BH | o | o | o | x
2617 * ---------------+-------+----------+------+-------
Boqun Feng9271a402020-12-08 18:31:12 +08002618 * RCU_SCHED | o | o | x | x
2619 * ---------------+-------+----------+------+-------
2620 * RAW_SPIN | o | o | x | x
2621 * ---------------+-------+----------+------+-------
2622 * SPIN | o | o | o | x
2623 * ---------------+-------+----------+------+-------
2624 * MUTEX | o | o | o | o
2625 * ---------------+-------+----------+------+-------
2626 */
2627
2628#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
2629GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
2630GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
2631GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
2632GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
2633GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
Boqun Feng9271a402020-12-08 18:31:12 +08002634GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
2635GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
2636GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
2637GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
2638
2639GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
2640GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
2641GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
2642GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
2643
2644/* the outer context allows all kinds of preemption */
2645#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
2646 dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
2647 dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2648 dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2649 dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
2650
2651/*
2652 * the outer context only allows the preemption introduced by spinlock_t (which
 2653 * is a sleeping lock on PREEMPT_RT)
2654 */
2655#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
2656 dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
2657 dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2658 dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2659 dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
2660
2661/* the outer context doesn't allow any kind of preemption */
2662#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
2663 dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
2664 dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2665 dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
2666 dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
2667
2668static void wait_context_tests(void)
2669{
2670 printk(" --------------------------------------------------------------------------\n");
2671 printk(" | wait context tests |\n");
2672 printk(" --------------------------------------------------------------------------\n");
2673 printk(" | rcu | raw | spin |mutex |\n");
2674 printk(" --------------------------------------------------------------------------\n");
2675 print_testname("in hardirq context");
2676 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
2677 pr_cont("\n");
2678
2679 print_testname("in hardirq context (not threaded)");
2680 DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
2681 pr_cont("\n");
2682
2683 print_testname("in softirq context");
2684 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
2685 pr_cont("\n");
2686
2687 print_testname("in RCU context");
2688 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
2689 pr_cont("\n");
2690
2691 print_testname("in RCU-bh context");
2692 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
2693 pr_cont("\n");
2694
Boqun Feng9271a402020-12-08 18:31:12 +08002695 print_testname("in RCU-sched context");
2696 DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
2697 pr_cont("\n");
2698
2699 print_testname("in RAW_SPINLOCK context");
2700 DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
2701 pr_cont("\n");
2702
2703 print_testname("in SPINLOCK context");
2704 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
2705 pr_cont("\n");
2706
2707 print_testname("in MUTEX context");
2708 DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
2709 pr_cont("\n");
2710}
2711
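/*
 * local_lock tests: local_A is only ever taken with interrupts enabled
 * and never from hardirq context, so the apparent IN-IRQ <-> IRQ-ON
 * cycles through lock_A in tests 2 and 3A are false positives that
 * lockdep must not report (SUCCESS).  Test 3B adds a real cycle via
 * lock_B, which is held with IRQs on and later taken under lock_A with
 * hardirqs disabled, and that one must be reported (FAILURE).
 */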
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002712static void local_lock_2(void)
2713{
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002714 local_lock(&local_A); /* IRQ-ON */
2715 local_unlock(&local_A);
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002716
2717 HARDIRQ_ENTER();
2718 spin_lock(&lock_A); /* IN-IRQ */
2719 spin_unlock(&lock_A);
2720 HARDIRQ_EXIT()
2721
2722 HARDIRQ_DISABLE();
2723 spin_lock(&lock_A);
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002724 local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
2725 local_unlock(&local_A);
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002726 spin_unlock(&lock_A);
2727 HARDIRQ_ENABLE();
2728}
2729
2730static void local_lock_3A(void)
2731{
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002732 local_lock(&local_A); /* IRQ-ON */
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002733 spin_lock(&lock_B); /* IRQ-ON */
2734 spin_unlock(&lock_B);
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002735 local_unlock(&local_A);
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002736
2737 HARDIRQ_ENTER();
2738 spin_lock(&lock_A); /* IN-IRQ */
2739 spin_unlock(&lock_A);
2740 HARDIRQ_EXIT()
2741
2742 HARDIRQ_DISABLE();
2743 spin_lock(&lock_A);
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002744 local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
2745 local_unlock(&local_A);
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002746 spin_unlock(&lock_A);
2747 HARDIRQ_ENABLE();
2748}
2749
2750static void local_lock_3B(void)
2751{
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002752 local_lock(&local_A); /* IRQ-ON */
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002753 spin_lock(&lock_B); /* IRQ-ON */
2754 spin_unlock(&lock_B);
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002755 local_unlock(&local_A);
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002756
2757 HARDIRQ_ENTER();
2758 spin_lock(&lock_A); /* IN-IRQ */
2759 spin_unlock(&lock_A);
2760 HARDIRQ_EXIT()
2761
2762 HARDIRQ_DISABLE();
2763 spin_lock(&lock_A);
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002764 local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
2765 local_unlock(&local_A);
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002766 spin_unlock(&lock_A);
2767 HARDIRQ_ENABLE();
2768
2769 HARDIRQ_DISABLE();
2770 spin_lock(&lock_A);
2771 spin_lock(&lock_B); /* IN-IRQ <-> IRQ-ON cycle, true */
2772 spin_unlock(&lock_B);
2773 spin_unlock(&lock_A);
 2774	HARDIRQ_ENABLE();
2775
2776}
2777
2778static void local_lock_tests(void)
2779{
2780 printk(" --------------------------------------------------------------------------\n");
2781 printk(" | local_lock tests |\n");
2782 printk(" ---------------------\n");
2783
2784 print_testname("local_lock inversion 2");
2785 dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
2786 pr_cont("\n");
2787
2788 print_testname("local_lock inversion 3A");
2789 dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
2790 pr_cont("\n");
2791
2792 print_testname("local_lock inversion 3B");
2793 dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
2794 pr_cont("\n");
2795}
2796
Boqun Feng8946ccc2021-06-19 01:01:10 +08002797static void hardirq_deadlock_softirq_not_deadlock(void)
2798{
2799 /* mutex_A is hardirq-unsafe and softirq-unsafe */
2800 /* mutex_A -> lock_C */
2801 mutex_lock(&mutex_A);
2802 HARDIRQ_DISABLE();
2803 spin_lock(&lock_C);
2804 spin_unlock(&lock_C);
2805 HARDIRQ_ENABLE();
2806 mutex_unlock(&mutex_A);
2807
2808 /* lock_A is hardirq-safe */
2809 HARDIRQ_ENTER();
2810 spin_lock(&lock_A);
2811 spin_unlock(&lock_A);
2812 HARDIRQ_EXIT();
2813
2814 /* lock_A -> lock_B */
2815 HARDIRQ_DISABLE();
2816 spin_lock(&lock_A);
2817 spin_lock(&lock_B);
2818 spin_unlock(&lock_B);
2819 spin_unlock(&lock_A);
2820 HARDIRQ_ENABLE();
2821
2822 /* lock_B -> lock_C */
2823 HARDIRQ_DISABLE();
2824 spin_lock(&lock_B);
2825 spin_lock(&lock_C);
2826 spin_unlock(&lock_C);
2827 spin_unlock(&lock_B);
2828 HARDIRQ_ENABLE();
2829
2830 /* lock_D is softirq-safe */
2831 SOFTIRQ_ENTER();
2832 spin_lock(&lock_D);
2833 spin_unlock(&lock_D);
2834 SOFTIRQ_EXIT();
2835
2836 /* And lock_D is hardirq-unsafe */
2837 SOFTIRQ_DISABLE();
2838 spin_lock(&lock_D);
2839 spin_unlock(&lock_D);
2840 SOFTIRQ_ENABLE();
2841
2842 /*
2843 * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
2844 * deadlock.
2845 *
2846 * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
2847 * hardirq-unsafe, deadlock.
2848 */
2849 HARDIRQ_DISABLE();
2850 spin_lock(&lock_C);
2851 spin_lock(&lock_D);
2852 spin_unlock(&lock_D);
2853 spin_unlock(&lock_C);
2854 HARDIRQ_ENABLE();
2855}
2856
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002857void locking_selftest(void)
2858{
2859 /*
2860 * Got a locking failure before the selftest ran?
2861 */
2862 if (!debug_locks) {
2863 printk("----------------------------------\n");
2864 printk("| Locking API testsuite disabled |\n");
2865 printk("----------------------------------\n");
2866 return;
2867 }
2868
2869 /*
Boqun Fenge9181882020-08-07 15:42:20 +08002870 * treat read_lock() as recursive read locks for testing purposes
2871 */
2872 force_read_lock_recursive = 1;
2873
2874 /*
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002875 * Run the testsuite:
2876 */
2877 printk("------------------------\n");
2878 printk("| Locking API testsuite:\n");
2879 printk("----------------------------------------------------------------------------\n");
Sebastian Andrzej Siewiorfc78dd02021-11-29 18:46:49 +01002880 printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002881 printk(" --------------------------------------------------------------------------\n");
2882
2883 init_shared_classes();
Bart Van Asschecdc84d72019-02-14 15:00:44 -08002884 lockdep_set_selftest_task(current);
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002885
Ingo Molnar6c9076e2006-07-03 00:24:51 -07002886 DO_TESTCASE_6R("A-A deadlock", AA);
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002887 DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
2888 DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
2889 DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
2890 DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
2891 DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
2892 DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
2893 DO_TESTCASE_6("double unlock", double_unlock);
2894 DO_TESTCASE_6("initialize held", init_held);
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002895
2896 printk(" --------------------------------------------------------------------------\n");
2897 print_testname("recursive read-lock");
Michael Ellerman25139402016-11-25 09:45:28 +11002898 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002899 dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
Michael Ellerman25139402016-11-25 09:45:28 +11002900 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002901 dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
Michael Ellerman25139402016-11-25 09:45:28 +11002902 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002903
2904 print_testname("recursive read-lock #2");
Michael Ellerman25139402016-11-25 09:45:28 +11002905 pr_cont(" |");
Ingo Molnar6c9076e2006-07-03 00:24:51 -07002906 dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
Michael Ellerman25139402016-11-25 09:45:28 +11002907 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002908 dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
Michael Ellerman25139402016-11-25 09:45:28 +11002909 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002910
2911 print_testname("mixed read-write-lock");
Michael Ellerman25139402016-11-25 09:45:28 +11002912 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002913 dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
Michael Ellerman25139402016-11-25 09:45:28 +11002914 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002915 dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
Michael Ellerman25139402016-11-25 09:45:28 +11002916 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002917
2918 print_testname("mixed write-read-lock");
Michael Ellerman25139402016-11-25 09:45:28 +11002919 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002920 dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
Michael Ellerman25139402016-11-25 09:45:28 +11002921 pr_cont(" |");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002922 dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
Michael Ellerman25139402016-11-25 09:45:28 +11002923 pr_cont("\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002924
Peter Zijlstrae9149852017-08-23 13:13:11 +02002925 print_testname("mixed read-lock/lock-write ABBA");
2926 pr_cont(" |");
2927 dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
2928 pr_cont(" |");
2929 dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
2930
2931 print_testname("mixed read-lock/lock-read ABBA");
2932 pr_cont(" |");
2933 dotest(rlock_ABBA2, SUCCESS, LOCKTYPE_RWLOCK);
2934 pr_cont(" |");
2935 dotest(rwsem_ABBA2, FAILURE, LOCKTYPE_RWSEM);
2936
2937 print_testname("mixed write-lock/lock-write ABBA");
2938 pr_cont(" |");
2939 dotest(rlock_ABBA3, FAILURE, LOCKTYPE_RWLOCK);
2940 pr_cont(" |");
2941 dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
2942
Boqun Fengd4f200e52020-08-07 15:42:32 +08002943 print_testname("chain cached mixed R-L/L-W ABBA");
2944 pr_cont(" |");
2945 dotest(rlock_chaincache_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
2946
Boqun Feng8ef7ca72020-08-07 15:42:35 +08002947 DO_TESTCASE_6x1RRB("rlock W1R2/W2R3/W3R1", W1R2_W2R3_W3R1);
2948 DO_TESTCASE_6x1RRB("rlock W1W2/R2R3/W3R1", W1W2_R2R3_W3R1);
2949 DO_TESTCASE_6x1RR("rlock W1W2/R2R3/R3W1", W1W2_R2R3_R3W1);
2950 DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
2951
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002952 printk(" --------------------------------------------------------------------------\n");
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002953 /*
2954 * irq-context testcases:
2955 */
2956 DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
Sebastian Andrzej Siewiora529f8db2021-11-29 18:46:51 +01002957 NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002958 DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
2959 DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
2960 DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
2961 DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
2962
Boqun Feng31e0d742020-08-07 15:42:34 +08002963 DO_TESTCASE_6x2x2RW("irq read-recursion", irq_read_recursion);
2964 DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
Boqun Feng96a16f42020-08-07 15:42:38 +08002965 DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002966
Maarten Lankhorst1de99442013-06-20 13:31:24 +02002967 ww_tests();
2968
Boqun Fenge9181882020-08-07 15:42:20 +08002969 force_read_lock_recursive = 0;
2970 /*
2971 * queued_read_lock() specific test cases can be put here
2972 */
Boqun Fengad564502020-08-07 15:42:37 +08002973 if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
2974 queued_read_lock_tests();
Boqun Fenge9181882020-08-07 15:42:20 +08002975
Daniel Vetterd5037d12020-12-14 19:08:38 -08002976 fs_reclaim_tests();
2977
Boqun Feng9271a402020-12-08 18:31:12 +08002978 /* Wait context test cases that are specific for RAW_LOCK_NESTING */
2979 if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
2980 wait_context_tests();
2981
Peter Zijlstra7e923e62020-12-09 16:06:06 +01002982 local_lock_tests();
2983
Boqun Feng8946ccc2021-06-19 01:01:10 +08002984 print_testname("hardirq_unsafe_softirq_safe");
2985 dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
2986 pr_cont("\n");
2987
Ingo Molnarcae2ed92006-07-03 00:24:48 -07002988 if (unexpected_testcase_failures) {
2989 printk("-----------------------------------------------------------------\n");
2990 debug_locks = 0;
2991 printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
2992 unexpected_testcase_failures, testcase_total);
2993 printk("-----------------------------------------------------------------\n");
2994 } else if (expected_testcase_failures && testcase_successes) {
2995 printk("--------------------------------------------------------\n");
2996 printk("%3d out of %3d testcases failed, as expected. |\n",
2997 expected_testcase_failures, testcase_total);
2998 printk("----------------------------------------------------\n");
2999 debug_locks = 1;
3000 } else if (expected_testcase_failures && !testcase_successes) {
3001 printk("--------------------------------------------------------\n");
3002 printk("All %3d testcases failed, as expected. |\n",
3003 expected_testcase_failures);
3004 printk("----------------------------------------\n");
3005 debug_locks = 1;
3006 } else {
3007 printk("-------------------------------------------------------\n");
3008 printk("Good, all %3d testcases passed! |\n",
3009 testcase_successes);
3010 printk("---------------------------------\n");
3011 debug_locks = 1;
3012 }
Bart Van Asschecdc84d72019-02-14 15:00:44 -08003013 lockdep_set_selftest_task(NULL);
Ingo Molnarcae2ed92006-07-03 00:24:48 -07003014 debug_locks_silent = 0;
3015}