/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
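
/*
 * A minimal sketch of the first bug class above, an AB-BA inversion
 * (hypothetical locks A and B, for illustration only):
 *
 *	CPU0			CPU1
 *	----			----
 *	lock(A);
 *				lock(B);
 *	lock(B);		lock(A);
 *
 * Once both orders A->B and B->A have been observed - even at different
 * times, in different tasks - the circular dependency is reported.
 */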
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lockdep.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
        __raw_spin_lock(&lockdep_lock);
        /*
         * Make sure that if another CPU detected a bug while
         * walking the graph we don't change it (while the other
         * CPU is busy printing out stuff with the graph lock
         * dropped already)
         */
        if (!debug_locks) {
                __raw_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
        current->lockdep_recursion++;
        return 1;
}

static inline int graph_unlock(void)
{
        if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);

        current->lockdep_recursion--;
        __raw_spin_unlock(&lockdep_lock);
        return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
        int ret = debug_locks_off();

        __raw_spin_unlock(&lockdep_lock);

        return ret;
}
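
/*
 * Typical calling pattern for the helpers above (a sketch, mirroring
 * their use throughout this file):
 *
 *	if (!graph_lock())
 *		return NULL;		(another CPU hit a bug - back off)
 *	... inspect or grow the dependency graph ...
 *	graph_unlock();
 *
 * Error paths that want to print a report use
 * debug_locks_off_graph_unlock() instead, so the printing happens with
 * the graph lock already dropped.
 */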
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 118 | |
| 119 | static int lockdep_initialized; |
| 120 | |
| 121 | unsigned long nr_list_entries; |
Peter Zijlstra | af01296 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 122 | static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 123 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 124 | /* |
| 125 | * All data structures here are protected by the global debug_lock. |
| 126 | * |
| 127 | * Mutex key structs only get allocated, once during bootup, and never |
| 128 | * get freed - this significantly simplifies the debugging code. |
| 129 | */ |
| 130 | unsigned long nr_lock_classes; |
| 131 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; |
| 132 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 133 | static inline struct lock_class *hlock_class(struct held_lock *hlock) |
| 134 | { |
| 135 | if (!hlock->class_idx) { |
| 136 | DEBUG_LOCKS_WARN_ON(1); |
| 137 | return NULL; |
| 138 | } |
| 139 | return lock_classes + hlock->class_idx - 1; |
| 140 | } |
| 141 | |
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);

static int lock_point(unsigned long points[], unsigned long ip)
{
        int i;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                if (points[i] == 0) {
                        points[i] = ip;
                        break;
                }
                if (points[i] == ip)
                        break;
        }

        return i;
}

static void lock_time_inc(struct lock_time *lt, s64 time)
{
        if (time > lt->max)
                lt->max = time;

        if (time < lt->min || !lt->min)
                lt->min = time;

        lt->total += time;
        lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
        dst->min += src->min;
        dst->max += src->max;
        dst->total += src->total;
        dst->nr += src->nr;
}
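
/*
 * Aggregation note (purely descriptive): lock_time_add() sums the
 * per-cpu min/max fields, so the min/max that lock_stats() reports
 * below is a sum over all possible CPUs rather than a true global
 * minimum/maximum of the per-cpu values.
 */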

struct lock_class_stats lock_stats(struct lock_class *class)
{
        struct lock_class_stats stats;
        int cpu, i;

        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
                        &per_cpu(lock_stats, cpu)[class - lock_classes];

                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];

                for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
                        stats.contending_point[i] += pcs->contending_point[i];

                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);

                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

                for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
                        stats.bounces[i] += pcs->bounces[i];
        }

        return stats;
}

void clear_lock_stats(struct lock_class *class)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
                        &per_cpu(lock_stats, cpu)[class - lock_classes];

                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
        memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
        return &get_cpu_var(lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
        put_cpu_var(lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
        struct lock_class_stats *stats;
        s64 holdtime;

        if (!lock_stat)
                return;

        holdtime = sched_clock() - hlock->holdtime_stamp;

        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
                lock_time_inc(&stats->write_holdtime, holdtime);
        put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
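
/*
 * A worked example of the key iteration (illustrative only - the actual
 * shift count is MAX_LOCKDEP_KEYS_BITS, defined in lockdep_internals.h):
 * assuming a shift of 13, the new 64-bit key is
 *
 *	new_key = (key1 << 13) ^ (key1 >> 51) ^ key2
 *
 * i.e. the previous chain key is rotated left by 13 bits and XORed with
 * the incoming class key, so every prefix of a lock sequence hashes to
 * a (statistically) unique chain key.
 */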

void lockdep_off(void)
{
        current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
        current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
        /* Example */
        if (class->name_version == 1 &&
                        !strcmp(class->name, "lockname"))
                return 1;
        if (class->name_version == 1 &&
                        !strcmp(class->name, "&struct->lockfield"))
                return 1;
#endif
        /* Filter everything else. Return 1 here to allow everything. */
        return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
        return class_filter(class);
#endif
        return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
        trace->nr_entries = 0;
        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
        trace->entries = stack_trace + nr_stack_trace_entries;

        trace->skip = 3;

        save_stack_trace(trace);

        /*
         * Some daft arches put -1 at the end to indicate it's a full trace.
         *
         * <rant> this is buggy anyway, since it takes a whole extra entry so a
         * complete trace that maxes out the entries provided will be reported
         * as incomplete, friggin useless </rant>
         */
        if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
                trace->nr_entries--;

        trace->max_entries = trace->nr_entries;

        nr_stack_trace_entries += trace->nr_entries;

        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
                if (!debug_locks_off_graph_unlock())
                        return 0;

                printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();

                return 0;
        }

        return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code - not even early_printk()
 * may work yet. So we mark any initialization errors and printk
 * about them later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
        .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
        .entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_backwards_checks;
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
        return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
        char c = '.';

        if (class->usage_mask & lock_flag(bit + 2))
                c = '+';
        if (class->usage_mask & lock_flag(bit)) {
                c = '-';
                if (class->usage_mask & lock_flag(bit + 2))
                        c = '?';
        }

        return c;
}
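
/*
 * Legend for the character computed above (a descriptive sketch; 'bit'
 * is a LOCK_USED_IN_* bit and 'bit + 2' its LOCK_ENABLED_* counterpart):
 *
 *	'.'	never used in this irq context, never enabled there
 *	'+'	acquired while the irq state was enabled
 *	'-'	acquired in this irq context
 *	'?'	both of the above - the dangerous combination
 */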

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
        int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

        usage[i] = '\0';
}

static void print_lock_name(struct lock_class *class)
{
        char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
        const char *name;

        get_usage_chars(class, usage);

        name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
                printk(" (%s", name);
        } else {
                printk(" (%s", name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
        printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
        const char *name;
        char str[KSYM_NAME_LEN];

        name = lock->name;
        if (!name)
                name = __get_key_name(lock->key->subkeys, str);

        printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
        print_lock_name(hlock_class(hlock));
        printk(", at: ");
        print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
        int i, depth = curr->lockdep_depth;

        if (!depth) {
                printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
                return;
        }
        printk("%d lock%s held by %s/%d:\n",
                depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

        for (i = 0; i < depth; i++) {
                printk(" #%d: ", i);
                print_lock(curr->held_locks + i);
        }
}

static void print_kernel_version(void)
{
        printk("%s %.*s\n", init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
        return class_filter(class);
#endif
        return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
        unsigned long start = (unsigned long) &_stext,
                      end   = (unsigned long) &_end,
                      addr  = (unsigned long) obj;
#ifdef CONFIG_SMP
        int i;
#endif

        /*
         * static variable?
         */
        if ((addr >= start) && (addr < end))
                return 1;

#ifdef CONFIG_SMP
        /*
         * percpu var?
         */
        for_each_possible_cpu(i) {
                start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
                end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
                                        + per_cpu_offset(i);

                if ((addr >= start) && (addr < end))
                        return 1;
        }
#endif

        /*
         * module var?
         */
        return is_module_address(addr);
}
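
/*
 * Example (a sketch, not taken from this file): a lock declared as
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 * lives in the kernel image between _stext and _end and so passes
 * static_obj(), while a spinlock embedded in a kmalloc()ed structure
 * does not - such locks get their class key from the static
 * lock_class_key that the spin_lock_init() annotation supplies.
 */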

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
        struct lock_class *class;
        int count = 0;

        if (!new_class->name)
                return 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
                        count = max(count, class->name_version);
        }

        return count + 1;
}

/*
 * Look up a lock's class in the hash-table, returning NULL if it has
 * not been registered yet. The caller caches the result in the lock
 * object itself, so the actual hash lookup should happen only once per
 * lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
        /*
         * If the architecture calls into lockdep before initializing
         * the hashes then we'll warn about it later. (we cannot printk
         * right now)
         */
        if (unlikely(!lockdep_initialized)) {
                lockdep_init();
                lockdep_init_error = 1;
                save_stack_trace(&lockdep_init_trace);
        }
#endif

        /*
         * Static locks do not have their class-keys yet - for them the key
         * is the lock object itself:
         */
        if (unlikely(!lock->key))
                lock->key = (void *)lock;

        /*
         * NOTE: the class-key must be unique. For dynamic locks, a static
         * lock_class_key variable is passed in through the mutex_init()
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
        BUILD_BUG_ON(sizeof(struct lock_class_key) >
                        sizeof(struct lockdep_map));

        key = lock->key->subkeys + subclass;

        hash_head = classhashentry(key);

        /*
         * We can walk the hash lockfree, because the hash only
         * grows, and we are careful when adding entries to the end:
         */
        list_for_each_entry(class, hash_head, hash_entry) {
                if (class->key == key) {
                        WARN_ON_ONCE(class->name != lock->name);
                        return class;
                }
        }

        return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;
        unsigned long flags;

        class = look_up_lock_class(lock, subclass);
        if (likely(class))
                return class;

        /*
         * Debug-check: all keys must be persistent!
         */
        if (!static_obj(lock->key)) {
                debug_locks_off();
                printk("INFO: trying to register non-static key.\n");
                printk("the code is fine but needs lockdep annotation.\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();

                return NULL;
        }

        key = lock->key->subkeys + subclass;
        hash_head = classhashentry(key);

        raw_local_irq_save(flags);
        if (!graph_lock()) {
                raw_local_irq_restore(flags);
                return NULL;
        }
        /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
        list_for_each_entry(class, hash_head, hash_entry)
                if (class->key == key)
                        goto out_unlock_set;
        /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                if (!debug_locks_off_graph_unlock()) {
                        raw_local_irq_restore(flags);
                        return NULL;
                }
                raw_local_irq_restore(flags);

                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
        debug_atomic_inc(&nr_unused_locks);
        class->key = key;
        class->name = lock->name;
        class->subclass = subclass;
        INIT_LIST_HEAD(&class->lock_entry);
        INIT_LIST_HEAD(&class->locks_before);
        INIT_LIST_HEAD(&class->locks_after);
        class->name_version = count_matching_names(class);
        /*
         * We use RCU's safe list-add method to make
         * parallel walking of the hash-list safe:
         */
        list_add_tail_rcu(&class->hash_entry, hash_head);
        /*
         * Add it to the global list of classes:
         */
        list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

        if (verbose(class)) {
                graph_unlock();
                raw_local_irq_restore(flags);

                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                printk("\n");
                dump_stack();

                raw_local_irq_save(flags);
                if (!graph_lock()) {
                        raw_local_irq_restore(flags);
                        return NULL;
                }
        }
out_unlock_set:
        graph_unlock();
        raw_local_irq_restore(flags);

        if (!subclass || force)
                lock->class_cache = class;

        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;

        return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock is held, returns
 * NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
        if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
                if (!debug_locks_off_graph_unlock())
                        return NULL;

                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }
        return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
                            struct list_head *head, unsigned long ip, int distance)
{
        struct lock_list *entry;
        /*
         * Lock not present yet - get a new dependency struct and
         * add it to the list:
         */
        entry = alloc_list_entry();
        if (!entry)
                return 0;

        if (!save_trace(&entry->trace))
                return 0;

        entry->class = this;
        entry->distance = distance;
        /*
         * Since we never remove from the dependency list, the list can
         * be walked lockless by other CPUs, it's only allocation
         * that must be protected by the spinlock. But this also means
         * we must make new entries visible only once writes to the
         * entry become visible - hence the RCU op:
         */
        list_add_tail_rcu(&entry->entry, head);

        return 1;
}
/*
 * For good efficiency of the modulo operation, we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers below are used to implement the
 * breadth-first search (BFS) algorithm, by which we can find the
 * shortest path from the next lock to be acquired to a previously
 * held lock, if a circular dependency exists between them.
 */
struct circular_queue {
        unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
        unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
        cq->front = cq->rear = 0;
        lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
        return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
        return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
        if (__cq_full(cq))
                return -1;

        cq->element[cq->rear] = elem;
        cq->rear = (cq->rear + 1) & CQ_MASK;
        return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
        if (__cq_empty(cq))
                return -1;

        *elem = cq->element[cq->front];
        cq->front = (cq->front + 1) & CQ_MASK;
        return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
        return (cq->rear - cq->front) & CQ_MASK;
}
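
/*
 * A minimal usage sketch of the ring buffer above (hypothetical 'entry'
 * values; in __bfs() the elements are lock_list pointers cast to
 * unsigned long):
 *
 *	__cq_init(&lock_cq);
 *	if (__cq_enqueue(&lock_cq, (unsigned long)entry))
 *		... queue full, __cq_enqueue() returned -1 ...
 *	while (!__cq_empty(&lock_cq))
 *		__cq_dequeue(&lock_cq, (unsigned long *)&entry);
 *
 * Since MAX_CIRCULAR_QUEUE_SIZE is a power of 2, wrapping the indices
 * is a cheap '& CQ_MASK' instead of a real modulo.
 */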

static inline void mark_lock_accessed(struct lock_list *lock,
                                        struct lock_list *parent)
{
        unsigned long nr;

        nr = lock - list_entries;
        WARN_ON(nr >= nr_list_entries);
        lock->parent = parent;
        lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
        unsigned long nr;

        nr = lock - list_entries;
        WARN_ON(nr >= nr_list_entries);
        return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
        return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
        int depth = 0;
        struct lock_list *parent;

        while ((parent = get_lock_parent(child))) {
                child = parent;
                depth++;
        }
        return depth;
}

static int __bfs(struct lock_list *source_entry,
                 void *data,
                 int (*match)(struct lock_list *entry, void *data),
                 struct lock_list **target_entry,
                 int forward)
{
        struct lock_list *entry;
        struct list_head *head;
        struct circular_queue *cq = &lock_cq;
        int ret = 1;

        if (match(source_entry, data)) {
                *target_entry = source_entry;
                ret = 0;
                goto exit;
        }

        if (forward)
                head = &source_entry->class->locks_after;
        else
                head = &source_entry->class->locks_before;

        if (list_empty(head))
                goto exit;

        __cq_init(cq);
        __cq_enqueue(cq, (unsigned long)source_entry);

        while (!__cq_empty(cq)) {
                struct lock_list *lock;

                __cq_dequeue(cq, (unsigned long *)&lock);

                if (!lock->class) {
                        ret = -2;
                        goto exit;
                }

                if (forward)
                        head = &lock->class->locks_after;
                else
                        head = &lock->class->locks_before;

                list_for_each_entry(entry, head, entry) {
                        if (!lock_accessed(entry)) {
                                unsigned int cq_depth;
                                mark_lock_accessed(entry, lock);
                                if (match(entry, data)) {
                                        *target_entry = entry;
                                        ret = 0;
                                        goto exit;
                                }

                                if (__cq_enqueue(cq, (unsigned long)entry)) {
                                        ret = -1;
                                        goto exit;
                                }
                                cq_depth = __cq_get_elem_count(cq);
                                if (max_bfs_queue_depth < cq_depth)
                                        max_bfs_queue_depth = cq_depth;
                        }
                }
        }
exit:
        return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
                        void *data,
                        int (*match)(struct lock_list *entry, void *data),
                        struct lock_list **target_entry)
{
        return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
                        void *data,
                        int (*match)(struct lock_list *entry, void *data),
                        struct lock_list **target_entry)
{
        return __bfs(src_entry, data, match, target_entry, 0);
}
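
/*
 * Return-value contract of __bfs() and the two wrappers above (a
 * summary of what the code implements):
 *
 *	 0	a match was found, *target_entry points at it
 *	 1	the whole subgraph was searched without a match
 *	-1	the circular queue overflowed
 *	-2	a queued entry had no class (corrupted graph)
 *
 * The negative results are turned into a report via print_bfs_bug().
 */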

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
        if (debug_locks_silent)
                return 0;
        printk("\n-> #%u", depth);
        print_lock_name(target->class);
        printk(":\n");
        print_stack_trace(&target->trace, 6);

        return 0;
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
                        struct held_lock *check_src,
                        struct held_lock *check_tgt)
{
        struct task_struct *curr = current;

        if (debug_locks_silent)
                return 0;

        printk("\n=======================================================\n");
        printk(  "[ INFO: possible circular locking dependency detected ]\n");
        print_kernel_version();
        printk(  "-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
        print_lock(check_src);
        printk("\nbut task is already holding lock:\n");
        print_lock(check_tgt);
        printk("\nwhich lock already depends on the new lock.\n\n");
        printk("\nthe existing dependency chain (in reverse order) is:\n");

        print_circular_bug_entry(entry, depth);

        return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
        return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
                                struct lock_list *target,
                                struct held_lock *check_src,
                                struct held_lock *check_tgt)
{
        struct task_struct *curr = current;
        struct lock_list *parent;
        int depth;

        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;

        if (!save_trace(&this->trace))
                return 0;

        depth = get_lock_depth(target);

        print_circular_bug_header(target, depth, check_src, check_tgt);

        parent = get_lock_parent(target);

        while (parent) {
                print_circular_bug_entry(parent, --depth);
                parent = get_lock_parent(parent);
        }

        printk("\nother info that might help us debug this:\n\n");
        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

static noinline int print_bfs_bug(int ret)
{
        if (!debug_locks_off_graph_unlock())
                return 0;

        WARN(1, "lockdep bfs error:%d\n", ret);

        return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
        (*(unsigned long *)data)++;
        return 0;
}

unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
        unsigned long count = 0;
        struct lock_list *uninitialized_var(target_entry);

        __bfs_forwards(this, (void *)&count, noop_count, &target_entry);

        return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
        unsigned long ret, flags;
        struct lock_list this;

        this.parent = NULL;
        this.class = class;

        local_irq_save(flags);
        __raw_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
        __raw_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);

        return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
        unsigned long count = 0;
        struct lock_list *uninitialized_var(target_entry);

        __bfs_backwards(this, (void *)&count, noop_count, &target_entry);

        return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
        unsigned long ret, flags;
        struct lock_list this;

        this.parent = NULL;
        this.class = class;

        local_irq_save(flags);
        __raw_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
        __raw_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);

        return ret;
}
| 1190 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1191 | /* |
| 1192 | * Check that the dependency graph starting at <root> cannot |
| 1193 | * lead to <target>; returns 0 and sets *@target_entry if it does. |
| 1194 | */ |
| 1195 | static noinline int |
Ming Lei | db0002a | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1196 | check_noncircular(struct lock_list *root, struct lock_class *target, |
| 1197 | struct lock_list **target_entry) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1198 | { |
Ming Lei | db0002a | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1199 | int result; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1200 | |
Ming Lei | db0002a | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1201 | debug_atomic_inc(&nr_cyclic_checks); |
David Miller | 419ca3f | 2008-07-29 21:45:03 -0700 | [diff] [blame] | 1202 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1203 | result = __bfs_forwards(root, target, class_equal, target_entry); |
Ming Lei | db0002a | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1204 | |
| 1205 | return result; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1206 | } |
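| | /* |
| |  * How the tri-state result is consumed (see check_prev_add() |
| |  * below): 0 means a path - i.e. a cycle - was found and |
| |  * *@target_entry is set, 1 means the graph was exhausted without |
| |  * a match, and <0 signals an internal BFS error: |
| |  * |
| |  *	ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
| |  *	if (unlikely(!ret)) |
| |  *		return print_circular_bug(&this, target_entry, next, prev); |
| |  *	else if (unlikely(ret < 0)) |
| |  *		return print_bfs_bug(ret); |
| |  */ |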
| 1207 | |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1208 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1209 | /* |
| 1210 | * Forwards and backwards subgraph searching, for the purposes of |
| 1211 | * proving that two subgraphs can be connected by a new dependency |
| 1212 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. |
| 1213 | */ |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1214 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1215 | static inline int usage_match(struct lock_list *entry, void *bit) |
| 1216 | { |
| 1217 | return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit); |
| 1218 | } |
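| | /* |
| |  * Example: with bit == LOCK_USED_IN_HARDIRQ, usage_match() selects |
| |  * every class whose usage_mask shows it was ever acquired from |
| |  * hardirq context, i.e. classes for which |
| |  * |
| |  *	entry->class->usage_mask & (1 << LOCK_USED_IN_HARDIRQ) |
| |  * |
| |  * is non-zero. |
| |  */ |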
| 1219 | |
| 1220 | |
| 1221 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1222 | /* |
| 1223 | * Find a node in the forwards-direction dependency sub-graph starting |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1224 | * at @root->class that matches @bit. |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1225 | * |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1226 | * Return 0 if such a node exists in the subgraph, and put that node |
| 1227 | * into *@target_entry. |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1228 | * |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1229 | * Return 1 otherwise and keep *@target_entry unchanged. |
| 1230 | * Return <0 on error. |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1231 | */ |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1232 | static int |
| 1233 | find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, |
| 1234 | struct lock_list **target_entry) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1235 | { |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1236 | int result; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1237 | |
| 1238 | debug_atomic_inc(&nr_find_usage_forwards_checks); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1239 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1240 | result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); |
| 1241 | |
| 1242 | return result; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1243 | } |
| 1244 | |
| 1245 | /* |
| 1246 | * Find a node in the backwards-direction dependency sub-graph starting |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1247 | * at @root->class that matches @bit. |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1248 | * |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1249 | * Return 0 if such a node exists in the subgraph, and put that node |
| 1250 | * into *@target_entry. |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1251 | * |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1252 | * Return 1 otherwise and keep *@target_entry unchanged. |
| 1253 | * Return <0 on error. |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1254 | */ |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1255 | static int |
| 1256 | find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, |
| 1257 | struct lock_list **target_entry) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1258 | { |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1259 | int result; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1260 | |
| 1261 | debug_atomic_inc(&nr_find_usage_backwards_checks); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1262 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1263 | result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1264 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1265 | return result; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1266 | } |
| 1267 | |
Peter Zijlstra | af01296 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1268 | static void print_lock_class_header(struct lock_class *class, int depth) |
| 1269 | { |
| 1270 | int bit; |
| 1271 | |
| 1272 | printk("%*s->", depth, ""); |
| 1273 | print_lock_name(class); |
| 1274 | printk(" ops: %lu", class->ops); |
| 1275 | printk(" {\n"); |
| 1276 | |
| 1277 | for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { |
| 1278 | if (class->usage_mask & (1 << bit)) { |
| 1279 | int len = depth; |
| 1280 | |
| 1281 | len += printk("%*s %s", depth, "", usage_str[bit]); |
| 1282 | len += printk(" at:\n"); |
| 1283 | print_stack_trace(class->usage_traces + bit, len); |
| 1284 | } |
| 1285 | } |
| 1286 | printk("%*s }\n", depth, ""); |
| 1287 | |
| 1288 | printk("%*s ... key at: ", depth, ""); |
| 1289 | print_ip_sym((unsigned long)class->key); |
| 1290 | } |
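| | /* |
| |  * Illustrative output only (the exact format may differ); for a |
| |  * class printed at depth 0 this looks roughly like: |
| |  * |
| |  *	->(&rq->lock){..} ops: 12345 { |
| |  *	   IN-HARDIRQ-W at: |
| |  *	      [<ffffffff........>] lock_acquire+0x.../0x... |
| |  *	 } |
| |  *	 ... key at: [<ffffffff........>] __key.12345+0x0/0x8 |
| |  */ |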
| 1291 | |
| 1292 | /* |
| 1293 | * printk the shortest lock dependencies from @leaf back to @root, in reverse order: |
| 1294 | */ |
| 1295 | static void __used |
| 1296 | print_shortest_lock_dependencies(struct lock_list *leaf, |
| 1297 | struct lock_list *root) |
| 1298 | { |
| 1299 | struct lock_list *entry = leaf; |
| 1300 | int depth; |
| 1301 | |
| 1302 | /* compute the depth from the BFS-generated tree */ |
| 1303 | depth = get_lock_depth(leaf); |
| 1304 | |
| 1305 | do { |
| 1306 | print_lock_class_header(entry->class, depth); |
| 1307 | printk("%*s ... acquired at:\n", depth, ""); |
| 1308 | print_stack_trace(&entry->trace, 2); |
| 1309 | printk("\n"); |
| 1310 | |
| 1311 | if (depth == 0 && (entry != root)) { |
| 1312 | printk("lockdep:%s: bad BFS-generated tree\n", __func__); |
| 1313 | break; |
| 1314 | } |
| 1315 | |
| 1316 | entry = get_lock_parent(entry); |
| 1317 | depth--; |
| 1318 | } while (entry && (depth >= 0)); |
| 1319 | |
| 1320 | return; |
| 1321 | } |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1322 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1323 | static int |
| 1324 | print_bad_irq_dependency(struct task_struct *curr, |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1325 | struct lock_list *prev_root, |
| 1326 | struct lock_list *next_root, |
| 1327 | struct lock_list *backwards_entry, |
| 1328 | struct lock_list *forwards_entry, |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1329 | struct held_lock *prev, |
| 1330 | struct held_lock *next, |
| 1331 | enum lock_usage_bit bit1, |
| 1332 | enum lock_usage_bit bit2, |
| 1333 | const char *irqclass) |
| 1334 | { |
| 1335 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
| 1336 | return 0; |
| 1337 | |
| 1338 | printk("\n======================================================\n"); |
| 1339 | printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", |
| 1340 | irqclass, irqclass); |
| 1341 | print_kernel_version(); |
| 1342 | printk( "------------------------------------------------------\n"); |
| 1343 | printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 1344 | curr->comm, task_pid_nr(curr), |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1345 | curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, |
| 1346 | curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, |
| 1347 | curr->hardirqs_enabled, |
| 1348 | curr->softirqs_enabled); |
| 1349 | print_lock(next); |
| 1350 | |
| 1351 | printk("\nand this task is already holding:\n"); |
| 1352 | print_lock(prev); |
| 1353 | printk("which would create a new lock dependency:\n"); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1354 | print_lock_name(hlock_class(prev)); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1355 | printk(" ->"); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1356 | print_lock_name(hlock_class(next)); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1357 | printk("\n"); |
| 1358 | |
| 1359 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", |
| 1360 | irqclass); |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1361 | print_lock_name(backwards_entry->class); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1362 | printk("\n... which became %s-irq-safe at:\n", irqclass); |
| 1363 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1364 | print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1365 | |
| 1366 | printk("\nto a %s-irq-unsafe lock:\n", irqclass); |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1367 | print_lock_name(forwards_entry->class); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1368 | printk("\n... which became %s-irq-unsafe at:\n", irqclass); |
| 1369 | printk("..."); |
| 1370 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1371 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1372 | |
| 1373 | printk("\nother info that might help us debug this:\n\n"); |
| 1374 | lockdep_print_held_locks(curr); |
| 1375 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1376 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); |
| 1377 | printk(" and the holding lock:\n"); |
| 1378 | if (!save_trace(&prev_root->trace)) |
| 1379 | return 0; |
| 1380 | print_shortest_lock_dependencies(backwards_entry, prev_root); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1381 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1382 | printk("\nthe dependencies between the lock to be acquired"); |
| 1383 | printk(" and %s-irq-unsafe lock:\n", irqclass); |
| 1384 | if (!save_trace(&next_root->trace)) |
| 1385 | return 0; |
| 1386 | print_shortest_lock_dependencies(forwards_entry, next_root); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1387 | |
| 1388 | printk("\nstack backtrace:\n"); |
| 1389 | dump_stack(); |
| 1390 | |
| 1391 | return 0; |
| 1392 | } |
| 1393 | |
| 1394 | static int |
| 1395 | check_usage(struct task_struct *curr, struct held_lock *prev, |
| 1396 | struct held_lock *next, enum lock_usage_bit bit_backwards, |
| 1397 | enum lock_usage_bit bit_forwards, const char *irqclass) |
| 1398 | { |
| 1399 | int ret; |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1400 | struct lock_list this, that; |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1401 | struct lock_list *uninitialized_var(target_entry); |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1402 | struct lock_list *uninitialized_var(target_entry1); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1403 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1404 | this.parent = NULL; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1405 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1406 | this.class = hlock_class(prev); |
| 1407 | ret = find_usage_backwards(&this, bit_backwards, &target_entry); |
Peter Zijlstra | af01296 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1408 | if (ret < 0) |
| 1409 | return print_bfs_bug(ret); |
| 1410 | if (ret == 1) |
| 1411 | return ret; |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1412 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1413 | that.parent = NULL; |
| 1414 | that.class = hlock_class(next); |
| 1415 | ret = find_usage_forwards(&that, bit_forwards, &target_entry1); |
Peter Zijlstra | af01296 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1416 | if (ret < 0) |
| 1417 | return print_bfs_bug(ret); |
| 1418 | if (ret == 1) |
| 1419 | return ret; |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1420 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1421 | return print_bad_irq_dependency(curr, &this, &that, |
| 1422 | target_entry, target_entry1, |
| 1423 | prev, next, |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1424 | bit_backwards, bit_forwards, irqclass); |
| 1425 | } |
| 1426 | |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1427 | static const char *state_names[] = { |
| 1428 | #define LOCKDEP_STATE(__STATE) \ |
Peter Zijlstra | b4b136f | 2009-01-29 14:50:36 +0100 | [diff] [blame] | 1429 | __stringify(__STATE), |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1430 | #include "lockdep_states.h" |
| 1431 | #undef LOCKDEP_STATE |
| 1432 | }; |
| 1433 | |
| 1434 | static const char *state_rnames[] = { |
| 1435 | #define LOCKDEP_STATE(__STATE) \ |
Peter Zijlstra | b4b136f | 2009-01-29 14:50:36 +0100 | [diff] [blame] | 1436 | __stringify(__STATE)"-READ", |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1437 | #include "lockdep_states.h" |
| 1438 | #undef LOCKDEP_STATE |
| 1439 | }; |
| 1440 | |
| 1441 | static inline const char *state_name(enum lock_usage_bit bit) |
| 1442 | { |
| 1443 | return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2]; |
| 1444 | } |
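| | /* |
| |  * Worked example of the bit layout (see exclusive_bit() below): |
| |  * each state from lockdep_states.h owns four consecutive usage |
| |  * bits, so with HARDIRQ as the first state: |
| |  * |
| |  *	state_name(LOCK_USED_IN_HARDIRQ)      == "HARDIRQ" |
| |  *	state_name(LOCK_USED_IN_HARDIRQ_READ) == "HARDIRQ-READ" |
| |  */ |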
| 1445 | |
| 1446 | static int exclusive_bit(int new_bit) |
| 1447 | { |
| 1448 | /* |
| 1449 | * USED_IN |
| 1450 | * USED_IN_READ |
| 1451 | * ENABLED |
| 1452 | * ENABLED_READ |
| 1453 | * |
| 1454 | * bit 0 - write/read |
| 1455 | * bit 1 - used_in/enabled |
| 1456 | * bit 2+ state |
| 1457 | */ |
| 1458 | |
| 1459 | int state = new_bit & ~3; |
| 1460 | int dir = new_bit & 2; |
| 1461 | |
| 1462 | /* |
| 1463 | * keep state, bit flip the direction and strip read. |
| 1464 | */ |
| 1465 | return state | (dir ^ 2); |
| 1466 | } |
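| | /* |
| |  * Worked example: exclusive_bit(LOCK_USED_IN_HARDIRQ) flips the |
| |  * direction bit, giving LOCK_ENABLED_HARDIRQ (0 -> 2), while |
| |  * exclusive_bit(LOCK_ENABLED_SOFTIRQ_READ) also strips the read |
| |  * bit, giving LOCK_USED_IN_SOFTIRQ (7 -> 4). |
| |  */ |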
| 1467 | |
| 1468 | static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, |
| 1469 | struct held_lock *next, enum lock_usage_bit bit) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1470 | { |
| 1471 | /* |
| 1472 | * Prove that the new dependency does not connect a hardirq-safe |
| 1473 | * lock with a hardirq-unsafe lock - to achieve this we search |
| 1474 | * the backwards-subgraph starting at <prev>, and the |
| 1475 | * forwards-subgraph starting at <next>: |
| 1476 | */ |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1477 | if (!check_usage(curr, prev, next, bit, |
| 1478 | exclusive_bit(bit), state_name(bit))) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1479 | return 0; |
| 1480 | |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1481 | bit++; /* _READ */ |
| 1482 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1483 | /* |
| 1484 | * Prove that the new dependency does not connect a hardirq-safe-read |
| 1485 | * lock with a hardirq-unsafe lock - to achieve this we search |
| 1486 | * the backwards-subgraph starting at <prev>, and the |
| 1487 | * forwards-subgraph starting at <next>: |
| 1488 | */ |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1489 | if (!check_usage(curr, prev, next, bit, |
| 1490 | exclusive_bit(bit), state_name(bit))) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1491 | return 0; |
| 1492 | |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1493 | return 1; |
| 1494 | } |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1495 | |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1496 | static int |
| 1497 | check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, |
| 1498 | struct held_lock *next) |
| 1499 | { |
| 1500 | #define LOCKDEP_STATE(__STATE) \ |
| 1501 | if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 1502 | return 0; |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 1503 | #include "lockdep_states.h" |
| 1504 | #undef LOCKDEP_STATE |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 1505 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1506 | return 1; |
| 1507 | } |
| 1508 | |
| 1509 | static void inc_chains(void) |
| 1510 | { |
| 1511 | if (current->hardirq_context) |
| 1512 | nr_hardirq_chains++; |
| 1513 | else { |
| 1514 | if (current->softirq_context) |
| 1515 | nr_softirq_chains++; |
| 1516 | else |
| 1517 | nr_process_chains++; |
| 1518 | } |
| 1519 | } |
| 1520 | |
| 1521 | #else |
| 1522 | |
| 1523 | static inline int |
| 1524 | check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, |
| 1525 | struct held_lock *next) |
| 1526 | { |
| 1527 | return 1; |
| 1528 | } |
| 1529 | |
| 1530 | static inline void inc_chains(void) |
| 1531 | { |
| 1532 | nr_process_chains++; |
| 1533 | } |
| 1534 | |
| 1535 | #endif |
| 1536 | |
| 1537 | static int |
| 1538 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, |
| 1539 | struct held_lock *next) |
| 1540 | { |
| 1541 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
| 1542 | return 0; |
| 1543 | |
| 1544 | printk("\n=============================================\n"); |
| 1545 | printk( "[ INFO: possible recursive locking detected ]\n"); |
| 1546 | print_kernel_version(); |
| 1547 | printk( "---------------------------------------------\n"); |
| 1548 | printk("%s/%d is trying to acquire lock:\n", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 1549 | curr->comm, task_pid_nr(curr)); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1550 | print_lock(next); |
| 1551 | printk("\nbut task is already holding lock:\n"); |
| 1552 | print_lock(prev); |
| 1553 | |
| 1554 | printk("\nother info that might help us debug this:\n"); |
| 1555 | lockdep_print_held_locks(curr); |
| 1556 | |
| 1557 | printk("\nstack backtrace:\n"); |
| 1558 | dump_stack(); |
| 1559 | |
| 1560 | return 0; |
| 1561 | } |
| 1562 | |
| 1563 | /* |
| 1564 | * Check whether we are holding such a class already. |
| 1565 | * |
| 1566 | * (Note that this has to be done separately, because the graph cannot |
| 1567 | * detect such classes of deadlocks.) |
| 1568 | * |
| 1569 | * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read |
| 1570 | */ |
| 1571 | static int |
| 1572 | check_deadlock(struct task_struct *curr, struct held_lock *next, |
| 1573 | struct lockdep_map *next_instance, int read) |
| 1574 | { |
| 1575 | struct held_lock *prev; |
Peter Zijlstra | 7531e2f | 2008-08-11 09:30:24 +0200 | [diff] [blame] | 1576 | struct held_lock *nest = NULL; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1577 | int i; |
| 1578 | |
| 1579 | for (i = 0; i < curr->lockdep_depth; i++) { |
| 1580 | prev = curr->held_locks + i; |
Peter Zijlstra | 7531e2f | 2008-08-11 09:30:24 +0200 | [diff] [blame] | 1581 | |
| 1582 | if (prev->instance == next->nest_lock) |
| 1583 | nest = prev; |
| 1584 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1585 | if (hlock_class(prev) != hlock_class(next)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1586 | continue; |
Peter Zijlstra | 7531e2f | 2008-08-11 09:30:24 +0200 | [diff] [blame] | 1587 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1588 | /* |
| 1589 | * Allow read-after-read recursion of the same |
| 1590 | * lock class (i.e. read_lock(lock)+read_lock(lock)): |
| 1591 | */ |
| 1592 | if ((read == 2) && prev->read) |
| 1593 | return 2; |
Peter Zijlstra | 7531e2f | 2008-08-11 09:30:24 +0200 | [diff] [blame] | 1594 | |
| 1595 | /* |
| 1596 | * We're holding the nest_lock, which serializes this lock's |
| 1597 | * nesting behaviour. |
| 1598 | */ |
| 1599 | if (nest) |
| 1600 | return 2; |
| 1601 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1602 | return print_deadlock_bug(curr, prev, next); |
| 1603 | } |
| 1604 | return 1; |
| 1605 | } |
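| | /* |
| |  * Example (sketch) of what the rules above mean for an rwlock_t: |
| |  * |
| |  *	read_lock(&lock); |
| |  *	read_lock(&lock);	<- ret == 2: read recursion, allowed |
| |  * |
| |  * whereas for a spinlock_t: |
| |  * |
| |  *	spin_lock(&lock); |
| |  *	spin_lock(&lock);	<- same class held: print_deadlock_bug() |
| |  */ |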
| 1606 | |
| 1607 | /* |
| 1608 | * There was a chain-cache miss, and we are about to add a new dependency |
| 1609 | * to a previous lock. We recursively validate the following rules: |
| 1610 | * |
| 1611 | * - would the adding of the <prev> -> <next> dependency create a |
| 1612 | * circular dependency in the graph? [== circular deadlock] |
| 1613 | * |
| 1614 | * - does the new prev->next dependency connect any hardirq-safe lock |
| 1615 | * (in the full backwards-subgraph starting at <prev>) with any |
| 1616 | * hardirq-unsafe lock (in the full forwards-subgraph starting at |
| 1617 | * <next>)? [== illegal lock inversion with hardirq contexts] |
| 1618 | * |
| 1619 | * - does the new prev->next dependency connect any softirq-safe lock |
| 1620 | * (in the full backwards-subgraph starting at <prev>) with any |
| 1621 | * softirq-unsafe lock (in the full forwards-subgraph starting at |
| 1622 | * <next>)? [== illegal lock inversion with softirq contexts] |
| 1623 | * |
| 1624 | * any of these scenarios could lead to a deadlock. |
| 1625 | * |
| 1626 | * Then if all the validations pass, we add the forwards and backwards |
| 1627 | * dependency. |
| 1628 | */ |
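| | /* |
| |  * A concrete two-lock instance of the first rule, with |
| |  * hypothetical locks A and B: once some context has done |
| |  * |
| |  *	spin_lock(&A); spin_lock(&B); |
| |  * |
| |  * any later attempt anywhere to do |
| |  * |
| |  *	spin_lock(&B); spin_lock(&A); |
| |  * |
| |  * would close a cycle with the recorded A -> B edge, and |
| |  * check_prev_add() reports it as a circular bug. |
| |  */ |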
| 1629 | static int |
| 1630 | check_prev_add(struct task_struct *curr, struct held_lock *prev, |
| 1631 | struct held_lock *next, int distance) |
| 1632 | { |
| 1633 | struct lock_list *entry; |
| 1634 | int ret; |
Ming Lei | db0002a | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1635 | struct lock_list this; |
| 1636 | struct lock_list *uninitialized_var(target_entry); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1637 | |
| 1638 | /* |
| 1639 | * Prove that the new <prev> -> <next> dependency would not |
| 1640 | * create a circular dependency in the graph. (We do this by a |
| 1641 | * breadth-first forward search of the graph starting at <next>, |
| 1642 | * checking whether we can reach <prev>.) |
| 1643 | * |
| 1644 | * The search runs over a static queue instead of recursing, |
| 1645 | * which keeps the stack footprint of this check low: |
| 1646 | */ |
Ming Lei | db0002a | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1647 | this.class = hlock_class(next); |
| 1648 | this.parent = NULL; |
| 1649 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
| 1650 | if (unlikely(!ret)) |
| 1651 | return print_circular_bug(&this, target_entry, next, prev); |
| 1652 | else if (unlikely(ret < 0)) |
| 1653 | return print_bfs_bug(ret); |
Ming Lei | c94aa5c | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1654 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1655 | if (!check_prev_add_irq(curr, prev, next)) |
| 1656 | return 0; |
| 1657 | |
| 1658 | /* |
| 1659 | * For recursive read-locks we do all the dependency checks, |
| 1660 | * but we don't store read-triggered dependencies (only |
| 1661 | * write-triggered dependencies). This ensures that only the |
| 1662 | * write-side dependencies matter, and that if for example a |
| 1663 | * write-lock never takes any other locks, then the reads are |
| 1664 | * equivalent to a NOP. |
| 1665 | */ |
| 1666 | if (next->read == 2 || prev->read == 2) |
| 1667 | return 1; |
| 1668 | /* |
| 1669 | * Is the <prev> -> <next> dependency already present? |
| 1670 | * |
| 1671 | * (this may occur even though this is a new chain: consider |
| 1672 | * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 |
| 1673 | * chains - the second one will be new, but L1 already has |
| 1674 | * L2 added to its dependency list, due to the first chain.) |
| 1675 | */ |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1676 | list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { |
| 1677 | if (entry->class == hlock_class(next)) { |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1678 | if (distance == 1) |
| 1679 | entry->distance = 1; |
| 1680 | return 2; |
| 1681 | } |
| 1682 | } |
| 1683 | |
| 1684 | /* |
| 1685 | * Ok, all validations passed, add the new lock |
| 1686 | * to the previous lock's dependency list: |
| 1687 | */ |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1688 | ret = add_lock_to_list(hlock_class(prev), hlock_class(next), |
| 1689 | &hlock_class(prev)->locks_after, |
| 1690 | next->acquire_ip, distance); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1691 | |
| 1692 | if (!ret) |
| 1693 | return 0; |
| 1694 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1695 | ret = add_lock_to_list(hlock_class(next), hlock_class(prev), |
| 1696 | &hlock_class(next)->locks_before, |
| 1697 | next->acquire_ip, distance); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1698 | if (!ret) |
| 1699 | return 0; |
| 1700 | |
| 1701 | /* |
| 1702 | * Debugging printouts: |
| 1703 | */ |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1704 | if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1705 | graph_unlock(); |
| 1706 | printk("\n new dependency: "); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1707 | print_lock_name(hlock_class(prev)); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1708 | printk(" => "); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1709 | print_lock_name(hlock_class(next)); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1710 | printk("\n"); |
| 1711 | dump_stack(); |
| 1712 | return graph_lock(); |
| 1713 | } |
| 1714 | return 1; |
| 1715 | } |
| 1716 | |
| 1717 | /* |
| 1718 | * Add the dependency to all directly-previous locks that are 'relevant'. |
| 1719 | * The ones that are relevant are (in increasing distance from curr): |
| 1720 | * all consecutive trylock entries and the final non-trylock entry - or |
| 1721 | * the end of this context's lock-chain - whichever comes first. |
| 1722 | */ |
| 1723 | static int |
| 1724 | check_prevs_add(struct task_struct *curr, struct held_lock *next) |
| 1725 | { |
| 1726 | int depth = curr->lockdep_depth; |
| 1727 | struct held_lock *hlock; |
| 1728 | |
| 1729 | /* |
| 1730 | * Debugging checks. |
| 1731 | * |
| 1732 | * Depth must not be zero for a non-head lock: |
| 1733 | */ |
| 1734 | if (!depth) |
| 1735 | goto out_bug; |
| 1736 | /* |
| 1737 | * At least two relevant locks must exist for this |
| 1738 | * to be a head: |
| 1739 | */ |
| 1740 | if (curr->held_locks[depth].irq_context != |
| 1741 | curr->held_locks[depth-1].irq_context) |
| 1742 | goto out_bug; |
| 1743 | |
| 1744 | for (;;) { |
| 1745 | int distance = curr->lockdep_depth - depth + 1; |
| 1746 | hlock = curr->held_locks + depth-1; |
| 1747 | /* |
| 1748 | * Only non-recursive-read entries get new dependencies |
| 1749 | * added: |
| 1750 | */ |
| 1751 | if (hlock->read != 2) { |
| 1752 | if (!check_prev_add(curr, hlock, next, distance)) |
| 1753 | return 0; |
| 1754 | /* |
| 1755 | * Stop after the first non-trylock entry, |
| 1756 | * as non-trylock entries have added their |
| 1757 | * own direct dependencies already, so this |
| 1758 | * lock is connected to them indirectly: |
| 1759 | */ |
| 1760 | if (!hlock->trylock) |
| 1761 | break; |
| 1762 | } |
| 1763 | depth--; |
| 1764 | /* |
| 1765 | * End of lock-stack? |
| 1766 | */ |
| 1767 | if (!depth) |
| 1768 | break; |
| 1769 | /* |
| 1770 | * Stop the search if we cross into another context: |
| 1771 | */ |
| 1772 | if (curr->held_locks[depth].irq_context != |
| 1773 | curr->held_locks[depth-1].irq_context) |
| 1774 | break; |
| 1775 | } |
| 1776 | return 1; |
| 1777 | out_bug: |
| 1778 | if (!debug_locks_off_graph_unlock()) |
| 1779 | return 0; |
| 1780 | |
| 1781 | WARN_ON(1); |
| 1782 | |
| 1783 | return 0; |
| 1784 | } |
| 1785 | |
| 1786 | unsigned long nr_lock_chains; |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1787 | struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; |
Huang, Ying | cd1a28e | 2008-06-23 11:20:54 +0800 | [diff] [blame] | 1788 | int nr_chain_hlocks; |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1789 | static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; |
| 1790 | |
| 1791 | struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) |
| 1792 | { |
| 1793 | return lock_classes + chain_hlocks[chain->base + i]; |
| 1794 | } |
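| | /* |
| |  * Layout sketch (not from the source, but implied by the code): |
| |  * chain_hlocks[] stores the class indices of all cached chains |
| |  * back to back; chain->base is the first slot of a chain and |
| |  * chain->depth slots belong to it: |
| |  * |
| |  *	chain_hlocks[]: ... | c0 c1 c2 | ... |
| |  *	                      ^chain->base, chain->depth == 3 |
| |  */ |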
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1795 | |
| 1796 | /* |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1797 | * Look up a dependency chain. If the key is not present yet then |
Jarek Poplawski | 9e860d0 | 2007-05-08 00:30:12 -0700 | [diff] [blame] | 1798 | * add it and return 1 - in this case the new dependency chain will |
| 1799 | * then be validated by the caller. If the key is already hashed, return 0. |
| 1800 | * (On return with 1 graph_lock is held.) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1801 | */ |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1802 | static inline int lookup_chain_cache(struct task_struct *curr, |
| 1803 | struct held_lock *hlock, |
| 1804 | u64 chain_key) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1805 | { |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1806 | struct lock_class *class = hlock_class(hlock); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1807 | struct list_head *hash_head = chainhashentry(chain_key); |
| 1808 | struct lock_chain *chain; |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1809 | struct held_lock *hlock_curr, *hlock_next; |
Huang, Ying | cd1a28e | 2008-06-23 11:20:54 +0800 | [diff] [blame] | 1810 | int i, j, n, cn; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1811 | |
Jarek Poplawski | 381a229 | 2007-02-10 01:44:58 -0800 | [diff] [blame] | 1812 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 1813 | return 0; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1814 | /* |
| 1815 | * We can walk it lock-free, because entries only get added |
| 1816 | * to the hash: |
| 1817 | */ |
| 1818 | list_for_each_entry(chain, hash_head, entry) { |
| 1819 | if (chain->chain_key == chain_key) { |
| 1820 | cache_hit: |
| 1821 | debug_atomic_inc(&chain_lookup_hits); |
Ingo Molnar | 81fc685 | 2006-12-13 00:34:40 -0800 | [diff] [blame] | 1822 | if (very_verbose(class)) |
Andrew Morton | 755cd90 | 2006-12-29 16:49:14 -0800 | [diff] [blame] | 1823 | printk("\nhash chain already cached, key: " |
| 1824 | "%016Lx tail class: [%p] %s\n", |
| 1825 | (unsigned long long)chain_key, |
| 1826 | class->key, class->name); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1827 | return 0; |
| 1828 | } |
| 1829 | } |
Ingo Molnar | 81fc685 | 2006-12-13 00:34:40 -0800 | [diff] [blame] | 1830 | if (very_verbose(class)) |
Andrew Morton | 755cd90 | 2006-12-29 16:49:14 -0800 | [diff] [blame] | 1831 | printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", |
| 1832 | (unsigned long long)chain_key, class->key, class->name); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1833 | /* |
| 1834 | * Allocate a new chain entry from the static array, and add |
| 1835 | * it to the hash: |
| 1836 | */ |
Ingo Molnar | 74c383f | 2006-12-13 00:34:43 -0800 | [diff] [blame] | 1837 | if (!graph_lock()) |
| 1838 | return 0; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1839 | /* |
| 1840 | * We have to walk the chain again locked - to avoid duplicates: |
| 1841 | */ |
| 1842 | list_for_each_entry(chain, hash_head, entry) { |
| 1843 | if (chain->chain_key == chain_key) { |
Ingo Molnar | 74c383f | 2006-12-13 00:34:43 -0800 | [diff] [blame] | 1844 | graph_unlock(); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1845 | goto cache_hit; |
| 1846 | } |
| 1847 | } |
| 1848 | if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { |
Ingo Molnar | 74c383f | 2006-12-13 00:34:43 -0800 | [diff] [blame] | 1849 | if (!debug_locks_off_graph_unlock()) |
| 1850 | return 0; |
| 1851 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1852 | printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); |
| 1853 | printk("turning off the locking correctness validator.\n"); |
Peter Zijlstra | eedeeab | 2009-03-18 12:38:47 +0100 | [diff] [blame] | 1854 | dump_stack(); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1855 | return 0; |
| 1856 | } |
| 1857 | chain = lock_chains + nr_lock_chains++; |
| 1858 | chain->chain_key = chain_key; |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1859 | chain->irq_context = hlock->irq_context; |
| 1860 | /* Find the first held_lock of current chain */ |
| 1861 | hlock_next = hlock; |
| 1862 | for (i = curr->lockdep_depth - 1; i >= 0; i--) { |
| 1863 | hlock_curr = curr->held_locks + i; |
| 1864 | if (hlock_curr->irq_context != hlock_next->irq_context) |
| 1865 | break; |
| 1866 | hlock_next = hlock; |
| 1867 | } |
| 1868 | i++; |
| 1869 | chain->depth = curr->lockdep_depth + 1 - i; |
Huang, Ying | cd1a28e | 2008-06-23 11:20:54 +0800 | [diff] [blame] | 1870 | cn = nr_chain_hlocks; |
| 1871 | while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) { |
| 1872 | n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth); |
| 1873 | if (n == cn) |
| 1874 | break; |
| 1875 | cn = n; |
| 1876 | } |
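| | /* |
| |  * The loop above reserves chain->depth slots in chain_hlocks[] by |
| |  * advancing nr_chain_hlocks with cmpxchg(): if the counter moved |
| |  * under us, cn is reloaded and the reservation retried, until it |
| |  * either succeeds or the array would overflow (checked below). |
| |  */ |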
| 1877 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
| 1878 | chain->base = cn; |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1879 | for (j = 0; j < chain->depth - 1; j++, i++) { |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1880 | int lock_id = curr->held_locks[i].class_idx - 1; |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1881 | chain_hlocks[chain->base + j] = lock_id; |
| 1882 | } |
| 1883 | chain_hlocks[chain->base + j] = class - lock_classes; |
| 1884 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1885 | list_add_tail_rcu(&chain->entry, hash_head); |
| 1886 | debug_atomic_inc(&chain_lookup_misses); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1887 | inc_chains(); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1888 | |
| 1889 | return 1; |
| 1890 | } |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1891 | |
| 1892 | static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, |
Johannes Berg | 4e6045f | 2007-10-18 23:39:55 -0700 | [diff] [blame] | 1893 | struct held_lock *hlock, int chain_head, u64 chain_key) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1894 | { |
| 1895 | /* |
| 1896 | * Trylock needs to maintain the stack of held locks, but it |
| 1897 | * does not add new dependencies, because trylock can be done |
| 1898 | * in any order. |
| 1899 | * |
| 1900 | * We look up the chain_key and do the O(N^2) check and update of |
| 1901 | * the dependencies only if this is a new dependency chain. |
| 1902 | * (If lookup_chain_cache() returns with 1 it acquires |
| 1903 | * graph_lock for us) |
| 1904 | */ |
| 1905 | if (!hlock->trylock && (hlock->check == 2) && |
Huang, Ying | 443cd50 | 2008-06-20 16:39:21 +0800 | [diff] [blame] | 1906 | lookup_chain_cache(curr, hlock, chain_key)) { |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1907 | /* |
| 1908 | * Check whether last held lock: |
| 1909 | * |
| 1910 | * - is irq-safe, if this lock is irq-unsafe |
| 1911 | * - is softirq-safe, if this lock is hardirq-unsafe |
| 1912 | * |
| 1913 | * And check whether the new lock's dependency graph |
| 1914 | * could lead back to the previous lock. |
| 1915 | * |
| 1916 | * Any of these scenarios could lead to a deadlock. If all |
| 1917 | * validations pass, the dependency is added to the graph. |
| 1918 | */ |
| 1919 | int ret = check_deadlock(curr, hlock, lock, hlock->read); |
| 1920 | |
| 1921 | if (!ret) |
| 1922 | return 0; |
| 1923 | /* |
| 1924 | * Mark recursive read, as we jump over it when |
| 1925 | * building dependencies (just like we jump over |
| 1926 | * trylock entries): |
| 1927 | */ |
| 1928 | if (ret == 2) |
| 1929 | hlock->read = 2; |
| 1930 | /* |
| 1931 | * Add dependency only if this lock is not the head |
| 1932 | * of the chain, and if it's not a secondary read-lock: |
| 1933 | */ |
| 1934 | if (!chain_head && ret != 2) |
| 1935 | if (!check_prevs_add(curr, hlock)) |
| 1936 | return 0; |
| 1937 | graph_unlock(); |
| 1938 | } else |
| 1939 | /* after lookup_chain_cache(): */ |
| 1940 | if (unlikely(!debug_locks)) |
| 1941 | return 0; |
| 1942 | |
| 1943 | return 1; |
| 1944 | } |
| 1945 | #else |
| 1946 | static inline int validate_chain(struct task_struct *curr, |
| 1947 | struct lockdep_map *lock, struct held_lock *hlock, |
Gregory Haskins | 3aa416b | 2007-10-11 22:11:11 +0200 | [diff] [blame] | 1948 | int chain_head, u64 chain_key) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1949 | { |
| 1950 | return 1; |
| 1951 | } |
Peter Zijlstra | ca58abc | 2007-07-19 01:48:53 -0700 | [diff] [blame] | 1952 | #endif |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1953 | |
| 1954 | /* |
| 1955 | * We are building curr_chain_key incrementally, so double-check |
| 1956 | * it from scratch, to make sure that it's done correctly: |
| 1957 | */ |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 1958 | static void check_chain_key(struct task_struct *curr) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1959 | { |
| 1960 | #ifdef CONFIG_DEBUG_LOCKDEP |
| 1961 | struct held_lock *hlock, *prev_hlock = NULL; |
| 1962 | unsigned int i, id; |
| 1963 | u64 chain_key = 0; |
| 1964 | |
| 1965 | for (i = 0; i < curr->lockdep_depth; i++) { |
| 1966 | hlock = curr->held_locks + i; |
| 1967 | if (chain_key != hlock->prev_chain_key) { |
| 1968 | debug_locks_off(); |
Arjan van de Ven | 2df8b1d | 2008-07-30 12:43:11 -0700 | [diff] [blame] | 1969 | WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1970 | curr->lockdep_depth, i, |
| 1971 | (unsigned long long)chain_key, |
| 1972 | (unsigned long long)hlock->prev_chain_key); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1973 | return; |
| 1974 | } |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 1975 | id = hlock->class_idx - 1; |
Jarek Poplawski | 381a229 | 2007-02-10 01:44:58 -0800 | [diff] [blame] | 1976 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) |
| 1977 | return; |
| 1978 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1979 | if (prev_hlock && (prev_hlock->irq_context != |
| 1980 | hlock->irq_context)) |
| 1981 | chain_key = 0; |
| 1982 | chain_key = iterate_chain_key(chain_key, id); |
| 1983 | prev_hlock = hlock; |
| 1984 | } |
| 1985 | if (chain_key != curr->curr_chain_key) { |
| 1986 | debug_locks_off(); |
Arjan van de Ven | 2df8b1d | 2008-07-30 12:43:11 -0700 | [diff] [blame] | 1987 | WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1988 | curr->lockdep_depth, i, |
| 1989 | (unsigned long long)chain_key, |
| 1990 | (unsigned long long)curr->curr_chain_key); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1991 | } |
| 1992 | #endif |
| 1993 | } |
| 1994 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 1995 | static int |
| 1996 | print_usage_bug(struct task_struct *curr, struct held_lock *this, |
| 1997 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) |
| 1998 | { |
| 1999 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
| 2000 | return 0; |
| 2001 | |
| 2002 | printk("\n=================================\n"); |
| 2003 | printk( "[ INFO: inconsistent lock state ]\n"); |
| 2004 | print_kernel_version(); |
| 2005 | printk( "---------------------------------\n"); |
| 2006 | |
| 2007 | printk("inconsistent {%s} -> {%s} usage.\n", |
| 2008 | usage_str[prev_bit], usage_str[new_bit]); |
| 2009 | |
| 2010 | printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 2011 | curr->comm, task_pid_nr(curr), |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2012 | trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, |
| 2013 | trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, |
| 2014 | trace_hardirqs_enabled(curr), |
| 2015 | trace_softirqs_enabled(curr)); |
| 2016 | print_lock(this); |
| 2017 | |
| 2018 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2019 | print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2020 | |
| 2021 | print_irqtrace_events(curr); |
| 2022 | printk("\nother info that might help us debug this:\n"); |
| 2023 | lockdep_print_held_locks(curr); |
| 2024 | |
| 2025 | printk("\nstack backtrace:\n"); |
| 2026 | dump_stack(); |
| 2027 | |
| 2028 | return 0; |
| 2029 | } |
| 2030 | |
| 2031 | /* |
| 2032 | * Print out an error if an invalid bit is set: |
| 2033 | */ |
| 2034 | static inline int |
| 2035 | valid_state(struct task_struct *curr, struct held_lock *this, |
| 2036 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) |
| 2037 | { |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2038 | if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2039 | return print_usage_bug(curr, this, bad_bit, new_bit); |
| 2040 | return 1; |
| 2041 | } |
| 2042 | |
| 2043 | static int mark_lock(struct task_struct *curr, struct held_lock *this, |
| 2044 | enum lock_usage_bit new_bit); |
| 2045 | |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2046 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2047 | |
| 2048 | /* |
| 2049 | * print irq inversion bug: |
| 2050 | */ |
| 2051 | static int |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2052 | print_irq_inversion_bug(struct task_struct *curr, |
| 2053 | struct lock_list *root, struct lock_list *other, |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2054 | struct held_lock *this, int forwards, |
| 2055 | const char *irqclass) |
| 2056 | { |
Ingo Molnar | 74c383f | 2006-12-13 00:34:43 -0800 | [diff] [blame] | 2057 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2058 | return 0; |
| 2059 | |
| 2060 | printk("\n=========================================================\n"); |
| 2061 | printk( "[ INFO: possible irq lock inversion dependency detected ]\n"); |
Dave Jones | 99de055 | 2006-09-29 02:00:10 -0700 | [diff] [blame] | 2062 | print_kernel_version(); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2063 | printk( "---------------------------------------------------------\n"); |
| 2064 | printk("%s/%d just changed the state of lock:\n", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 2065 | curr->comm, task_pid_nr(curr)); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2066 | print_lock(this); |
| 2067 | if (forwards) |
Peter Zijlstra | 26575e2 | 2009-03-04 14:53:24 +0100 | [diff] [blame] | 2068 | printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2069 | else |
Peter Zijlstra | 26575e2 | 2009-03-04 14:53:24 +0100 | [diff] [blame] | 2070 | printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2071 | print_lock_name(other->class); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2072 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); |
| 2073 | |
| 2074 | printk("\nother info that might help us debug this:\n"); |
| 2075 | lockdep_print_held_locks(curr); |
| 2076 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2077 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); |
| 2078 | if (!save_trace(&root->trace)) |
| 2079 | return 0; |
| 2080 | print_shortest_lock_dependencies(other, root); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2081 | |
| 2082 | printk("\nstack backtrace:\n"); |
| 2083 | dump_stack(); |
| 2084 | |
| 2085 | return 0; |
| 2086 | } |
| 2087 | |
| 2088 | /* |
| 2089 | * Prove that in the forwards-direction subgraph starting at <this> |
| 2090 | * there is no lock matching <bit>: |
| 2091 | */ |
| 2092 | static int |
| 2093 | check_usage_forwards(struct task_struct *curr, struct held_lock *this, |
| 2094 | enum lock_usage_bit bit, const char *irqclass) |
| 2095 | { |
| 2096 | int ret; |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2097 | struct lock_list root; |
| 2098 | struct lock_list *uninitialized_var(target_entry); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2099 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2100 | root.parent = NULL; |
| 2101 | root.class = hlock_class(this); |
| 2102 | ret = find_usage_forwards(&root, bit, &target_entry); |
Peter Zijlstra | af01296 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2103 | if (ret < 0) |
| 2104 | return print_bfs_bug(ret); |
| 2105 | if (ret == 1) |
| 2106 | return ret; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2107 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2108 | return print_irq_inversion_bug(curr, &root, target_entry, |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2109 | this, 1, irqclass); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2110 | } |
| 2111 | |
| 2112 | /* |
| 2113 | * Prove that in the backwards-direction subgraph starting at <this> |
| 2114 | * there is no lock matching <bit>: |
| 2115 | */ |
| 2116 | static int |
| 2117 | check_usage_backwards(struct task_struct *curr, struct held_lock *this, |
| 2118 | enum lock_usage_bit bit, const char *irqclass) |
| 2119 | { |
| 2120 | int ret; |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2121 | struct lock_list root; |
| 2122 | struct lock_list *uninitialized_var(target_entry); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2123 | |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2124 | root.parent = NULL; |
| 2125 | root.class = hlock_class(this); |
| 2126 | ret = find_usage_backwards(&root, bit, &target_entry); |
Peter Zijlstra | af01296 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2127 | if (ret < 0) |
| 2128 | return print_bfs_bug(ret); |
| 2129 | if (ret == 1) |
| 2130 | return ret; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2131 | |
Ming Lei | 24208ca | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2132 | return print_irq_inversion_bug(curr, &root, target_entry, |
Ming Lei | d7aaba1 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 2133 | this, 0, irqclass); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2134 | } |
| 2135 | |
Ingo Molnar | 3117df0 | 2006-12-13 00:34:43 -0800 | [diff] [blame] | 2136 | void print_irqtrace_events(struct task_struct *curr) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2137 | { |
| 2138 | printk("irq event stamp: %u\n", curr->irq_events); |
| 2139 | printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); |
| 2140 | print_ip_sym(curr->hardirq_enable_ip); |
| 2141 | printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); |
| 2142 | print_ip_sym(curr->hardirq_disable_ip); |
| 2143 | printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); |
| 2144 | print_ip_sym(curr->softirq_enable_ip); |
| 2145 | printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); |
| 2146 | print_ip_sym(curr->softirq_disable_ip); |
| 2147 | } |
| 2148 | |
Peter Zijlstra | cd95302 | 2009-01-22 16:38:21 +0100 | [diff] [blame] | 2149 | static int HARDIRQ_verbose(struct lock_class *class) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2150 | { |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2151 | #if HARDIRQ_VERBOSE |
| 2152 | return class_filter(class); |
| 2153 | #endif |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2154 | return 0; |
| 2155 | } |
| 2156 | |
Peter Zijlstra | cd95302 | 2009-01-22 16:38:21 +0100 | [diff] [blame] | 2157 | static int SOFTIRQ_verbose(struct lock_class *class) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2158 | { |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2159 | #if SOFTIRQ_VERBOSE |
| 2160 | return class_filter(class); |
| 2161 | #endif |
| 2162 | return 0; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2163 | } |
| 2164 | |
Peter Zijlstra | cd95302 | 2009-01-22 16:38:21 +0100 | [diff] [blame] | 2165 | static int RECLAIM_FS_verbose(struct lock_class *class) |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2166 | { |
| 2167 | #if RECLAIM_VERBOSE |
| 2168 | return class_filter(class); |
| 2169 | #endif |
| 2170 | return 0; |
| 2171 | } |
| 2172 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2173 | #define STRICT_READ_CHECKS 1 |
| 2174 | |
Peter Zijlstra | cd95302 | 2009-01-22 16:38:21 +0100 | [diff] [blame] | 2175 | static int (*state_verbose_f[])(struct lock_class *class) = { |
| 2176 | #define LOCKDEP_STATE(__STATE) \ |
| 2177 | __STATE##_verbose, |
| 2178 | #include "lockdep_states.h" |
| 2179 | #undef LOCKDEP_STATE |
| 2180 | }; |
| 2181 | |
| 2182 | static inline int state_verbose(enum lock_usage_bit bit, |
| 2183 | struct lock_class *class) |
| 2184 | { |
| 2185 | return state_verbose_f[bit >> 2](class); |
| 2186 | } |
| 2187 | |
Peter Zijlstra | 42c50d5 | 2009-01-22 16:58:16 +0100 | [diff] [blame] | 2188 | typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, |
| 2189 | enum lock_usage_bit bit, const char *name); |
| 2190 | |
Peter Zijlstra | 6a6904d | 2009-01-22 16:07:44 +0100 | [diff] [blame] | 2191 | static int |
Peter Zijlstra | 1c21f14 | 2009-03-04 13:51:13 +0100 | [diff] [blame] | 2192 | mark_lock_irq(struct task_struct *curr, struct held_lock *this, |
| 2193 | enum lock_usage_bit new_bit) |
Peter Zijlstra | 6a6904d | 2009-01-22 16:07:44 +0100 | [diff] [blame] | 2194 | { |
Peter Zijlstra | f989209 | 2009-01-22 16:09:59 +0100 | [diff] [blame] | 2195 | int excl_bit = exclusive_bit(new_bit); |
Peter Zijlstra | 9d3651a | 2009-01-22 17:18:32 +0100 | [diff] [blame] | 2196 | int read = new_bit & 1; |
Peter Zijlstra | 42c50d5 | 2009-01-22 16:58:16 +0100 | [diff] [blame] | 2197 | int dir = new_bit & 2; |
| 2198 | |
Peter Zijlstra | 38aa271 | 2009-01-27 14:53:50 +0100 | [diff] [blame] | 2199 | /* |
| 2200 | * mark USED_IN has to look forwards -- to ensure no dependency |
| 2201 | * has ENABLED state, which would allow recursion deadlocks. |
| 2202 | * |
| 2203 | * mark ENABLED has to look backwards -- to ensure no dependee |
| 2204 | * has USED_IN state, which, again, would allow recursion deadlocks. |
| 2205 | */ |
Peter Zijlstra | 42c50d5 | 2009-01-22 16:58:16 +0100 | [diff] [blame] | 2206 | check_usage_f usage = dir ? |
| 2207 | check_usage_backwards : check_usage_forwards; |
Peter Zijlstra | f989209 | 2009-01-22 16:09:59 +0100 | [diff] [blame] | 2208 | |
Peter Zijlstra | 38aa271 | 2009-01-27 14:53:50 +0100 | [diff] [blame] | 2209 | /* |
| 2210 | * Validate that this particular lock does not have conflicting |
| 2211 | * usage states. |
| 2212 | */ |
Peter Zijlstra | 6a6904d | 2009-01-22 16:07:44 +0100 | [diff] [blame] | 2213 | if (!valid_state(curr, this, new_bit, excl_bit)) |
| 2214 | return 0; |
Peter Zijlstra | 9d3651a | 2009-01-22 17:18:32 +0100 | [diff] [blame] | 2215 | |
Peter Zijlstra | 38aa271 | 2009-01-27 14:53:50 +0100 | [diff] [blame] | 2216 | /* |
| 2217 | * Validate that the lock dependencies don't have conflicting usage |
| 2218 | * states. |
| 2219 | */ |
| 2220 | if ((!read || !dir || STRICT_READ_CHECKS) && |
Peter Zijlstra | 1c21f14 | 2009-03-04 13:51:13 +0100 | [diff] [blame] | 2221 | !usage(curr, this, excl_bit, state_name(new_bit & ~1))) |
Peter Zijlstra | 6a6904d | 2009-01-22 16:07:44 +0100 | [diff] [blame] | 2222 | return 0; |
Peter Zijlstra | 780e820 | 2009-01-22 16:51:29 +0100 | [diff] [blame] | 2223 | |
Peter Zijlstra | 38aa271 | 2009-01-27 14:53:50 +0100 | [diff] [blame] | 2224 | /* |
| 2225 | * For a write usage, the read variant of the exclusive bit conflicts too: |
| 2226 | */ |
| 2227 | if (!read) { |
| 2228 | if (!valid_state(curr, this, new_bit, excl_bit + 1)) |
| 2229 | return 0; |
| 2230 | |
| 2231 | if (STRICT_READ_CHECKS && |
Peter Zijlstra | 4f367d8a | 2009-01-22 18:10:42 +0100 | [diff] [blame] | 2232 | !usage(curr, this, excl_bit + 1, |
| 2233 | state_name(new_bit + 1))) |
Peter Zijlstra | 38aa271 | 2009-01-27 14:53:50 +0100 | [diff] [blame] | 2234 | return 0; |
| 2235 | } |
Peter Zijlstra | 780e820 | 2009-01-22 16:51:29 +0100 | [diff] [blame] | 2236 | |
Peter Zijlstra | cd95302 | 2009-01-22 16:38:21 +0100 | [diff] [blame] | 2237 | if (state_verbose(new_bit, hlock_class(this))) |
Peter Zijlstra | 6a6904d | 2009-01-22 16:07:44 +0100 | [diff] [blame] | 2238 | return 2; |
| 2239 | |
| 2240 | return 1; |
| 2241 | } |
| 2242 | |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2243 | enum mark_type { |
Peter Zijlstra | 36bfb9b | 2009-01-22 14:12:41 +0100 | [diff] [blame] | 2244 | #define LOCKDEP_STATE(__STATE) __STATE, |
| 2245 | #include "lockdep_states.h" |
| 2246 | #undef LOCKDEP_STATE |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2247 | }; |
| 2248 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2249 | /* |
| 2250 | * Mark all held locks with a usage bit: |
| 2251 | */ |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 2252 | static int |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2253 | mark_held_locks(struct task_struct *curr, enum mark_type mark) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2254 | { |
| 2255 | enum lock_usage_bit usage_bit; |
| 2256 | struct held_lock *hlock; |
| 2257 | int i; |
| 2258 | |
| 2259 | for (i = 0; i < curr->lockdep_depth; i++) { |
| 2260 | hlock = curr->held_locks + i; |
| 2261 | |
Peter Zijlstra | cf2ad4d | 2009-01-27 13:58:08 +0100 | [diff] [blame] | 2262 | usage_bit = 2 + (mark << 2); /* ENABLED */ |
| 2263 | if (hlock->read) |
| 2264 | usage_bit += 1; /* READ */ |
| 2265 | |
| 2266 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2267 | |
Jarek Poplawski | 4ff773bb | 2007-05-08 00:31:00 -0700 | [diff] [blame] | 2268 | if (!mark_lock(curr, hlock, usage_bit)) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2269 | return 0; |
| 2270 | } |
| 2271 | |
| 2272 | return 1; |
| 2273 | } |
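
/*
 * Worked example of the arithmetic above (same assumed bit layout):
 * mark_held_locks(curr, SOFTIRQ) on a read-held lock computes
 *
 *	usage_bit = 2 + (SOFTIRQ << 2) + 1;	== LOCK_ENABLED_SOFTIRQ_READ
 *
 * i.e. "this read-lock is held while softirqs are being enabled",
 * which is exactly the state mark_lock_irq() then validates against
 * the dependency graph.
 */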
| 2274 | |
| 2275 | /* |
| 2276 | * Debugging helper: via this flag we know that we are in |
| 2277 | * 'early bootup code', and will warn about any invalid irqs-on event: |
| 2278 | */ |
| 2279 | static int early_boot_irqs_enabled; |
| 2280 | |
| 2281 | void early_boot_irqs_off(void) |
| 2282 | { |
| 2283 | early_boot_irqs_enabled = 0; |
| 2284 | } |
| 2285 | |
| 2286 | void early_boot_irqs_on(void) |
| 2287 | { |
| 2288 | early_boot_irqs_enabled = 1; |
| 2289 | } |
| 2290 | |
| 2291 | /* |
| 2292 | * Hardirqs will be enabled: |
| 2293 | */ |
Heiko Carstens | 6afe40b | 2008-10-28 11:14:58 +0100 | [diff] [blame] | 2294 | void trace_hardirqs_on_caller(unsigned long ip) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2295 | { |
| 2296 | struct task_struct *curr = current; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2297 | |
Heiko Carstens | 6afe40b | 2008-10-28 11:14:58 +0100 | [diff] [blame] | 2298 | time_hardirqs_on(CALLER_ADDR0, ip); |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2299 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2300 | if (unlikely(!debug_locks || current->lockdep_recursion)) |
| 2301 | return; |
| 2302 | |
| 2303 | if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled))) |
| 2304 | return; |
| 2305 | |
| 2306 | if (unlikely(curr->hardirqs_enabled)) { |
| 2307 | debug_atomic_inc(&redundant_hardirqs_on); |
| 2308 | return; |
| 2309 | } |
| 2310 | /* we'll do an OFF -> ON transition: */ |
| 2311 | curr->hardirqs_enabled = 1; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2312 | |
| 2313 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2314 | return; |
| 2315 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) |
| 2316 | return; |
| 2317 | /* |
| 2318 | * We are going to turn hardirqs on, so set the |
| 2319 | * usage bit for all held locks: |
| 2320 | */ |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2321 | if (!mark_held_locks(curr, HARDIRQ)) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2322 | return; |
| 2323 | /* |
| 2324 | * If we have softirqs enabled, then set the usage |
| 2325 | * bit for all held locks. (disabled hardirqs prevented |
| 2326 | * this bit from being set before) |
| 2327 | */ |
| 2328 | if (curr->softirqs_enabled) |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2329 | if (!mark_held_locks(curr, SOFTIRQ)) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2330 | return; |
| 2331 | |
| 2332 | curr->hardirq_enable_ip = ip; |
| 2333 | curr->hardirq_enable_event = ++curr->irq_events; |
| 2334 | debug_atomic_inc(&hardirqs_on_events); |
| 2335 | } |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2336 | EXPORT_SYMBOL(trace_hardirqs_on_caller); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2337 | |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 2338 | void trace_hardirqs_on(void) |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2339 | { |
| 2340 | trace_hardirqs_on_caller(CALLER_ADDR0); |
| 2341 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2342 | EXPORT_SYMBOL(trace_hardirqs_on); |
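
/*
 * Usage sketch: architectures with irq-flags tracing call the hooks
 * above from their irq-enable/disable primitives, roughly as
 * include/linux/irqflags.h wires it up (simplified here):
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 * Note the ordering: the on-hook runs while irqs are still disabled
 * and the off-hook runs after they are already disabled, which is
 * what the DEBUG_LOCKS_WARN_ON(!irqs_disabled()) checks above expect.
 */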
| 2343 | |
| 2344 | /* |
| 2345 | * Hardirqs were disabled: |
| 2346 | */ |
Heiko Carstens | 6afe40b | 2008-10-28 11:14:58 +0100 | [diff] [blame] | 2347 | void trace_hardirqs_off_caller(unsigned long ip) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2348 | { |
| 2349 | struct task_struct *curr = current; |
| 2350 | |
Heiko Carstens | 6afe40b | 2008-10-28 11:14:58 +0100 | [diff] [blame] | 2351 | time_hardirqs_off(CALLER_ADDR0, ip); |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2352 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2353 | if (unlikely(!debug_locks || current->lockdep_recursion)) |
| 2354 | return; |
| 2355 | |
| 2356 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2357 | return; |
| 2358 | |
| 2359 | if (curr->hardirqs_enabled) { |
| 2360 | /* |
| 2361 | * We have done an ON -> OFF transition: |
| 2362 | */ |
| 2363 | curr->hardirqs_enabled = 0; |
Heiko Carstens | 6afe40b | 2008-10-28 11:14:58 +0100 | [diff] [blame] | 2364 | curr->hardirq_disable_ip = ip; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2365 | curr->hardirq_disable_event = ++curr->irq_events; |
| 2366 | debug_atomic_inc(&hardirqs_off_events); |
| 2367 | } else |
| 2368 | debug_atomic_inc(&redundant_hardirqs_off); |
| 2369 | } |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2370 | EXPORT_SYMBOL(trace_hardirqs_off_caller); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2371 | |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 2372 | void trace_hardirqs_off(void) |
Steven Rostedt | 81d68a9 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2373 | { |
| 2374 | trace_hardirqs_off_caller(CALLER_ADDR0); |
| 2375 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2376 | EXPORT_SYMBOL(trace_hardirqs_off); |
| 2377 | |
| 2378 | /* |
| 2379 | * Softirqs will be enabled: |
| 2380 | */ |
| 2381 | void trace_softirqs_on(unsigned long ip) |
| 2382 | { |
| 2383 | struct task_struct *curr = current; |
| 2384 | |
| 2385 | if (unlikely(!debug_locks)) |
| 2386 | return; |
| 2387 | |
| 2388 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2389 | return; |
| 2390 | |
| 2391 | if (curr->softirqs_enabled) { |
| 2392 | debug_atomic_inc(&redundant_softirqs_on); |
| 2393 | return; |
| 2394 | } |
| 2395 | |
| 2396 | /* |
| 2397 | * We'll do an OFF -> ON transition: |
| 2398 | */ |
| 2399 | curr->softirqs_enabled = 1; |
| 2400 | curr->softirq_enable_ip = ip; |
| 2401 | curr->softirq_enable_event = ++curr->irq_events; |
| 2402 | debug_atomic_inc(&softirqs_on_events); |
| 2403 | /* |
| 2404 | * We are going to turn softirqs on, so set the |
| 2405 | * usage bit for all held locks, if hardirqs are |
| 2406 | * enabled too: |
| 2407 | */ |
| 2408 | if (curr->hardirqs_enabled) |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2409 | mark_held_locks(curr, SOFTIRQ); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2410 | } |
| 2411 | |
| 2412 | /* |
| 2413 | * Softirqs were disabled: |
| 2414 | */ |
| 2415 | void trace_softirqs_off(unsigned long ip) |
| 2416 | { |
| 2417 | struct task_struct *curr = current; |
| 2418 | |
| 2419 | if (unlikely(!debug_locks)) |
| 2420 | return; |
| 2421 | |
| 2422 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2423 | return; |
| 2424 | |
| 2425 | if (curr->softirqs_enabled) { |
| 2426 | /* |
| 2427 | * We have done an ON -> OFF transition: |
| 2428 | */ |
| 2429 | curr->softirqs_enabled = 0; |
| 2430 | curr->softirq_disable_ip = ip; |
| 2431 | curr->softirq_disable_event = ++curr->irq_events; |
| 2432 | debug_atomic_inc(&softirqs_off_events); |
| 2433 | DEBUG_LOCKS_WARN_ON(!softirq_count()); |
| 2434 | } else |
| 2435 | debug_atomic_inc(&redundant_softirqs_off); |
| 2436 | } |
| 2437 | |
Peter Zijlstra | 2f85018 | 2009-03-20 11:13:20 +0100 | [diff] [blame] | 2438 | static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2439 | { |
| 2440 | struct task_struct *curr = current; |
| 2441 | |
| 2442 | if (unlikely(!debug_locks)) |
| 2443 | return; |
| 2444 | |
| 2445 | /* an allocation without __GFP_WAIT can never block, hence never enters reclaim */ |
| 2446 | if (!(gfp_mask & __GFP_WAIT)) |
| 2447 | return; |
| 2448 | |
| 2449 | /* a PF_MEMALLOC task won't itself recurse into reclaim */ |
| 2450 | if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) |
| 2451 | return; |
| 2452 | |
| 2453 | /* We're only interested in __GFP_FS allocations for now */ |
| 2454 | if (!(gfp_mask & __GFP_FS)) |
| 2455 | return; |
| 2456 | |
Peter Zijlstra | 2f85018 | 2009-03-20 11:13:20 +0100 | [diff] [blame] | 2457 | if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2458 | return; |
| 2459 | |
| 2460 | mark_held_locks(curr, RECLAIM_FS); |
| 2461 | } |
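
/*
 * The deadlock class this catches, as a sketch (hypothetical lock and
 * call chain): filesystem code allocates with __GFP_FS while holding
 * a lock that reclaim/writeback may also take:
 *
 *	spin_lock(&s->list_lock);
 *	kmalloc(sz, GFP_KERNEL);		GFP_KERNEL includes __GFP_FS
 *	   -> direct reclaim -> writeback
 *	      -> spin_lock(&s->list_lock);	recursion deadlock
 *
 * Marking all held locks RECLAIM_FS-enabled here means the inverse
 * ordering is reported even if reclaim never actually runs.
 */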
| 2462 | |
Peter Zijlstra | 2f85018 | 2009-03-20 11:13:20 +0100 | [diff] [blame] | 2463 | static void check_flags(unsigned long flags); |
| 2464 | |
| 2465 | void lockdep_trace_alloc(gfp_t gfp_mask) |
| 2466 | { |
| 2467 | unsigned long flags; |
| 2468 | |
| 2469 | if (unlikely(current->lockdep_recursion)) |
| 2470 | return; |
| 2471 | |
| 2472 | raw_local_irq_save(flags); |
| 2473 | check_flags(flags); |
| 2474 | current->lockdep_recursion = 1; |
| 2475 | __lockdep_trace_alloc(gfp_mask, flags); |
| 2476 | current->lockdep_recursion = 0; |
| 2477 | raw_local_irq_restore(flags); |
| 2478 | } |
| 2479 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2480 | static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) |
| 2481 | { |
| 2482 | /* |
| 2483 | * If this is a non-trylock acquisition in a hardirq or softirq |
| 2484 | * context, then mark the lock as used in these contexts: |
| 2485 | */ |
| 2486 | if (!hlock->trylock) { |
| 2487 | if (hlock->read) { |
| 2488 | if (curr->hardirq_context) |
| 2489 | if (!mark_lock(curr, hlock, |
| 2490 | LOCK_USED_IN_HARDIRQ_READ)) |
| 2491 | return 0; |
| 2492 | if (curr->softirq_context) |
| 2493 | if (!mark_lock(curr, hlock, |
| 2494 | LOCK_USED_IN_SOFTIRQ_READ)) |
| 2495 | return 0; |
| 2496 | } else { |
| 2497 | if (curr->hardirq_context) |
| 2498 | if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) |
| 2499 | return 0; |
| 2500 | if (curr->softirq_context) |
| 2501 | if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) |
| 2502 | return 0; |
| 2503 | } |
| 2504 | } |
| 2505 | if (!hlock->hardirqs_off) { |
| 2506 | if (hlock->read) { |
| 2507 | if (!mark_lock(curr, hlock, |
Peter Zijlstra | 4fc95e8 | 2009-01-22 13:10:52 +0100 | [diff] [blame] | 2508 | LOCK_ENABLED_HARDIRQ_READ)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2509 | return 0; |
| 2510 | if (curr->softirqs_enabled) |
| 2511 | if (!mark_lock(curr, hlock, |
Peter Zijlstra | 4fc95e8 | 2009-01-22 13:10:52 +0100 | [diff] [blame] | 2512 | LOCK_ENABLED_SOFTIRQ_READ)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2513 | return 0; |
| 2514 | } else { |
| 2515 | if (!mark_lock(curr, hlock, |
Peter Zijlstra | 4fc95e8 | 2009-01-22 13:10:52 +0100 | [diff] [blame] | 2516 | LOCK_ENABLED_HARDIRQ)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2517 | return 0; |
| 2518 | if (curr->softirqs_enabled) |
| 2519 | if (!mark_lock(curr, hlock, |
Peter Zijlstra | 4fc95e8 | 2009-01-22 13:10:52 +0100 | [diff] [blame] | 2520 | LOCK_ENABLED_SOFTIRQ)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2521 | return 0; |
| 2522 | } |
| 2523 | } |
| 2524 | |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 2525 | /* |
| 2526 | * We reuse the irq context infrastructure more broadly, as general |
| 2527 | * context checking code. This tests GFP_FS recursion (a lock taken |
| 2528 | * during reclaim for a GFP_FS allocation is held over a GFP_FS |
| 2529 | * allocation). |
| 2530 | */ |
| 2531 | if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { |
| 2532 | if (hlock->read) { |
| 2533 | if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ)) |
| 2534 | return 0; |
| 2535 | } else { |
| 2536 | if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS)) |
| 2537 | return 0; |
| 2538 | } |
| 2539 | } |
| 2540 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2541 | return 1; |
| 2542 | } |
| 2543 | |
| 2544 | static int separate_irq_context(struct task_struct *curr, |
| 2545 | struct held_lock *hlock) |
| 2546 | { |
| 2547 | unsigned int depth = curr->lockdep_depth; |
| 2548 | |
| 2549 | /* |
| 2550 | * Keep track of points where we cross into an interrupt context: |
| 2551 | */ |
| 2552 | hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + |
| 2553 | curr->softirq_context; |
| 2554 | if (depth) { |
| 2555 | struct held_lock *prev_hlock; |
| 2556 | |
| 2557 | prev_hlock = curr->held_locks + depth-1; |
| 2558 | /* |
| 2559 | * If we cross into another context, reset the |
| 2560 | * hash key (this also prevents the checking and the |
| 2561 | * adding of the dependency to 'prev'): |
| 2562 | */ |
| 2563 | if (prev_hlock->irq_context != hlock->irq_context) |
| 2564 | return 1; |
| 2565 | } |
| 2566 | return 0; |
| 2567 | } |
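
/*
 * The irq_context encoding above, spelled out for the common
 * (non-nested) case:
 *
 *	irq_context = 2 * !!hardirq_context + softirq_context;
 *
 *	0 - process context
 *	1 - softirq context
 *	2 - hardirq context
 *	3 - hardirq that interrupted a softirq
 *
 * A change in this value between adjacent held locks marks a context
 * boundary, where the chain key is restarted.
 */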
| 2568 | |
| 2569 | #else |
| 2570 | |
| 2571 | static inline |
| 2572 | int mark_lock_irq(struct task_struct *curr, struct held_lock *this, |
| 2573 | enum lock_usage_bit new_bit) |
| 2574 | { |
| 2575 | WARN_ON(1); |
| 2576 | return 1; |
| 2577 | } |
| 2578 | |
| 2579 | static inline int mark_irqflags(struct task_struct *curr, |
| 2580 | struct held_lock *hlock) |
| 2581 | { |
| 2582 | return 1; |
| 2583 | } |
| 2584 | |
| 2585 | static inline int separate_irq_context(struct task_struct *curr, |
| 2586 | struct held_lock *hlock) |
| 2587 | { |
| 2588 | return 0; |
| 2589 | } |
| 2590 | |
Peter Zijlstra | 868a23a | 2009-02-15 00:25:21 +0100 | [diff] [blame] | 2591 | void lockdep_trace_alloc(gfp_t gfp_mask) |
| 2592 | { |
| 2593 | } |
| 2594 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2595 | #endif |
| 2596 | |
| 2597 | /* |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2598 | * Mark a lock with a usage bit, and validate the state transition: |
| 2599 | */ |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 2600 | static int mark_lock(struct task_struct *curr, struct held_lock *this, |
Steven Rostedt | 0764d23 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2601 | enum lock_usage_bit new_bit) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2602 | { |
| 2603 | unsigned int new_mask = 1 << new_bit, ret = 1; |
| 2604 | |
| 2605 | /* |
| 2606 | * If already set then do not dirty the cacheline, |
| 2607 | * nor do any checks: |
| 2608 | */ |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2609 | if (likely(hlock_class(this)->usage_mask & new_mask)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2610 | return 1; |
| 2611 | |
| 2612 | if (!graph_lock()) |
| 2613 | return 0; |
| 2614 | /* |
| 2615 | * Make sure we didn't race: |
| 2616 | */ |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2617 | if (unlikely(hlock_class(this)->usage_mask & new_mask)) { |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2618 | graph_unlock(); |
| 2619 | return 1; |
| 2620 | } |
| 2621 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2622 | hlock_class(this)->usage_mask |= new_mask; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2623 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2624 | if (!save_trace(hlock_class(this)->usage_traces + new_bit)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2625 | return 0; |
| 2626 | |
| 2627 | switch (new_bit) { |
Peter Zijlstra | 5346417 | 2009-01-22 14:15:53 +0100 | [diff] [blame] | 2628 | #define LOCKDEP_STATE(__STATE) \ |
| 2629 | case LOCK_USED_IN_##__STATE: \ |
| 2630 | case LOCK_USED_IN_##__STATE##_READ: \ |
| 2631 | case LOCK_ENABLED_##__STATE: \ |
| 2632 | case LOCK_ENABLED_##__STATE##_READ: |
| 2633 | #include "lockdep_states.h" |
| 2634 | #undef LOCKDEP_STATE |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2635 | ret = mark_lock_irq(curr, this, new_bit); |
| 2636 | if (!ret) |
| 2637 | return 0; |
| 2638 | break; |
| 2639 | case LOCK_USED: |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2640 | debug_atomic_dec(&nr_unused_locks); |
| 2641 | break; |
| 2642 | default: |
| 2643 | if (!debug_locks_off_graph_unlock()) |
| 2644 | return 0; |
| 2645 | WARN_ON(1); |
| 2646 | return 0; |
| 2647 | } |
| 2648 | |
| 2649 | graph_unlock(); |
| 2650 | |
| 2651 | /* |
| 2652 | * We must printk outside of the graph_lock: |
| 2653 | */ |
| 2654 | if (ret == 2) { |
| 2655 | printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); |
| 2656 | print_lock(this); |
| 2657 | print_irqtrace_events(curr); |
| 2658 | dump_stack(); |
| 2659 | } |
| 2660 | |
| 2661 | return ret; |
| 2662 | } |
| 2663 | |
| 2664 | /* |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2665 | * Initialize a lock instance's lock-class mapping info: |
| 2666 | */ |
| 2667 | void lockdep_init_map(struct lockdep_map *lock, const char *name, |
Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 2668 | struct lock_class_key *key, int subclass) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2669 | { |
Peter Zijlstra | c8a2500 | 2009-04-17 09:40:49 +0200 | [diff] [blame] | 2670 | lock->class_cache = NULL; |
| 2671 | #ifdef CONFIG_LOCK_STAT |
| 2672 | lock->cpu = raw_smp_processor_id(); |
| 2673 | #endif |
| 2674 | |
| 2675 | if (DEBUG_LOCKS_WARN_ON(!name)) { |
| 2676 | lock->name = "NULL"; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2677 | return; |
Peter Zijlstra | c8a2500 | 2009-04-17 09:40:49 +0200 | [diff] [blame] | 2678 | } |
| 2679 | |
| 2680 | lock->name = name; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2681 | |
| 2682 | if (DEBUG_LOCKS_WARN_ON(!key)) |
| 2683 | return; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2684 | /* |
| 2685 | * Sanity check, the lock-class key must be persistent: |
| 2686 | */ |
| 2687 | if (!static_obj(key)) { |
| 2688 | printk("BUG: key %p not in .data!\n", key); |
| 2689 | DEBUG_LOCKS_WARN_ON(1); |
| 2690 | return; |
| 2691 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2692 | lock->key = key; |
Peter Zijlstra | c8a2500 | 2009-04-17 09:40:49 +0200 | [diff] [blame] | 2693 | |
| 2694 | if (unlikely(!debug_locks)) |
| 2695 | return; |
| 2696 | |
Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 2697 | if (subclass) |
| 2698 | register_lock_class(lock, subclass, 1); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2699 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2700 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
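
/*
 * Typical open-coded usage (a sketch of the pattern; the spinlock and
 * mutex init macros do this internally via their own keys):
 *
 *	struct foo {				hypothetical object
 *		struct lockdep_map dep_map;
 *	};
 *
 *	static struct lock_class_key foo_key;	static: the key's address
 *						identifies the class
 *	void foo_init(struct foo *f)
 *	{
 *		lockdep_init_map(&f->dep_map, "foo", &foo_key, 0);
 *	}
 */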
| 2701 | |
| 2702 | /* |
| 2703 | * This gets called for every mutex_lock*()/spin_lock*() operation. |
| 2704 | * We maintain the dependency maps and validate the locking attempt: |
| 2705 | */ |
| 2706 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
| 2707 | int trylock, int read, int check, int hardirqs_off, |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2708 | struct lockdep_map *nest_lock, unsigned long ip, |
| 2709 | int references) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2710 | { |
| 2711 | struct task_struct *curr = current; |
Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2712 | struct lock_class *class = NULL; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2713 | struct held_lock *hlock; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2714 | unsigned int depth, id; |
| 2715 | int chain_head = 0; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2716 | int class_idx; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2717 | u64 chain_key; |
| 2718 | |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 2719 | if (!prove_locking) |
| 2720 | check = 1; |
| 2721 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2722 | if (unlikely(!debug_locks)) |
| 2723 | return 0; |
| 2724 | |
| 2725 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2726 | return 0; |
| 2727 | |
| 2728 | if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { |
| 2729 | debug_locks_off(); |
| 2730 | printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); |
| 2731 | printk("turning off the locking correctness validator.\n"); |
Peter Zijlstra | eedeeab | 2009-03-18 12:38:47 +0100 | [diff] [blame] | 2732 | dump_stack(); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2733 | return 0; |
| 2734 | } |
| 2735 | |
Ingo Molnar | d6d897c | 2006-07-10 04:44:04 -0700 | [diff] [blame] | 2736 | if (!subclass) |
| 2737 | class = lock->class_cache; |
| 2738 | /* |
| 2739 | * Not cached yet or subclass? |
| 2740 | */ |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2741 | if (unlikely(!class)) { |
Peter Zijlstra | 4dfbb9d | 2006-10-11 01:45:14 -0400 | [diff] [blame] | 2742 | class = register_lock_class(lock, subclass, 0); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2743 | if (!class) |
| 2744 | return 0; |
| 2745 | } |
| 2746 | debug_atomic_inc((atomic_t *)&class->ops); |
| 2747 | if (very_verbose(class)) { |
| 2748 | printk("\nacquire class [%p] %s", class->key, class->name); |
| 2749 | if (class->name_version > 1) |
| 2750 | printk("#%d", class->name_version); |
| 2751 | printk("\n"); |
| 2752 | dump_stack(); |
| 2753 | } |
| 2754 | |
| 2755 | /* |
| 2756 | * Add the lock to the list of currently held locks. |
| 2757 | * (we don't increase the depth just yet, not until the |
| 2758 | * dependency checks are done) |
| 2759 | */ |
| 2760 | depth = curr->lockdep_depth; |
| 2761 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) |
| 2762 | return 0; |
| 2763 | |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2764 | class_idx = class - lock_classes + 1; |
| 2765 | |
| 2766 | if (depth) { |
| 2767 | hlock = curr->held_locks + depth - 1; |
| 2768 | if (hlock->class_idx == class_idx && nest_lock) { |
| 2769 | if (hlock->references) |
| 2770 | hlock->references++; |
| 2771 | else |
| 2772 | hlock->references = 2; |
| 2773 | |
| 2774 | return 1; |
| 2775 | } |
| 2776 | } |
| 2777 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2778 | hlock = curr->held_locks + depth; |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2779 | if (DEBUG_LOCKS_WARN_ON(!class)) |
| 2780 | return 0; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2781 | hlock->class_idx = class_idx; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2782 | hlock->acquire_ip = ip; |
| 2783 | hlock->instance = lock; |
Peter Zijlstra | 7531e2f | 2008-08-11 09:30:24 +0200 | [diff] [blame] | 2784 | hlock->nest_lock = nest_lock; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2785 | hlock->trylock = trylock; |
| 2786 | hlock->read = read; |
| 2787 | hlock->check = check; |
Dmitry Baryshkov | 6951b12 | 2008-08-18 04:26:37 +0400 | [diff] [blame] | 2788 | hlock->hardirqs_off = !!hardirqs_off; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2789 | hlock->references = references; |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 2790 | #ifdef CONFIG_LOCK_STAT |
| 2791 | hlock->waittime_stamp = 0; |
| 2792 | hlock->holdtime_stamp = sched_clock(); |
| 2793 | #endif |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2794 | |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2795 | if (check == 2 && !mark_irqflags(curr, hlock)) |
| 2796 | return 0; |
| 2797 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2798 | /* mark it as used: */ |
Jarek Poplawski | 4ff773bb | 2007-05-08 00:31:00 -0700 | [diff] [blame] | 2799 | if (!mark_lock(curr, hlock, LOCK_USED)) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2800 | return 0; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2801 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2802 | /* |
Gautham R Shenoy | 17aacfb9 | 2007-10-28 20:47:01 +0100 | [diff] [blame] | 2803 | * Calculate the chain hash: it's the combined hash of all the |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2804 | * lock keys along the dependency chain. We save the hash value |
| 2805 | * at every step so that we can get the current hash easily |
| 2806 | * after unlock. The chain hash is then used to cache dependency |
| 2807 | * results. |
| 2808 | * |
| 2809 | * The 'key ID' is the most compact key value we can use to drive |
| 2810 | * the hash, rather than class->key itself. |
| 2811 | */ |
| 2812 | id = class - lock_classes; |
| 2813 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) |
| 2814 | return 0; |
| 2815 | |
| 2816 | chain_key = curr->curr_chain_key; |
| 2817 | if (!depth) { |
| 2818 | if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) |
| 2819 | return 0; |
| 2820 | chain_head = 1; |
| 2821 | } |
| 2822 | |
| 2823 | hlock->prev_chain_key = chain_key; |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2824 | if (separate_irq_context(curr, hlock)) { |
| 2825 | chain_key = 0; |
| 2826 | chain_head = 1; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2827 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2828 | chain_key = iterate_chain_key(chain_key, id); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2829 | |
Gregory Haskins | 3aa416b | 2007-10-11 22:11:11 +0200 | [diff] [blame] | 2830 | if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) |
Peter Zijlstra | 8e18257 | 2007-07-19 01:48:54 -0700 | [diff] [blame] | 2831 | return 0; |
Jarek Poplawski | 381a229 | 2007-02-10 01:44:58 -0800 | [diff] [blame] | 2832 | |
Gregory Haskins | 3aa416b | 2007-10-11 22:11:11 +0200 | [diff] [blame] | 2833 | curr->curr_chain_key = chain_key; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2834 | curr->lockdep_depth++; |
| 2835 | check_chain_key(curr); |
Jarek Poplawski | 60e114d | 2007-02-20 13:58:00 -0800 | [diff] [blame] | 2836 | #ifdef CONFIG_DEBUG_LOCKDEP |
| 2837 | if (unlikely(!debug_locks)) |
| 2838 | return 0; |
| 2839 | #endif |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2840 | if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { |
| 2841 | debug_locks_off(); |
| 2842 | printk("BUG: MAX_LOCK_DEPTH too low!\n"); |
| 2843 | printk("turning off the locking correctness validator.\n"); |
Peter Zijlstra | eedeeab | 2009-03-18 12:38:47 +0100 | [diff] [blame] | 2844 | dump_stack(); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2845 | return 0; |
| 2846 | } |
Jarek Poplawski | 381a229 | 2007-02-10 01:44:58 -0800 | [diff] [blame] | 2847 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2848 | if (unlikely(curr->lockdep_depth > max_lockdep_depth)) |
| 2849 | max_lockdep_depth = curr->lockdep_depth; |
| 2850 | |
| 2851 | return 1; |
| 2852 | } |
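
/*
 * Chain-key folding, sketched (iterate_chain_key() is defined earlier
 * in this file; the exact mixing function is irrelevant here): taking
 * A, then B, then C in one context computes
 *
 *	key_A = iterate_chain_key(0, id_A);
 *	key_B = iterate_chain_key(key_A, id_B);
 *	key_C = iterate_chain_key(key_B, id_C);
 *
 * and each held_lock stores the value from *before* its own fold in
 * ->prev_chain_key, so an unlock can restore the earlier key without
 * recomputing the whole chain.
 */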
| 2853 | |
| 2854 | static int |
| 2855 | print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, |
| 2856 | unsigned long ip) |
| 2857 | { |
| 2858 | if (!debug_locks_off()) |
| 2859 | return 0; |
| 2860 | if (debug_locks_silent) |
| 2861 | return 0; |
| 2862 | |
| 2863 | printk("\n=====================================\n"); |
| 2864 | printk( "[ BUG: bad unlock balance detected! ]\n"); |
| 2865 | printk( "-------------------------------------\n"); |
| 2866 | printk("%s/%d is trying to release lock (", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 2867 | curr->comm, task_pid_nr(curr)); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2868 | print_lockdep_cache(lock); |
| 2869 | printk(") at:\n"); |
| 2870 | print_ip_sym(ip); |
| 2871 | printk("but there are no more locks to release!\n"); |
| 2872 | printk("\nother info that might help us debug this:\n"); |
| 2873 | lockdep_print_held_locks(curr); |
| 2874 | |
| 2875 | printk("\nstack backtrace:\n"); |
| 2876 | dump_stack(); |
| 2877 | |
| 2878 | return 0; |
| 2879 | } |
| 2880 | |
| 2881 | /* |
| 2882 | * Common debugging checks for both nested and non-nested unlock: |
| 2883 | */ |
| 2884 | static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, |
| 2885 | unsigned long ip) |
| 2886 | { |
| 2887 | if (unlikely(!debug_locks)) |
| 2888 | return 0; |
| 2889 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2890 | return 0; |
| 2891 | |
| 2892 | if (curr->lockdep_depth <= 0) |
| 2893 | return print_unlock_inbalance_bug(curr, lock, ip); |
| 2894 | |
| 2895 | return 1; |
| 2896 | } |
| 2897 | |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2898 | static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) |
| 2899 | { |
| 2900 | if (hlock->instance == lock) |
| 2901 | return 1; |
| 2902 | |
| 2903 | if (hlock->references) { |
| 2904 | struct lock_class *class = lock->class_cache; |
| 2905 | |
| 2906 | if (!class) |
| 2907 | class = look_up_lock_class(lock, 0); |
| 2908 | |
| 2909 | if (DEBUG_LOCKS_WARN_ON(!class)) |
| 2910 | return 0; |
| 2911 | |
| 2912 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) |
| 2913 | return 0; |
| 2914 | |
| 2915 | if (hlock->class_idx == class - lock_classes + 1) |
| 2916 | return 1; |
| 2917 | } |
| 2918 | |
| 2919 | return 0; |
| 2920 | } |
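
/*
 * What the ->references counting buys us, as a hypothetical sequence:
 * two objects of the same class acquired under the same nest_lock:
 *
 *	lock_acquire(&a->dep_map, 0, 0, 0, 2, &outer->dep_map, _RET_IP_);
 *	lock_acquire(&b->dep_map, 0, 0, 0, 2, &outer->dep_map, _RET_IP_);
 *
 * The second acquire doesn't push a new held_lock: __lock_acquire()
 * bumps ->references on the existing top entry (to 2 on first reuse),
 * and match_held_lock() then matches by class rather than by the lock
 * instance pointer.
 */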
| 2921 | |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2922 | static int |
Peter Zijlstra | 00ef9f7 | 2008-12-04 09:00:17 +0100 | [diff] [blame] | 2923 | __lock_set_class(struct lockdep_map *lock, const char *name, |
| 2924 | struct lock_class_key *key, unsigned int subclass, |
| 2925 | unsigned long ip) |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2926 | { |
| 2927 | struct task_struct *curr = current; |
| 2928 | struct held_lock *hlock, *prev_hlock; |
| 2929 | struct lock_class *class; |
| 2930 | unsigned int depth; |
| 2931 | int i; |
| 2932 | |
| 2933 | depth = curr->lockdep_depth; |
| 2934 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 2935 | return 0; |
| 2936 | |
| 2937 | prev_hlock = NULL; |
| 2938 | for (i = depth-1; i >= 0; i--) { |
| 2939 | hlock = curr->held_locks + i; |
| 2940 | /* |
| 2941 | * We must not cross into another context: |
| 2942 | */ |
| 2943 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
| 2944 | break; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2945 | if (match_held_lock(hlock, lock)) |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2946 | goto found_it; |
| 2947 | prev_hlock = hlock; |
| 2948 | } |
| 2949 | return print_unlock_inbalance_bug(curr, lock, ip); |
| 2950 | |
| 2951 | found_it: |
Peter Zijlstra | 00ef9f7 | 2008-12-04 09:00:17 +0100 | [diff] [blame] | 2952 | lockdep_init_map(lock, name, key, 0); |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2953 | class = register_lock_class(lock, subclass, 0); |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2954 | hlock->class_idx = class - lock_classes + 1; |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2955 | |
| 2956 | curr->lockdep_depth = i; |
| 2957 | curr->curr_chain_key = hlock->prev_chain_key; |
| 2958 | |
| 2959 | for (; i < depth; i++) { |
| 2960 | hlock = curr->held_locks + i; |
| 2961 | if (!__lock_acquire(hlock->instance, |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 2962 | hlock_class(hlock)->subclass, hlock->trylock, |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2963 | hlock->read, hlock->check, hlock->hardirqs_off, |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 2964 | hlock->nest_lock, hlock->acquire_ip, |
| 2965 | hlock->references)) |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 2966 | return 0; |
| 2967 | } |
| 2968 | |
| 2969 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) |
| 2970 | return 0; |
| 2971 | return 1; |
| 2972 | } |
| 2973 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 2974 | /* |
| 2975 | * Remove the lock from the list of currently held locks in a |
| 2976 | * potentially non-nested (out of order) manner. This is a |
| 2977 | * relatively rare operation, as all the unlock APIs default |
| 2978 | * to nested mode (which uses lock_release()): |
| 2979 | */ |
| 2980 | static int |
| 2981 | lock_release_non_nested(struct task_struct *curr, |
| 2982 | struct lockdep_map *lock, unsigned long ip) |
| 2983 | { |
| 2984 | struct held_lock *hlock, *prev_hlock; |
| 2985 | unsigned int depth; |
| 2986 | int i; |
| 2987 | |
| 2988 | /* |
| 2989 | * Check whether the lock exists in the current stack |
| 2990 | * of held locks: |
| 2991 | */ |
| 2992 | depth = curr->lockdep_depth; |
| 2993 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 2994 | return 0; |
| 2995 | |
| 2996 | prev_hlock = NULL; |
| 2997 | for (i = depth-1; i >= 0; i--) { |
| 2998 | hlock = curr->held_locks + i; |
| 2999 | /* |
| 3000 | * We must not cross into another context: |
| 3001 | */ |
| 3002 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
| 3003 | break; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3004 | if (match_held_lock(hlock, lock)) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3005 | goto found_it; |
| 3006 | prev_hlock = hlock; |
| 3007 | } |
| 3008 | return print_unlock_inbalance_bug(curr, lock, ip); |
| 3009 | |
| 3010 | found_it: |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3011 | if (hlock->instance == lock) |
| 3012 | lock_release_holdtime(hlock); |
| 3013 | |
| 3014 | if (hlock->references) { |
| 3015 | hlock->references--; |
| 3016 | if (hlock->references) { |
| 3017 | /* |
| 3018 | * We had references and, after dropping one, still |
| 3019 | * have some left; the current lock stack remains |
| 3020 | * valid. We're done! |
| 3021 | */ |
| 3022 | return 1; |
| 3023 | } |
| 3024 | } |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3025 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3026 | /* |
| 3027 | * We have the right lock to unlock, 'hlock' points to it. |
| 3028 | * Now we remove it from the stack, and add back the other |
| 3029 | * entries (if any), recalculating the hash along the way: |
| 3030 | */ |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3031 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3032 | curr->lockdep_depth = i; |
| 3033 | curr->curr_chain_key = hlock->prev_chain_key; |
| 3034 | |
| 3035 | for (i++; i < depth; i++) { |
| 3036 | hlock = curr->held_locks + i; |
| 3037 | if (!__lock_acquire(hlock->instance, |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 3038 | hlock_class(hlock)->subclass, hlock->trylock, |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3039 | hlock->read, hlock->check, hlock->hardirqs_off, |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3040 | hlock->nest_lock, hlock->acquire_ip, |
| 3041 | hlock->references)) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3042 | return 0; |
| 3043 | } |
| 3044 | |
| 3045 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) |
| 3046 | return 0; |
| 3047 | return 1; |
| 3048 | } |
| 3049 | |
| 3050 | /* |
| 3051 | * Remove the lock from the list of currently held locks - this gets |
| 3052 | * called on mutex_unlock()/spin_unlock*() (or on a failed |
| 3053 | * mutex_lock_interruptible()). This is done for unlocks that nest |
| 3054 | * perfectly. (i.e. the current top of the lock-stack is unlocked) |
| 3055 | */ |
| 3056 | static int lock_release_nested(struct task_struct *curr, |
| 3057 | struct lockdep_map *lock, unsigned long ip) |
| 3058 | { |
| 3059 | struct held_lock *hlock; |
| 3060 | unsigned int depth; |
| 3061 | |
| 3062 | /* |
| 3063 | * Pop off the top of the lock stack: |
| 3064 | */ |
| 3065 | depth = curr->lockdep_depth - 1; |
| 3066 | hlock = curr->held_locks + depth; |
| 3067 | |
| 3068 | /* |
| 3069 | * Is the unlock non-nested: |
| 3070 | */ |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3071 | if (hlock->instance != lock || hlock->references) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3072 | return lock_release_non_nested(curr, lock, ip); |
| 3073 | curr->lockdep_depth--; |
| 3074 | |
| 3075 | if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) |
| 3076 | return 0; |
| 3077 | |
| 3078 | curr->curr_chain_key = hlock->prev_chain_key; |
| 3079 | |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3080 | lock_release_holdtime(hlock); |
| 3081 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3082 | #ifdef CONFIG_DEBUG_LOCKDEP |
| 3083 | hlock->prev_chain_key = 0; |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 3084 | hlock->class_idx = 0; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3085 | hlock->acquire_ip = 0; |
| 3086 | hlock->irq_context = 0; |
| 3087 | #endif |
| 3088 | return 1; |
| 3089 | } |
| 3090 | |
| 3091 | /* |
| 3092 | * Remove the lock from the list of currently held locks - this gets |
| 3093 | * called on mutex_unlock()/spin_unlock*() (or on a failed |
| 3094 | * mutex_lock_interruptible()). It dispatches to the nested or the |
| 3095 | * non-nested variant above, depending on the unlock mode. |
| 3096 | */ |
| 3097 | static void |
| 3098 | __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) |
| 3099 | { |
| 3100 | struct task_struct *curr = current; |
| 3101 | |
| 3102 | if (!check_unlock(curr, lock, ip)) |
| 3103 | return; |
| 3104 | |
| 3105 | if (nested) { |
| 3106 | if (!lock_release_nested(curr, lock, ip)) |
| 3107 | return; |
| 3108 | } else { |
| 3109 | if (!lock_release_non_nested(curr, lock, ip)) |
| 3110 | return; |
| 3111 | } |
| 3112 | |
| 3113 | check_chain_key(curr); |
| 3114 | } |
| 3115 | |
Peter Zijlstra | f607c66 | 2009-07-20 19:16:29 +0200 | [diff] [blame] | 3116 | static int __lock_is_held(struct lockdep_map *lock) |
| 3117 | { |
| 3118 | struct task_struct *curr = current; |
| 3119 | int i; |
| 3120 | |
| 3121 | for (i = 0; i < curr->lockdep_depth; i++) { |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3122 | struct held_lock *hlock = curr->held_locks + i; |
| 3123 | |
| 3124 | if (match_held_lock(hlock, lock)) |
Peter Zijlstra | f607c66 | 2009-07-20 19:16:29 +0200 | [diff] [blame] | 3125 | return 1; |
| 3126 | } |
| 3127 | |
| 3128 | return 0; |
| 3129 | } |
| 3130 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3131 | /* |
| 3132 | * Check whether we follow the irq-flags state precisely: |
| 3133 | */ |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 3134 | static void check_flags(unsigned long flags) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3135 | { |
Ingo Molnar | 992860e | 2008-07-14 10:28:38 +0200 | [diff] [blame] | 3136 | #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ |
| 3137 | defined(CONFIG_TRACE_IRQFLAGS) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3138 | if (!debug_locks) |
| 3139 | return; |
| 3140 | |
Ingo Molnar | 5f9fa8a | 2007-12-07 19:02:47 +0100 | [diff] [blame] | 3141 | if (irqs_disabled_flags(flags)) { |
| 3142 | if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { |
| 3143 | printk("possible reason: unannotated irqs-off.\n"); |
| 3144 | } |
| 3145 | } else { |
| 3146 | if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { |
| 3147 | printk("possible reason: unannotated irqs-on.\n"); |
| 3148 | } |
| 3149 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3150 | |
| 3151 | /* |
| 3152 | * We don't accurately track softirq state in e.g. |
| 3153 | * hardirq contexts (such as on 4KSTACKS), so only |
| 3154 | * check if not in hardirq contexts: |
| 3155 | */ |
| 3156 | if (!hardirq_count()) { |
| 3157 | if (softirq_count()) |
| 3158 | DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); |
| 3159 | else |
| 3160 | DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); |
| 3161 | } |
| 3162 | |
| 3163 | if (!debug_locks) |
| 3164 | print_irqtrace_events(current); |
| 3165 | #endif |
| 3166 | } |
| 3167 | |
Peter Zijlstra | 00ef9f7 | 2008-12-04 09:00:17 +0100 | [diff] [blame] | 3168 | void lock_set_class(struct lockdep_map *lock, const char *name, |
| 3169 | struct lock_class_key *key, unsigned int subclass, |
| 3170 | unsigned long ip) |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 3171 | { |
| 3172 | unsigned long flags; |
| 3173 | |
| 3174 | if (unlikely(current->lockdep_recursion)) |
| 3175 | return; |
| 3176 | |
| 3177 | raw_local_irq_save(flags); |
| 3178 | current->lockdep_recursion = 1; |
| 3179 | check_flags(flags); |
Peter Zijlstra | 00ef9f7 | 2008-12-04 09:00:17 +0100 | [diff] [blame] | 3180 | if (__lock_set_class(lock, name, key, subclass, ip)) |
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 3181 | check_chain_key(current); |
| 3182 | current->lockdep_recursion = 0; |
| 3183 | raw_local_irq_restore(flags); |
| 3184 | } |
Peter Zijlstra | 00ef9f7 | 2008-12-04 09:00:17 +0100 | [diff] [blame] | 3185 | EXPORT_SYMBOL_GPL(lock_set_class); |
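
/*
 * Usage sketch: re-annotate a lock while it is held. The
 * lock_set_subclass() wrapper in lockdep.h builds on this, keeping
 * the name and key but switching the subclass, e.g. to re-mark the
 * second of two same-class locks after a trylock dance:
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 *
 * The full lock_set_class() form can likewise move a held lock to a
 * different name/key entirely.
 */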
Peter Zijlstra | 64aa348 | 2008-08-11 09:30:21 +0200 | [diff] [blame] | 3186 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3187 | /* |
| 3188 | * We are not always called with irqs disabled - do that here, |
| 3189 | * and also avoid lockdep recursion: |
| 3190 | */ |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 3191 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
Peter Zijlstra | 7531e2f | 2008-08-11 09:30:24 +0200 | [diff] [blame] | 3192 | int trylock, int read, int check, |
| 3193 | struct lockdep_map *nest_lock, unsigned long ip) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3194 | { |
| 3195 | unsigned long flags; |
| 3196 | |
Peter Zijlstra | efed792 | 2009-03-04 12:32:55 +0100 | [diff] [blame] | 3197 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); |
| 3198 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3199 | if (unlikely(current->lockdep_recursion)) |
| 3200 | return; |
| 3201 | |
| 3202 | raw_local_irq_save(flags); |
| 3203 | check_flags(flags); |
| 3204 | |
| 3205 | current->lockdep_recursion = 1; |
| 3206 | __lock_acquire(lock, subclass, trylock, read, check, |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3207 | irqs_disabled_flags(flags), nest_lock, ip, 0); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3208 | current->lockdep_recursion = 0; |
| 3209 | raw_local_irq_restore(flags); |
| 3210 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3211 | EXPORT_SYMBOL_GPL(lock_acquire); |
| 3212 | |
Steven Rostedt | 1d09daa | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 3213 | void lock_release(struct lockdep_map *lock, int nested, |
Steven Rostedt | 0764d23 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 3214 | unsigned long ip) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3215 | { |
| 3216 | unsigned long flags; |
| 3217 | |
Peter Zijlstra | efed792 | 2009-03-04 12:32:55 +0100 | [diff] [blame] | 3218 | trace_lock_release(lock, nested, ip); |
| 3219 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3220 | if (unlikely(current->lockdep_recursion)) |
| 3221 | return; |
| 3222 | |
| 3223 | raw_local_irq_save(flags); |
| 3224 | check_flags(flags); |
| 3225 | current->lockdep_recursion = 1; |
| 3226 | __lock_release(lock, nested, ip); |
| 3227 | current->lockdep_recursion = 0; |
| 3228 | raw_local_irq_restore(flags); |
| 3229 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3230 | EXPORT_SYMBOL_GPL(lock_release); |
| 3231 | |
Peter Zijlstra | f607c66 | 2009-07-20 19:16:29 +0200 | [diff] [blame] | 3232 | int lock_is_held(struct lockdep_map *lock) |
| 3233 | { |
| 3234 | unsigned long flags; |
| 3235 | int ret = 0; |
| 3236 | |
| 3237 | if (unlikely(current->lockdep_recursion)) |
| 3238 | return ret; |
| 3239 | |
| 3240 | raw_local_irq_save(flags); |
| 3241 | check_flags(flags); |
| 3242 | |
| 3243 | current->lockdep_recursion = 1; |
| 3244 | ret = __lock_is_held(lock); |
| 3245 | current->lockdep_recursion = 0; |
| 3246 | raw_local_irq_restore(flags); |
| 3247 | |
| 3248 | return ret; |
| 3249 | } |
| 3250 | EXPORT_SYMBOL_GPL(lock_is_held); |
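
/*
 * Typical use is an assertion (the lockdep_assert_held() helper in
 * lockdep.h is essentially this):
 *
 *	WARN_ON(debug_locks && !lock_is_held(&mylock.dep_map));
 *
 * documenting - and runtime-checking - "callers must hold mylock".
 */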
| 3251 | |
Nick Piggin | cf40bd1 | 2009-01-21 08:12:39 +0100 | [diff] [blame] | 3252 | void lockdep_set_current_reclaim_state(gfp_t gfp_mask) |
| 3253 | { |
| 3254 | current->lockdep_reclaim_gfp = gfp_mask; |
| 3255 | } |
| 3256 | |
| 3257 | void lockdep_clear_current_reclaim_state(void) |
| 3258 | { |
| 3259 | current->lockdep_reclaim_gfp = 0; |
| 3260 | } |
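
/*
 * Usage sketch: a reclaim daemon brackets its main loop so that locks
 * taken inside it are marked USED_IN_RECLAIM_FS (kswapd does
 * essentially this):
 *
 *	lockdep_set_current_reclaim_state(GFP_KERNEL);
 *	...				reclaim work, locks get marked
 *	lockdep_clear_current_reclaim_state();
 */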
| 3261 | |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3262 | #ifdef CONFIG_LOCK_STAT |
| 3263 | static int |
| 3264 | print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, |
| 3265 | unsigned long ip) |
| 3266 | { |
| 3267 | if (!debug_locks_off()) |
| 3268 | return 0; |
| 3269 | if (debug_locks_silent) |
| 3270 | return 0; |
| 3271 | |
| 3272 | printk("\n=================================\n"); |
| 3273 | printk( "[ BUG: bad contention detected! ]\n"); |
| 3274 | printk( "---------------------------------\n"); |
| 3275 | printk("%s/%d is trying to contend lock (", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 3276 | curr->comm, task_pid_nr(curr)); |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3277 | print_lockdep_cache(lock); |
| 3278 | printk(") at:\n"); |
| 3279 | print_ip_sym(ip); |
| 3280 | printk("but there are no locks held!\n"); |
| 3281 | printk("\nother info that might help us debug this:\n"); |
| 3282 | lockdep_print_held_locks(curr); |
| 3283 | |
| 3284 | printk("\nstack backtrace:\n"); |
| 3285 | dump_stack(); |
| 3286 | |
| 3287 | return 0; |
| 3288 | } |
| 3289 | |
| 3290 | static void |
| 3291 | __lock_contended(struct lockdep_map *lock, unsigned long ip) |
| 3292 | { |
| 3293 | struct task_struct *curr = current; |
| 3294 | struct held_lock *hlock, *prev_hlock; |
| 3295 | struct lock_class_stats *stats; |
| 3296 | unsigned int depth; |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3297 | int i, contention_point, contending_point; |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3298 | |
| 3299 | depth = curr->lockdep_depth; |
| 3300 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 3301 | return; |
| 3302 | |
| 3303 | prev_hlock = NULL; |
| 3304 | for (i = depth-1; i >= 0; i--) { |
| 3305 | hlock = curr->held_locks + i; |
| 3306 | /* |
| 3307 | * We must not cross into another context: |
| 3308 | */ |
| 3309 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
| 3310 | break; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3311 | if (match_held_lock(hlock, lock)) |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3312 | goto found_it; |
| 3313 | prev_hlock = hlock; |
| 3314 | } |
| 3315 | print_lock_contention_bug(curr, lock, ip); |
| 3316 | return; |
| 3317 | |
| 3318 | found_it: |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3319 | if (hlock->instance != lock) |
| 3320 | return; |
| 3321 | |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3322 | hlock->waittime_stamp = sched_clock(); |
| 3323 | |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3324 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); |
| 3325 | contending_point = lock_point(hlock_class(hlock)->contending_point, |
| 3326 | lock->ip); |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3327 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 3328 | stats = get_lock_stats(hlock_class(hlock)); |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3329 | if (contention_point < LOCKSTAT_POINTS) |
| 3330 | stats->contention_point[contention_point]++; |
| 3331 | if (contending_point < LOCKSTAT_POINTS) |
| 3332 | stats->contending_point[contending_point]++; |
Peter Zijlstra | 9664567 | 2007-07-19 01:49:00 -0700 | [diff] [blame] | 3333 | if (lock->cpu != smp_processor_id()) |
| 3334 | stats->bounces[bounce_contended + !!hlock->read]++; |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3335 | put_lock_stats(stats); |
| 3336 | } |
| 3337 | |
| 3338 | static void |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3339 | __lock_acquired(struct lockdep_map *lock, unsigned long ip) |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3340 | { |
| 3341 | struct task_struct *curr = current; |
| 3342 | struct held_lock *hlock, *prev_hlock; |
| 3343 | struct lock_class_stats *stats; |
| 3344 | unsigned int depth; |
| 3345 | u64 now; |
Peter Zijlstra | 9664567 | 2007-07-19 01:49:00 -0700 | [diff] [blame] | 3346 | s64 waittime = 0; |
| 3347 | int i, cpu; |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3348 | |
| 3349 | depth = curr->lockdep_depth; |
| 3350 | if (DEBUG_LOCKS_WARN_ON(!depth)) |
| 3351 | return; |
| 3352 | |
| 3353 | prev_hlock = NULL; |
| 3354 | for (i = depth-1; i >= 0; i--) { |
| 3355 | hlock = curr->held_locks + i; |
| 3356 | /* |
| 3357 | * We must not cross into another context: |
| 3358 | */ |
| 3359 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
| 3360 | break; |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3361 | if (match_held_lock(hlock, lock)) |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3362 | goto found_it; |
| 3363 | prev_hlock = hlock; |
| 3364 | } |
| 3365 | print_lock_contention_bug(curr, lock, _RET_IP_); |
| 3366 | return; |
| 3367 | |
| 3368 | found_it: |
Peter Zijlstra | bb97a91 | 2009-07-20 19:15:35 +0200 | [diff] [blame] | 3369 | if (hlock->instance != lock) |
| 3370 | return; |
| 3371 | |
Peter Zijlstra | 9664567 | 2007-07-19 01:49:00 -0700 | [diff] [blame] | 3372 | cpu = smp_processor_id(); |
| 3373 | if (hlock->waittime_stamp) { |
| 3374 | now = sched_clock(); |
| 3375 | waittime = now - hlock->waittime_stamp; |
| 3376 | hlock->holdtime_stamp = now; |
| 3377 | } |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3378 | |
Frederic Weisbecker | 2062501 | 2009-04-06 01:49:33 +0200 | [diff] [blame] | 3379 | trace_lock_acquired(lock, ip, waittime); |
| 3380 | |
Dave Jones | f82b217 | 2008-08-11 09:30:23 +0200 | [diff] [blame] | 3381 | stats = get_lock_stats(hlock_class(hlock)); |
Peter Zijlstra | 9664567 | 2007-07-19 01:49:00 -0700 | [diff] [blame] | 3382 | if (waittime) { |
| 3383 | if (hlock->read) |
| 3384 | lock_time_inc(&stats->read_waittime, waittime); |
| 3385 | else |
| 3386 | lock_time_inc(&stats->write_waittime, waittime); |
| 3387 | } |
| 3388 | if (lock->cpu != cpu) |
| 3389 | stats->bounces[bounce_acquired + !!hlock->read]++; |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3390 | put_lock_stats(stats); |
Peter Zijlstra | 9664567 | 2007-07-19 01:49:00 -0700 | [diff] [blame] | 3391 | |
| 3392 | lock->cpu = cpu; |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3393 | lock->ip = ip; |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3394 | } |
| 3395 | |
| 3396 | void lock_contended(struct lockdep_map *lock, unsigned long ip) |
| 3397 | { |
| 3398 | unsigned long flags; |
| 3399 | |
Peter Zijlstra | efed792 | 2009-03-04 12:32:55 +0100 | [diff] [blame] | 3400 | trace_lock_contended(lock, ip); |
| 3401 | |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3402 | if (unlikely(!lock_stat)) |
| 3403 | return; |
| 3404 | |
| 3405 | if (unlikely(current->lockdep_recursion)) |
| 3406 | return; |
| 3407 | |
| 3408 | raw_local_irq_save(flags); |
| 3409 | check_flags(flags); |
| 3410 | current->lockdep_recursion = 1; |
| 3411 | __lock_contended(lock, ip); |
| 3412 | current->lockdep_recursion = 0; |
| 3413 | raw_local_irq_restore(flags); |
| 3414 | } |
| 3415 | EXPORT_SYMBOL_GPL(lock_contended); |
| 3416 | |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3417 | void lock_acquired(struct lockdep_map *lock, unsigned long ip) |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3418 | { |
| 3419 | unsigned long flags; |
| 3420 | |
| 3421 | if (unlikely(!lock_stat)) |
| 3422 | return; |
| 3423 | |
| 3424 | if (unlikely(current->lockdep_recursion)) |
| 3425 | return; |
| 3426 | |
| 3427 | raw_local_irq_save(flags); |
| 3428 | check_flags(flags); |
| 3429 | current->lockdep_recursion = 1; |
Peter Zijlstra | c7e78cf | 2008-10-16 23:17:09 +0200 | [diff] [blame] | 3430 | __lock_acquired(lock, ip); |
Peter Zijlstra | f20786f | 2007-07-19 01:48:56 -0700 | [diff] [blame] | 3431 | current->lockdep_recursion = 0; |
| 3432 | raw_local_irq_restore(flags); |
| 3433 | } |
| 3434 | EXPORT_SYMBOL_GPL(lock_acquired); |
| 3435 | #endif |
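
/*
 * How the two stat hooks pair up, sketched as a contended slowpath
 * (the LOCK_CONTENDED() wrapper in lockdep.h expands to roughly
 * this):
 *
 *	if (!do_raw_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);	start waiting
 *		do_raw_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);	waittime = now -
 *							->waittime_stamp
 *
 * do_raw_trylock()/do_raw_lock() stand in for the primitive's real
 * slowpath entry points.
 */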

/*
 * Used by the testsuite - sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}
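
/*
 * A minimal sketch of the intended use from a self-test harness (the
 * helper name is made up; see lib/locking-selftest.c for the real
 * driver): provoke a failure with reports suppressed, then reset the
 * validator so the next testcase starts from a clean slate.
 */
#if 0
static void example_run_testcase(void (*testcase)(void))
{
	debug_locks_silent = 1;	/* record, but do not print, the splat */
	testcase();		/* may trip the validator on purpose */
	lockdep_reset();	/* sanitize state for the next test */
	debug_locks_silent = 0;
}
#endif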

static void zap_class(struct lock_class *class)
{
	int i;

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for (i = 0; i < nr_list_entries; i++) {
		if (list_entries[i].class == class)
			list_del_rcu(&list_entries[i].entry);
	}
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
	list_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);

	class->key = NULL;
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();

	/*
	 * Unhash all classes that were created by this module:
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
				zap_class(class);
		}
	}

	if (locked)
		graph_unlock();
	raw_local_irq_restore(flags);
}
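
/*
 * Sketch of the expected caller: module unload zaps every lock class
 * whose key or name string lived inside the module image. This
 * mirrors what kernel/module.c does in free_module(); the field
 * names below are from this era's struct module.
 */
#if 0
static void example_free_module_classes(struct module *mod)
{
	lockdep_free_key_range(mod->module_core, mod->core_size);
}
#endif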

void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class, *next;
	struct list_head *head;
	unsigned long flags;
	int i, j;
	int locked;

	raw_local_irq_save(flags);

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	locked = graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		if (list_empty(head))
			continue;
		list_for_each_entry_safe(class, next, head, hash_entry) {
			if (unlikely(class == lock->class_cache)) {
				if (debug_locks_off_graph_unlock())
					WARN_ON(1);
				goto out_restore;
			}
		}
	}
	if (locked)
		graph_unlock();

out_restore:
	raw_local_irq_restore(flags);
}
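
/*
 * Sketch of when the above is needed (the object type and teardown
 * helper are hypothetical): if a lock's storage is about to be freed
 * or re-keyed, its cached classes must be zapped first so no stale
 * class pointers survive the reuse of that memory.
 */
#if 0
static void example_obj_teardown(struct example_obj *obj)
{
	lockdep_reset_lock(&obj->lock.dep_map);
	/* obj->lock storage may now be reused or re-initialized */
	kfree(obj);
}
#endif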

void lockdep_init(void)
{
	int i;

	/*
	 * Some architectures have their own start_kernel()
	 * code which calls lockdep_init(), while we also
	 * call lockdep_init() from start_kernel() itself,
	 * and we want to initialize the hashes only once:
	 */
	if (lockdep_initialized)
		return;

	for (i = 0; i < CLASSHASH_SIZE; i++)
		INIT_LIST_HEAD(classhash_table + i);

	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_LIST_HEAD(chainhash_table + i);

	lockdep_initialized = 1;
}
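
/*
 * A sketch of the expected (generic) call site, very early in boot;
 * exact ordering varies by architecture, and some arch code calls
 * this even earlier, which is why the function is idempotent:
 */
#if 0
asmlinkage void __init start_kernel(void)
{
	smp_setup_processor_id();
	lockdep_init();		/* before the first lock is ever taken */
	/* ... the rest of early init ... */
}
#endif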

void __init lockdep_info(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %lu kB\n",
		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
		sizeof(struct list_head) * CLASSHASH_SIZE +
		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
		sizeof(struct list_head) * CHAINHASH_SIZE
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(struct circular_queue)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %lu bytes\n",
		sizeof(struct held_lock) * MAX_LOCK_DEPTH);

#ifdef CONFIG_DEBUG_LOCKDEP
	if (lockdep_init_error) {
		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
		printk("Call stack leading to lockdep invocation was:\n");
		print_stack_trace(&lockdep_init_trace, 0);
	}
#endif
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n=========================\n");
	printk(  "[ BUG: held lock freed! ]\n");
	printk(  "-------------------------\n");
	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}
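
/*
 * The helper above reports "no overlap": the half-open ranges
 * [mem_from, mem_from + mem_len) and [lock_from, lock_from + lock_len)
 * are disjoint exactly when one ends at or before the other begins.
 * E.g. memory [100, 200) and a lock at [200, 216) do not overlap,
 * while [100, 200) and [192, 208) do.
 */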

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
				 sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
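
/*
 * Sketch of a typical caller (the wrapper is illustrative; the slab
 * and vmalloc free paths do the equivalent): check the object's
 * memory range for live held locks right before handing it back.
 */
#if 0
static void example_free_obj(void *obj, size_t size)
{
	debug_check_no_locks_freed(obj, size);
	kfree(obj);
}
#endif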

static void print_held_locks_bug(struct task_struct *curr)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n=====================================\n");
	printk(  "[ BUG: lock held at task exit time! ]\n");
	printk(  "-------------------------------------\n");
	printk("%s/%d is exiting with locks still held!\n",
		curr->comm, task_pid_nr(curr));
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(struct task_struct *task)
{
	if (unlikely(task->lockdep_depth > 0))
		print_held_locks_bug(task);
}
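
/*
 * This is meant to run from the task-exit path; a minimal sketch of
 * the do_exit() usage (surrounding teardown omitted, helper name
 * illustrative):
 */
#if 0
static void example_exit_path(void)
{
	struct task_struct *tsk = current;

	/* ... after the task has dropped all its resources ... */
	debug_check_no_locks_held(tsk);
}
#endif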

void debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	printk("\nShowing all locks held in the system:\n");

	/*
	 * Here we try to get the tasklist_lock as hard as possible;
	 * if we have not succeeded after 2 seconds we print the locks
	 * anyway (but keep trying). This enables a debug printout
	 * even if a tasklist_lock-holding task deadlocks or crashes.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	} else {
		if (count != 10)
			printk(KERN_CONT " locked it.\n");
	}

	do_each_thread(g, p) {
		/*
		 * Printing a task's held locks is only reliable if the
		 * task is sleeping or is the current task:
		 */
		if (p->state == TASK_RUNNING && p != current)
			continue;
		if (p->lockdep_depth)
			lockdep_print_held_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);

	printk("\n");
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
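
/*
 * A minimal sketch of the usual trigger: the magic-SysRq 'd' handler
 * in drivers/char/sysrq.c simply calls the function above to dump
 * every held lock in the system (handler signature as of this era):
 */
#if 0
static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
{
	debug_show_all_locks();
}
#endif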
| 3760 | |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3761 | /* |
| 3762 | * Careful: only use this function if you are sure that |
| 3763 | * the task cannot run in parallel! |
| 3764 | */ |
| 3765 | void __debug_show_held_locks(struct task_struct *task) |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3766 | { |
Jarek Poplawski | 9c35dd7 | 2007-03-22 00:11:28 -0800 | [diff] [blame] | 3767 | if (unlikely(!debug_locks)) { |
| 3768 | printk("INFO: lockdep is turned off.\n"); |
| 3769 | return; |
| 3770 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3771 | lockdep_print_held_locks(task); |
| 3772 | } |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 3773 | EXPORT_SYMBOL_GPL(__debug_show_held_locks); |
| 3774 | |
| 3775 | void debug_show_held_locks(struct task_struct *task) |
| 3776 | { |
| 3777 | __debug_show_held_locks(task); |
| 3778 | } |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 3779 | EXPORT_SYMBOL_GPL(debug_show_held_locks); |
Peter Zijlstra | b351d16 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 3780 | |
| 3781 | void lockdep_sys_exit(void) |
| 3782 | { |
| 3783 | struct task_struct *curr = current; |
| 3784 | |
| 3785 | if (unlikely(curr->lockdep_depth)) { |
| 3786 | if (!debug_locks_off()) |
| 3787 | return; |
| 3788 | printk("\n================================================\n"); |
| 3789 | printk( "[ BUG: lock held when returning to user space! ]\n"); |
| 3790 | printk( "------------------------------------------------\n"); |
| 3791 | printk("%s/%d is leaving the kernel with locks still held!\n", |
| 3792 | curr->comm, curr->pid); |
| 3793 | lockdep_print_held_locks(curr); |
| 3794 | } |
| 3795 | } |
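
/*
 * This hook is wired into the syscall-return path: with lockdep
 * enabled, the architecture entry code (e.g. x86's entry_*.S) expands
 * a LOCKDEP_SYS_EXIT macro that calls into the function above just
 * before dropping back to user mode, so a syscall that leaks a held
 * lock is reported at the moment it returns.
 */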