/*
 * kernel/mutex-debug.c
 *
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 * Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>

#include <asm/mutex.h>

#include "mutex-debug.h"

/*
 * We need a global lock when we walk through the multi-process
 * lock tree. Only used in the deadlock-debugging case.
 */
DEFINE_SPINLOCK(debug_mutex_lock);

/*
 * All locks held by all tasks, in a single global list:
 */
LIST_HEAD(debug_mutex_held_locks);

/*
 * In the debug case we carry the caller's instruction pointer into
 * other functions, but we don't want the function argument overhead
 * in the nondebug case - hence these macros:
 */
#define __IP_DECL__		, unsigned long ip
#define __IP__			, ip
#define __RET_IP__		, (unsigned long)__builtin_return_address(0)

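/*
 * Illustrative sketch only (the real call sites live in kernel/mutex.c):
 * a debug helper carries the extra "ip" argument via __IP_DECL__,
 *
 *	void debug_mutex_set_owner(struct mutex *lock,
 *				   struct thread_info *new_owner __IP_DECL__);
 *
 * and a caller forwards its own return address with __RET_IP__:
 *
 *	debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
 *
 * In the non-debug case the macros are defined away, so the extra
 * argument vanishes from both the prototype and the call.
 */
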
/*
 * "mutex debugging enabled" flag. We turn it off when we detect
 * the first problem because we don't want to recurse back
 * into the tracing code when doing error printk or
 * executing a BUG():
 */
int debug_mutex_on = 1;

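/*
 * printk helpers for dumping task and lock state in the reports below:
 */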
static void printk_task(struct task_struct *p)
{
	if (p)
		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
	else
		printk("<none>");
}

static void printk_ti(struct thread_info *ti)
{
	if (ti)
		printk_task(ti->task);
	else
		printk("<none>");
}

static void printk_task_short(struct task_struct *p)
{
	if (p)
		printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
	else
		printk("<none>");
}

static void printk_lock(struct mutex *lock, int print_owner)
{
	printk(" [%p] {%s}\n", lock, lock->name);

	if (print_owner && lock->owner) {
		printk(".. held by: ");
		printk_ti(lock->owner);
		printk("\n");
	}
	if (lock->owner) {
		printk("... acquired at: ");
		print_symbol("%s\n", lock->acquire_ip);
	}
}

/*
 * printk locks held by a task:
 */
static void show_task_locks(struct task_struct *p)
{
	switch (p->state) {
	case TASK_RUNNING:		printk("R"); break;
	case TASK_INTERRUPTIBLE:	printk("S"); break;
	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
	case TASK_STOPPED:		printk("T"); break;
	case EXIT_ZOMBIE:		printk("Z"); break;
	case EXIT_DEAD:			printk("X"); break;
	default:			printk("?"); break;
	}
	printk_task(p);
	if (p->blocked_on) {
		struct mutex *lock = p->blocked_on->lock;

		printk(" blocked on mutex:");
		printk_lock(lock, 1);
	} else
		printk(" (not blocked on mutex)\n");
}

/*
 * printk all locks held in the system (if filter == NULL),
 * or all locks belonging to a single task (if filter != NULL):
 */
void show_held_locks(struct task_struct *filter)
{
	struct list_head *curr, *cursor = NULL;
	struct mutex *lock;
	struct thread_info *t;
	unsigned long flags;
	int count = 0;

	if (filter) {
		printk("------------------------------\n");
		printk("| showing all locks held by: | (");
		printk_task_short(filter);
		printk("):\n");
		printk("------------------------------\n");
	} else {
		printk("---------------------------\n");
		printk("| showing all locks held: |\n");
		printk("---------------------------\n");
	}

	/*
	 * Play safe and acquire the global trace lock. We
	 * cannot printk with that lock held so we iterate
	 * very carefully:
	 */
next:
	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each(curr, &debug_mutex_held_locks) {
		if (cursor && curr != cursor)
			continue;
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (filter && (t != filter->thread_info))
			continue;
		count++;
		cursor = curr->next;
		debug_spin_lock_restore(&debug_mutex_lock, flags);

		printk("\n#%03d: ", count);
		printk_lock(lock, filter ? 0 : 1);
		goto next;
	}
	debug_spin_lock_restore(&debug_mutex_lock, flags);
	printk("\n");
}

void mutex_debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	printk("\nShowing all blocking locks in the system:\n");

	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	}
	if (count != 10)
		printk(" locked it.\n");

	do_each_thread(g, p) {
		show_task_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);

	printk("\n");
	show_held_locks(NULL);
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}

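/*
 * Print a detailed deadlock report: the contended lock, the locks held
 * by the tasks involved and their stack dumps. Called from
 * check_deadlock() once debugging has been switched off via DEBUG_OFF():
 */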
static void report_deadlock(struct task_struct *task, struct mutex *lock,
			    struct mutex *lockblk, unsigned long ip)
{
	printk("\n%s/%d is trying to acquire this lock:\n",
		current->comm, current->pid);
	printk_lock(lock, 1);
	printk("... trying at: ");
	print_symbol("%s\n", ip);
	show_held_locks(current);

	if (lockblk) {
		printk("but %s/%d is deadlocking current task %s/%d!\n\n",
			task->comm, task->pid, current->comm, current->pid);
		printk("\n%s/%d is blocked on this lock:\n",
			task->comm, task->pid);
		printk_lock(lockblk, 1);

		show_held_locks(task);

		printk("\n%s/%d's [blocked] stackdump:\n\n",
			task->comm, task->pid);
		show_stack(task, NULL);
	}

	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, current->pid);
	dump_stack();
	mutex_debug_show_all_locks();
	printk("[ turning off deadlock detection. Please report this. ]\n\n");
	local_irq_disable();
}

/*
 * Recursively check for mutex deadlocks:
 */
static int check_deadlock(struct mutex *lock, int depth,
			  struct thread_info *ti, unsigned long ip)
{
	struct mutex *lockblk;
	struct task_struct *task;

	if (!debug_mutex_on)
		return 0;

	ti = lock->owner;
	if (!ti)
		return 0;

	task = ti->task;
	lockblk = NULL;
	if (task->blocked_on)
		lockblk = task->blocked_on->lock;

	/* Self-deadlock: */
	if (current == task) {
		DEBUG_OFF();
		if (depth)
			return 1;
		printk("\n==========================================\n");
		printk( "[ BUG: lock recursion deadlock detected! |\n");
		printk( "------------------------------------------\n");
		report_deadlock(task, lock, NULL, ip);
		return 0;
	}

	/* Ugh, something corrupted the lock data structure? */
	if (depth > 20) {
		DEBUG_OFF();
		printk("\n===========================================\n");
		printk( "[ BUG: infinite lock dependency detected!? |\n");
		printk( "-------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}

	/* Recursively check for dependencies: */
	if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
		printk("\n============================================\n");
		printk( "[ BUG: circular locking deadlock detected! ]\n");
		printk( "--------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}
	return 0;
}

/*
 * Called when a task exits, this function checks whether the
 * task is holding any locks, and reports the first one if so:
 */
void mutex_debug_check_no_locks_held(struct task_struct *task)
{
	struct list_head *curr, *next;
	struct thread_info *t;
	unsigned long flags;
	struct mutex *lock;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (t != task->thread_info)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_lock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, lock held at task exit time!\n",
			task->comm, task->pid);
		printk_lock(lock, 1);
		if (lock->owner != task->thread_info)
			printk("exiting task is not even the owner??\n");
		return;
	}
	debug_spin_lock_restore(&debug_mutex_lock, flags);
}

/*
 * Called when kernel memory is freed (or unmapped), or if a mutex
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void mutex_debug_check_no_locks_freed(const void *from, const void *to)
{
	struct list_head *curr, *next;
	unsigned long flags;
	struct mutex *lock;
	void *lock_addr;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		lock_addr = lock;
		if (lock_addr < from || lock_addr >= to)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_lock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
			current->comm, current->pid, lock, from, to);
		dump_stack();
		printk_lock(lock, 1);
		if (lock->owner != current_thread_info())
			printk("freeing task is not even the owner??\n");
		return;
	}
	debug_spin_lock_restore(&debug_mutex_lock, flags);
}

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_set_owner(struct mutex *lock,
			   struct thread_info *new_owner __IP_DECL__)
{
	lock->owner = new_owner;
	DEBUG_WARN_ON(!list_empty(&lock->held_list));
	if (debug_mutex_on) {
		list_add_tail(&lock->held_list, &debug_mutex_held_locks);
		lock->acquire_ip = ip;
	}
}

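/*
 * Poison a new waiter structure and set its magic, so that later checks
 * can detect use of an uninitialized or corrupted waiter:
 */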
void debug_mutex_init_waiter(struct mutex_waiter *waiter)
{
	memset(waiter, 0x11, sizeof(*waiter));
	waiter->magic = waiter;
	INIT_LIST_HEAD(&waiter->list);
}

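/*
 * Sanity-check a waiter before waking it up: the wait_lock must be held,
 * the waiter must still be queued and its magic must be intact:
 */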
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
	DEBUG_WARN_ON(list_empty(&lock->wait_list));
	DEBUG_WARN_ON(waiter->magic != waiter);
	DEBUG_WARN_ON(list_empty(&waiter->list));
}

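/*
 * The waiter is no longer in use: it must not be queued anywhere, and we
 * poison it again to catch any later use:
 */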
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_WARN_ON(!list_empty(&waiter->list));
	memset(waiter, 0x22, sizeof(*waiter));
}

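/*
 * Called (with lock->wait_lock held) when a task is about to block on the
 * mutex: run deadlock detection and record what the task is blocked on:
 */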
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct thread_info *ti __IP_DECL__)
{
	SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
	check_deadlock(lock, 0, ti, ip);
	/* Mark the current thread as blocked on the lock: */
	ti->task->blocked_on = waiter;
	waiter->lock = lock;
}

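/*
 * Take a waiter off the wait list and clear the task's blocked_on link:
 */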
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			 struct thread_info *ti)
{
	DEBUG_WARN_ON(list_empty(&waiter->list));
	DEBUG_WARN_ON(waiter->task != ti->task);
	DEBUG_WARN_ON(ti->task->blocked_on != waiter);
	ti->task->blocked_on = NULL;

	list_del_init(&waiter->list);
	waiter->task = NULL;
}

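/*
 * Sanity checks at unlock time: the mutex must be initialized and owned
 * by the current thread; drop it from the global held-locks list:
 */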
void debug_mutex_unlock(struct mutex *lock)
{
	DEBUG_WARN_ON(lock->magic != lock);
	DEBUG_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	DEBUG_WARN_ON(lock->owner != current_thread_info());
	if (debug_mutex_on) {
		DEBUG_WARN_ON(list_empty(&lock->held_list));
		list_del_init(&lock->held_list);
	}
}

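/*
 * Initialize the debug-specific fields of a mutex:
 */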
void debug_mutex_init(struct mutex *lock, const char *name)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	mutex_debug_check_no_locks_freed((void *)lock, (void *)(lock + 1));
	lock->owner = NULL;
	INIT_LIST_HEAD(&lock->held_list);
	lock->name = name;
	lock->magic = lock;
}

/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void fastcall mutex_destroy(struct mutex *lock)
{
	DEBUG_WARN_ON(mutex_is_locked(lock));
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);

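/*
 * Illustrative usage sketch (not part of this file): a mutex embedded in
 * dynamically allocated memory should be destroyed before the memory is
 * freed, so that the checks above can catch a still-held lock. "struct foo"
 * is a made-up example type:
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	mutex_init(&foo->lock);
 *	...
 *	mutex_destroy(&foo->lock);
 *	kfree(foo);
 */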