tracing: add lockdep tracepoints for lock acquire/release
Augment the traces with lock names when lockdep is available:
 1)               |  down_read_trylock() {
 1)               |    _spin_lock_irqsave() {
 1)               |      /* lock_acquire: &sem->wait_lock */
 1)   4.201 us    |    }
 1)               |    _spin_unlock_irqrestore() {
 1)               |      /* lock_release: &sem->wait_lock */
 1)   3.523 us    |    }
 1)               |    /* lock_acquire: try read &mm->mmap_sem */
 1) + 13.386 us   |  }
 1)   1.635 us    |  find_vma();
 1)               |  handle_mm_fault() {
 1)               |    __do_fault() {
 1)               |      filemap_fault() {
 1)               |        find_lock_page() {
 1)               |          find_get_page() {
 1)               |            /* lock_acquire: read rcu_read_lock */
 1)               |            /* lock_release: rcu_read_lock */
 1)   5.697 us    |          }
 1)   8.158 us    |        }
 1) + 11.079 us   |      }
 1)               |      _spin_lock() {
 1)               |        /* lock_acquire: __pte_lockptr(page) */
 1)   3.949 us    |      }
 1)   1.460 us    |      page_add_file_rmap();
 1)               |      _spin_unlock() {
 1)               |        /* lock_release: __pte_lockptr(page) */
 1)   3.115 us    |      }
 1)               |      unlock_page() {
 1)   1.421 us    |        page_waitqueue();
 1)   1.220 us    |        __wake_up_bit();
 1)   6.519 us    |      }
 1) + 34.328 us   |    }
 1) + 37.452 us   |  }
 1)               |  up_read() {
 1)               |    /* lock_release: &mm->mmap_sem */
 1)               |    _spin_lock_irqsave() {
 1)               |      /* lock_acquire: &sem->wait_lock */
 1)   3.865 us    |    }
 1)               |    _spin_unlock_irqrestore() {
 1)               |      /* lock_release: &sem->wait_lock */
 1)   8.562 us    |    }
 1) + 17.370 us   |  }
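
The tracer's own locks must stay invisible to lockdep, otherwise the new
lock_acquire/lock_release events would recurse back into the trace buffer
while it is being written.  The hunks below therefore switch the tracer's
internal spinlocks to raw spinlocks.  A minimal sketch of that pattern,
assuming kernel context with <linux/spinlock.h> and the 2.6.29-era raw
spinlock API (my_buf_lock and fill_buf are illustrative names, not part of
this patch):

	/* Illustrative lock guarding some tracer-internal scratch buffer. */
	static raw_spinlock_t my_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;

	static void fill_buf(void)
	{
		unsigned long flags;

		/*
		 * raw_local_irq_save() + __raw_spin_lock() go straight to the
		 * architecture lock ops, bypassing the lockdep-annotated
		 * spin_lock_irqsave() wrapper, so no lock_acquire event is
		 * emitted for the tracer's own lock.
		 */
		raw_local_irq_save(flags);
		__raw_spin_lock(&my_buf_lock);

		/* ... write into the shared buffer ... */

		__raw_spin_unlock(&my_buf_lock);
		raw_local_irq_restore(flags);
	}
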
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Török Edwin <edwintorok@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1236166375.5330.7209.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 12539f7..c8abbb0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -623,7 +623,7 @@
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -735,7 +735,7 @@
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!spin_trylock(&trace_cmdline_lock))
+ if (!__raw_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
@@ -753,7 +753,7 @@
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- spin_unlock(&trace_cmdline_lock);
+ __raw_spin_unlock(&trace_cmdline_lock);
}
char *trace_find_cmdline(int pid)
@@ -3751,7 +3751,7 @@
int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
- static DEFINE_SPINLOCK(trace_buf_lock);
+ static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ring_buffer_event *event;
@@ -3773,7 +3773,8 @@
goto out;
pause_graph_tracing();
- spin_lock_irqsave(&trace_buf_lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
len = min(len, TRACE_BUF_SIZE-1);
@@ -3792,7 +3793,8 @@
ring_buffer_unlock_commit(tr->buffer, event);
out_unlock:
- spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+ __raw_spin_unlock(&trace_buf_lock);
+ raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
preempt_enable_notrace();
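
The cmdline-recording hunk above keeps its opportunistic locking: it can run
from contexts where spinning or disabling interrupts is not wanted, so a
contended lock simply means that sample is skipped.  A sketch of that trylock
pattern with the same raw primitives (record_sample and sample_lock are
hypothetical names, not from the patch):

	static raw_spinlock_t sample_lock = __RAW_SPIN_LOCK_UNLOCKED;

	static void record_sample(void)
	{
		/*
		 * Don't spin and don't touch the interrupt state: if the
		 * lock is busy, drop this sample and try again next time,
		 * as trace_save_cmdline() does above.
		 */
		if (!__raw_spin_trylock(&sample_lock))
			return;

		/* ... update the shared table ... */

		__raw_spin_unlock(&sample_lock);
	}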