author	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2012-09-13 11:39:51 +0200
committer	Ingo Molnar <mingo@kernel.org>	2012-09-13 17:00:44 +0200
commit	d094595078d00b63839d0c5ccb8b184ef242cb45 (patch)
tree	31929a5e414de13c0c4df46f3e1b3f276f0c58bf /kernel/lockdep.c
parent	0bd1189e239c76eb3a50e458548fbe7e4a5dfff1 (diff)
lockdep: Check if nested lock is actually held

It is considered good form to lock the lock you claim to be nested in.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
[ removed the nest_lock arg to print_lock_nested_lock_not_held() in
  favour of hlock->nest_lock, and renamed the lock arg to hlock since
  it's a held_lock type ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/5051A9E7.5040501@canonical.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
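To make the rule concrete, here is a minimal sketch of the convention the check enforces: a lock annotated as nested via mutex_lock_nest_lock() must be acquired while the claimed outer lock is actually held. This example is not part of the patch; parent_obj, child_obj and update_child() are invented names for illustration, and only mutex_lock_nest_lock() is the real kernel API.

/* Illustrative only: the correct pattern the new lockdep check expects. */
#include <linux/mutex.h>

struct parent_obj {
	struct mutex lock;		/* the "nest" lock */
};

struct child_obj {
	struct mutex lock;		/* only ever taken under parent->lock */
};

static void update_child(struct parent_obj *parent, struct child_obj *child)
{
	mutex_lock(&parent->lock);	/* the claimed nest lock really is held */
	mutex_lock_nest_lock(&child->lock, &parent->lock);

	/* ... modify child while holding both locks ... */

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}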
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	39
1 file changed, 39 insertions, 0 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ea9ee4518c35..7981e5b2350d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2998,6 +2998,42 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 struct lock_class_key __lockdep_no_validate__;
 
+static int
+print_lock_nested_lock_not_held(struct task_struct *curr,
+				struct held_lock *hlock,
+				unsigned long ip)
+{
+	if (!debug_locks_off())
+		return 0;
+	if (debug_locks_silent)
+		return 0;
+
+	printk("\n");
+	printk("==================================\n");
+	printk("[ BUG: Nested lock was not taken ]\n");
+	print_kernel_ident();
+	printk("----------------------------------\n");
+
+	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
+	print_lock(hlock);
+
+	printk("\nbut this task is not holding:\n");
+	printk("%s\n", hlock->nest_lock->name);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+
+	printk("\nother info that might help us debug this:\n");
+	lockdep_print_held_locks(curr);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+
+	return 0;
+}
+
+static int __lock_is_held(struct lockdep_map *lock);
+
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
@@ -3139,6 +3175,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 	chain_key = iterate_chain_key(chain_key, id);
 
+	if (nest_lock && !__lock_is_held(nest_lock))
+		return print_lock_nested_lock_not_held(curr, hlock, ip);
+
 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;
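For contrast, a sketch of the misuse the new check reports, reusing the invented types from the example above: the caller claims nesting under parent->lock without actually holding it. Previously lockdep accepted the annotation silently; with this change __lock_acquire() sees nest_lock set while __lock_is_held(nest_lock) is false and emits the "[ BUG: Nested lock was not taken ]" report added above.

/* Illustrative misuse, not from the kernel tree. */
static void broken_update_child(struct parent_obj *parent,
				struct child_obj *child)
{
	/* BUG: parent->lock is not held, yet we claim to nest under it. */
	mutex_lock_nest_lock(&child->lock, &parent->lock);

	/* With lockdep enabled, this acquisition now triggers the
	 * "[ BUG: Nested lock was not taken ]" report instead of
	 * being accepted silently. */

	mutex_unlock(&child->lock);
}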