@@ -123,8 +123,6 @@ static inline int debug_locks_off_graph_unlock(void)
         return ret;
 }
 
-static int lockdep_initialized;
-
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
@@ -433,19 +431,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
-/*
- * We cannot printk in early bootup code. Not even early_printk()
- * might work. So we mark any initialization errors and printk
- * about it later on, in lockdep_info().
- */
-static int lockdep_init_error;
-static const char *lock_init_error;
-static unsigned long lockdep_init_trace_data[20];
-static struct stack_trace lockdep_init_trace = {
-        .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
-        .entries = lockdep_init_trace_data,
-};
-
 /*
  * Various lockdep statistics:
  */
@@ -669,20 +654,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         struct hlist_head *hash_head;
         struct lock_class *class;
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-        /*
-         * If the architecture calls into lockdep before initializing
-         * the hashes then we'll warn about it later. (we cannot printk
-         * right now)
-         */
-        if (unlikely(!lockdep_initialized)) {
-                lockdep_init();
-                lockdep_init_error = 1;
-                lock_init_error = lock->name;
-                save_stack_trace(&lockdep_init_trace);
-        }
-#endif
-
         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                 debug_locks_off();
                 printk(KERN_ERR
@@ -2010,6 +1981,53 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
         return lock_classes + chain_hlocks[chain->base + i];
 }
 
+/*
+ * Returns the index of the first held_lock of the current chain
+ */
+static inline int get_first_held_lock(struct task_struct *curr,
+                                        struct held_lock *hlock)
+{
+        int i;
+        struct held_lock *hlock_curr;
+
+        for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+                hlock_curr = curr->held_locks + i;
+                if (hlock_curr->irq_context != hlock->irq_context)
+                        break;
+
+        }
+
+        return ++i;
+}
+
+/*
+ * Checks whether the chain and the current held locks are consistent
+ * in depth and also in content. If they are not it most likely means
+ * that there was a collision during the calculation of the chain_key.
+ * Returns: 0 not passed, 1 passed
+ */
+static int check_no_collision(struct task_struct *curr,
+                        struct held_lock *hlock,
+                        struct lock_chain *chain)
+{
+#ifdef CONFIG_DEBUG_LOCKDEP
+        int i, j, id;
+
+        i = get_first_held_lock(curr, hlock);
+
+        if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+                return 0;
+
+        for (j = 0; j < chain->depth - 1; j++, i++) {
+                id = curr->held_locks[i].class_idx - 1;
+
+                if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+                        return 0;
+        }
+#endif
+        return 1;
+}
+
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
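
get_first_held_lock() scans the per-task held-lock stack from the top down and stops at the first entry whose irq_context differs from the lock being acquired; the slot just above that entry is where the current chain begins. check_no_collision() then uses that start index to compare the cached chain's depth and per-entry class ids against the live stack, so two different lock sequences that happen to hash to the same chain_key are caught instead of being silently validated against each other. A minimal user-space sketch of the same walk (held_lock_model and the sample values are illustrative stand-ins, not the kernel's types):

/* Standalone sketch (not kernel code): models how get_first_held_lock()
 * walks the held-lock stack backwards until the irq context changes. */
#include <stdio.h>

struct held_lock_model {
        int class_idx;          /* 1-based class index, as in lockdep */
        int irq_context;        /* 0 = process context, nonzero = irq */
};

/* Same loop shape as the patch: scan from the top of the stack down,
 * stop at the first entry whose irq_context differs from @top. */
static int first_held_lock(const struct held_lock_model *stack, int depth,
                           const struct held_lock_model *top)
{
        int i;

        for (i = depth - 1; i >= 0; i--)
                if (stack[i].irq_context != top->irq_context)
                        break;
        return i + 1;           /* index of the first lock in the current chain */
}

int main(void)
{
        /* two process-context locks, then two taken in irq context */
        struct held_lock_model stack[] = {
                { 1, 0 }, { 2, 0 }, { 3, 1 }, { 4, 1 },
        };

        /* the chain for the irq-context locks starts at index 2 */
        printf("chain starts at %d\n", first_held_lock(stack, 4, &stack[3]));
        return 0;
}
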
@@ -2023,7 +2041,6 @@ static inline int lookup_chain_cache(struct task_struct *curr,
         struct lock_class *class = hlock_class(hlock);
         struct hlist_head *hash_head = chainhashentry(chain_key);
         struct lock_chain *chain;
-        struct held_lock *hlock_curr;
         int i, j;
 
         /*
@@ -2041,6 +2058,9 @@ static inline int lookup_chain_cache(struct task_struct *curr,
                 if (chain->chain_key == chain_key) {
 cache_hit:
                         debug_atomic_inc(chain_lookup_hits);
+                        if (!check_no_collision(curr, hlock, chain))
+                                return 0;
+
                         if (very_verbose(class))
                                 printk("\nhash chain already cached, key: "
                                         "%016Lx tail class: [%p] %s\n",
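
With this change a cache hit is only trusted after its contents are verified. When check_no_collision() fails, DEBUG_LOCKS_WARN_ON() has already switched the validator off via debug_locks_off(), and the early return 0 makes the caller skip dependency checking for this chain, so a detected chain_key collision disables lockdep rather than letting it reason about the wrong chain.
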
@@ -2078,13 +2098,7 @@ cache_hit:
         chain = lock_chains + nr_lock_chains++;
         chain->chain_key = chain_key;
         chain->irq_context = hlock->irq_context;
-        /* Find the first held_lock of current chain */
-        for (i = curr->lockdep_depth - 1; i >= 0; i--) {
-                hlock_curr = curr->held_locks + i;
-                if (hlock_curr->irq_context != hlock->irq_context)
-                        break;
-        }
-        i++;
+        i = get_first_held_lock(curr, hlock);
         chain->depth = curr->lockdep_depth + 1 - i;
         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
                 chain->base = nr_chain_hlocks;
@@ -2172,7 +2186,7 @@ static void check_chain_key(struct task_struct *curr)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
         struct held_lock *hlock, *prev_hlock = NULL;
-        unsigned int i, id;
+        unsigned int i;
         u64 chain_key = 0;
 
         for (i = 0; i < curr->lockdep_depth; i++) {
@@ -2189,17 +2203,16 @@ static void check_chain_key(struct task_struct *curr)
                                 (unsigned long long)hlock->prev_chain_key);
                         return;
                 }
-                id = hlock->class_idx - 1;
                 /*
                  * Whoops ran out of static storage again?
                  */
-                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+                if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
                         return;
 
                 if (prev_hlock && (prev_hlock->irq_context !=
                                 hlock->irq_context))
                         chain_key = 0;
-                chain_key = iterate_chain_key(chain_key, id);
+                chain_key = iterate_chain_key(chain_key, hlock->class_idx);
                 prev_hlock = hlock;
         }
         if (chain_key != curr->curr_chain_key) {
@@ -3077,7 +3090,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         struct task_struct *curr = current;
         struct lock_class *class = NULL;
         struct held_lock *hlock;
-        unsigned int depth, id;
+        unsigned int depth;
         int chain_head = 0;
         int class_idx;
         u64 chain_key;
@@ -3180,11 +3193,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
          * The 'key ID' is what is the most compact key value to drive
          * the hash, not class->key.
          */
-        id = class - lock_classes;
         /*
          * Whoops, we did it again.. ran straight out of our static allocation.
          */
-        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+        if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
                 return 0;
 
         chain_key = curr->curr_chain_key;
@@ -3202,7 +3214,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                 chain_key = 0;
                 chain_head = 1;
         }
-        chain_key = iterate_chain_key(chain_key, id);
+        chain_key = iterate_chain_key(chain_key, class_idx);
 
         if (nest_lock && !__lock_is_held(nest_lock))
                 return print_lock_nested_lock_not_held(curr, hlock, ip);
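
Both call sites now feed the one-based class_idx into the hash instead of the zero-based id, and the bounds checks change to match: for unsigned values, id >= MAX_LOCKDEP_KEYS with id == class_idx - 1 is the same condition as class_idx > MAX_LOCKDEP_KEYS. The point of the switch is that 0 no longer appears in the hash input: with zero-based ids, the class at index 0 mixed into a zero running key left the key unchanged, so distinct chains could share a chain_key. A standalone sketch of that degenerate case, assuming a shift/xor mixing step of the kind iterate_chain_key() used at the time (the exact macro and the MAX_LOCKDEP_KEYS_BITS value here are illustrative assumptions):

/* Standalone sketch: why a zero id is a dangerous hash input. */
#include <stdio.h>
#include <stdint.h>

#define MAX_LOCKDEP_KEYS_BITS 13

/* Approximation of the era's iterate_chain_key(): rotate-style
 * shift/xor of the running 64-bit key with the next class id. */
static uint64_t iterate_chain_key(uint64_t key1, uint64_t key2)
{
        return (key1 << MAX_LOCKDEP_KEYS_BITS) ^
               (key1 >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^
               key2;
}

int main(void)
{
        /* With zero-based ids, the class with id 0 contributes nothing: */
        uint64_t empty = 0;
        uint64_t with_class0 = iterate_chain_key(empty, 0);
        /* One-based class_idx keeps 0 out of the input domain: */
        uint64_t with_idx1 = iterate_chain_key(empty, 1);

        printf("empty chain:      %016llx\n", (unsigned long long)empty);
        printf("chain with id 0:  %016llx\n", (unsigned long long)with_class0);
        printf("chain with idx 1: %016llx\n", (unsigned long long)with_idx1);
        return 0;
}

The first two printed keys are identical, which is exactly the ambiguity the one-based class_idx removes.
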
@@ -4013,28 +4025,6 @@ out_restore:
         raw_local_irq_restore(flags);
 }
 
-void lockdep_init(void)
-{
-        int i;
-
-        /*
-         * Some architectures have their own start_kernel()
-         * code which calls lockdep_init(), while we also
-         * call lockdep_init() from the start_kernel() itself,
-         * and we want to initialize the hashes only once:
-         */
-        if (lockdep_initialized)
-                return;
-
-        for (i = 0; i < CLASSHASH_SIZE; i++)
-                INIT_HLIST_HEAD(classhash_table + i);
-
-        for (i = 0; i < CHAINHASH_SIZE; i++)
-                INIT_HLIST_HEAD(chainhash_table + i);
-
-        lockdep_initialized = 1;
-}
-
 void __init lockdep_info(void)
 {
         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
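
lockdep_init() can go because there is no longer anything to initialize at run time: with the class and chain hash tables kept as hlists, an empty bucket is just a NULL first pointer, and static storage is zero-initialized before any code runs. A standalone sketch of that property (illustrative table size; the minimal hlist definitions follow the kernel's one-pointer head / two-pointer node layout):

/* Standalone sketch: a static array of hlist heads is already empty,
 * so the INIT_HLIST_HEAD() loops removed above did no useful work. */
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define CLASSHASH_SIZE 4096     /* illustrative, not the kernel's config */

/* zero-initialized: every .first is NULL, i.e. every bucket is empty */
static struct hlist_head classhash_table[CLASSHASH_SIZE];

static int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

int main(void)
{
        printf("bucket 0 empty: %d\n", hlist_empty(&classhash_table[0]));
        return 0;
}
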
@@ -4061,14 +4051,6 @@ void __init lockdep_info(void)
 
         printk(" per task-struct memory footprint: %lu bytes\n",
                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-        if (lockdep_init_error) {
-                printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
-                printk("Call stack leading to lockdep invocation was:\n");
-                print_stack_trace(&lockdep_init_trace, 0);
-        }
-#endif
 }
 
 static void