@@ -648,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -672,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -721,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
 }
 
 /*
@@ -739,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
 		return NULL;
 	}
 
@@ -3273,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3482,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+			   const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3499,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		 * Clearly if the lock hasn't been acquired _ever_, we're not
 		 * holding it either, so report failure.
 		 */
-		if (IS_ERR_OR_NULL(class))
+		if (!class)
 			return 0;
 
 		/*
@@ -3724,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3938,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -4295,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*
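
For context, here is a minimal sketch (not part of the patch) of the situation the assign_lock_key() failure path diagnoses: a lock embedded in a heap-allocated object has no static address to serve as its class key, so it must be given one at initialisation time. The my_dev structure and my_dev_alloc() helper below are hypothetical names used only for illustration.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;	/* heap-allocated, so not a static object */
};

static struct my_dev *my_dev_alloc(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	/*
	 * With lockdep enabled, spin_lock_init() expands to a block
	 * that declares a static struct lock_class_key and registers
	 * it for this lock, which is why properly initialised heap
	 * locks never reach assign_lock_key(). Without it, lock->key
	 * stays NULL: the first acquisition gets NULL back from
	 * look_up_lock_class(), falls into assign_lock_key(), fails
	 * the static_obj() check, and disables lockdep with the
	 * "trying to register non-static key" message seen above.
	 */
	spin_lock_init(&dev->lock);
	return dev;
}

Code that cannot use the standard init helpers can instead supply a static struct lock_class_key explicitly via lockdep_set_class() after initialising the lock.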