@@ -58,6 +58,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+#include <linux/slab.h>
+#endif
+
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
 module_param(prove_locking, int, 0644);
@@ -724,6 +728,18 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }
 
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+static void cross_init(struct lockdep_map *lock, int cross);
+static int cross_lock(struct lockdep_map *lock);
+static int lock_acquire_crosslock(struct held_lock *hlock);
+static int lock_release_crosslock(struct lockdep_map *lock);
+#else
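+/*
+ * With crossrelease disabled, the stubs return 2 so that the callers in
+ * __lock_acquire() and __lock_release() fall through to the normal
+ * held_lock[] operations; see the comments at the call sites.
+ */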
+static inline void cross_init(struct lockdep_map *lock, int cross) {}
+static inline int cross_lock(struct lockdep_map *lock) { return 0; }
+static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
+static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
+#endif
+
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1795,6 +1811,9 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
 		if (nest)
 			return 2;
 
+		if (cross_lock(prev->instance))
+			continue;
+
 		return print_deadlock_bug(curr, prev, next);
 	}
 	return 1;
@@ -1962,30 +1981,36 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		int distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth - 1;
 		/*
-		 * Only non-recursive-read entries get new dependencies
-		 * added:
+		 * Only non-crosslock entries get new dependencies added.
+		 * Crosslock entries will be added by commit later:
 		 */
-		if (hlock->read != 2 && hlock->check) {
-			int ret = check_prev_add(curr, hlock, next,
-						 distance, &trace, save);
-			if (!ret)
-				return 0;
-
+		if (!cross_lock(hlock->instance)) {
 			/*
-			 * Stop saving stack_trace if save_trace() was
-			 * called at least once:
+			 * Only non-recursive-read entries get new dependencies
+			 * added:
 			 */
-			if (save && ret == 2)
-				save = NULL;
+			if (hlock->read != 2 && hlock->check) {
+				int ret = check_prev_add(curr, hlock, next,
+							 distance, &trace, save);
+				if (!ret)
+					return 0;
 
-			/*
-			 * Stop after the first non-trylock entry,
-			 * as non-trylock entries have added their
-			 * own direct dependencies already, so this
-			 * lock is connected to them indirectly:
-			 */
-			if (!hlock->trylock)
-				break;
+				/*
+				 * Stop saving stack_trace if save_trace() was
+				 * called at least once:
+				 */
+				if (save && ret == 2)
+					save = NULL;
+
+				/*
+				 * Stop after the first non-trylock entry,
+				 * as non-trylock entries have added their
+				 * own direct dependencies already, so this
+				 * lock is connected to them indirectly:
+				 */
+				if (!hlock->trylock)
+					break;
+			}
 		}
 		depth--;
 		/*
@@ -3176,7 +3201,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 /*
  * Initialize a lock instance's lock-class mapping info:
  */
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
+static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
 	int i;
@@ -3234,8 +3259,25 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		raw_local_irq_restore(flags);
 	}
 }
+
+void lockdep_init_map(struct lockdep_map *lock, const char *name,
+		      struct lock_class_key *key, int subclass)
+{
+	cross_init(lock, 0);
+	__lockdep_init_map(lock, name, key, subclass);
+}
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
+		      struct lock_class_key *key, int subclass)
+{
+	cross_init(lock, 1);
+	__lockdep_init_map(lock, name, key, subclass);
+}
+EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
+#endif
+
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
@@ -3291,6 +3333,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	int chain_head = 0;
 	int class_idx;
 	u64 chain_key;
+	int ret;
 
 	if (unlikely(!debug_locks))
 		return 0;
@@ -3339,7 +3382,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 	class_idx = class - lock_classes + 1;
 
-	if (depth) {
+	/* TODO: nest_lock is not implemented for crosslock yet. */
+	if (depth && !cross_lock(lock)) {
 		hlock = curr->held_locks + depth - 1;
 		if (hlock->class_idx == class_idx && nest_lock) {
 			if (hlock->references) {
@@ -3427,6 +3471,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;
 
+	ret = lock_acquire_crosslock(hlock);
+	/*
+	 * 2 means normal acquire operations are needed. Otherwise, it's
+	 * ok just to return with '0:fail, 1:success'.
+	 */
+	if (ret != 2)
+		return ret;
+
 	curr->curr_chain_key = chain_key;
 	curr->lockdep_depth++;
 	check_chain_key(curr);
@@ -3664,11 +3716,19 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
 	unsigned int depth;
-	int i;
+	int ret, i;
 
 	if (unlikely(!debug_locks))
 		return 0;
 
+	ret = lock_release_crosslock(lock);
+	/*
+	 * 2 means normal release operations are needed. Otherwise, it's
+	 * ok just to return with '0:fail, 1:success'.
+	 */
+	if (ret != 2)
+		return ret;
+
 	depth = curr->lockdep_depth;
 	/*
 	 * So we're all set to release this lock.. wait what lock? We don't
@@ -4532,6 +4592,13 @@ asmlinkage __visible void lockdep_sys_exit(void)
 			curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
 	}
+
+	/*
+	 * The lock history for each syscall should be independent. So wipe the
+	 * slate clean on return to userspace.
+	 */
+	crossrelease_hist_end(XHLOCK_PROC);
+	crossrelease_hist_start(XHLOCK_PROC);
 }
 
 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -4580,3 +4647,398 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+
+/*
+ * Crossrelease works by recording a lock history for each thread and
+ * connecting those historic locks that were taken after the
+ * wait_for_completion() in the complete() context.
+ *
+ *	Task-A				Task-B
+ *
+ *					mutex_lock(&A);
+ *					mutex_unlock(&A);
+ *
+ *	wait_for_completion(&C);
+ *	  lock_acquire_crosslock();
+ *	    atomic_inc_return(&cross_gen_id);
+ *				|
+ *				|	mutex_lock(&B);
+ *				|	mutex_unlock(&B);
+ *				|
+ *				|	complete(&C);
+ *				`--	  lock_commit_crosslock();
+ *
+ * Which will then add a dependency between B and C.
+ */
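+
+/*
+ * A rough usage sketch, for illustration only (real waiting primitives
+ * such as completions can be annotated in follow-up patches):
+ *
+ *	init:	lockdep_init_map_crosslock() on the primitive's map;
+ *	wait:	lock_acquire() on that map before sleeping, which lands
+ *		in lock_acquire_crosslock() and records the crosslock;
+ *	wake:	lock_commit_crosslock() to connect the waker's lock
+ *		history to the crosslock, then lock_release().
+ */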
+
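+/*
+ * xhlock_idx only ever grows (and wraps). MAX_XHLOCKS_NR is enforced to
+ * be a power of 2 in cross_init(), so the modulo below is a cheap mask
+ * and the oldest entries are recycled naturally.
+ */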
+#define xhlock(i)	(current->xhlocks[(i) % MAX_XHLOCKS_NR])
+
+/*
+ * Whenever a crosslock is held, cross_gen_id will be increased.
+ */
+static atomic_t cross_gen_id; /* Can be wrapped */
+
+/*
+ * Lock history stacks; we have 3 nested lock history stacks:
+ *
+ *	Hard IRQ
+ *	Soft IRQ
+ *	History / Task
+ *
+ * The thing is that once we complete a (Hard/Soft) IRQ the future task locks
+ * should not depend on any of the locks observed while running the IRQ.
+ *
+ * So what we do is rewind the history buffer and erase all our knowledge of
+ * that temporal event.
+ */
+
+/*
+ * We need this to annotate lock history boundaries. Take for instance
+ * workqueues; each work is independent of the last. The completion of a future
+ * work does not depend on the completion of a past work (in general).
+ * Therefore we must not carry that (lock) dependency across works.
+ *
+ * This is true for many things; pretty much all kthreads fall into this
+ * pattern, where they have an 'idle' state and future completions do not
+ * depend on past completions. It's just that since they all have the 'same'
+ * form -- the kthread does the same over and over -- it doesn't typically
+ * matter.
+ *
+ * The same is true for system calls: once a system call is completed (we've
+ * returned to userspace) the next system call does not depend on the lock
+ * history of the previous system call.
+ */
+void crossrelease_hist_start(enum xhlock_context_t c)
+{
+	if (current->xhlocks)
+		current->xhlock_idx_hist[c] = current->xhlock_idx;
+}
+
+void crossrelease_hist_end(enum xhlock_context_t c)
+{
+	if (current->xhlocks)
+		current->xhlock_idx = current->xhlock_idx_hist[c];
+}
+
+static int cross_lock(struct lockdep_map *lock)
+{
+	return lock ? lock->cross : 0;
+}
+
+/*
+ * This is needed to decide the relationship between wrappable variables.
+ */
+static inline int before(unsigned int a, unsigned int b)
+{
+	return (int)(a - b) < 0;
+}
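+
+/*
+ * For example, before(UINT_MAX, 0) is true: (int)(UINT_MAX - 0) == -1,
+ * so 'a' is still considered older than 'b' even across a wrap of the
+ * generation counter.
+ */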
+
+static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
+{
+	return hlock_class(&xhlock->hlock);
+}
+
+static inline struct lock_class *xlock_class(struct cross_lock *xlock)
+{
+	return hlock_class(&xlock->hlock);
+}
+
+/*
+ * Should we check a dependency with the previous one?
+ */
+static inline int depend_before(struct held_lock *hlock)
+{
+	return hlock->read != 2 && hlock->check && !hlock->trylock;
+}
+
+/*
+ * Should we check a dependency with the next one?
+ */
+static inline int depend_after(struct held_lock *hlock)
+{
+	return hlock->read != 2 && hlock->check;
+}
+
+/*
+ * Check if the xhlock is valid, which would be false if,
+ *
+ *    1. it has not been used after initialization yet.
+ *
+ * Remember that hist_lock is implemented as a ring buffer.
+ */
+static inline int xhlock_valid(struct hist_lock *xhlock)
+{
+	/*
+	 * xhlock->hlock.instance must be !NULL.
+	 */
+	return !!xhlock->hlock.instance;
+}
+
+/*
+ * Record a hist_lock entry.
+ *
+ * The only requirement is that irqs are disabled.
+ */
+static void add_xhlock(struct held_lock *hlock)
+{
+	unsigned int idx = ++current->xhlock_idx;
+	struct hist_lock *xhlock = &xhlock(idx);
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+	/*
+	 * This can be done locklessly because it is all task-local
+	 * state; we must however ensure IRQs are disabled.
+	 */
+	WARN_ON_ONCE(!irqs_disabled());
+#endif
+
+	/* Initialize hist_lock's members */
+	xhlock->hlock = *hlock;
+
+	xhlock->trace.nr_entries = 0;
+	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
+	xhlock->trace.entries = xhlock->trace_entries;
+	xhlock->trace.skip = 3;
+	save_stack_trace(&xhlock->trace);
+}
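+
+/*
+ * Note that the stack trace above is captured into the per-entry buffer
+ * right away; it only consumes global stack_trace[] storage if the entry
+ * is eventually committed, via copy_trace() below.
+ */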
+
+static inline int same_context_xhlock(struct hist_lock *xhlock)
+{
+	return xhlock->hlock.irq_context == task_irq_context(current);
+}
+
+/*
+ * This should be lockless as far as possible because it will be
+ * called very frequently.
+ */
+static void check_add_xhlock(struct held_lock *hlock)
+{
+	/*
+	 * Record a hist_lock only in case acquisitions ahead could
+	 * depend on the held_lock. For example, if the held_lock is a
+	 * trylock then acquisitions ahead never depend on it. In that
+	 * case, we don't need to record it. Just return.
+	 */
+	if (!current->xhlocks || !depend_before(hlock))
+		return;
+
+	add_xhlock(hlock);
+}
+
+/*
+ * For crosslock.
+ */
+static int add_xlock(struct held_lock *hlock)
+{
+	struct cross_lock *xlock;
+	unsigned int gen_id;
+
+	if (!graph_lock())
+		return 0;
+
+	xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
+
+	gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
+	xlock->hlock = *hlock;
+	xlock->hlock.gen_id = gen_id;
+	graph_unlock();
+
+	return 1;
+}
+
+/*
+ * Called for both normal and crosslock acquires. Normal locks will be
+ * pushed on the hist_lock queue. Cross locks will record state and
+ * stop regular lock_acquire() to avoid being placed on the held_lock
+ * stack.
+ *
+ * Return: 0 - failure;
+ *         1 - crosslock, done;
+ *         2 - normal lock, continue to held_lock[] ops.
+ */
+static int lock_acquire_crosslock(struct held_lock *hlock)
+{
+	/*
+	 *	CONTEXT 1		CONTEXT 2
+	 *	---------		---------
+	 *	lock A (cross)
+	 *	X = atomic_inc_return(&cross_gen_id)
+	 *	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+	 *				Y = atomic_read_acquire(&cross_gen_id)
+	 *				lock B
+	 *
+	 * atomic_read_acquire() is for ordering between A and B,
+	 * IOW, A happens before B, when CONTEXT 2 sees Y >= X.
+	 *
+	 * Pairs with atomic_inc_return() in add_xlock().
+	 */
+	hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
+
+	if (cross_lock(hlock->instance))
+		return add_xlock(hlock);
+
+	check_add_xhlock(hlock);
+	return 2;
+}
+
+static int copy_trace(struct stack_trace *trace)
+{
+	unsigned long *buf = stack_trace + nr_stack_trace_entries;
+	unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	unsigned int nr = min(max_nr, trace->nr_entries);
+
+	trace->nr_entries = nr;
+	memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
+	trace->entries = buf;
+	nr_stack_trace_entries += nr;
+
+	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+
+		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
+		dump_stack();
+
+		return 0;
+	}
+
+	return 1;
+}
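+
+/*
+ * copy_trace() serves below as the 'save' callback of check_prev_add(),
+ * so a privately recorded trace is moved into the global stack_trace[]
+ * array only when a dependency is actually added. Callers must hold the
+ * graph lock.
+ */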
+
+static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
+{
+	unsigned int xid, pid;
+	u64 chain_key;
+
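+	/*
+	 * Build the two-entry chain key <xlock, xhlock>; if it is already
+	 * in the chain cache, this pair has been committed before and can
+	 * be skipped cheaply.
+	 */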
+	xid = xlock_class(xlock) - lock_classes;
+	chain_key = iterate_chain_key((u64)0, xid);
+	pid = xhlock_class(xhlock) - lock_classes;
+	chain_key = iterate_chain_key(chain_key, pid);
+
+	if (lookup_chain_cache(chain_key))
+		return 1;
+
+	if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
+				chain_key))
+		return 0;
+
+	if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
+			    &xhlock->trace, copy_trace))
+		return 0;
+
+	return 1;
+}
+
+static void commit_xhlocks(struct cross_lock *xlock)
+{
+	unsigned int cur = current->xhlock_idx;
+	unsigned int i;
+
+	if (!graph_lock())
+		return;
+
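+	/*
+	 * Walk the history newest-first; the walk stops at the first
+	 * entry that is invalid, predates the crosslock, or was recorded
+	 * in a different irq context.
+	 */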
+	for (i = 0; i < MAX_XHLOCKS_NR; i++) {
+		struct hist_lock *xhlock = &xhlock(cur - i);
+
+		if (!xhlock_valid(xhlock))
+			break;
+
+		if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
+			break;
+
+		if (!same_context_xhlock(xhlock))
+			break;
+
+		/*
+		 * commit_xhlock() returns 0 with graph_lock already
+		 * released on failure.
+		 */
+		if (!commit_xhlock(xlock, xhlock))
+			return;
+	}
+
+	graph_unlock();
+}
+
+void lock_commit_crosslock(struct lockdep_map *lock)
+{
+	struct cross_lock *xlock;
+	unsigned long flags;
+
+	if (unlikely(!debug_locks || current->lockdep_recursion))
+		return;
+
+	if (!current->xhlocks)
+		return;
+
+	/*
+	 * Commit the hist_locks against the cross_lock only in case the
+	 * cross_lock could depend on acquisitions after it.
+	 *
+	 * For example, if the cross_lock does not have the 'check' flag
+	 * then we don't need to check dependencies and commit for that.
+	 * Just skip it. In that case, of course, the cross_lock does
+	 * not depend on acquisitions ahead, either.
+	 *
+	 * WARNING: Don't do that in add_xlock() in advance. When an
+	 * acquisition context is different from the commit context,
+	 * an invalid (skipped) cross_lock might be accessed.
+	 */
+	if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+	current->lockdep_recursion = 1;
+	xlock = &((struct lockdep_map_cross *)lock)->xlock;
+	commit_xhlocks(xlock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_commit_crosslock);
+
+/*
+ * Return: 1 - crosslock, done;
+ *         2 - normal lock, continue to held_lock[] ops.
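+ *
+ * Note: a crosslock needs no release-time work; its dependencies are
+ * established at commit time, so releasing it only has to skip the
+ * regular held_lock[] operations.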
+ */
+static int lock_release_crosslock(struct lockdep_map *lock)
+{
+	return cross_lock(lock) ? 1 : 2;
+}
+
+static void cross_init(struct lockdep_map *lock, int cross)
+{
+	lock->cross = cross;
+
+	/*
+	 * Crossrelease assumes that the ring buffer size of xhlocks
+	 * is a power of 2. So enforce it at build time.
+	 */
+	BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
+}
+
+void lockdep_init_task(struct task_struct *task)
+{
+	int i;
+
+	task->xhlock_idx = UINT_MAX;
+
+	for (i = 0; i < XHLOCK_CTX_NR; i++)
+		task->xhlock_idx_hist[i] = UINT_MAX;
+
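+	/*
+	 * kzalloc() zeroes the buffer, which is what makes xhlock_valid()
+	 * treat never-used entries (hlock.instance == NULL) as invalid.
+	 */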
+	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
+				GFP_KERNEL);
+}
+
+void lockdep_free_task(struct task_struct *task)
+{
+	if (task->xhlocks) {
+		void *tmp = task->xhlocks;
+		/* Disable crossrelease for current */
+		task->xhlocks = NULL;
+		kfree(tmp);
+	}
+}
+#endif