@@ -50,16 +50,17 @@ EXPORT_SYMBOL(__mutex_init);
 /*
  * @owner: contains: 'struct task_struct *' to the current lock owner,
  * NULL means not owned. Since task_struct pointers are aligned at
- * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
- * bits to store extra state.
+ * at least L1_CACHE_BYTES, we have low bits to store extra state.
  *
  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
  * Bit1 indicates unlock needs to hand the lock to the top-waiter
+ * Bit2 indicates handoff has been done and we're waiting for pickup.
  */
 #define MUTEX_FLAG_WAITERS	0x01
 #define MUTEX_FLAG_HANDOFF	0x02
+#define MUTEX_FLAG_PICKUP	0x04
 
-#define MUTEX_FLAGS		0x03
+#define MUTEX_FLAGS		0x07
 
 static inline struct task_struct *__owner_task(unsigned long owner)
 {
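For readers unfamiliar with the pointer-packing trick used here: the owner field stores the holder's task_struct pointer and the three flag bits in a single word, and the pointer's alignment guarantees the low bits are always zero. A minimal userspace-style sketch of that encoding (the type and helper names are invented for illustration; this is not the kernel implementation):

	#include <assert.h>
	#include <stdint.h>

	#define FLAG_WAITERS	0x01UL
	#define FLAG_HANDOFF	0x02UL
	#define FLAG_PICKUP	0x04UL
	#define FLAG_MASK	0x07UL

	struct task;	/* stand-in for task_struct; assumed >= 8-byte aligned */

	/* Pack an aligned pointer and flag bits into one word. */
	static inline uintptr_t owner_encode(struct task *t, uintptr_t flags)
	{
		assert(((uintptr_t)t & FLAG_MASK) == 0);	/* alignment frees the low bits */
		return (uintptr_t)t | (flags & FLAG_MASK);
	}

	static inline struct task *owner_task(uintptr_t word)
	{
		return (struct task *)(word & ~FLAG_MASK);
	}

	static inline uintptr_t owner_flags(uintptr_t word)
	{
		return word & FLAG_MASK;
	}

Testing or clearing a flag is then a plain bitwise operation on the same word that the cmpxchg loops below operate on.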
@@ -72,38 +73,29 @@ static inline unsigned long __owner_flags(unsigned long owner)
 }
 
 /*
- * Actual trylock that will work on any unlocked state.
- *
- * When setting the owner field, we must preserve the low flag bits.
- *
- * Be careful with @handoff, only set that in a wait-loop (where you set
- * HANDOFF) to avoid recursive lock attempts.
+ * Trylock variant that returns the owning task on failure.
  */
-static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
+static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 {
 	unsigned long owner, curr = (unsigned long)current;
 
 	owner = atomic_long_read(&lock->owner);
 	for (;;) { /* must loop, can race against a flag */
 		unsigned long old, flags = __owner_flags(owner);
+		unsigned long task = owner & ~MUTEX_FLAGS;
 
-		if (__owner_task(owner)) {
-			if (handoff && unlikely(__owner_task(owner) == current)) {
-				/*
-				 * Provide ACQUIRE semantics for the lock-handoff.
-				 *
-				 * We cannot easily use load-acquire here, since
-				 * the actual load is a failed cmpxchg, which
-				 * doesn't imply any barriers.
-				 *
-				 * Also, this is a fairly unlikely scenario, and
-				 * this contains the cost.
-				 */
-				smp_mb(); /* ACQUIRE */
-				return true;
-			}
+		if (task) {
+			if (likely(task != curr))
+				break;
 
-			return false;
+			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
+				break;
+
+			flags &= ~MUTEX_FLAG_PICKUP;
+		} else {
+#ifdef CONFIG_DEBUG_MUTEXES
+			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
+#endif
 		}
 
 		/*
@@ -111,15 +103,24 @@ static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
 		 * past the point where we acquire it. This would be possible
 		 * if we (accidentally) set the bit on an unlocked mutex.
 		 */
-		if (handoff)
-			flags &= ~MUTEX_FLAG_HANDOFF;
+		flags &= ~MUTEX_FLAG_HANDOFF;
 
 		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
 		if (old == owner)
-			return true;
+			return NULL;
 
 		owner = old;
 	}
+
+	return __owner_task(owner);
+}
+
+/*
+ * Actual trylock that will work on any unlocked state.
+ */
+static inline bool __mutex_trylock(struct mutex *lock)
+{
+	return !__mutex_trylock_or_owner(lock);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
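__mutex_trylock_or_owner() above folds "try to take the lock" and "tell me who holds it" into one cmpxchg loop, which is what lets the optimistic-spin code further down get by with a single atomic operation per iteration. A rough userspace model of that loop using C11 atomics follows; it is a sketch under the same packed-word assumption as before and deliberately omits the HANDOFF/PICKUP handling (trylock_or_owner, owner_word and the bare struct task are illustrative names, not kernel API):

	#include <stdatomic.h>
	#include <stdint.h>

	#define FLAG_MASK 0x07UL	/* low bits reserved for WAITERS/HANDOFF/PICKUP */

	struct task;			/* stand-in for task_struct */

	/* Returns NULL when 'self' took the lock, otherwise the current owner. */
	static struct task *trylock_or_owner(_Atomic uintptr_t *owner_word,
					     struct task *self)
	{
		uintptr_t owner = atomic_load_explicit(owner_word, memory_order_relaxed);

		for (;;) {
			uintptr_t flags = owner & FLAG_MASK;
			uintptr_t task = owner & ~FLAG_MASK;

			if (task)	/* somebody already holds it */
				return (struct task *)task;

			/* Unlocked: install ourselves, preserving the flag bits. */
			if (atomic_compare_exchange_weak_explicit(owner_word, &owner,
								  (uintptr_t)self | flags,
								  memory_order_acquire,
								  memory_order_relaxed))
				return NULL;
			/* owner was reloaded by the failed CAS; loop and retry. */
		}
	}

On success the compare-and-swap provides the ACQUIRE ordering that the kernel version gets from atomic_long_cmpxchg_acquire().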
@@ -171,9 +172,9 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
 
 /*
  * Give up ownership to a specific task, when @task = NULL, this is equivalent
- * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
- * semantics like a regular unlock, the __mutex_trylock() provides matching
- * ACQUIRE semantics for the handoff.
+ * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
+ * WAITERS. Provides RELEASE semantics like a regular unlock, the
+ * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
  */
 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 {
@@ -184,10 +185,13 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 #endif
 
 		new = (owner & MUTEX_FLAG_WAITERS);
 		new |= (unsigned long)task;
+		if (task)
+			new |= MUTEX_FLAG_PICKUP;
 
 		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
 		if (old == owner)
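__mutex_handoff() is the unlock half of the handoff protocol: it writes the chosen waiter's task pointer plus MUTEX_FLAG_PICKUP with release semantics, and only that task may later clear PICKUP when it claims the lock in __mutex_trylock_or_owner(). A compressed sketch of the two sides of that handshake, again as a hedged userspace C11 model rather than the kernel's atomic_long API (handoff, try_pickup and owner_word are made-up names):

	#include <stdatomic.h>
	#include <stdint.h>

	#define FLAG_WAITERS	0x01UL
	#define FLAG_PICKUP	0x04UL
	#define FLAG_MASK	0x07UL

	/* Unlock side: give the lock to 'top_waiter' instead of releasing it. */
	static void handoff(_Atomic uintptr_t *owner_word, uintptr_t top_waiter)
	{
		uintptr_t owner = atomic_load_explicit(owner_word, memory_order_relaxed);

		for (;;) {
			/* Keep WAITERS, drop HANDOFF, mark the lock as awaiting pickup. */
			uintptr_t new = (owner & FLAG_WAITERS) | top_waiter | FLAG_PICKUP;

			if (atomic_compare_exchange_weak_explicit(owner_word, &owner, new,
								  memory_order_release,
								  memory_order_relaxed))
				break;
		}
	}

	/* Waiter side: only the designated task may clear PICKUP and proceed. */
	static int try_pickup(_Atomic uintptr_t *owner_word, uintptr_t self)
	{
		uintptr_t owner = atomic_load_explicit(owner_word, memory_order_acquire);

		if ((owner & ~FLAG_MASK) != self || !(owner & FLAG_PICKUP))
			return 0;
		return atomic_compare_exchange_strong_explicit(owner_word, &owner,
							       owner & ~FLAG_PICKUP,
							       memory_order_acquire,
							       memory_order_relaxed);
	}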
@@ -237,8 +241,8 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-						   struct ww_acquire_ctx *ww_ctx)
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 {
 #ifdef CONFIG_DEBUG_MUTEXES
 	/*
@@ -277,17 +281,50 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 	ww_ctx->acquired++;
 }
 
+static inline bool __sched
+__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
+{
+	return a->stamp - b->stamp <= LONG_MAX &&
+	       (a->stamp != b->stamp || a > b);
+}
+
+/*
+ * Wake up any waiters that may have to back off when the lock is held by the
+ * given context.
+ *
+ * Due to the invariants on the wait list, this can only affect the first
+ * waiter with a context.
+ *
+ * The current task must not be on the wait list.
+ */
+static void __sched
+__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+	struct mutex_waiter *cur;
+
+	lockdep_assert_held(&lock->wait_lock);
+
+	list_for_each_entry(cur, &lock->wait_list, list) {
+		if (!cur->ww_ctx)
+			continue;
+
+		if (cur->ww_ctx->acquired > 0 &&
+		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
+
+		break;
+	}
+}
+
 /*
  * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  */
 static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
-			      struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	unsigned long flags;
-	struct mutex_waiter *cur;
-
 	ww_mutex_lock_acquired(lock, ctx);
 
 	lock->ctx = ctx;
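__ww_ctx_stamp_after() compares acquire-context stamps in a wraparound-safe way: with unsigned arithmetic, a->stamp - b->stamp <= LONG_MAX holds exactly when a was stamped at or after b in modular order, and the pointer comparison only breaks exact ties. A tiny self-contained check of the wraparound property (illustrative only; the tie-break on context addresses is left out):

	#include <assert.h>
	#include <limits.h>

	/* Wraparound-safe "a was stamped after b" for unsigned sequence numbers. */
	static int stamp_after(unsigned long a, unsigned long b)
	{
		return a - b <= LONG_MAX && a != b;
	}

	int main(void)
	{
		assert(stamp_after(10, 3));			/* plain ordering */
		assert(!stamp_after(3, 10));
		assert(stamp_after(2, ULONG_MAX - 1));		/* survives counter wrap */
		assert(!stamp_after(ULONG_MAX - 1, 2));
		return 0;
	}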
@@ -311,46 +348,79 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 	 * Uh oh, we raced in fastpath, wake up everyone in this case,
 	 * so they can see the new lock->ctx.
 	 */
-	spin_lock_mutex(&lock->base.wait_lock, flags);
-	list_for_each_entry(cur, &lock->base.wait_list, list) {
-		debug_mutex_wake_waiter(&lock->base, cur);
-		wake_up_process(cur->task);
-	}
-	spin_unlock_mutex(&lock->base.wait_lock, flags);
+	spin_lock(&lock->base.wait_lock);
+	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
+	spin_unlock(&lock->base.wait_lock);
 }
 
 /*
- * After acquiring lock in the slowpath set ctx and wake up any
- * waiters so they can recheck.
+ * After acquiring lock in the slowpath set ctx.
+ *
+ * Unlike for the fast path, the caller ensures that waiters are woken up where
+ * necessary.
  *
  * Callers must hold the mutex wait_lock.
  */
 static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock,
-			      struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	struct mutex_waiter *cur;
-
 	ww_mutex_lock_acquired(lock, ctx);
 	lock->ctx = ctx;
+}
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+
+static inline
+bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+			    struct mutex_waiter *waiter)
+{
+	struct ww_mutex *ww;
+
+	ww = container_of(lock, struct ww_mutex, base);
 
 	/*
-	 * Give any possible sleeping processes the chance to wake up,
-	 * so they can recheck if they have to back off.
+	 * If ww->ctx is set the contents are undefined, only
+	 * by acquiring wait_lock there is a guarantee that
+	 * they are not invalid when reading.
+	 *
+	 * As such, when deadlock detection needs to be
+	 * performed the optimistic spinning cannot be done.
+	 *
+	 * Check this in every inner iteration because we may
+	 * be racing against another thread's ww_mutex_lock.
 	 */
-	list_for_each_entry(cur, &lock->base.wait_list, list) {
-		debug_mutex_wake_waiter(&lock->base, cur);
-		wake_up_process(cur->task);
-	}
+	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
+		return false;
+
+	/*
+	 * If we aren't on the wait list yet, cancel the spin
+	 * if there are waiters. We want to avoid stealing the
+	 * lock from a waiter with an earlier stamp, since the
+	 * other thread may already own a lock that we also
+	 * need.
+	 */
+	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
+		return false;
+
+	/*
+	 * Similarly, stop spinning if we are no longer the
+	 * first waiter.
+	 */
+	if (waiter && !__mutex_waiter_is_first(lock, waiter))
+		return false;
+
+	return true;
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
+ * Look out! "owner" is an entirely speculative pointer access and not
+ * reliable.
+ *
+ * "noinline" so that this function shows up on perf profiles.
  */
 static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
 	bool ret = true;
 
@@ -373,6 +443,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 			break;
 		}
 
+		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
+			ret = false;
+			break;
+		}
+
 		cpu_relax();
 	}
 	rcu_read_unlock();
@@ -431,12 +506,10 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  * with the spinner at the head of the OSQ, if present, until the owner is
  * changed to itself.
  */
-static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx,
-				  const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+		      const bool use_ww_ctx, struct mutex_waiter *waiter)
 {
-	struct task_struct *task = current;
-
 	if (!waiter) {
 		/*
 		 * The purpose of the mutex_can_spin_on_owner() function is
@@ -460,40 +533,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 	for (;;) {
 		struct task_struct *owner;
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
-			struct ww_mutex *ww;
-
-			ww = container_of(lock, struct ww_mutex, base);
-			/*
-			 * If ww->ctx is set the contents are undefined, only
-			 * by acquiring wait_lock there is a guarantee that
-			 * they are not invalid when reading.
-			 *
-			 * As such, when deadlock detection needs to be
-			 * performed the optimistic spinning cannot be done.
-			 */
-			if (READ_ONCE(ww->ctx))
-				goto fail_unlock;
-		}
+		/* Try to acquire the mutex... */
+		owner = __mutex_trylock_or_owner(lock);
+		if (!owner)
+			break;
 
 		/*
-		 * If there's an owner, wait for it to either
+		 * There's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		owner = __mutex_owner(lock);
-		if (owner) {
-			if (waiter && owner == task) {
-				smp_mb(); /* ACQUIRE */
-				break;
-			}
-
-			if (!mutex_spin_on_owner(lock, owner))
-				goto fail_unlock;
-		}
-
-		/* Try to acquire the mutex if it is unlocked. */
-		if (__mutex_trylock(lock, waiter))
-			break;
+		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
+			goto fail_unlock;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -532,9 +582,9 @@ fail:
 	return false;
 }
 #else
-static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx,
-				  const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+		      const bool use_ww_ctx, struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -594,23 +644,88 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
+			    struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+	struct mutex_waiter *cur;
+
+	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
+		goto deadlock;
 
-	if (!hold_ctx)
+	/*
+	 * If there is a waiter in front of us that has a context, then its
+	 * stamp is earlier than ours and we must back off.
+	 */
+	cur = waiter;
+	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+		if (cur->ww_ctx)
+			goto deadlock;
+	}
+
+	return 0;
+
+deadlock:
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+	ctx->contending_lock = ww;
+#endif
+	return -EDEADLK;
+}
+
+static inline int __sched
+__ww_mutex_add_waiter(struct mutex_waiter *waiter,
+		      struct mutex *lock,
+		      struct ww_acquire_ctx *ww_ctx)
+{
+	struct mutex_waiter *cur;
+	struct list_head *pos;
+
+	if (!ww_ctx) {
+		list_add_tail(&waiter->list, &lock->wait_list);
 		return 0;
+	}
 
-	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
-	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+	/*
+	 * Add the waiter before the first waiter with a higher stamp.
+	 * Waiters without a context are skipped to avoid starving
+	 * them.
+	 */
+	pos = &lock->wait_list;
+	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
+		if (!cur->ww_ctx)
+			continue;
+
+		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
+			/* Back off immediately if necessary. */
+			if (ww_ctx->acquired > 0) {
 #ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-		ctx->contending_lock = ww;
+				struct ww_mutex *ww;
+
+				ww = container_of(lock, struct ww_mutex, base);
+				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
+				ww_ctx->contending_lock = ww;
 #endif
-		return -EDEADLK;
+				return -EDEADLK;
+			}
+
+			break;
+		}
+
+		pos = &cur->list;
+
+		/*
+		 * Wake up the waiter so that it gets a chance to back
+		 * off.
+		 */
+		if (cur->ww_ctx->acquired > 0) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
 	}
 
+	list_add_tail(&waiter->list, pos);
 	return 0;
 }
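The stamp-ordered wait list and the -EDEADLK backoff implemented above serve the usual wait/wound pattern on the API side: a task that is "wounded" drops the locks it holds, sleeps on the contended lock, and retries. A hedged sketch of that caller-side pattern for two locks; the helper lock_two() and its two-lock shape are ours, while ww_acquire_init()/ww_mutex_lock()/ww_mutex_lock_slow()/ww_acquire_done()/ww_acquire_fini() are the real API:

	#include <linux/kernel.h>
	#include <linux/ww_mutex.h>

	/* Illustrative only: acquire two ww_mutexes without deadlocking. */
	static int lock_two(struct ww_mutex *ma, struct ww_mutex *mb,
			    struct ww_acquire_ctx *ctx, struct ww_class *class)
	{
		int ret;

		ww_acquire_init(ctx, class);

		ret = ww_mutex_lock(ma, ctx);
		if (ret)
			goto out_fini;

		while ((ret = ww_mutex_lock(mb, ctx)) == -EDEADLK) {
			/*
			 * Wounded by an older context: drop what we hold, then
			 * sleep on the contended lock before retrying.
			 */
			ww_mutex_unlock(ma);
			ww_mutex_lock_slow(mb, ctx);
			swap(ma, mb);	/* next iteration re-acquires the dropped lock */
		}
		if (ret)
			goto out_unlock;

		ww_acquire_done(ctx);
		return 0;

	out_unlock:
		ww_mutex_unlock(ma);
	out_fini:
		ww_acquire_fini(ctx);
		return ret;
	}

ww_mutex_lock_slow() is only legal once everything else has been dropped, which is exactly the state the backoff path is in.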
|
|
|
@@ -622,15 +737,15 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
|
struct lockdep_map *nest_lock, unsigned long ip,
|
|
|
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
|
|
|
{
|
|
|
- struct task_struct *task = current;
|
|
|
struct mutex_waiter waiter;
|
|
|
- unsigned long flags;
|
|
|
bool first = false;
|
|
|
struct ww_mutex *ww;
|
|
|
int ret;
|
|
|
|
|
|
- if (use_ww_ctx) {
|
|
|
- ww = container_of(lock, struct ww_mutex, base);
|
|
|
+ might_sleep();
|
|
|
+
|
|
|
+ ww = container_of(lock, struct ww_mutex, base);
|
|
|
+ if (use_ww_ctx && ww_ctx) {
|
|
|
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
|
|
|
return -EALREADY;
|
|
|
}
|
|
@@ -638,36 +753,54 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
|
preempt_disable();
|
|
|
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
|
|
|
|
|
|
- if (__mutex_trylock(lock, false) ||
|
|
|
- mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
|
|
|
+ if (__mutex_trylock(lock) ||
|
|
|
+ mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
|
|
|
/* got the lock, yay! */
|
|
|
lock_acquired(&lock->dep_map, ip);
|
|
|
- if (use_ww_ctx)
|
|
|
+ if (use_ww_ctx && ww_ctx)
|
|
|
ww_mutex_set_context_fastpath(ww, ww_ctx);
|
|
|
preempt_enable();
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
- spin_lock_mutex(&lock->wait_lock, flags);
|
|
|
+ spin_lock(&lock->wait_lock);
|
|
|
/*
|
|
|
* After waiting to acquire the wait_lock, try again.
|
|
|
*/
|
|
|
- if (__mutex_trylock(lock, false))
|
|
|
+ if (__mutex_trylock(lock)) {
|
|
|
+ if (use_ww_ctx && ww_ctx)
|
|
|
+ __ww_mutex_wakeup_for_backoff(lock, ww_ctx);
|
|
|
+
|
|
|
goto skip_wait;
|
|
|
+ }
|
|
|
|
|
|
debug_mutex_lock_common(lock, &waiter);
|
|
|
- debug_mutex_add_waiter(lock, &waiter, task);
|
|
|
+ debug_mutex_add_waiter(lock, &waiter, current);
|
|
|
|
|
|
- /* add waiting tasks to the end of the waitqueue (FIFO): */
|
|
|
- list_add_tail(&waiter.list, &lock->wait_list);
|
|
|
- waiter.task = task;
|
|
|
+ lock_contended(&lock->dep_map, ip);
|
|
|
+
|
|
|
+ if (!use_ww_ctx) {
|
|
|
+ /* add waiting tasks to the end of the waitqueue (FIFO): */
|
|
|
+ list_add_tail(&waiter.list, &lock->wait_list);
|
|
|
+
|
|
|
+#ifdef CONFIG_DEBUG_MUTEXES
|
|
|
+ waiter.ww_ctx = MUTEX_POISON_WW_CTX;
|
|
|
+#endif
|
|
|
+ } else {
|
|
|
+ /* Add in stamp order, waking up waiters that must back off. */
|
|
|
+ ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
|
|
|
+ if (ret)
|
|
|
+ goto err_early_backoff;
|
|
|
+
|
|
|
+ waiter.ww_ctx = ww_ctx;
|
|
|
+ }
|
|
|
+
|
|
|
+ waiter.task = current;
|
|
|
|
|
|
if (__mutex_waiter_is_first(lock, &waiter))
|
|
|
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
|
|
|
|
|
|
- lock_contended(&lock->dep_map, ip);
|
|
|
-
|
|
|
- set_task_state(task, state);
|
|
|
+ set_current_state(state);
|
|
|
for (;;) {
|
|
|
/*
|
|
|
* Once we hold wait_lock, we're serialized against
|
|
@@ -675,7 +808,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
|
* before testing the error conditions to make sure we pick up
|
|
|
* the handoff.
|
|
|
*/
|
|
|
- if (__mutex_trylock(lock, first))
|
|
|
+ if (__mutex_trylock(lock))
|
|
|
goto acquired;
|
|
|
|
|
|
/*
|
|
@@ -683,42 +816,47 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
|
* wait_lock. This ensures the lock cancellation is ordered
|
|
|
* against mutex_unlock() and wake-ups do not go missing.
|
|
|
*/
|
|
|
- if (unlikely(signal_pending_state(state, task))) {
|
|
|
+ if (unlikely(signal_pending_state(state, current))) {
|
|
|
ret = -EINTR;
|
|
|
goto err;
|
|
|
}
|
|
|
|
|
|
- if (use_ww_ctx && ww_ctx->acquired > 0) {
|
|
|
- ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
|
|
|
+ if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
|
|
|
+ ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
|
|
|
if (ret)
|
|
|
goto err;
|
|
|
}
|
|
|
|
|
|
- spin_unlock_mutex(&lock->wait_lock, flags);
|
|
|
+ spin_unlock(&lock->wait_lock);
|
|
|
schedule_preempt_disabled();
|
|
|
|
|
|
- if (!first && __mutex_waiter_is_first(lock, &waiter)) {
|
|
|
- first = true;
|
|
|
- __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
|
|
|
+ /*
|
|
|
+ * ww_mutex needs to always recheck its position since its waiter
|
|
|
+ * list is not FIFO ordered.
|
|
|
+ */
|
|
|
+ if ((use_ww_ctx && ww_ctx) || !first) {
|
|
|
+ first = __mutex_waiter_is_first(lock, &waiter);
|
|
|
+ if (first)
|
|
|
+ __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
|
|
|
}
|
|
|
|
|
|
- set_task_state(task, state);
|
|
|
+ set_current_state(state);
|
|
|
/*
|
|
|
* Here we order against unlock; we must either see it change
|
|
|
* state back to RUNNING and fall through the next schedule(),
|
|
|
* or we must see its unlock and acquire.
|
|
|
*/
|
|
|
- if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
|
|
|
- __mutex_trylock(lock, first))
|
|
|
+ if (__mutex_trylock(lock) ||
|
|
|
+ (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
|
|
|
break;
|
|
|
|
|
|
- spin_lock_mutex(&lock->wait_lock, flags);
|
|
|
+ spin_lock(&lock->wait_lock);
|
|
|
}
|
|
|
- spin_lock_mutex(&lock->wait_lock, flags);
|
|
|
+ spin_lock(&lock->wait_lock);
|
|
|
acquired:
|
|
|
- __set_task_state(task, TASK_RUNNING);
|
|
|
+ __set_current_state(TASK_RUNNING);
|
|
|
|
|
|
- mutex_remove_waiter(lock, &waiter, task);
|
|
|
+ mutex_remove_waiter(lock, &waiter, current);
|
|
|
if (likely(list_empty(&lock->wait_list)))
|
|
|
__mutex_clear_flag(lock, MUTEX_FLAGS);
|
|
|
|
|
@@ -728,30 +866,44 @@ skip_wait:
|
|
|
/* got the lock - cleanup and rejoice! */
|
|
|
lock_acquired(&lock->dep_map, ip);
|
|
|
|
|
|
- if (use_ww_ctx)
|
|
|
+ if (use_ww_ctx && ww_ctx)
|
|
|
ww_mutex_set_context_slowpath(ww, ww_ctx);
|
|
|
|
|
|
- spin_unlock_mutex(&lock->wait_lock, flags);
|
|
|
+ spin_unlock(&lock->wait_lock);
|
|
|
preempt_enable();
|
|
|
return 0;
|
|
|
|
|
|
err:
|
|
|
- __set_task_state(task, TASK_RUNNING);
|
|
|
- mutex_remove_waiter(lock, &waiter, task);
|
|
|
- spin_unlock_mutex(&lock->wait_lock, flags);
|
|
|
+ __set_current_state(TASK_RUNNING);
|
|
|
+ mutex_remove_waiter(lock, &waiter, current);
|
|
|
+err_early_backoff:
|
|
|
+ spin_unlock(&lock->wait_lock);
|
|
|
debug_mutex_free_waiter(&waiter);
|
|
|
mutex_release(&lock->dep_map, 1, ip);
|
|
|
preempt_enable();
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+static int __sched
|
|
|
+__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
|
|
|
+ struct lockdep_map *nest_lock, unsigned long ip)
|
|
|
+{
|
|
|
+ return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
|
|
|
+}
|
|
|
+
|
|
|
+static int __sched
|
|
|
+__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
|
|
|
+ struct lockdep_map *nest_lock, unsigned long ip,
|
|
|
+ struct ww_acquire_ctx *ww_ctx)
|
|
|
+{
|
|
|
+ return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
|
|
|
+}
|
|
|
+
|
|
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
|
|
void __sched
|
|
|
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
|
|
|
{
|
|
|
- might_sleep();
|
|
|
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
|
|
|
- subclass, NULL, _RET_IP_, NULL, 0);
|
|
|
+ __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
|
|
|
}
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(mutex_lock_nested);
|
|
@@ -759,27 +911,21 @@ EXPORT_SYMBOL_GPL(mutex_lock_nested);
 void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
-	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    0, nest, _RET_IP_, NULL, 0);
+	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
-	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE,
-				   subclass, NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
-	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -824,35 +970,37 @@ ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 }
 
 int __sched
-__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	int ret;
 
 	might_sleep();
-	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
-	if (!ret && ctx->acquired > 1)
+	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
+			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+			      ctx);
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+EXPORT_SYMBOL_GPL(ww_mutex_lock);
 
 int __sched
-__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	int ret;
 
 	might_sleep();
-	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
+	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
+			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+			      ctx);
 
-	if (!ret && ctx->acquired > 1)
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 
 #endif
 
@@ -862,8 +1010,8 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 {
 	struct task_struct *next = NULL;
-	unsigned long owner, flags;
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long owner;
 
 	mutex_release(&lock->dep_map, 1, ip);
 
@@ -880,6 +1028,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 #endif
 
 		if (owner & MUTEX_FLAG_HANDOFF)
@@ -897,7 +1046,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 		owner = old;
 	}
 
-	spin_lock_mutex(&lock->wait_lock, flags);
+	spin_lock(&lock->wait_lock);
 	debug_mutex_unlock(lock);
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
@@ -914,7 +1063,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
-	spin_unlock_mutex(&lock->wait_lock, flags);
+	spin_unlock(&lock->wait_lock);
 
 	wake_up_q(&wake_q);
 }
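The unlock slowpath above collects the task(s) to wake on a wake_q while wait_lock is held and calls wake_up_q() only after dropping it, so the potentially expensive wakeup never runs inside the spinlock's critical section. A small hedged sketch of that general pattern (release_and_wake() is an invented helper; DEFINE_WAKE_Q(), wake_q_add() and wake_up_q() are the real primitives):

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	/* Illustrative only: pick a waiter under the lock, wake it after unlock. */
	static void release_and_wake(spinlock_t *wait_lock, struct task_struct *waiter)
	{
		DEFINE_WAKE_Q(wake_q);

		spin_lock(wait_lock);
		/* ... manipulate the wait list, choose the task(s) to wake ... */
		if (waiter)
			wake_q_add(&wake_q, waiter);
		spin_unlock(wait_lock);

		/* The part that may schedule happens without the lock held. */
		wake_up_q(&wake_q);
	}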
@@ -977,37 +1126,34 @@ EXPORT_SYMBOL_GPL(mutex_lock_io);
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-			    NULL, _RET_IP_, NULL, 0);
+	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
-	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-				   NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, NULL, 0);
+	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx, 1);
+	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+			       _RET_IP_, ctx);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 					    struct ww_acquire_ctx *ctx)
 {
-	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx, 1);
+	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+			       _RET_IP_, ctx);
 }
 
 #endif
@@ -1028,7 +1174,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	bool locked = __mutex_trylock(lock, false);
+	bool locked = __mutex_trylock(lock);
 
 	if (locked)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
@@ -1039,32 +1185,34 @@ EXPORT_SYMBOL(mutex_trylock);
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 int __sched
-__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
 
 	return __ww_mutex_lock_slowpath(lock, ctx);
 }
-EXPORT_SYMBOL(__ww_mutex_lock);
+EXPORT_SYMBOL(ww_mutex_lock);
 
 int __sched
-__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
 
 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
 }
-EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
 
 #endif