@@ -403,9 +403,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (!mutex_can_spin_on_owner(lock))
 		goto slowpath;
 
+	mcs_spin_lock(&lock->mcs_lock, &node);
 	for (;;) {
 		struct task_struct *owner;
-		struct mcs_spinlock node;
 
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
@@ -420,19 +420,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				goto slowpath;
+				break;
 		}
 
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		mcs_spin_lock(&lock->mcs_lock, &node);
 		owner = ACCESS_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner)) {
-			mcs_spin_unlock(&lock->mcs_lock, &node);
-			goto slowpath;
-		}
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
 
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
@@ -449,7 +446,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			preempt_enable();
 			return 0;
 		}
-		mcs_spin_unlock(&lock->mcs_lock, &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the
@@ -458,7 +454,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			goto slowpath;
+			break;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -468,6 +464,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
+	mcs_spin_unlock(&lock->mcs_lock, &node);
 slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
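
The net effect of the patch is that the MCS node and its lock/unlock calls move out of the spin loop: the queue lock is taken once before entering the loop, every bail-out path becomes a plain break, and a single mcs_spin_unlock() after the loop covers them all before control falls through to the slowpath. The following stand-alone sketch shows the resulting shape of the optimistic-spin path; it is NOT the kernel source, and queue_lock(), queue_unlock(), owner_running() and need_to_bail() are hypothetical stand-ins for mcs_spin_lock(), mcs_spin_unlock(), mutex_spin_on_owner() and the need_resched()/rt_task() test.

/*
 * Simplified, self-contained sketch of the control flow the patch
 * produces -- not the kernel code.  All names below are hypothetical
 * stand-ins for the kernel primitives named in the lead-in above.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_mutex {
	atomic_int count;		/* 1 = unlocked, 0 = locked */
	_Atomic(void *) owner;		/* owning task, or NULL */
};

/* Hypothetical stand-ins for the MCS queue-lock primitives. */
static void queue_lock(struct fake_mutex *m)   { (void)m; }
static void queue_unlock(struct fake_mutex *m) { (void)m; }

/* Stand-in for mutex_spin_on_owner(): true while the owner keeps running. */
static bool owner_running(struct fake_mutex *m, void *owner)
{
	(void)m; (void)owner;
	return false;			/* pretend the owner blocked */
}

/* Stand-in for the need_resched() || rt_task(task) bail-out test. */
static bool need_to_bail(void) { return false; }

static bool optimistic_spin(struct fake_mutex *m)
{
	bool acquired = false;

	queue_lock(m);			/* taken once, before the loop */
	for (;;) {
		void *owner = atomic_load(&m->owner);

		/* Owner stopped running: bail out via break. */
		if (owner && !owner_running(m, owner))
			break;

		/* Try to take the mutex: 1 -> 0, like atomic_cmpxchg(). */
		int expected = 1;
		if (atomic_compare_exchange_strong(&m->count, &expected, 0)) {
			acquired = true;
			break;
		}

		/* No owner, but we should reschedule: also just break. */
		if (!owner && need_to_bail())
			break;
	}
	queue_unlock(m);		/* the single unlock site */

	return acquired;		/* false => caller takes the slowpath */
}

For simplicity the sketch routes the successful acquire through the same break-to-unlock exit, whereas in the patched kernel function that branch returns from inside the loop. The structural point is the same either way: with one lock site before the loop and one unlock site after it, none of the bail-out paths can forget to release the MCS queue lock, which is exactly what each of the replaced goto slowpath paths previously had to do by hand.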