@@ -684,9 +684,15 @@ __mutex_unlock_common_slowpath(struct mutex *lock, int nested)
 	unsigned long flags;
 
 	/*
-	 * some architectures leave the lock unlocked in the fastpath failure
+	 * As a performance measure, release the lock before doing the
+	 * wakeup-related duties that follow. This allows other tasks to
+	 * acquire the lock sooner, while the unlock path finishes its
+	 * remaining cleanup. This can be done because we do not enforce
+	 * strict equivalence between the mutex counter and the wait_list.
+	 *
+	 * Some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
-	 * unlock it here
+	 * unlock it here - as the lock counter is currently 0 or negative.
 	 */
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
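
For readers who want to see the reordering in isolation, here is a minimal
userspace sketch of the idea: a hypothetical toy_mutex built on C11 atomics,
with a pthread mutex/condvar standing in for wait_lock and the wait_list.
This is only an illustration under those assumptions, not the kernel's mutex
implementation. The point is that the unlock slowpath sets the counter back
to 1 before taking the internal wait_lock and waking a waiter, so a
concurrent locker's fastpath can succeed while the wakeup is still in flight.

/* toy_mutex.c - illustrative sketch only, NOT the kernel implementation.
 * Build: gcc -std=c11 -pthread toy_mutex.c -o toy_mutex
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* count: 1 = unlocked, 0 = locked, -1 = locked and possibly contended */
struct toy_mutex {
	atomic_int	count;
	pthread_mutex_t	wait_lock;	/* stands in for ->wait_lock */
	pthread_cond_t	waiters;	/* stands in for ->wait_list */
};

static void toy_mutex_init(struct toy_mutex *m)
{
	atomic_init(&m->count, 1);
	pthread_mutex_init(&m->wait_lock, NULL);
	pthread_cond_init(&m->waiters, NULL);
}

static void toy_mutex_lock(struct toy_mutex *m)
{
	int one = 1;

	/* fastpath: 1 -> 0 acquires an uncontended lock */
	if (atomic_compare_exchange_strong(&m->count, &one, 0))
		return;

	/* slowpath: flag contention (-1) and sleep until count becomes 1 */
	pthread_mutex_lock(&m->wait_lock);
	while (atomic_exchange(&m->count, -1) != 1)
		pthread_cond_wait(&m->waiters, &m->wait_lock);
	/* count was 1 and is now -1: the lock is ours; leaving it at -1
	 * forces the next unlock into the slowpath so waiters get woken */
	pthread_mutex_unlock(&m->wait_lock);
}

static void toy_mutex_unlock(struct toy_mutex *m)
{
	int zero = 0;

	/* fastpath: 0 -> 1, nobody ever contended */
	if (atomic_compare_exchange_strong(&m->count, &zero, 1))
		return;

	/*
	 * Slowpath - the ordering the patch documents: release the lock
	 * first, then take wait_lock and wake a waiter. Another task's
	 * lock fastpath can succeed while we are still in here.
	 */
	atomic_store(&m->count, 1);

	pthread_mutex_lock(&m->wait_lock);
	pthread_cond_signal(&m->waiters);
	pthread_mutex_unlock(&m->wait_lock);
}

static struct toy_mutex m;
static long shared;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		toy_mutex_lock(&m);
		shared++;
		toy_mutex_unlock(&m);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	toy_mutex_init(&m);
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("shared = %ld (expect 400000)\n", shared);
	return 0;
}

The trade-off is the one the comment describes: a woken waiter may find the
lock already taken by a newcomer and go back to sleep, which is acceptable
precisely because the counter and the wait queue are not kept strictly
equivalent.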