@@ -671,10 +671,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
-	mutex_release(&lock->dep_map, nested, _RET_IP_);
-	debug_mutex_unlock(lock);
-
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
@@ -683,6 +679,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
+	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
+
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =