@@ -46,12 +46,6 @@
 # include <asm/mutex.h>
 #endif
 
-/*
- * A negative mutex count indicates that waiters are sleeping waiting for the
- * mutex.
- */
-#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
-
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -483,8 +477,11 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	/* once more, can we acquire the lock? */
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+	/*
+	 * Once more, try to acquire the lock. Only try-lock the mutex if
+	 * lock->count >= 0 to reduce unnecessary xchg operations.
+	 */
+	if (atomic_read(&lock->count) >= 0 && (atomic_xchg(&lock->count, 0) == 1))
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -504,9 +501,10 @@ slowpath:
 		 * it's unlocked. Later on, if we sleep, this is the
 		 * operation that gives us the lock. We xchg it to -1, so
 		 * that when we release the lock, we properly wake up the
-		 * other waiters:
+		 * other waiters. We only attempt the xchg if the count is
+		 * non-negative in order to avoid unnecessary xchg operations:
 		 */
-		if (MUTEX_SHOW_NO_WAITER(lock) &&
+		if (atomic_read(&lock->count) >= 0 &&
 		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
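
For illustration, here is a minimal standalone sketch of the check-before-xchg
pattern this patch applies at both call sites, written with C11 atomics for
userspace. The toy_mutex and toy_trylock names and the demo in main() are
hypothetical stand-ins, not code from kernel/mutex.c; only the count
convention mirrors the kernel's (1 unlocked, 0 locked with no waiters,
negative locked with waiters sleeping). The rationale is that an atomic read
is a plain shared load, whereas xchg is an unconditional read-modify-write
that pulls the cache line in exclusive state even when the lock cannot be
taken, so skipping the xchg whenever the count is already negative reduces
cache-line contention on a contended mutex.

#include <stdatomic.h>
#include <stdio.h>

struct toy_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, <0: locked, waiters */
};

/*
 * One slowpath attempt: only pay for the exchange (a full
 * read-modify-write) when a cheap read says the lock may be free.
 * new_val is 0 for the first attempt and -1 once queued as a waiter,
 * matching the two sites changed by the patch above.
 */
static int toy_trylock(struct toy_mutex *m, int new_val)
{
	if (atomic_load(&m->count) < 0)
		return 0;	/* waiters present: skip the xchg entirely */
	return atomic_exchange(&m->count, new_val) == 1;
}

int main(void)
{
	struct toy_mutex m = { .count = 1 };

	printf("%d\n", toy_trylock(&m, 0));	/* 1: acquired, count now 0 */
	printf("%d\n", toy_trylock(&m, -1));	/* 0: still held, count now -1 */
	return 0;
}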