@@ -54,6 +54,11 @@ struct pv_node {
 	u8 state;
 };
 
+/*
+ * Include queued spinlock statistics code
+ */
+#include "qspinlock_stat.h"
+
 /*
  * By replacing the regular queued_spin_trylock() with the function below,
  * it will be called once when a lock waiter enter the PV slowpath before
@@ -65,9 +70,11 @@ struct pv_node {
 static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
+	int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
+		  (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
 
-	return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-	       (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
+	qstat_inc(qstat_pv_lock_stealing, ret);
+	return ret;
 }
 
 /*
@@ -137,11 +144,6 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 }
 #endif /* _Q_PENDING_BITS == 8 */
 
-/*
- * Include queued spinlock statistics code
- */
-#include "qspinlock_stat.h"
-
 /*
  * Lock and MCS node addresses hash table for fast lookup
  *