@@ -87,8 +87,6 @@ struct pv_node {
 #define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
 	/*
 	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
@@ -97,7 +95,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 		int val = atomic_read(&lock->val);
 
 		if (!(val & _Q_LOCKED_PENDING_MASK) &&
-		    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+		    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
 			qstat_inc(qstat_pv_lock_stealing, true);
 			return true;
 		}
@@ -117,16 +115,12 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 #if _Q_PENDING_BITS == 8
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	WRITE_ONCE(l->pending, 1);
+	WRITE_ONCE(lock->pending, 1);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	WRITE_ONCE(l->pending, 0);
+	WRITE_ONCE(lock->pending, 0);
 }
 
 /*
@@ -136,10 +130,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	return !READ_ONCE(l->locked) &&
-	       (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
+	return !READ_ONCE(lock->locked) &&
+	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
 			_Q_LOCKED_VAL) == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
@@ -384,7 +376,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
-	struct __qspinlock *l = (void *)lock;
 
 	/*
 	 * If the vCPU is indeed halted, advance its state to match that of
@@ -413,7 +404,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * the hash table later on at unlock time, no atomic instruction is
 	 * needed.
 	 */
-	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
 	(void)pv_hash(lock, pn);
 }
 
@@ -428,7 +419,6 @@ static u32
 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
-	struct __qspinlock *l = (void *)lock;
 	struct qspinlock **lp = NULL;
 	int waitcnt = 0;
 	int loop;
@@ -479,13 +469,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 			 *
 			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
 			 */
-			if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
+			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
 				/*
 				 * The lock was free and now we own the lock.
 				 * Change the lock value back to _Q_LOCKED_VAL
 				 * and unhash the table.
 				 */
-				WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
 				WRITE_ONCE(*lp, NULL);
 				goto gotlock;
 			}
@@ -493,7 +483,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 		WRITE_ONCE(pn->state, vcpu_hashed);
 		qstat_inc(qstat_pv_wait_head, true);
 		qstat_inc(qstat_pv_wait_again, waitcnt);
-		pv_wait(&l->locked, _Q_SLOW_VAL);
+		pv_wait(&lock->locked, _Q_SLOW_VAL);
 
 		/*
 		 * Because of lock stealing, the queue head vCPU may not be
@@ -518,7 +508,6 @@ gotlock:
 __visible void
 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 {
-	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
 
 	if (unlikely(locked != _Q_SLOW_VAL)) {
@@ -547,7 +536,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	 * Now that we have a reference to the (likely) blocked pv_node,
 	 * release the lock.
 	 */
-	smp_store_release(&l->locked, 0);
+	smp_store_release(&lock->locked, 0);
 
 	/*
 	 * At this point the memory pointed at by lock can be freed/reused,
@@ -573,7 +562,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
 	u8 locked;
 
 	/*
@@ -581,7 +569,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
+	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
 	if (likely(locked == _Q_LOCKED_VAL))
 		return;
 
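
Note: every hunk above replaces an access through the old "struct __qspinlock *l = (void *)lock;" shadow structure with a direct access to the same field on struct qspinlock, so the byte and halfword views of the lock word (locked, pending, locked_pending, tail) have to be available on struct qspinlock itself. The sketch below is only an illustration of that merged layout for the little-endian case, with field names taken from the accesses in the hunks; the authoritative definition, including the big-endian variants, lives in include/asm-generic/qspinlock_types.h.

/*
 * Sketch only: merged lock-word layout assumed by the accesses above
 * (little-endian case). The real definition in qspinlock_types.h is
 * authoritative and also covers the big-endian layout.
 */
typedef struct qspinlock {
	union {
		atomic_t val;				/* whole 32-bit lock word     */
		struct {
			u8	locked;			/* bits  0- 7: locked byte    */
			u8	pending;		/* bits  8-15: pending byte   */
		};
		struct {
			u16	locked_pending;		/* bits  0-15                 */
			u16	tail;			/* bits 16-31: MCS tail       */
		};
	};
} arch_spinlock_t;

With a layout like this, cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) in the unfair-trylock hunk operates on the locked byte alone, so the lock-stealing path cannot clobber the pending bit or the queue tail.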