@@ -20,7 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/mutex.h>
 #include <asm/qrwlock.h>
 
 /**
@@ -35,7 +34,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 		cnts = smp_load_acquire((u32 *)&lock->cnts);
 	}
 }
@@ -75,7 +74,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
 	 * to make sure that the write lock isn't taken.
 	 */
 	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
@@ -114,7 +113,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 			    cnts | _QW_WAITING) == cnts))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -125,7 +124,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 			    _QW_LOCKED) == _QW_WAITING))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 unlock:
	arch_spin_unlock(&lock->lock);