@@ -347,6 +347,15 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	}
 }
 
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+	/*
+	 * As lock holder preemption issue, we both skip spinning if
+	 * task is not on cpu or its cpu is preempted
+	 */
+	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+}
+
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;
@@ -359,17 +368,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 
 	rcu_read_lock();
 	owner = READ_ONCE(sem->owner);
-	if (!owner || !is_rwsem_owner_spinnable(owner)) {
-		ret = !owner;	/* !owner is spinnable */
-		goto done;
+	if (owner) {
+		ret = is_rwsem_owner_spinnable(owner) &&
+		      owner_on_cpu(owner);
 	}
-
-	/*
-	 * As lock holder preemption issue, we both skip spinning if task is not
-	 * on cpu or its cpu is preempted
-	 */
-	ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-done:
 	rcu_read_unlock();
 	return ret;
 }
@@ -398,8 +400,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 		 * abort spinning when need_resched or owner is not running or
 		 * owner's cpu is preempted.
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (need_resched() || !owner_on_cpu(owner)) {
 			rcu_read_unlock();
 			return false;
 		}
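
The net effect of the refactor is easier to see outside the diff: both call sites now share one predicate instead of open-coding the on_cpu / vcpu_is_preempted test. The sketch below exercises that predicate in a standalone userspace program; struct task_struct, task_cpu() and vcpu_is_preempted() here are simplified stand-ins for the kernel definitions, not the real implementations:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel types and helpers (illustrative only). */
struct task_struct {
	bool on_cpu;	/* true while the task is executing on some CPU */
	int cpu;	/* the CPU the task last ran on */
};

static int task_cpu(const struct task_struct *t)
{
	return t->cpu;
}

/* Stand-in for the paravirt hook; pretend CPU 2 hosts a preempted vCPU. */
static bool vcpu_is_preempted(int cpu)
{
	return cpu == 2;
}

/* The consolidated check: spinning only pays off if the owner is running. */
static bool owner_on_cpu(struct task_struct *owner)
{
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

int main(void)
{
	struct task_struct running   = { .on_cpu = true,  .cpu = 0 };
	struct task_struct preempted = { .on_cpu = true,  .cpu = 2 };
	struct task_struct sleeping  = { .on_cpu = false, .cpu = 1 };

	printf("running owner:  spin=%d\n", owner_on_cpu(&running));   /* 1 */
	printf("preempted vCPU: spin=%d\n", owner_on_cpu(&preempted)); /* 0 */
	printf("sleeping owner: spin=%d\n", owner_on_cpu(&sleeping));  /* 0 */
	return 0;
}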