@@ -364,7 +364,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 		 */
 		barrier();
 
-		if (!owner->on_cpu || need_resched()) {
+		/*
+		 * Use vcpu_is_preempted to detect lock holder preemption issue.
+		 */
+		if (!owner->on_cpu || need_resched() ||
+				vcpu_is_preempted(task_cpu(owner))) {
 			ret = false;
 			break;
 		}
@@ -389,8 +393,13 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 
 	rcu_read_lock();
 	owner = __mutex_owner(lock);
+
+	/*
+	 * Due to the lock holder preemption issue, skip spinning if the owner
+	 * task is not on a cpu or its cpu is preempted.
+	 */
 	if (owner)
-		retval = owner->on_cpu;
+		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 	rcu_read_unlock();
 
 	/*
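Without a paravirt-aware backend the generic vcpu_is_preempted() fallback simply reports false, so on bare metal the new checks are a no-op; only in a guest whose hypervisor exposes a per-vCPU preemption hint does the spinner give up early instead of burning cycles waiting on a descheduled vCPU. The user-space sketch below illustrates the decision the hunks above implement; the owner struct, the vcpu_preempted[] flag and should_keep_spinning() are hypothetical illustrations, not kernel code.

/*
 * Illustrative user-space sketch of the decision added above.
 * The owner struct, the per-vCPU flag and should_keep_spinning() are
 * hypothetical stand-ins for task_struct::on_cpu, the paravirt
 * vcpu_is_preempted() backend and the spin-loop condition.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical flag a hypervisor-aware backend could set per vCPU. */
static bool vcpu_preempted[NR_CPUS];

/* Stand-in for vcpu_is_preempted(): report whether a vCPU lost its pCPU. */
static bool vcpu_is_preempted(int cpu)
{
	return vcpu_preempted[cpu];
}

struct owner {
	bool on_cpu;	/* mirrors task_struct::on_cpu */
	int cpu;	/* mirrors task_cpu(owner) */
};

/* Spin only while the owner is running and its vCPU has not been preempted. */
static bool should_keep_spinning(const struct owner *owner, bool need_resched)
{
	return owner->on_cpu && !need_resched &&
	       !vcpu_is_preempted(owner->cpu);
}

int main(void)
{
	struct owner owner = { .on_cpu = true, .cpu = 2 };

	printf("spin: %d\n", should_keep_spinning(&owner, false));	/* 1 */
	vcpu_preempted[2] = true;	/* host preempts the owner's vCPU */
	printf("spin: %d\n", should_keep_spinning(&owner, false));	/* 0 */
	return 0;
}

The same reasoning applies to both hunks: mutex_can_spin_on_owner() declines to start spinning at all, while mutex_spin_on_owner() bails out of an ongoing spin, as soon as the owner's vCPU is reported preempted.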