@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
+static bool native_vcpu_is_preempted(int cpu)
+{
+	return 0;
+}
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
 	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
+	.vcpu_is_preempted = native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
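
For reference, a minimal sketch of how a hypervisor backend might override the new hook at init time. Everything below (the example_* names and the per-vCPU preempted flag) is a hypothetical illustration under assumed conventions, not part of this patch:

/* Hypothetical per-vCPU state a hypervisor could publish to the guest. */
struct example_steal_time {
	u8 preempted;
};
static struct example_steal_time example_steal_info[NR_CPUS];

/* Report whether the host has preempted the given vCPU. */
static bool example_vcpu_is_preempted(int cpu)
{
	return READ_ONCE(example_steal_info[cpu].preempted);
}

static void __init example_spinlock_init(void)
{
	/* Replace the native no-op with the real preemption check. */
	pv_lock_ops.vcpu_is_preempted = example_vcpu_is_preempted;
}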