@@ -589,6 +589,7 @@ out:
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_32
 __visible bool __kvm_vcpu_is_preempted(long cpu)
 {
 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
@@ -597,6 +598,29 @@ __visible bool __kvm_vcpu_is_preempted(long cpu)
 }
 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
 
+#else
+
+#include <asm/asm-offsets.h>
+
+extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
+
+/*
+ * Hand-optimized version for x86-64 that avoids saving and restoring
+ * 8 64-bit registers to/from the stack.
+ */
+asm(
+".pushsection .text;"
+".global __raw_callee_save___kvm_vcpu_is_preempted;"
+".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+"__raw_callee_save___kvm_vcpu_is_preempted:"
+"movq	__per_cpu_offset(,%rdi,8), %rax;"
+"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+"setne	%al;"
+"ret;"
+".popsection");
+
+#endif
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */