|
@@ -2475,7 +2475,8 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
|
|
|
SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
SECONDARY_EXEC_WBINVD_EXITING |
|
|
|
- SECONDARY_EXEC_XSAVES;
|
|
|
+ SECONDARY_EXEC_XSAVES |
|
|
|
+ SECONDARY_EXEC_PCOMMIT;
|
|
|
|
|
|
if (enable_ept) {
|
|
|
/* nested EPT: emulate EPT also to L1 */
|
|
@@ -3016,7 +3017,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
|
|
|
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
SECONDARY_EXEC_SHADOW_VMCS |
|
|
|
SECONDARY_EXEC_XSAVES |
|
|
|
- SECONDARY_EXEC_ENABLE_PML;
|
|
|
+ SECONDARY_EXEC_ENABLE_PML |
|
|
|
+ SECONDARY_EXEC_PCOMMIT;
|
|
|
if (adjust_vmx_controls(min2, opt2,
|
|
|
MSR_IA32_VMX_PROCBASED_CTLS2,
|
|
|
&_cpu_based_2nd_exec_control) < 0)
|
|
@@ -4571,6 +4573,9 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
|
|
|
/* PML is enabled/disabled in creating/destorying vcpu */
|
|
|
exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
|
|
|
|
|
|
+ /* Currently, we allow L1 guest to directly run pcommit instruction. */
|
|
|
+ exec_control &= ~SECONDARY_EXEC_PCOMMIT;
|
|
|
+
|
|
|
return exec_control;
|
|
|
}
|
|
|
|
|
@@ -4614,10 +4619,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|
|
|
|
|
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
|
|
|
|
|
|
- if (cpu_has_secondary_exec_ctrls()) {
|
|
|
+ if (cpu_has_secondary_exec_ctrls())
|
|
|
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
|
|
|
vmx_secondary_exec_control(vmx));
|
|
|
- }
|
|
|
|
|
|
if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
|
|
|
vmcs_write64(EOI_EXIT_BITMAP0, 0);
|
|
@@ -7212,6 +7216,13 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
+static int handle_pcommit(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ /* We never intercept the pcommit instruction for the L1 guest. */
|
|
|
+ WARN_ON(1);
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* The exit handlers return 1 if the exit was handled fully and guest execution
|
|
|
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
|
|
@@ -7262,6 +7273,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
|
|
|
[EXIT_REASON_XSAVES] = handle_xsaves,
|
|
|
[EXIT_REASON_XRSTORS] = handle_xrstors,
|
|
|
[EXIT_REASON_PML_FULL] = handle_pml_full,
|
|
|
+ [EXIT_REASON_PCOMMIT] = handle_pcommit,
|
|
|
};
|
|
|
|
|
|
static const int kvm_vmx_max_exit_handlers =
|
|
@@ -7563,6 +7575,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
|
|
|
* the XSS exit bitmap in vmcs12.
|
|
|
*/
|
|
|
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
|
|
|
+ case EXIT_REASON_PCOMMIT:
|
|
|
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
|
|
|
default:
|
|
|
return true;
|
|
|
}
|
|
@@ -8697,6 +8711,15 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
|
|
|
if (best)
|
|
|
best->ebx &= ~bit(X86_FEATURE_INVPCID);
|
|
|
}
|
|
|
+
|
|
|
+ if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
|
|
|
+ if (guest_cpuid_has_pcommit(vcpu))
|
|
|
+ vmx->nested.nested_vmx_secondary_ctls_high |=
|
|
|
+ SECONDARY_EXEC_PCOMMIT;
|
|
|
+ else
|
|
|
+ vmx->nested.nested_vmx_secondary_ctls_high &=
|
|
|
+ ~SECONDARY_EXEC_PCOMMIT;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
|
|
@@ -9310,7 +9333,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|
|
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
SECONDARY_EXEC_RDTSCP |
|
|
|
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
- SECONDARY_EXEC_APIC_REGISTER_VIRT);
|
|
|
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
+ SECONDARY_EXEC_PCOMMIT);
|
|
|
if (nested_cpu_has(vmcs12,
|
|
|
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
|
|
|
exec_control |= vmcs12->secondary_vm_exec_control;
|