@@ -6746,6 +6746,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_invvpid(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -6791,6 +6797,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
 	[EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
 	[EXIT_REASON_INVEPT] = handle_invept,
+	[EXIT_REASON_INVVPID] = handle_invvpid,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -7026,7 +7033,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
-	case EXIT_REASON_INVEPT:
+	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
 		/*
 		 * VMX instructions trap unconditionally. This allows L1 to
 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!