@@ -9693,12 +9693,16 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	 * 'always'
 	 */
 	if (static_branch_likely(&vmx_l1d_flush_cond)) {
+		bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
+
 		/*
 		 * Clear the flush bit, it gets set again either from
 		 * vcpu_run() or from one of the unsafe VMEXIT
 		 * handlers.
 		 */
 		vcpu->arch.l1tf_flush_l1d = false;
+		if (!flush_l1d)
+			return;
 	}
 
 	vcpu->stat.l1d_flush++;
@@ -10228,10 +10232,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
-	if (static_branch_unlikely(&vmx_l1d_should_flush)) {
-		if (vcpu->arch.l1tf_flush_l1d)
-			vmx_l1d_flush(vcpu);
-	}
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
 
 	asm(
 		/* Store host registers */