@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		local_irq_disable();
+		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 		r = 1;
@@ -688,13 +688,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	local_irq_disable();
 	s = kvmppc_prepare_to_enter(vcpu);
 	if (s <= 0) {
-		local_irq_enable();
 		ret = s;
 		goto out;
 	}
+	/* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
@@ -1187,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * aren't already exiting to userspace for some other reason.
 	 */
 	if (!(r & RESUME_HOST)) {
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 	}
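
All three hunks lean on the same convention: kvmppc_prepare_to_enter() now performs the interrupt disabling itself, so its callers drop their local_irq_disable()/local_irq_enable() pairs and treat a positive return as "interrupts are hard-disabled, enter the guest" and a non-positive return as "bail out to the host". A minimal sketch of that convention, assuming the hard_irq_disable() helper used in the first hunk; the function name and body below are illustrative only, not the patch's actual implementation:

```c
#include <linux/kvm_host.h>	/* struct kvm_vcpu */
#include <linux/sched.h>	/* signal_pending(), current */
#include <asm/hw_irq.h>		/* hard_irq_disable() on powerpc */

/*
 * Illustrative-only sketch of the contract the callers above rely on:
 * interrupts are hard-disabled before returning, a return value > 0
 * means the guest can be entered immediately, and a return value <= 0
 * means the caller must exit to the host (re-enabling interrupts on
 * that path as it sees fit).
 */
static int prepare_to_enter_sketch(struct kvm_vcpu *vcpu)
{
	hard_irq_disable();		/* interrupts now hard-disabled */

	if (signal_pending(current))
		return -EINTR;		/* caller bails out to the host */

	return 1;			/* ready to enter the guest */
}
```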