@@ -3813,12 +3813,15 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
+	kvmppc_rmap_reset(kvm);
+	kvm->arch.process_table = 0;
+	/* Mutual exclusion with kvm_unmap_hva_range etc. */
+	spin_lock(&kvm->mmu_lock);
+	kvm->arch.radix = 0;
+	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_radix(kvm);
 	kvmppc_update_lpcr(kvm, LPCR_VPM1,
 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-	kvmppc_rmap_reset(kvm);
-	kvm->arch.radix = 0;
-	kvm->arch.process_table = 0;
 	return 0;
 }
 
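Why the flag flip moved under the lock: kvm_unmap_hva_range and the other
MMU notifier handlers pick the HPT or radix path based on kvm->arch.radix,
so the value may only change while mmu_lock is held, at a point where the
structures for both settings are still usable. Below is a minimal,
self-contained userspace sketch of that pattern; struct table, unmap_one()
and switch_to_radix() are illustrative stand-ins (a pthread mutex plays
the role of mmu_lock), not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the HPT / radix translation structures. */
struct table { const char *kind; };

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static int radix;                  /* mode flag, guarded by mmu_lock */
static struct table *hpt, *rtab;   /* the active one is always valid */

/* Reader: sample the flag and use the matching table under the lock,
 * the way kvm_unmap_hva_range consults kvm->arch.radix. */
static void unmap_one(void)
{
	pthread_mutex_lock(&mmu_lock);
	printf("unmap via %s table\n", radix ? rtab->kind : hpt->kind);
	pthread_mutex_unlock(&mmu_lock);
}

/* Writer: set up the new table, flip the flag under the lock, and only
 * then free the old table.  A reader that won mmu_lock before the flip
 * still sees an intact old table; a later one sees the new flag and the
 * already-valid new table. */
static void switch_to_radix(void)
{
	rtab = malloc(sizeof(*rtab));
	rtab->kind = "radix";
	pthread_mutex_lock(&mmu_lock);
	radix = 1;
	pthread_mutex_unlock(&mmu_lock);
	free(hpt);                     /* no reader can choose it any more */
	hpt = NULL;
}

int main(void)
{
	hpt = malloc(sizeof(*hpt));
	hpt->kind = "HPT";
	unmap_one();                   /* takes the HPT path */
	switch_to_radix();
	unmap_one();                   /* takes the radix path */
	return 0;
}
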
@@ -3831,10 +3834,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 	if (err)
 		return err;
 
+	kvmppc_rmap_reset(kvm);
+	/* Mutual exclusion with kvm_unmap_hva_range etc. */
+	spin_lock(&kvm->mmu_lock);
+	kvm->arch.radix = 1;
+	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_hpt(&kvm->arch.hpt);
 	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-	kvm->arch.radix = 1;
 	return 0;
 }
 
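Both conversions now follow the same ordering: reset the rmap arrays and
prepare state for the new mode first, flip kvm->arch.radix under mmu_lock,
and only afterwards free the old mode's structures (kvmppc_free_radix()
above, kvmppc_free_hpt() here). With the previous placement, where the
flag changed only after the free, a concurrent kvm_unmap_hva_range could
still pick the path for the old mode while its structures were being torn
down.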