@@ -235,39 +235,12 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
 
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
 
-	/* Allocate new kernel and user ASIDs if needed */
-
 	local_irq_save(flags);
 
-	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-		vcpu->arch.guest_kernel_asid[cpu] =
-			vcpu->arch.guest_kernel_mm.context.asid[cpu];
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
-	}
-
-	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
-						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-		vcpu->arch.guest_user_asid[cpu] =
-			vcpu->arch.guest_user_mm.context.asid[cpu];
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-			  vcpu->arch.guest_user_asid[cpu]);
-	}
-
 	if (vcpu->arch.last_sched_cpu != cpu) {
 		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
 			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
@@ -279,25 +252,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_mips_migrate_count(vcpu);
 	}
 
-	/*
-	 * If we preempted while the guest was executing, then reload the ASID
-	 * based on the mode of the Guest (Kernel/User)
-	 */
-	if (current->flags & PF_VCPU) {
-		if (KVM_GUEST_KERNEL_MODE(vcpu))
-			write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
-					 asid_mask);
-		else
-			write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
-					 asid_mask);
-		ehb();
-	}
-
 	/* restore guest state to registers */
 	kvm_mips_callbacks->vcpu_load(vcpu, cpu);
 
 	local_irq_restore(flags);
-
 }
 
 /* ASID can change if another task is scheduled during preemption */
@@ -314,15 +272,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	/* save guest state in registers */
 	kvm_mips_callbacks->vcpu_put(vcpu, cpu);
 
-	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     asid_version_mask(cpu))) {
-		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
-			  cpu_context(cpu, current->mm));
-		drop_mmu_context(current->mm, cpu);
-	}
-	write_c0_entryhi(cpu_asid(cpu, current->mm));
-	ehb();
-
 	local_irq_restore(flags);
 }
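
For context: with these hunks applied, kvm_arch_vcpu_load() and kvm_arch_vcpu_put() no longer touch guest ASIDs or EntryHi at all; the only remaining hooks are kvm_mips_callbacks->vcpu_load() and ->vcpu_put(). The sketch below shows where the removed logic would plausibly land, assuming the implementation-side vcpu_load callback absorbs it. The function name and int return type are assumptions; the ASID-regeneration idiom itself is taken from the lines removed above.

/*
 * Sketch only -- not part of this patch.  Assumed destination for the
 * logic removed from kvm_arch_vcpu_load().  Runs with IRQs disabled,
 * since the caller still wraps kvm_mips_callbacks->vcpu_load() in
 * local_irq_save()/local_irq_restore().
 */
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

	/* Allocate a new guest kernel ASID if the version has rolled over */
	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
	}
	/* ... and the same check for guest_user_asid / guest_user_mm ... */

	/*
	 * If we were preempted while the guest was executing, reload EntryHi
	 * with the ASID for the mode (kernel/user) the guest was in.
	 */
	if (current->flags & PF_VCPU) {
		if (KVM_GUEST_KERNEL_MODE(vcpu))
			write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
					 asid_mask);
		else
			write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
					 asid_mask);
		ehb();	/* clear the execution hazard from the mtc0 */
	}

	return 0;
}

Keeping the interrupt disabling in the arch-generic caller means each implementation callback can manipulate EntryHi and the ASID caches without re-deriving its own locking, which is presumably why local_irq_save()/restore() survive the diff while everything ASID-related moves out.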