@@ -35,6 +35,7 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
+#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -69,6 +70,13 @@
 
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
+#if defined(CONFIG_PPC_64K_PAGES)
+#define MPP_BUFFER_ORDER	0
+#elif defined(CONFIG_PPC_4K_PAGES)
+#define MPP_BUFFER_ORDER	3
+#endif
+
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
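Under either page-size config the order works out to a buffer of at least 32KB: 2^3 * 4KB = 32KB with CONFIG_PPC_4K_PAGES, and 2^0 * 64KB = 64KB with CONFIG_PPC_64K_PAGES, where a single page is already the smallest possible allocation. (The newly included asm/cache.h is presumably what supplies the logmpp() helper used further down.) A compile-time check of that arithmetic could look like the following sketch; it is not part of the patch, would have to sit inside a function body, and assumes 32KB is the minimum the Micro Partition Prefetch engine needs for a full L2 log:

	/* Sketch only: PAGE_SIZE << MPP_BUFFER_ORDER must cover the
	 * assumed 32KB MPP log; this holds for both configs above. */
	BUILD_BUG_ON((PAGE_SIZE << MPP_BUFFER_ORDER) < 32 * 1024);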
@@ -1320,6 +1328,13 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	vcore->first_vcpuid = core * threads_per_subcore;
 	vcore->kvm = kvm;
 
+	vcore->mpp_buffer_is_valid = false;
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		vcore->mpp_buffer = (void *)__get_free_pages(
+			GFP_KERNEL|__GFP_ZERO,
+			MPP_BUFFER_ORDER);
+
 	return vcore;
 }
 
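The allocation is best-effort: on CPUs without CPU_FTR_ARCH_207S (i.e. pre-POWER8) mpp_buffer is simply left NULL, and __get_free_pages() returning 0 on failure leaves it NULL as well, which is tolerated because every later use is guarded by a NULL check. The buddy allocator also returns blocks naturally aligned to their own size, which is what lets the helpers below mask the physical address with PPC_MPPE_ADDRESS_MASK without discarding significant bits. A paranoid runtime check of that alignment assumption, placed at the end of kvmppc_vcore_create(), could read (a sketch, not in the patch):

	/* Sketch only: the 2^MPP_BUFFER_ORDER-page block should be
	 * naturally aligned to its own size. */
	WARN_ON(vcore->mpp_buffer &&
		((unsigned long)vcore->mpp_buffer &
		 ((PAGE_SIZE << MPP_BUFFER_ORDER) - 1)));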
@@ -1586,6 +1601,33 @@ static int on_primary_thread(void)
 	return 1;
 }
 
+static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
+{
+	phys_addr_t phy_addr, mpp_addr;
+
+	phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
+	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
+
+	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
+	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
+
+	vc->mpp_buffer_is_valid = true;
+}
+
+static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
+{
+	phys_addr_t phy_addr, mpp_addr;
+
+	phy_addr = virt_to_phys(vc->mpp_buffer);
+	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
+
+	/* We must abort any in-progress save operations to ensure
+	 * the table is valid so that the prefetch engine knows when
+	 * to stop prefetching. */
+	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
+	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
+}
+
 /*
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
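The two helpers mirror each other: before starting a log, any in-flight prefetch is aborted (PPC_MPPR_FETCH_ABORT), and before starting a prefetch, any in-flight log is aborted (PPC_LOGMPP_LOG_ABORT), so the engine is never asked to do both at once. Note also that a restore is only meaningful after mpp_buffer_is_valid has been set by a save, so the very first timeslice of a vcore only logs; each later timeslice prefetches what the previous one saved. Their intended pairing around a guest run looks like the following sketch (illustrative only, with a hypothetical wrapper name; the real call site is kvmppc_run_core() in the next hunk):

	/* Sketch only: per-timeslice pairing of the MPP helpers. */
	static void mpp_around_guest_entry(struct kvmppc_vcore *vc)
	{
		if (vc->mpp_buffer && vc->mpp_buffer_is_valid)
			kvmppc_start_restoring_l2_cache(vc);	/* warm L2 */

		__kvmppc_vcore_entry();		/* run the guest threads */

		if (vc->mpp_buffer)
			kvmppc_start_saving_l2_cache(vc);	/* log L2 */
	}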
@@ -1663,9 +1705,16 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
+	if (vc->mpp_buffer_is_valid)
+		kvmppc_start_restoring_l2_cache(vc);
+
 	__kvmppc_vcore_entry();
 
 	spin_lock(&vc->lock);
+
+	if (vc->mpp_buffer)
+		kvmppc_start_saving_l2_cache(vc);
+
 	/* disable sending of IPIs on virtual external irqs */
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
 		vcpu->cpu = -1;
@@ -2413,8 +2462,14 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
 	long int i;
 
-	for (i = 0; i < KVM_MAX_VCORES; ++i)
+	for (i = 0; i < KVM_MAX_VCORES; ++i) {
+		if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
+			struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+			free_pages((unsigned long)vc->mpp_buffer,
+				   MPP_BUFFER_ORDER);
+		}
 		kfree(kvm->arch.vcores[i]);
+	}
 	kvm->arch.online_vcores = 0;
 }
 
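free_pages() must be passed the same order that was used at allocation time, which is why MPP_BUFFER_ORDER appears both here and in kvmppc_vcore_create(); factoring the free into one helper would keep the two from drifting apart (a sketch, not part of the patch):

	/* Sketch only: pair the free with the allocation order. */
	static void kvmppc_free_mpp_buffer(struct kvmppc_vcore *vc)
	{
		if (vc && vc->mpp_buffer)
			free_pages((unsigned long)vc->mpp_buffer,
				   MPP_BUFFER_ORDER);
	}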