@@ -3008,6 +3008,73 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	goto out_srcu;
 }
 
+#ifdef CONFIG_KVM_XICS
+/*
+ * Allocate a per-core structure for managing state about which cores are
+ * running in the host versus the guest and for exchanging data between
+ * real-mode KVM and CPUs running in the host.
+ * This is only done for the first VM.
+ * The allocated structure stays even if all VMs have stopped.
+ * It is only freed when the kvm-hv module is unloaded.
+ * It's OK for this routine to fail; we just don't support host
+ * core operations like redirecting H_IPI wakeups.
+ */
+void kvmppc_alloc_host_rm_ops(void)
+{
+	struct kvmppc_host_rm_ops *ops;
+	unsigned long l_ops;
+	int cpu, core;
+	int size;
+
+	/* Not the first time here? */
+	if (kvmppc_host_rm_ops_hv != NULL)
+		return;
+
+	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
+	if (!ops)
+		return;
+
+	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
+	ops->rm_core = kzalloc(size, GFP_KERNEL);
+
+	if (!ops->rm_core) {
+		kfree(ops);
+		return;
+	}
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
+		if (!cpu_online(cpu))
+			continue;
+
+		core = cpu >> threads_shift;
+		ops->rm_core[core].rm_state.in_host = 1;
+	}
+
+	/*
+	 * Make the contents of the kvmppc_host_rm_ops structure visible
+	 * to other CPUs before we assign it to the global variable.
+	 * Do an atomic assignment (no locks used here), but if someone
+	 * beats us to it, just free our copy and return.
+	 */
+	smp_wmb();
+	l_ops = (unsigned long) ops;
+
+	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
+		kfree(ops->rm_core);
+		kfree(ops);
+	}
+}
+
+void kvmppc_free_host_rm_ops(void)
+{
+	if (kvmppc_host_rm_ops_hv) {
+		kfree(kvmppc_host_rm_ops_hv->rm_core);
+		kfree(kvmppc_host_rm_ops_hv);
+		kvmppc_host_rm_ops_hv = NULL;
+	}
+}
+#endif
+
 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 {
 	unsigned long lpcr, lpid;
@@ -3020,6 +3087,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 		return -ENOMEM;
 	kvm->arch.lpid = lpid;
 
+	kvmppc_alloc_host_rm_ops();
+
 	/*
 	 * Since we don't flush the TLB when tearing down a VM,
 	 * and this lpid might have previously been used,
@@ -3253,6 +3322,7 @@ static int kvmppc_book3s_init_hv(void)
 
 static void kvmppc_book3s_exit_hv(void)
 {
+	kvmppc_free_host_rm_ops();
 	kvmppc_hv_ops = NULL;
 }
 
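
Note (not part of the patch): kvmppc_alloc_host_rm_ops() uses a lock-free
publish-once pattern: build the structure privately, order its stores with
smp_wmb(), publish the pointer with a single cmpxchg64(), and free the local
copy if another CPU won the race. The standalone C11 userspace sketch below
illustrates that pattern, with release/acquire atomics standing in for
smp_wmb()/cmpxchg64(); all names here (demo_ops, global_ops, alloc_ops_once)
are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ops {
	int *per_core;			/* stands in for ops->rm_core */
	int ncores;
};

/* Plays the role of kvmppc_host_rm_ops_hv. */
static _Atomic(struct demo_ops *) global_ops;

static void alloc_ops_once(int ncores)
{
	struct demo_ops *ops, *expected = NULL;
	int core;

	/* Not the first time here? */
	if (atomic_load_explicit(&global_ops, memory_order_acquire))
		return;

	ops = calloc(1, sizeof(*ops));
	if (!ops)
		return;
	ops->per_core = calloc(ncores, sizeof(int));
	if (!ops->per_core) {
		free(ops);
		return;
	}
	ops->ncores = ncores;
	for (core = 0; core < ncores; core++)
		ops->per_core[core] = 1;	/* like rm_state.in_host = 1 */

	/*
	 * Release ordering takes the place of smp_wmb(): the structure's
	 * contents become visible before the pointer does. If another
	 * thread already published, drop our copy, exactly as the patch
	 * does when cmpxchg64() returns a non-zero old value.
	 */
	if (!atomic_compare_exchange_strong_explicit(&global_ops, &expected,
				ops, memory_order_release,
				memory_order_relaxed)) {
		free(ops->per_core);
		free(ops);
	}
}

int main(void)
{
	alloc_ops_once(4);
	alloc_ops_once(4);	/* second call is a no-op */
	printf("ncores = %d\n", atomic_load(&global_ops)->ncores);
	return 0;
}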