@@ -454,6 +454,113 @@ static void __init sev_map_percpu_data(void)
 }
 
 #ifdef CONFIG_SMP
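+/*
+ * A single KVM_HC_SEND_IPI hypercall passes the destination bitmap in
+ * two unsigned long arguments, so one call can cover at most
+ * 2 * BITS_PER_LONG APIC IDs starting at a base APIC ID.
+ */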
+#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
+
+static void __send_ipi_mask(const struct cpumask *mask, int vector)
+{
+	unsigned long flags;
+	int cpu, apic_id, icr;
+	int min = 0, max = 0;
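+	/* Cluster bitmap of 2 * BITS_PER_LONG bits: 128 wide on 64-bit kernels. */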
+#ifdef CONFIG_X86_64
+	__uint128_t ipi_bitmap = 0;
+#else
+	u64 ipi_bitmap = 0;
+#endif
+
+	if (cpumask_empty(mask))
+		return;
+
+	local_irq_save(flags);
+
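+	/* NMI uses a dedicated ICR delivery mode; other vectors are fixed IPIs. */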
+	switch (vector) {
+	default:
+		icr = APIC_DM_FIXED | vector;
+		break;
+	case NMI_VECTOR:
+		icr = APIC_DM_NMI;
+		break;
+	}
+
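+	/*
+	 * Build a bitmap of destination APIC IDs relative to 'min'.  The
+	 * window may slide down (shifting the bitmap up) or grow upward as
+	 * long as it spans no more than KVM_IPI_CLUSTER_SIZE IDs; an ID
+	 * beyond that flushes the current cluster and starts a new one.
+	 */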
+	for_each_cpu(cpu, mask) {
+		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
+		if (!ipi_bitmap) {
+			min = max = apic_id;
+		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
+			ipi_bitmap <<= min - apic_id;
+			min = apic_id;
+		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+			max = apic_id < max ? max : apic_id;
+		} else {
+			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+			min = max = apic_id;
+			ipi_bitmap = 0;
+		}
+		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
+	}
+
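+	/* Flush the last, possibly partial, cluster. */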
+	if (ipi_bitmap) {
+		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+	}
+
+	local_irq_restore(flags);
+}
+
+static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
+{
+	__send_ipi_mask(mask, vector);
+}
+
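+/* Like kvm_send_ipi_mask(), but with the sending CPU cleared from the mask. */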
+static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
+{
+	unsigned int this_cpu = smp_processor_id();
+	struct cpumask new_mask;
+	const struct cpumask *local_mask;
+
+	cpumask_copy(&new_mask, mask);
+	cpumask_clear_cpu(this_cpu, &new_mask);
+	local_mask = &new_mask;
+	__send_ipi_mask(local_mask, vector);
+}
+
+static void kvm_send_ipi_allbutself(int vector)
+{
+	kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
+}
+
+static void kvm_send_ipi_all(int vector)
+{
+	__send_ipi_mask(cpu_online_mask, vector);
+}
+
+/*
+ * Set the IPI entry points
+ */
+static void kvm_setup_pv_ipi(void)
+{
+	apic->send_IPI_mask = kvm_send_ipi_mask;
+	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+	apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
+	apic->send_IPI_all = kvm_send_ipi_all;
+	pr_info("KVM setup pv IPIs\n");
+}
+
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
 	native_smp_prepare_cpus(max_cpus);
@@ -626,6 +733,11 @@ static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
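+	/* Install PV IPI hooks only when the host advertises KVM_FEATURE_PV_SEND_IPI. */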
+#if defined(CONFIG_SMP)
+	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+		kvm_setup_pv_ipi();
+#endif
 }
 
 static void __init kvm_init_platform(void)