@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 }
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-		    unsigned long ipi_bitmap_high, int min,
+		    unsigned long ipi_bitmap_high, u32 min,
 		    unsigned long icr, int op_64_bit)
 {
 	int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 	rcu_read_lock();
 	map = rcu_dereference(kvm->arch.apic_map);
 
+	if (min > map->max_apic_id)
+		goto out;
 	/* Bits above cluster_size are masked in the caller. */
-	for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
-		vcpu = map->phys_map[min + i]->vcpu;
-		count += kvm_apic_set_irq(vcpu, &irq, NULL);
+	for_each_set_bit(i, &ipi_bitmap_low,
+		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+		if (map->phys_map[min + i]) {
+			vcpu = map->phys_map[min + i]->vcpu;
+			count += kvm_apic_set_irq(vcpu, &irq, NULL);
+		}
 	}
 
 	min += cluster_size;
-	for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
-		vcpu = map->phys_map[min + i]->vcpu;
-		count += kvm_apic_set_irq(vcpu, &irq, NULL);
+
+	if (min > map->max_apic_id)
+		goto out;
+
+	for_each_set_bit(i, &ipi_bitmap_high,
+		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+		if (map->phys_map[min + i]) {
+			vcpu = map->phys_map[min + i]->vcpu;
+			count += kvm_apic_set_irq(vcpu, &irq, NULL);
+		}
 	}
 
+out:
 	rcu_read_unlock();
 	return count;
 }
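
The essence of the fix is to bound the bitmap walk so that min + i can never index past the last valid slot of the sparse map, and to skip NULL holes in that map. The following is a minimal userspace sketch of that same clamping pattern, not KVM code: demo_map, base, and max_id are hypothetical stand-ins for map->phys_map, the guest-supplied min, and map->max_apic_id.

/*
 * Sketch of the bounds-clamping pattern from the patch above.
 * All names here are illustrative stand-ins, not kernel identifiers.
 */
#include <stdio.h>
#include <limits.h>

#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Sparse map with holes, analogous to map->phys_map. */
	const char *demo_map[8] = {
		"vcpu0", NULL, "vcpu2", "vcpu3", NULL, "vcpu5", NULL, "vcpu7"
	};
	unsigned int max_id = 7;	/* analogue of map->max_apic_id */
	unsigned int base = 5;		/* analogue of the guest-controlled "min" */
	unsigned long bitmap = 0xffUL;	/* guest-controlled IPI bitmap */
	unsigned int i, limit;

	/* First guard from the patch: base entirely past the map. */
	if (base > max_id)
		return 0;

	/* Clamp the walk so base + i can never exceed max_id. */
	limit = min_u32(DEMO_BITS_PER_LONG, max_id - base + 1);

	for (i = 0; i < limit; i++) {
		if (!(bitmap & (1UL << i)))
			continue;
		/* Skip holes, as the patch's NULL check does. */
		if (demo_map[base + i])
			printf("IPI to %s\n", demo_map[base + i]);
	}
	return 0;
}

With base = 5 and all eight low bits set, this prints only vcpu5 and vcpu7: the clamp stops the walk at index 7 and the NULL check skips the hole at index 6, which is exactly how the patch prevents a malicious guest from steering min + i out of bounds or dereferencing an unpopulated map entry.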