@@ -316,21 +316,24 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_irq *irq;
 	u32 *intids;
-	int irq_count = dist->lpi_list_count, i = 0;
+	int irq_count, i = 0;
 
 	/*
-	 * We use the current value of the list length, which may change
-	 * after the kmalloc. We don't care, because the guest shouldn't
-	 * change anything while the command handling is still running,
-	 * and in the worst case we would miss a new IRQ, which one wouldn't
-	 * expect to be covered by this command anyway.
+	 * There is an obvious race between allocating the array and LPIs
+	 * being mapped/unmapped. If we ended up here as a result of a
+	 * command, we're safe (locks are held, preventing another
+	 * command). If coming from another path (such as enabling LPIs),
+	 * we must be careful not to overrun the array.
	 */
+	irq_count = READ_ONCE(dist->lpi_list_count);
 	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
 	if (!intids)
 		return -ENOMEM;
 
 	spin_lock(&dist->lpi_list_lock);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+		if (i == irq_count)
+			break;
 		/* We don't need to "get" the IRQ, as we hold the list lock. */
 		if (irq->target_vcpu != vcpu)
 			continue;
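
For illustration, a minimal userspace sketch of the same snapshot-then-bound
pattern, using C11 atomics and a pthread mutex in place of READ_ONCE() and
lpi_list_lock. All names here (node, node_count, copy_ids, ...) are
hypothetical and not part of the kernel source:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct node {
		int id;
		struct node *next;
	};

	static struct node *head;		/* list protected by list_lock */
	static atomic_int node_count;		/* may change before list_lock is taken */
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Copy node ids into a freshly allocated array; returns the number copied. */
	static int copy_ids(int **out)
	{
		/* Snapshot the count exactly once; the list may still grow afterwards. */
		int snap = atomic_load(&node_count);
		int *ids = malloc(sizeof(*ids) * (size_t)snap);
		int i = 0;

		if (!ids && snap)	/* malloc(0) may legally return NULL */
			return -1;

		pthread_mutex_lock(&list_lock);
		for (struct node *n = head; n; n = n->next) {
			if (i == snap)	/* never write past the snapshot-sized array */
				break;
			ids[i++] = n->id;
		}
		pthread_mutex_unlock(&list_lock);

		*out = ids;
		return i;
	}

As in the patch, the count is read exactly once before the allocation, and the
copy loop is bounded by that snapshot rather than by the live count, so a
concurrent insertion can at worst cause a new entry to be missed, never a
buffer overrun.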