@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -339,7 +339,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -348,7 +348,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;
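
Why the conversion matters: on PREEMPT_RT kernels an ordinary spinlock_t is turned into a sleeping lock, whereas a raw_spinlock_t always remains a genuine spinning lock. Since lpi_list_lock is taken with interrupts disabled, switching it to a raw spinlock keeps those critical sections valid under RT. As a minimal sketch of the locking pattern only (not the vgic code itself; struct lpi_list and lpi_list_add are hypothetical names):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the distributor's LPI bookkeeping. */
struct lpi_list {
	raw_spinlock_t lock;	/* stays a spinning lock even on PREEMPT_RT */
	struct list_head head;
	int count;
};

static void lpi_list_add(struct lpi_list *ll, struct list_head *entry)
{
	unsigned long flags;

	/*
	 * Disable local interrupts and spin on the raw lock. Unlike a
	 * plain spinlock_t under PREEMPT_RT, raw_spin_lock_irqsave()
	 * never sleeps, so it is safe in contexts that must not sleep.
	 */
	raw_spin_lock_irqsave(&ll->lock, flags);
	list_add_tail(entry, &ll->head);
	ll->count++;
	raw_spin_unlock_irqrestore(&ll->lock, flags);
}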