@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+	unsigned long flags;
 	int ret;
 
 	/* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->intid = intid;
 	irq->target_vcpu = vcpu;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
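[The three hunks above convert dist->lpi_list_lock to the irqsave locking variants, which make the lock safe to take regardless of the caller's interrupt state. A minimal illustrative sketch of the pattern, not part of the patch itself:

	unsigned long flags;

	/* Save the current local interrupt state in 'flags' and disable
	 * interrupts before taking the lock; the unlock restores the
	 * saved state. This prevents a deadlock where the lock holder
	 * is interrupted by code that tries to take the same lock.
	 */
	spin_lock_irqsave(&dist->lpi_list_lock, flags);
	/* ... critical section over dist->lpi_list_head ... */
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
]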
@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	int ret;
 	unsigned long flags;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;
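[This hunk, and the remaining ones below, switch guest-memory reads from kvm_read_guest() to kvm_read_guest_lock(). kvm_read_guest() dereferences the kvm memslot array, which is protected by kvm->srcu, so the caller must hold the SRCU read lock. The wrapper's definition is not part of this excerpt; a sketch of such a helper, assuming it only adds the SRCU read-side critical section:

	static inline int kvm_read_guest_lock(struct kvm *kvm, gpa_t gpa,
					      void *data, unsigned long len)
	{
		/* Hold the SRCU read lock across the memslot lookup
		 * performed by kvm_read_guest().
		 */
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		int ret = kvm_read_guest(kvm, gpa, data, len);

		srcu_read_unlock(&kvm->srcu, srcu_idx);

		return ret;
	}
]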
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_irq *irq;
+	unsigned long flags;
 	u32 *intids;
 	int irq_count, i = 0;
 
@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;
@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
 	int ret = 0;
+	unsigned long flags;
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->target_vcpu = vcpu;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		struct its_vlpi_map map;
@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-					     &pendmask, 1);
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
+						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
 				return ret;
@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going
@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
 	int next_offset;
 	size_t byte_offset;
 
-	ret = kvm_read_guest(kvm, gpa, entry, esz);
+	ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
 	if (ret)
 		return ret;
 
@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 	int ret;
 
 	BUG_ON(esz > sizeof(val));
-	ret = kvm_read_guest(kvm, gpa, &val, esz);
+	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
 	if (ret)
 		return ret;
 	val = le64_to_cpu(val);