@@ -281,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	int ret;
 	unsigned long flags;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;
@@ -444,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-					     &pendmask, 1);
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
+						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
 				return ret;
@@ -789,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1370,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going