@@ -728,7 +728,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
-		kvm_io_bus_destroy(kvm->buses[i]);
+		if (kvm->buses[i])
+			kvm_io_bus_destroy(kvm->buses[i]);
 		kvm->buses[i] = NULL;
 	}
 	kvm_coalesced_mmio_free(kvm);
@@ -3476,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
@@ -3493,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3543,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
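
The three hunks above share one pattern: every SRCU reader of kvm->buses[] must now tolerate a NULL bus, because a bus can disappear entirely when kvm_io_bus_unregister_dev() fails to allocate the shrunken copy (see the last large hunk below). A minimal sketch of that reader-side contract follows; it is illustrative only, assumes it sits in virt/kvm/kvm_main.c next to the helpers being patched, and the name example_bus_reader is made up:

	/*
	 * Illustrative sketch, not part of this patch: any path that
	 * dereferences kvm->buses[] under SRCU has to handle a NULL bus
	 * and report it the same way the I/O helpers above now do.
	 */
	static int example_bus_reader(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx)
	{
		struct kvm_io_bus *bus;

		bus = srcu_dereference(vcpu->kvm->buses[bus_idx],
				       &vcpu->kvm->srcu);
		if (!bus)		/* bus was torn down after a failed shrink */
			return -ENOMEM;

		/* ... walk bus->range[] as __kvm_io_bus_write()/read() do ... */
		return 0;
	}
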
@@ -3555,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
+	if (!bus)
+		return -ENOMEM;
+
 	/* exclude ioeventfd which is limited by maximum fd */
 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
@@ -3574,45 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			      struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			       struct kvm_io_device *dev)
 {
-	int i, r;
+	int i;
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	/*
-	 * It's possible the bus being released before hand. If so,
-	 * we're done here.
-	 */
 	if (!bus)
-		return 0;
+		return;
 
-	r = -ENOENT;
 	for (i = 0; i < bus->dev_count; i++)
 		if (bus->range[i].dev == dev) {
-			r = 0;
 			break;
 		}
 
-	if (r)
-		return r;
+	if (i == bus->dev_count)
+		return;
 
 	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
+	if (!new_bus) {
+		pr_err("kvm: failed to shrink bus, removing it completely\n");
+		goto broken;
+	}
 
 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
 	new_bus->dev_count--;
 	memcpy(new_bus->range + i, bus->range + i + 1,
 	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
+broken:
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
-	return r;
+	return;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
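
Because kvm_io_bus_unregister_dev() no longer returns an error, callers can stop propagating a failure they could not meaningfully handle anyway. A rough sketch of how a caller simplifies, with a made-up function name and not taken from this patch:

	/*
	 * Sketch only (example_deassign is a hypothetical name): a caller
	 * that used to check and propagate the return value of
	 * kvm_io_bus_unregister_dev() can now just call it and report success.
	 */
	static int example_deassign(struct kvm *kvm, enum kvm_bus bus_idx,
				    struct kvm_io_device *dev)
	{
		kvm_io_bus_unregister_dev(kvm, bus_idx, dev);	/* void now, cannot fail */
		return 0;
	}

The trade-off made in the hunk above is explicit: if shrinking the bus fails, the whole bus is dropped and kvm->buses[bus_idx] becomes NULL rather than leaving a stale device reachable, which is why every reader gains the NULL checks added earlier in this patch.
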
@@ -3625,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+	if (!bus)
+		goto out_unlock;
 
 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
 	if (dev_idx < 0)
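
The context of the last hunk stops before the out_unlock label, so it is not visible here that a NULL bus simply makes kvm_io_bus_get_dev() return NULL to its caller. Roughly, the patched function ends up looking like the sketch below (reconstructed for context, not copied verbatim from the tree this patch applies to):

	struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
						 gpa_t addr)
	{
		struct kvm_io_bus *bus;
		int dev_idx, srcu_idx;
		struct kvm_io_device *iodev = NULL;

		srcu_idx = srcu_read_lock(&kvm->srcu);

		bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
		if (!bus)			/* check added by this patch */
			goto out_unlock;

		dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
		if (dev_idx < 0)
			goto out_unlock;

		iodev = bus->range[dev_idx].dev;

	out_unlock:
		srcu_read_unlock(&kvm->srcu, srcu_idx);

		return iodev;
	}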