@@ -680,14 +680,63 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 	return ret;
 }
 
-int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
+/**
+ * vgic_v3_insert_redist_region - Insert a new redistributor region
+ *
+ * Performs various checks before inserting the rdist region in the list.
+ * Those tests depend on whether the size of the rdist region is known
+ * (i.e. count != 0). The list is sorted by rdist region index.
+ *
+ * @kvm: kvm handle
+ * @index: redist region index
+ * @base: base of the new rdist region
+ * @count: number of redistributors the region is made of (0 in the old style
+ * single region, whose size is deduced from the number of vcpus)
+ *
+ * Return 0 on success, < 0 otherwise
+ */
+static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
+					gpa_t base, uint32_t count)
 {
-	struct vgic_dist *vgic = &kvm->arch.vgic;
+	struct vgic_dist *d = &kvm->arch.vgic;
 	struct vgic_redist_region *rdreg;
+	struct list_head *rd_regions = &d->rd_regions;
+	size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
 	int ret;
 
-	/* vgic_check_ioaddr makes sure we don't do this twice */
-	if (!list_empty(&vgic->rd_regions))
+	/* single rdist region already set? */
+	if (!count && !list_empty(rd_regions))
+		return -EINVAL;
+
+	/* cross the end of memory? */
+	if (base + size < base)
+		return -EINVAL;
+
+	if (list_empty(rd_regions)) {
+		if (index != 0)
+			return -EINVAL;
+	} else {
+		rdreg = list_last_entry(rd_regions,
+					struct vgic_redist_region, list);
+		if (index != rdreg->index + 1)
+			return -EINVAL;
+
+		/* Cannot add an explicitly sized region after a legacy region */
+		if (!rdreg->count)
+			return -EINVAL;
+	}
+
+	/*
+	 * For legacy single-region redistributor regions (!count),
+	 * check that the redistributor region does not overlap with the
+	 * distributor's address space.
+	 */
+	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
+		vgic_dist_overlap(kvm, base, size))
+		return -EINVAL;
+
+	/* collision with any other rdist region? */
+	if (vgic_v3_rdist_overlap(kvm, base, size))
 		return -EINVAL;
 
 	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
@@ -696,17 +745,29 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
 
 	rdreg->base = VGIC_ADDR_UNDEF;
 
-	ret = vgic_check_ioaddr(kvm, &rdreg->base, addr, SZ_64K);
+	ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
 	if (ret)
-		goto out;
+		goto free;
 
-	rdreg->base = addr;
-	if (!vgic_v3_check_base(kvm)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	rdreg->base = base;
+	rdreg->count = count;
+	rdreg->free_index = 0;
+	rdreg->index = index;
 
-	list_add(&rdreg->list, &vgic->rd_regions);
+	list_add_tail(&rdreg->list, rd_regions);
+	return 0;
+free:
+	kfree(rdreg);
+	return ret;
+}
+
+int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
+{
+	int ret;
+
+	ret = vgic_v3_insert_redist_region(kvm, 0, addr, 0);
+	if (ret)
+		return ret;
 
 	/*
 	 * Register iodevs for each existing VCPU. Adding more VCPUs
@@ -717,10 +778,6 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
 		return ret;
 
 	return 0;
-
-out:
-	kfree(rdreg);
-	return ret;
 }
 
 int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)