@@ -1069,6 +1069,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,6 +1078,8 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
@@ -1087,30 +1091,78 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 		align = (1 << (order - mapping->order)) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
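+		/* nr_bitmaps grew by one, so bitmaps[i] is the new bitmap */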
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
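+	/* each bitmap covers a mapping->size slice of the IO address space */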
+	iova = mapping->base + (mapping->size * i);
+	iova += start << (mapping->order + PAGE_SHIFT);
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
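+	/* find which per-extension bitmap slice owns this address */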
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> (mapping->order + PAGE_SHIFT);
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
+		 */
+		BUG();
+	} else
+		count = ((size >> PAGE_SHIFT) +
+			(1 << mapping->order) - 1) >> mapping->order;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -1890,8 +1942,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 				 int order)
 {
 	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
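+	/*
+	 * No extensions yet: the array below gets room for the
+	 * initial bitmap only, so extend_iommu_mapping() cannot
+	 * grow this mapping.
+	 */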
+	int extensions = 0;
 	int err = -ENOMEM;
 
 	if (!count)
@@ -1901,23 +1953,35 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	mapping->bitmaps = kzalloc((extensions + 1) * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(mapping->bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
-	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->size = size;
 	mapping->order = order;
+	mapping->bits = BITS_PER_BYTE * mapping->bitmap_size;
+
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
@@ -1927,14 +1991,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
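+	/* called under mapping->lock from __alloc_iova(), so don't sleep */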
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)