@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -139,8 +140,10 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 
 	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
 	if (free->pfn_hi < iovad->dma_32bit_pfn &&
-	    free->pfn_lo >= cached_iova->pfn_lo)
+	    free->pfn_lo >= cached_iova->pfn_lo) {
 		iovad->cached32_node = rb_next(&free->node);
+		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+	}
 
 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -190,6 +193,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (limit_pfn <= iovad->dma_32bit_pfn &&
+			size >= iovad->max32_alloc_size)
+		goto iova32_full;
+
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
 	do {
@@ -200,10 +207,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
-		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-		return -ENOMEM;
-	}
+	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+		goto iova32_full;
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -214,9 +219,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	__cached_rbnode_insert_update(iovad, new);
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
 	return 0;
+
+iova32_full:
+	iovad->max32_alloc_size = size;
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return -ENOMEM;
 }
 
 static struct kmem_cache *iova_cache;
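
Note for readers: below is a minimal, self-contained C sketch of the heuristic the hunks above implement — remember the size of a 32-bit-space allocation that failed, reject equal-or-larger requests up front without walking the rbtree, and invalidate the record when an IOVA below the 32-bit boundary is freed. This is not kernel code; toy_domain, toy_init, toy_alloc and toy_free are hypothetical stand-ins, and the rbtree walk is elided.

/*
 * Toy model of the max32_alloc_size heuristic. Hypothetical names;
 * the expensive rbtree walk of __alloc_and_insert_iova_range() is
 * elided and simply assumed to fail, to show how failure is cached.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_domain {
	unsigned long dma_32bit_pfn;	/* upper bound of the 32-bit space */
	unsigned long max32_alloc_size;	/* smallest size known to have failed */
};

static void toy_init(struct toy_domain *d, unsigned long dma_32bit_pfn)
{
	d->dma_32bit_pfn = dma_32bit_pfn;
	/* No failure recorded yet: any size below the boundary may succeed. */
	d->max32_alloc_size = dma_32bit_pfn;
}

static bool toy_alloc(struct toy_domain *d, unsigned long size,
		      unsigned long limit_pfn)
{
	/* Fast path: a same-or-larger request already failed. */
	if (limit_pfn <= d->dma_32bit_pfn && size >= d->max32_alloc_size)
		goto full;

	/* ... rbtree walk elided; assume it found no free range ... */
full:
	d->max32_alloc_size = size;	/* cache the failing size */
	return false;
}

static void toy_free(struct toy_domain *d)
{
	/* Freeing below the boundary invalidates the cached failure,
	 * mirroring the __cached_rbnode_delete_update() hunk. */
	d->max32_alloc_size = d->dma_32bit_pfn;
}

int main(void)
{
	struct toy_domain d;

	toy_init(&d, 1UL << 20);
	toy_alloc(&d, 256, d.dma_32bit_pfn);	/* walks, fails, caches 256 */
	/* A 512-page request is now rejected without walking the tree. */
	printf("fast-path reject: %d\n",
	       !toy_alloc(&d, 512, d.dma_32bit_pfn));
	toy_free(&d);				/* record invalidated on free */
	return 0;
}

As in the patch itself, a fast-path rejection re-records the incoming size, so a larger failed request can raise the cached value; only a free below the 32-bit boundary resets it.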