@@ -24,6 +24,9 @@
 #include <linux/bitops.h>
 #include <linux/cpu.h>
 
+/* The anchor node sits above the top of the usable address space */
+#define IOVA_ANCHOR	~0UL
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
 			       unsigned long pfn,
 			       unsigned long size);
@@ -55,6 +58,9 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
+	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
+	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
+	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -119,7 +125,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 	if (!cached_node)
 		cached_node = iovad->cached_node;
 	if (!cached_node)
-		return rb_last(&iovad->rbroot);
+		return rb_prev(&iovad->anchor.node);
 
 	curr_iova = rb_entry(cached_node, struct iova, node);
 	*limit_pfn = min(*limit_pfn, curr_iova->pfn_lo);
@@ -242,7 +248,8 @@ EXPORT_SYMBOL(alloc_iova_mem);
 
 void free_iova_mem(struct iova *iova)
 {
-	kmem_cache_free(iova_cache, iova);
+	if (iova->pfn_lo != IOVA_ANCHOR)
+		kmem_cache_free(iova_cache, iova);
 }
 EXPORT_SYMBOL(free_iova_mem);
 
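The change is easier to see with the anchor idea spelled out on its own: a permanent sentinel pinned at the highest possible key means the tree can never be empty, the entry just below the top of the address space is always rb_prev(&iovad->anchor.node), and the only cost is that the free path must refuse to release the sentinel. Below is a minimal userspace sketch of that pattern, not the kernel code: a sorted doubly-linked list stands in for the rbtree, and ANCHOR_KEY, insert_range(), topmost() and free_range() are made-up names playing the roles of IOVA_ANCHOR, the rbtree insertion, rb_prev(&iovad->anchor.node) and free_iova_mem().

/*
 * Minimal, self-contained sketch (userspace C, not the kernel code) of the
 * anchor/sentinel pattern used above.  A sorted doubly-linked list stands in
 * for the rbtree; ANCHOR_KEY plays the role of IOVA_ANCHOR.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define ANCHOR_KEY ULONG_MAX

struct range {
	unsigned long lo, hi;
	struct range *prev, *next;
};

/* The permanent anchor: statically allocated, never freed. */
static struct range anchor = { ANCHOR_KEY, ANCHOR_KEY, NULL, NULL };
static struct range *head = &anchor;

/* Insert a node, keeping the list sorted by lo; the anchor bounds the walk. */
static void insert_range(struct range *n)
{
	struct range *cur = head;

	while (cur->lo < n->lo)		/* always terminates at the anchor */
		cur = cur->next;
	n->prev = cur->prev;
	n->next = cur;
	if (cur->prev)
		cur->prev->next = n;
	else
		head = n;
	cur->prev = n;
}

/* Counterpart of rb_prev(&iovad->anchor.node): highest real entry, or NULL. */
static struct range *topmost(void)
{
	return anchor.prev;
}

/* Counterpart of the free_iova_mem() check: never free the anchor. */
static void free_range(struct range *r)
{
	if (r->lo != ANCHOR_KEY)
		free(r);
}

int main(void)
{
	struct range *r = malloc(sizeof(*r));

	assert(topmost() == NULL);	/* "empty domain": only the anchor exists */

	r->lo = 0x1000;
	r->hi = 0x1fff;
	insert_range(r);
	printf("topmost entry: [0x%lx, 0x%lx]\n", topmost()->lo, topmost()->hi);

	free_range(r);
	free_range(&anchor);		/* harmless: the anchor is skipped */
	return 0;
}

As with the anchor node in the patch, callers never see an empty structure, and a single guard in the free path is enough to keep the sentinel alive for the lifetime of the domain.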
@@ -676,6 +683,10 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
+	/* Don't allow nonsensical pfns */
+	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
+		return NULL;
+
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
|