@@ -15,6 +15,9 @@
 #include <linux/rculist.h>
 #include <linux/vmalloc.h>
 #include <linux/mutex.h>
+#include <linux/migrate.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
 #include <asm/mmu_context.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
@@ -72,6 +75,55 @@ bool mm_iommu_preregistered(void)
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 
+/*
+ * Taken from alloc_migrate_target with changes to remove CMA allocations
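+ * Used as the get_new_page callback for the migrate_pages() call below;
+ * @private and @resultp are unused here but keep the new_page_t signature.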
+ */
+struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
+				int **resultp)
+{
+	gfp_t gfp_mask = GFP_USER;
+	struct page *new_page;
+
+	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+		return NULL;
+
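+	/* Allow a highmem replacement when the source page is highmem */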
+	if (PageHighMem(page))
+		gfp_mask |= __GFP_HIGHMEM;
+
+	/*
+	 * We don't want the allocation to force an OOM if possible
+	 */
+	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
+	return new_page;
+}
+
+static int mm_iommu_move_page_from_cma(struct page *page)
+{
+	int ret = 0;
+	LIST_HEAD(cma_migrate_pages);
+
+	/* Ignore huge pages for now */
+	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+		return -EBUSY;
+
+	lru_add_drain();
+	ret = isolate_lru_page(page);
+	if (ret)
+		return ret;
+
+	list_add(&page->lru, &cma_migrate_pages);
+	put_page(page); /* Drop the gup reference */
+
+	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
+				NULL, 0, MIGRATE_SYNC, MR_CMA);
+	if (ret) {
+		if (!list_empty(&cma_migrate_pages))
+			putback_movable_pages(&cma_migrate_pages);
+	}
+
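+	/*
+	 * Return 0 even if the migration failed: the isolated page has
+	 * been put back on the LRU above, so the caller can simply pin
+	 * it again with get_user_pages_fast() wherever it now lives.
+	 */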
+	return 0;
+}
+
 long mm_iommu_get(unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
@@ -124,15 +176,36 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 	for (i = 0; i < entries; ++i) {
 		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
 					1/* pages */, 1/* iswrite */, &page)) {
+			ret = -EFAULT;
 			for (j = 0; j < i; ++j)
-				put_page(pfn_to_page(
-						mem->hpas[j] >> PAGE_SHIFT));
+				put_page(pfn_to_page(mem->hpas[j] >>
+						PAGE_SHIFT));
 			vfree(mem->hpas);
 			kfree(mem);
-			ret = -EFAULT;
 			goto unlock_exit;
 		}
-
+		/*
+		 * If we get a page from the CMA zone, since we are going to
+		 * be pinning these entries, we might as well move them out
+		 * of the CMA zone if possible. NOTE: faulting in + migration
+		 * can be expensive. Batching can be considered later
+		 */
+		if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
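+			/*
+			 * If the page cannot be isolated (e.g. it is a huge
+			 * page), keep the reference we already hold and use
+			 * the page where it is.
+			 */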
+			if (mm_iommu_move_page_from_cma(page))
+				goto populate;
+			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
+						1/* pages */, 1/* iswrite */,
+						&page)) {
+				ret = -EFAULT;
+				for (j = 0; j < i; ++j)
+					put_page(pfn_to_page(mem->hpas[j] >>
+							PAGE_SHIFT));
+				vfree(mem->hpas);
+				kfree(mem);
+				goto unlock_exit;
+			}
+		}
+populate:
 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 	}
 