@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
 	struct rcu_head rcu;
 	unsigned long used;
 	atomic64_t mapped;
+	unsigned int pageshift;
 	u64 ua;			/* userspace address */
 	u64 entries;		/* number of entries in hpas[] */
 	u64 *hpas;		/* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
+	unsigned int pageshift;
+	unsigned long flags;
 	struct page *page = NULL;
 
 	mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		goto unlock_exit;
 	}
 
+	/*
+	 * For a starting point for a maximum page size calculation
+	 * we use @ua and @entries natural alignment to allow IOMMU pages
+	 * smaller than huge pages but still bigger than PAGE_SIZE.
+	 */
+	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
 	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
 	if (!mem->hpas) {
 		kfree(mem);
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 			}
 		}
 populate:
+		pageshift = PAGE_SHIFT;
+		if (PageCompound(page)) {
+			pte_t *pte;
+			struct page *head = compound_head(page);
+			unsigned int compshift = compound_order(head);
+
+			local_irq_save(flags); /* disables as well */
+			pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+			local_irq_restore(flags);
+
+			/* Double check it is still the same pinned page */
+			if (pte && pte_page(*pte) == head &&
+					pageshift == compshift)
+				pageshift = max_t(unsigned int, pageshift,
+						PAGE_SHIFT);
+		}
+		mem->pageshift = min(mem->pageshift, pageshift);
 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 	}
 
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 	u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 	if (entry >= mem->entries)
 		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
 	*hpa = *va | (ua & ~PAGE_MASK);
 
 	return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 	void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 	if (entry >= mem->entries)
 		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
 	pa = (void *) vmalloc_to_phys(va);
 	if (!pa)
 		return -EFAULT;
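
Not part of the patch: a minimal stand-alone sketch of the page-size reasoning above, using hypothetical values. __builtin_ctzl() stands in for the kernel's __ffs(), and PAGE_SHIFT is assumed to be 16 (64K base pages, a common ppc64 configuration). In the patch itself mem->pageshift starts from the natural alignment of @ua and @entries and is then lowered per pinned page before mm_iommu_ua_to_hpa() enforces it.

/*
 * Illustration only, not kernel code: shows the starting mem->pageshift
 * computed from the registration's natural alignment, and the new
 * containment check against a requested IOMMU page shift.
 */
#include <stdio.h>

#define PAGE_SHIFT	16	/* assumed 64K base pages */

int main(void)
{
	unsigned long ua = 0x3fff70000000UL;	/* start of the registered window */
	unsigned long entries = 4096;		/* 4096 * 64K = a 256M window */

	/* same idea as mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)) */
	unsigned int mem_pageshift = __builtin_ctzl(ua | (entries << PAGE_SHIFT));

	printf("starting mem->pageshift = %u\n", mem_pageshift);	/* 28 here */

	/* the check added to mm_iommu_ua_to_hpa(): reject larger IOMMU pages */
	unsigned int iommu_pageshift = 30;	/* e.g. a 1GB IOMMU page */
	if (iommu_pageshift > mem_pageshift)
		printf("-EFAULT: IOMMU page exceeds the pinned page size\n");

	return 0;
}

Callers of mm_iommu_ua_to_hpa() and mm_iommu_ua_to_hpa_rm() are presumably updated elsewhere in the series to pass the IOMMU table's page shift, so mapping, say, a 16MB TCE fails with -EFAULT unless the backing pinned page is at least that large.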