@@ -348,6 +348,16 @@ static void hmm_pfns_clear(uint64_t *pfns,
                 *pfns = 0;
 }
 
+/*
+ * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
+ * @addr: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @walk: mm_walk structure
+ * Returns: 0 on success, -EAGAIN after page fault, or page fault error
+ *
+ * This function will be called whenever pmd_none() or pte_none() returns true,
+ * or whenever there is no page directory covering the virtual address range.
+ */
 static int hmm_vma_walk_hole(unsigned long addr,
                              unsigned long end,
                              struct mm_walk *walk)
@@ -357,31 +367,6 @@ static int hmm_vma_walk_hole(unsigned long addr,
         uint64_t *pfns = range->pfns;
         unsigned long i;
 
-        hmm_vma_walk->last = addr;
-        i = (addr - range->start) >> PAGE_SHIFT;
-        for (; addr < end; addr += PAGE_SIZE, i++) {
-                pfns[i] = HMM_PFN_EMPTY;
-                if (hmm_vma_walk->fault) {
-                        int ret;
-
-                        ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
-                        if (ret != -EAGAIN)
-                                return ret;
-                }
-        }
-
-        return hmm_vma_walk->fault ? -EAGAIN : 0;
-}
-
-static int hmm_vma_walk_clear(unsigned long addr,
-                              unsigned long end,
-                              struct mm_walk *walk)
-{
-        struct hmm_vma_walk *hmm_vma_walk = walk->private;
-        struct hmm_range *range = hmm_vma_walk->range;
-        uint64_t *pfns = range->pfns;
-        unsigned long i;
-
         hmm_vma_walk->last = addr;
         i = (addr - range->start) >> PAGE_SHIFT;
         for (; addr < end; addr += PAGE_SIZE, i++) {
@@ -440,10 +425,10 @@ again:
                 if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                         goto again;
                 if (pmd_protnone(pmd))
-                        return hmm_vma_walk_clear(start, end, walk);
+                        return hmm_vma_walk_hole(start, end, walk);
 
                 if (write_fault && !pmd_write(pmd))
-                        return hmm_vma_walk_clear(start, end, walk);
+                        return hmm_vma_walk_hole(start, end, walk);
 
                 pfn = pmd_pfn(pmd) + pte_index(addr);
                 flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
@@ -462,7 +447,7 @@ again:
                 pfns[i] = 0;
 
                 if (pte_none(pte)) {
-                        pfns[i] = HMM_PFN_EMPTY;
+                        pfns[i] = 0;
                         if (hmm_vma_walk->fault)
                                 goto fault;
                         continue;
@@ -513,8 +498,8 @@ again:
 
 fault:
                 pte_unmap(ptep);
-                /* Fault all pages in range */
-                return hmm_vma_walk_clear(start, end, walk);
+                /* Fault any virtual address we were asked to fault */
+                return hmm_vma_walk_hole(start, end, walk);
         }
         pte_unmap(ptep - 1);
 
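For reference only (not part of the patch): a rough sketch of what the consolidated hmm_vma_walk_hole() is expected to look like once the duplicated hmm_vma_walk_clear() body above is folded into it. The tail of the surviving loop falls outside this excerpt, so the body below is reconstructed from the removed lines, with pfns[i] = 0 inferred from the pte_none() hunk rather than copied from visible context.

static int hmm_vma_walk_hole(unsigned long addr,
                             unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++) {
                /* Empty entries and missing directories are no longer told apart. */
                pfns[i] = 0;
                if (hmm_vma_walk->fault) {
                        int ret;

                        /* -EAGAIN means the fault was serviced; keep faulting the range. */
                        ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
                        if (ret != -EAGAIN)
                                return ret;
                }
        }

        /* Per the comment above: -EAGAIN after faulting, 0 otherwise. */
        return hmm_vma_walk->fault ? -EAGAIN : 0;
}

With the clear variant gone, the pmd_protnone()/write-protection paths and the pte fault path above all funnel through this single helper.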