@@ -324,14 +324,6 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
 	return -EAGAIN;
 }
 
-static void hmm_pfns_special(uint64_t *pfns,
-			     unsigned long addr,
-			     unsigned long end)
-{
-	for (; addr < end; addr += PAGE_SIZE, pfns++)
-		*pfns = HMM_PFN_SPECIAL;
-}
-
 static int hmm_pfns_bad(unsigned long addr,
 			unsigned long end,
 			struct mm_walk *walk)
@@ -529,6 +521,14 @@ fault:
 	return 0;
 }
 
+static void hmm_pfns_special(struct hmm_range *range)
+{
+	unsigned long addr = range->start, i = 0;
+
+	for (; addr < range->end; addr += PAGE_SIZE, i++)
+		range->pfns[i] = HMM_PFN_SPECIAL;
+}
+
 /*
  * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
  * @range: range being snapshotted
@@ -553,12 +553,6 @@ int hmm_vma_get_pfns(struct hmm_range *range)
 	struct mm_walk mm_walk;
 	struct hmm *hmm;
 
-	/* FIXME support hugetlb fs */
-	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(range->pfns, range->start, range->end);
-		return -EINVAL;
-	}
-
 	/* Sanity check, this really should not happen ! */
 	if (range->start < vma->vm_start || range->start >= vma->vm_end)
 		return -EINVAL;
@@ -572,6 +566,12 @@ int hmm_vma_get_pfns(struct hmm_range *range)
 	if (!hmm->mmu_notifier.ops)
 		return -EINVAL;
 
+	/* FIXME support hugetlb fs */
+	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+		hmm_pfns_special(range);
+		return -EINVAL;
+	}
+
 	if (!(vma->vm_flags & VM_READ)) {
 		/*
 		 * If vma do not allow read access, then assume that it does
@@ -740,6 +740,12 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 	if (!hmm->mmu_notifier.ops)
 		return -EINVAL;
 
+	/* FIXME support hugetlb fs */
+	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+		hmm_pfns_special(range);
+		return -EINVAL;
+	}
+
 	if (!(vma->vm_flags & VM_READ)) {
 		/*
 		 * If vma do not allow read access, then assume that it does
@@ -751,12 +757,6 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 		return -EPERM;
 	}
 
-	/* FIXME support hugetlb fs */
-	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(range->pfns, range->start, range->end);
-		return 0;
-	}
-
 	/* Initialize range to track CPU page table update */
 	spin_lock(&hmm->lock);
 	range->valid = true;