|
@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
|
|
/*
 * Installed as the ->split handler in hugetlb_vm_ops: a hugetlb VMA may
 * only be split at an address aligned to the VMA's huge page size.
 *
 * @vma:  hugetlb VMA being split.
 * @addr: proposed split address.
 *
 * Returns 0 when @addr sits on a huge-page boundary for the VMA's hstate,
 * -EINVAL otherwise.
 */
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long boundary_mask = huge_page_mask(hstate_vma(vma));

	/* Any low bits outside the mask mean a mid-hugepage split request. */
	return (addr & ~boundary_mask) ? -EINVAL : 0;
}
|
|
/*
 * We cannot handle pagefaults against hugetlb pages at all. They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
|
@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
|
|
.fault = hugetlb_vm_op_fault,
|
|
.fault = hugetlb_vm_op_fault,
|
|
.open = hugetlb_vm_op_open,
|
|
.open = hugetlb_vm_op_open,
|
|
.close = hugetlb_vm_op_close,
|
|
.close = hugetlb_vm_op_close,
|
|
|
|
+ .split = hugetlb_vm_op_split,
|
|
};
|
|
};
|
|
|
|
|
|
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
|
|
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
|