@@ -637,14 +637,9 @@ EXPORT_SYMBOL_GPL(linear_hugepage_index);
  */
 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 {
-	struct hstate *hstate;
-
-	if (!is_vm_hugetlb_page(vma))
-		return PAGE_SIZE;
-
-	hstate = hstate_vma(vma);
-
-	return 1UL << huge_page_shift(hstate);
+	if (vma->vm_ops && vma->vm_ops->pagesize)
+		return vma->vm_ops->pagesize(vma);
+	return PAGE_SIZE;
 }
 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 
@@ -3151,6 +3146,13 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 	return 0;
 }
 
+static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
+{
+	struct hstate *hstate = hstate_vma(vma);
+
+	return 1UL << huge_page_shift(hstate);
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all. They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3168,6 +3170,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
 	.open = hugetlb_vm_op_open,
 	.close = hugetlb_vm_op_close,
 	.split = hugetlb_vm_op_split,
+	.pagesize = hugetlb_vm_op_pagesize,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
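With this hook in place, vma_kernel_pagesize() no longer needs to special-case hugetlbfs: any mapping type can report its MMU page size through vm_ops->pagesize(), and everything else falls back to PAGE_SIZE. As a hypothetical illustration (not part of this patch), a driver that always maps PMD-sized pages could wire up the hook like so:

/* Hypothetical driver, for illustration only: every mapping is PMD-sized. */
static unsigned long example_vm_op_pagesize(struct vm_area_struct *vma)
{
	return PMD_SIZE;	/* e.g. 2MB with 4K base pages on x86-64 */
}

static const struct vm_operations_struct example_vm_ops = {
	.pagesize	= example_vm_op_pagesize,
	/* .fault, .open, .close, etc. as the driver requires */
};

For such a VMA, vma_kernel_pagesize() returns PMD_SIZE via the dispatch added above, without the core code knowing anything about the driver.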