@@ -3511,9 +3511,8 @@ static int fault_around_bytes_get(void *data, u64 *val)
}

/*
- * fault_around_pages() and fault_around_mask() expects fault_around_bytes
- * rounded down to nearest page order. It's what do_fault_around() expects to
- * see.
+ * fault_around_bytes must be rounded down to the nearest page order as it's
+ * what do_fault_around() expects to see.
*/
static int fault_around_bytes_set(void *data, u64 val)
{
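
For reference, the setter that the reworded comment describes enforces the rounding roughly as follows. This is a sketch of the surrounding mm/memory.c code, not part of the patch, and the exact body in a given tree may differ:

	static int fault_around_bytes_set(void *data, u64 val)
	{
		/* Cap the window at one page table's worth of pages. */
		if (val / PAGE_SIZE > PTRS_PER_PTE)
			return -EINVAL;
		if (val > PAGE_SIZE)
			fault_around_bytes = rounddown_pow_of_two(val);
		else
			/* rounddown_pow_of_two(0) is undefined, so clamp to one page */
			fault_around_bytes = PAGE_SIZE;
		return 0;
	}
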
@@ -3556,13 +3555,14 @@ late_initcall(fault_around_debugfs);
* This function doesn't cross the VMA boundaries, in order to call map_pages()
* only once.
*
- * fault_around_pages() defines how many pages we'll try to map.
- * do_fault_around() expects it to return a power of two less than or equal to
- * PTRS_PER_PTE.
+ * fault_around_bytes defines how many bytes we'll try to map.
+ * do_fault_around() expects it to be set to a power of two less than or equal
+ * to PTRS_PER_PTE * PAGE_SIZE.
*
- * The virtual address of the area that we map is naturally aligned to the
- * fault_around_pages() value (and therefore to page order). This way it's
- * easier to guarantee that we don't cross page table boundaries.
+ * The virtual address of the area that we map is naturally aligned to
+ * fault_around_bytes rounded down to the machine page size
+ * (and therefore to page order). This way it's easier to guarantee
+ * that we don't cross page table boundaries.
*/
static int do_fault_around(struct vm_fault *vmf)
{
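
To make the alignment claim concrete: do_fault_around() derives its window from fault_around_bytes along these lines. This is a sketch based on the upstream function, with the local names (nr_pages, mask, off) assumed from context rather than quoted from this tree:

	unsigned long address = vmf->address, nr_pages, mask;
	int off;

	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
	/* nr_pages is a power of two, so this mask rounds the faulting
	 * address down to a naturally aligned fault-around window. */
	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;

	/* Never start the window before the VMA itself. */
	vmf->address = max(address & mask, vmf->vma->vm_start);
	off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
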
@@ -3579,8 +3579,8 @@ static int do_fault_around(struct vm_fault *vmf)
start_pgoff -= off;

/*
- * end_pgoff is either end of page table or end of vma
- * or fault_around_pages() from start_pgoff, depending what is nearest.
+ * end_pgoff is either the end of the page table, the end of
+ * the vma or nr_pages from start_pgoff, depending on what is nearest.
*/
end_pgoff = start_pgoff -
((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
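
Just past this hunk, the three bounds named in the comment are applied with min3(). Again a sketch following the upstream code rather than a verbatim quote from this tree:

	/* Clamp to whichever comes first: the end of this PTE page
	 * table, the end of the VMA, or nr_pages from start_pgoff. */
	end_pgoff = start_pgoff -
		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
		PTRS_PER_PTE - 1;
	end_pgoff = min3(end_pgoff,
			 vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
			 start_pgoff + nr_pages - 1);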