@@ -416,6 +416,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 
 	if (rc == VM_FAULT_NOPAGE) {
 		unsigned long i;
+		pgoff_t pgoff;
 
 		/*
 		 * In the device-dax case the only possibility for a
@@ -423,6 +424,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		 * mapped. No need to consider the zero page, or racing
 		 * conflicting mappings.
 		 */
+		pgoff = linear_page_index(vmf->vma, vmf->address
+				& ~(fault_size - 1));
 		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
 			struct page *page;
 
@@ -430,6 +433,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 			if (page->mapping)
 				continue;
 			page->mapping = filp->f_mapping;
+			page->index = pgoff + i;
 		}
 	}
 	dax_read_unlock(id);
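
For context, the added pgoff calculation aligns the faulting address down to fault_size and converts it to a linear page index, so every small page backing a PMD/PUD-sized fault gets a distinct, consecutive page->index. Below is a minimal userspace sketch of that arithmetic; the vm_start, vm_pgoff and fault_size values are hypothetical, and the simplified linear_page_index() here mirrors only the non-hugetlb path of the kernel helper in include/linux/pagemap.h.

/*
 * Userspace sketch of the index arithmetic added by the hunks above.
 * All concrete values are assumptions for illustration only.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

typedef unsigned long pgoff_t;

/* Simplified model of the non-hugetlb case of linear_page_index(). */
static pgoff_t linear_page_index(unsigned long vm_start, pgoff_t vm_pgoff,
				 unsigned long address)
{
	return ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff;
}

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;	/* assumed mapping base */
	pgoff_t vm_pgoff = 0;				/* assumed offset into the mapping */
	unsigned long fault_size = 2UL << 20;		/* assumed PMD-sized fault: 2 MiB */
	unsigned long address = vm_start + (3UL << 20) + 0x1234; /* faulting address */
	unsigned long nr_pages = fault_size / PAGE_SIZE;

	/* Align the fault address down to fault_size, as the patch does. */
	pgoff_t pgoff = linear_page_index(vm_start, vm_pgoff,
					  address & ~(fault_size - 1));

	printf("first subpage index: %lu\n", pgoff);
	printf("last subpage index:  %lu\n", pgoff + nr_pages - 1);
	return 0;
}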