@@ -240,6 +240,23 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 	}
 }
 
+static void dax_unlock_mapping_entry(struct address_space *mapping,
+				     pgoff_t index)
+{
+	void *entry, **slot;
+
+	spin_lock_irq(&mapping->tree_lock);
+	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
+			 !slot_locked(mapping, slot))) {
+		spin_unlock_irq(&mapping->tree_lock);
+		return;
+	}
+	unlock_slot(mapping, slot);
+	spin_unlock_irq(&mapping->tree_lock);
+	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+}
+
 static void put_locked_mapping_entry(struct address_space *mapping,
 				     pgoff_t index, void *entry)
 {
@@ -433,22 +450,6 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 	__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
-	void *entry, **slot;
-
-	spin_lock_irq(&mapping->tree_lock);
-	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
-	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
-			 !slot_locked(mapping, slot))) {
-		spin_unlock_irq(&mapping->tree_lock);
-		return;
-	}
-	unlock_slot(mapping, slot);
-	spin_unlock_irq(&mapping->tree_lock);
-	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
-}
-
 /*
  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
  * entry to get unlocked before deleting it.
@@ -500,10 +501,8 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	/* This will replace locked radix tree entry with a hole page */
 	page = find_or_create_page(mapping, vmf->pgoff,
 				   vmf->gfp_mask | __GFP_ZERO);
-	if (!page) {
-		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	if (!page)
 		return VM_FAULT_OOM;
-	}
 	vmf->page = page;
 	return VM_FAULT_LOCKED;
 }
@@ -954,7 +953,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	struct iomap iomap = { 0 };
 	unsigned flags = IOMAP_FAULT;
 	int error, major = 0;
-	int locked_status = 0;
+	int vmf_ret = 0;
 	void *entry;
 
 	/*
@@ -1007,13 +1006,11 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 		if (error)
 			goto finish_iomap;
-		if (!radix_tree_exceptional_entry(entry)) {
-			vmf->page = entry;
-			locked_status = VM_FAULT_LOCKED;
-		} else {
-			vmf->entry = entry;
-			locked_status = VM_FAULT_DAX_LOCKED;
-		}
+
+		__SetPageUptodate(vmf->cow_page);
+		vmf_ret = finish_fault(vmf);
+		if (!vmf_ret)
+			vmf_ret = VM_FAULT_DONE_COW;
 		goto finish_iomap;
 	}
 
@@ -1030,7 +1027,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-			locked_status = dax_load_hole(mapping, entry, vmf);
+			vmf_ret = dax_load_hole(mapping, entry, vmf);
 			break;
 		}
 		/*FALLTHRU*/
@@ -1042,7 +1039,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 finish_iomap:
 	if (ops->iomap_end) {
-		if (error) {
+		if (error || (vmf_ret & VM_FAULT_ERROR)) {
 			/* keep previous error */
 			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
 					&iomap);
@@ -1052,7 +1049,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 	}
 unlock_entry:
-	if (!locked_status || error)
+	if (vmf_ret != VM_FAULT_LOCKED || error)
 		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
 	if (error == -ENOMEM)
@@ -1060,9 +1057,9 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	/* -EBUSY is fine, somebody else faulted on the same PTE */
 	if (error < 0 && error != -EBUSY)
 		return VM_FAULT_SIGBUS | major;
-	if (locked_status) {
+	if (vmf_ret) {
 		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
-		return locked_status;
+		return vmf_ret;
 	}
 	return VM_FAULT_NOPAGE | major;
 }