@@ -31,6 +31,7 @@
 #include <linux/vmstat.h>
 #include <linux/pfn_t.h>
 #include <linux/sizes.h>
+#include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
 #include "internal.h"
 
@@ -614,6 +615,59 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	return new_entry;
 }
 
+static inline unsigned long
+pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+{
+	unsigned long address;
+
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	return address;
+}
+
+/* Walk all mappings of a given index of a file and writeprotect them */
+static void dax_mapping_entry_mkclean(struct address_space *mapping,
+				      pgoff_t index, unsigned long pfn)
+{
+	struct vm_area_struct *vma;
+	pte_t *ptep;
+	pte_t pte;
+	spinlock_t *ptl;
+	bool changed;
+
+	i_mmap_lock_read(mapping);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
+		unsigned long address;
+
+		cond_resched();
+
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		address = pgoff_address(index, vma);
+		changed = false;
+		if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+			continue;
+		if (pfn != pte_pfn(*ptep))
+			goto unlock;
+		if (!pte_dirty(*ptep) && !pte_write(*ptep))
+			goto unlock;
+
+		flush_cache_page(vma, address, pfn);
+		pte = ptep_clear_flush(vma, address, ptep);
+		pte = pte_wrprotect(pte);
+		pte = pte_mkclean(pte);
+		set_pte_at(vma->vm_mm, address, ptep, pte);
+		changed = true;
+unlock:
+		pte_unmap_unlock(ptep, ptl);
+
+		if (changed)
+			mmu_notifier_invalidate_page(vma->vm_mm, address);
+	}
+	i_mmap_unlock_read(mapping);
+}
+
 static int dax_writeback_one(struct block_device *bdev,
 		struct address_space *mapping, pgoff_t index, void *entry)
 {
@@ -687,7 +741,17 @@ static int dax_writeback_one(struct block_device *bdev,
 		goto unmap;
 	}
 
+	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
 	wb_cache_pmem(dax.addr, dax.size);
+	/*
+	 * After we have flushed the cache, we can clear the dirty tag. There
+	 * cannot be new dirty data in the pfn after the flush has completed as
+	 * the pfn mappings are writeprotected and fault waits for mapping
+	 * entry lock.
+	 */
+	spin_lock_irq(&mapping->tree_lock);
+	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
+	spin_unlock_irq(&mapping->tree_lock);
 unmap:
 	dax_unmap_atomic(bdev, &dax);
 	put_locked_mapping_entry(mapping, index, entry);