|
@@ -626,16 +626,34 @@ void try_to_unmap_flush(void)
|
|
}
|
|
}
|
|
cpumask_clear(&tlb_ubc->cpumask);
|
|
cpumask_clear(&tlb_ubc->cpumask);
|
|
tlb_ubc->flush_required = false;
|
|
tlb_ubc->flush_required = false;
|
|
|
|
+ tlb_ubc->writable = false;
|
|
put_cpu();
|
|
put_cpu();
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+/* Flush iff there are potentially writable TLB entries that can race with IO */
|
|
|
|
+void try_to_unmap_flush_dirty(void)
|
|
|
|
+{
|
|
|
|
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
|
|
|
|
+
|
|
|
|
+ if (tlb_ubc->writable)
|
|
|
|
+ try_to_unmap_flush();
|
|
|
|
+}
|
|
|
|
+
|
|
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
|
|
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
|
|
- struct page *page)
|
|
|
|
|
|
+ struct page *page, bool writable)
|
|
{
|
|
{
|
|
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
|
|
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
|
|
|
|
|
|
cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
|
|
cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
|
|
tlb_ubc->flush_required = true;
|
|
tlb_ubc->flush_required = true;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * If the PTE was dirty then it's best to assume it's writable. The
|
|
|
|
+ * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
|
|
|
|
+ * before the page is queued for IO.
|
|
|
|
+ */
|
|
|
|
+ if (writable)
|
|
|
|
+ tlb_ubc->writable = true;
|
|
}
|
|
}
|
|
|
|
|
|
/*
|
|
/*
|
|
@@ -658,7 +676,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
|
|
}
|
|
}
|
|
#else
|
|
#else
|
|
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
|
|
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
|
|
- struct page *page)
|
|
|
|
|
|
+ struct page *page, bool writable)
|
|
{
|
|
{
|
|
}
|
|
}
|
|
|
|
|
|
@@ -1315,11 +1333,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
|
|
*/
|
|
*/
|
|
pteval = ptep_get_and_clear(mm, address, pte);
|
|
pteval = ptep_get_and_clear(mm, address, pte);
|
|
|
|
|
|
- /* Potentially writable TLBs must be flushed before IO */
|
|
|
|
- if (pte_dirty(pteval))
|
|
|
|
- flush_tlb_page(vma, address);
|
|
|
|
- else
|
|
|
|
- set_tlb_ubc_flush_pending(mm, page);
|
|
|
|
|
|
+ set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
|
|
} else {
|
|
} else {
|
|
pteval = ptep_clear_flush(vma, address, pte);
|
|
pteval = ptep_clear_flush(vma, address, pte);
|
|
}
|
|
}
|