@@ -272,10 +272,13 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  * that were required.
  */
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end)
+		unsigned long start, unsigned long end, bool force)
 {
 	struct mmu_gather_batch *batch, *next;
 
+	if (force)
+		__tlb_adjust_range(tlb, start, end - start);
+
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -404,12 +407,23 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 		unsigned long start, unsigned long end)
 {
 	arch_tlb_gather_mmu(tlb, mm, start, end);
+	inc_tlb_flush_pending(tlb->mm);
 }
 
 void tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end)
 {
-	arch_tlb_finish_mmu(tlb, start, end);
+	/*
+	 * If parallel threads are changing PTEs on the same range under a
+	 * non-exclusive lock (e.g., mmap_sem read-side) but deferring the
+	 * TLB flush by batching, a thread with stale TLB entries can skip
+	 * the flush, for example after observing pte_none or !pte_dirty.
+	 * So flush the TLB forcefully if we detect parallel PTE batching.
+	 */
+	bool force = mm_tlb_flush_nested(tlb->mm);
+
+	arch_tlb_finish_mmu(tlb, start, end, force);
+	dec_tlb_flush_pending(tlb->mm);
 }
 
 /*
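
The helpers named in the hunk above, inc_tlb_flush_pending(), dec_tlb_flush_pending() and mm_tlb_flush_nested(), carry the detection: each tlb_gather_mmu() raises a per-mm pending count, tlb_finish_mmu() drops it, and mm_tlb_flush_nested() reports whether more than one batch is in flight. The following is a minimal user-space sketch of that bookkeeping, not the kernel code: struct mm_model and the model_* functions are hypothetical names, and the counter is assumed to be a plain C11 atomic rather than the kernel's atomic_t.

/*
 * Illustrative user-space model only -- NOT the kernel implementation.
 * struct mm_model and the model_* helpers are hypothetical; they mirror
 * the behaviour suggested by the patch: a per-mm pending counter that is
 * raised in tlb_gather_mmu(), dropped in tlb_finish_mmu(), and treated
 * as "nested" when more than one batch is in flight.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_model {
	atomic_int tlb_flush_pending;	/* in-flight mmu_gather batches */
};

static void model_inc_tlb_flush_pending(struct mm_model *mm)
{
	atomic_fetch_add(&mm->tlb_flush_pending, 1);
}

static void model_dec_tlb_flush_pending(struct mm_model *mm)
{
	atomic_fetch_sub(&mm->tlb_flush_pending, 1);
}

/* More than one pending batch: another thread may be relying on our flush. */
static bool model_tlb_flush_nested(struct mm_model *mm)
{
	return atomic_load(&mm->tlb_flush_pending) > 1;
}

int main(void)
{
	struct mm_model mm = { .tlb_flush_pending = 0 };

	model_inc_tlb_flush_pending(&mm);	/* thread A: tlb_gather_mmu() */
	model_inc_tlb_flush_pending(&mm);	/* thread B: parallel gather  */

	/* Thread A finishes while B is still batching: force the flush. */
	printf("A finishes, force = %d\n", model_tlb_flush_nested(&mm));
	model_dec_tlb_flush_pending(&mm);

	/* Thread B finishes alone: no forced flush is needed. */
	printf("B finishes, force = %d\n", model_tlb_flush_nested(&mm));
	model_dec_tlb_flush_pending(&mm);

	return 0;
}

With this bookkeeping, a thread that finds another batch still pending at finish time cannot trust its own (possibly empty) gathered range, which is why tlb_finish_mmu() passes force and arch_tlb_finish_mmu() widens the flush to cover [start, end) via __tlb_adjust_range().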