@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	if (!tlb->need_flush)
+		return;
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
@@ -1127,8 +1136,10 @@ again:
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
-				if (pte_dirty(ptent))
+				if (pte_dirty(ptent)) {
+					force_flush = 1;
 					set_page_dirty(page);
+				}
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			force_flush = !__tlb_remove_page(tlb, page);
-			if (force_flush)
+			if (unlikely(!__tlb_remove_page(tlb, page))) {
+				force_flush = 1;
 				break;
+			}
 			continue;
 		}
 		/*
@@ -1174,18 +1186,11 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(start_pte, ptl);
 
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
+	/* Do the actual TLB flush before dropping ptl */
 	if (force_flush) {
 		unsigned long old_end;
 
-		force_flush = 0;
-
 		/*
 		 * Flush the TLB just for the previous segment,
 		 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
 		 */
 		old_end = tlb->end;
 		tlb->end = addr;
-
-		tlb_flush_mmu(tlb);
-
+		tlb_flush_mmu_tlbonly(tlb);
 		tlb->start = addr;
 		tlb->end = old_end;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+
+	/*
+	 * If we forced a TLB flush (either due to running out of
+	 * batch buffers or because we needed to flush dirty TLB
+	 * entries before releasing the ptl), free the batched
+	 * memory too. Restart if we didn't do everything.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu_free(tlb);
 
 		if (addr != end)
 			goto again;
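
Reviewer note: the ordering contract this patch establishes (invalidate the
TLB while the page-table lock is still held; free the batched pages only
after it is dropped) can be modeled with a standalone user-space sketch.
Illustrative only: the names mirror the kernel's mmu_gather API, but the
lock, the "TLB", and the page batch below are toy stand-ins, not kernel code.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the pte lock */

struct mmu_gather {
	bool need_flush;
	int nr_batched;		/* pages queued for freeing */
};

/* Phase 1: drop stale "TLB" entries. Must complete before ptl is released,
 * so no other CPU can keep writing through a stale dirty entry. */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb->need_flush = false;
	printf("TLB invalidated (still under ptl)\n");
}

/* Phase 2: free the batched pages. Potentially expensive, so it can and
 * should run after ptl has been dropped. */
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	printf("freeing %d batched pages (ptl released)\n", tlb->nr_batched);
	tlb->nr_batched = 0;
}

int main(void)
{
	struct mmu_gather tlb = { .need_flush = true, .nr_batched = 32 };
	bool force_flush = true;	/* e.g. a dirty pte was zapped */

	pthread_mutex_lock(&ptl);
	if (force_flush)
		tlb_flush_mmu_tlbonly(&tlb);	/* flush before unlocking */
	pthread_mutex_unlock(&ptl);

	if (force_flush) {
		force_flush = false;
		tlb_flush_mmu_free(&tlb);	/* free after unlocking */
	}
	return 0;
}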