@@ -107,6 +107,11 @@ struct mmu_gather {
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 	unsigned int		batch_count;
+	/*
+	 * __tlb_adjust_range() will track the new address here, so
+	 * that we can adjust the range after the flush
+	 */
+	unsigned long addr;
 };

 #define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +120,19 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 					unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- *	required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (!__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
+bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
 {
 	tlb->start = min(tlb->start, address);
 	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	/*
+	 * Track the last address with which we adjusted the range. This
+	 * will be used later to adjust the range again after a
+	 * tlb_flush_mmu() caused by a failed __tlb_remove_page()
+	 */
+	tlb->addr = address;
 }

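(Background, not part of the patch: the replay via tlb->addr is needed because the flush path ends in __tlb_reset_range(), which discards the accumulated range. In current kernels that reset looks roughly like the sketch below, so a page queued after a flush would otherwise sit outside any valid range until __tlb_adjust_range() runs again.)

	/* rough sketch of the existing __tlb_reset_range() behaviour */
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}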
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +145,27 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }

+/* tlb_remove_page
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (__tlb_remove_page(tlb, page)) {
+		/* The batch was full: flush, re-open the range and retry. */
+		tlb_flush_mmu(tlb);
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page(tlb, page);
+	}
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+	/* active->nr should be zero when we call this */
+	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	__tlb_adjust_range(tlb, tlb->addr);
+	return __tlb_remove_page(tlb, page);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,