@@ -125,10 +125,11 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 					int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
-				      unsigned long address)
+				      unsigned long address,
+				      unsigned int range_size)
 {
 	tlb->start = min(tlb->start, address);
-	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	tlb->end = max(tlb->end, address + range_size);
 	/*
 	 * Track the last address with which we adjusted the range. This
 	 * will be used later to adjust again after a mmu_flush due to
@@ -153,7 +154,7 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	if (__tlb_remove_page_size(tlb, page, page_size)) {
 		tlb_flush_mmu(tlb);
 		tlb->page_size = page_size;
-		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_adjust_range(tlb, tlb->addr, page_size);
 		__tlb_remove_page_size(tlb, page, page_size);
 	}
 }
@@ -177,7 +178,7 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
 	/* active->nr should be zero when we call this */
 	VM_BUG_ON_PAGE(tlb->active->nr, page);
 	tlb->page_size = PAGE_SIZE;
-	__tlb_adjust_range(tlb, tlb->addr);
+	__tlb_adjust_range(tlb, tlb->addr, PAGE_SIZE);
 	return __tlb_remove_page(tlb, page);
 }
 
@@ -215,7 +216,7 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
@@ -227,29 +228,47 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
 #endif
 
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
-	do {							\
-		__tlb_adjust_range(tlb, address);		\
-		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
+	do {								\
+		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
+		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
 	} while (0)
 
+/*
+ * For things like page tables caches (ie caching addresses "inside" the
+ * page tables, like x86 does), for legacy reasons, flushing an
+ * individual page had better flush the page table caches behind it. This
+ * is definitely how x86 works, for example. And if you have an
+ * architected non-legacy page table cache (which I'm not aware of
+ * anybody actually doing), you're going to have some architecturally
+ * explicit flushing for that, likely *separate* from a regular TLB entry
+ * flush, and thus you'd need more than just some range expansion..
+ *
+ * So if we ever find an architecture
+ * that would want something that odd, I think it is up to that
+ * architecture to do its own odd thing, not cause pain for others
+ * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
+ *
+ * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
+ */
+
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
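
Not part of the patch: below is a minimal standalone sketch of the range bookkeeping that __tlb_adjust_range() performs after this change. The names gather_range and adjust_range are simplified stand-ins for illustration only, not kernel API; the min/max logic mirrors the tlb->start/tlb->end updates above.

/*
 * Hypothetical userspace demo of the mmu_gather range accumulation.
 * Build with: cc -o demo demo.c
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct gather_range {
	unsigned long start;	/* lowest address seen so far */
	unsigned long end;	/* one past the highest byte covered */
};

static void adjust_range(struct gather_range *r,
			 unsigned long address, unsigned int range_size)
{
	/* Same shape as the tlb->start/tlb->end updates in the patch. */
	r->start = MIN(r->start, address);
	r->end = MAX(r->end, address + range_size);
}

int main(void)
{
	/* Empty range: start at "infinity", end at zero. */
	struct gather_range r = { .start = ~0UL, .end = 0 };

	/* A 4K page at 0x2000, then a 2M huge page at 0x400000. */
	adjust_range(&r, 0x2000, 4096);
	adjust_range(&r, 0x400000, 2 * 1024 * 1024);

	/* Prints flush [0x2000, 0x600000): the huge page grows the
	 * range by its full size, where the old code would only have
	 * extended it by PAGE_SIZE. */
	printf("flush [%#lx, %#lx)\n", r.start, r.end);
	return 0;
}

The point of the range_size argument is visible in the second call: passing the mapping's real size lets a PMD-sized entry widen tlb->end by HPAGE_PMD_SIZE instead of a single PAGE_SIZE, so the eventual TLB flush covers the whole huge mapping.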