@@ -120,12 +120,18 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (tb->hugepage_shift == PAGE_SHIFT) {
+	if (tb->hugepage_shift < HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+		if (tb->hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+					     tb->hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -152,8 +158,14 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries,
-					   hugepage_shift);
+		if (hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+					      nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+						   nentries, hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
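
Both hunks share one shape: any page size below HPAGE_SHIFT is serviced from the base (MM_TSB_BASE) TSB, with a secondary branch choosing the plain flush for base-sized pages and the huge variant for intermediate sizes, which must sweep every base-page slot the huge page covers. The following is a minimal, self-contained C sketch of that dispatch, not part of the patch itself; the shift values and the stub names flush_one_entry()/flush_huge_one_entry() are assumptions for illustration, standing in for __flush_tsb_one_entry()/__flush_huge_tsb_one_entry() in arch/sparc/mm/tsb.c.

#include <stdio.h>

#define PAGE_SHIFT	13	/* assumed: 8K base pages, as on sparc64 */
#define HPAGE_SHIFT	23	/* assumed: 8M default hugepage size */

/* Stub standing in for __flush_tsb_one_entry(): flush one base TSB slot. */
static void flush_one_entry(unsigned long vaddr)
{
	printf("flush base TSB entry for va %#lx\n", vaddr);
}

/*
 * Stub mirroring __flush_huge_tsb_one_entry(): an intermediate-size
 * hugepage occupies 1 << (hugepage_shift - PAGE_SHIFT) base-page slots
 * in the base TSB, so each covered slot is flushed in turn. vaddr is
 * assumed hugepage-aligned, as the kernel callers provide.
 */
static void flush_huge_one_entry(unsigned long vaddr,
				 unsigned int hugepage_shift)
{
	unsigned int i, n = 1U << (hugepage_shift - PAGE_SHIFT);

	for (i = 0; i < n; i++)
		flush_one_entry(vaddr + ((unsigned long)i << PAGE_SHIFT));
}

/* The dispatch shape added by the flush_tsb_user_page() hunk. */
static void flush_user_page(unsigned long vaddr, unsigned int hugepage_shift)
{
	if (hugepage_shift < HPAGE_SHIFT) {
		if (hugepage_shift == PAGE_SHIFT)
			flush_one_entry(vaddr);
		else
			flush_huge_one_entry(vaddr, hugepage_shift);
	}
	/* else: the MM_TSB_HUGE path, unchanged by this patch */
}

int main(void)
{
	flush_user_page(0x100002000UL, PAGE_SHIFT);	/* one 8K base page */
	flush_user_page(0x100010000UL, 16);		/* 64K page: 8 slots */
	return 0;
}

The second call illustrates why the huge variant exists: a 64K page (shift 16) against 8K base pages spans eight base TSB slots, and leaving any of them stale would let the MMU load a translation the kernel just tore down.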