@@ -91,11 +91,23 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-					unsigned long start, unsigned long end)
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
 	start = asid | (start >> 12);
 	end = asid | (end >> 12);
 
@@ -105,9 +117,15 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_all();
+		return;
+	}
+
 	start >>= 12;
 	end >>= 12;
 
@@ -118,29 +136,6 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	isb();
 }
 
-/*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
-		flush_tlb_all();
-}
-
 /*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
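
Note (illustrative, not part of the patch): the MAX_TLB_RANGE check that this
patch folds into both functions bounds the per-page invalidation loop so a
huge range cannot trigger a soft lock-up. A minimal standalone C sketch of
that decision follows; PAGE_SHIFT, the stub names flush_one_page() and
flush_everything(), and the sample addresses are assumptions for the demo,
not kernel code.

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	/* Same threshold as the patch: 1024 pages (4MB with 4K pages). */
	#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

	/* Stand-in for a single-page invalidation (a TLBI in the real code). */
	static void flush_one_page(unsigned long addr)
	{
		(void)addr;
	}

	/* Stand-in for flush_tlb_mm()/flush_tlb_all(). */
	static void flush_everything(void)
	{
		printf("range too large: one full flush instead of a loop\n");
	}

	static void flush_range(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		/* Too many pages: a per-page loop could soft lock up. */
		if ((end - start) > MAX_TLB_RANGE) {
			flush_everything();
			return;
		}

		for (addr = start; addr < end; addr += PAGE_SIZE)
			flush_one_page(addr);
		printf("flushed %lu pages individually\n",
		       (end - start) >> PAGE_SHIFT);
	}

	int main(void)
	{
		flush_range(0x400000, 0x400000 + 16 * PAGE_SIZE);   /* loops */
		flush_range(0x400000, 0x400000 + 2048 * PAGE_SIZE); /* full flush */
		return 0;
	}

As in the patch, the fallback is deliberately coarse: a full flush discards
more translations than strictly necessary, but its cost is constant, whereas
the loop's cost grows with the range.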