
arm64: Clean up __flush_tlb(_kernel)_range functions

This patch moves the MAX_TLB_RANGE check into the
flush_tlb(_kernel)_range functions directly to avoid the
underscore-prefixed definitions (and for consistency with a subsequent
patch).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
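
For scale: with 4K pages (PAGE_SHIFT == 12), the MAX_TLB_RANGE threshold
used below works out to 1024 pages, i.e. 4MB, so anything larger falls
back to a full flush of the mm (or of the whole TLB for kernel mappings).
A standalone sketch of that arithmetic, with PAGE_SHIFT hard-coded here
purely for illustration (the kernel derives it from the configured page
size):

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumes 4K pages */
	#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)	/* as defined in the patch */

	int main(void)
	{
		/* 1024 pages * 4KB each = 4MB; larger ranges take the full-flush path */
		printf("MAX_TLB_RANGE = %lu bytes (%lu MB)\n",
		       MAX_TLB_RANGE, MAX_TLB_RANGE >> 20);
		return 0;
	}
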
commit da4e73303e
1 changed file with 21 additions and 26 deletions:
      arch/arm64/include/asm/tlbflush.h

arch/arm64/include/asm/tlbflush.h

@@ -91,11 +91,23 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
 	start = asid | (start >> 12);
 	end = asid | (end >> 12);
 
@@ -105,9 +117,15 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_all();
+		return;
+	}
+
 	start >>= 12;
 	end >>= 12;
 
@@ -118,29 +136,6 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long e
 	isb();
 }
 
-/*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
-		flush_tlb_all();
-}
-
 /*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
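
A note on the operand encoding shared by both functions above: the
start/end ">> 12" shifts and the "asid << 48" build the value that the
TLBI instructions expect, with VA[55:12] in the low bits and the 16-bit
ASID in bits [63:48]. A hypothetical helper (not part of this patch)
showing just that packing:

	#include <stdint.h>

	/*
	 * Hypothetical illustration of the operand layout used above:
	 * VA[55:12] in the low bits, the 16-bit ASID in bits [63:48].
	 */
	static inline uint64_t tlbi_va_asid(uint64_t va, uint16_t asid)
	{
		return ((uint64_t)asid << 48) | (va >> 12);
	}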