sh64: Invert page fault fast-path error path values.

This brings the sh64 version in line with the sh32 one with regard to
how errors are handled, and serves as base work for further
unification of the two implementations.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt, 13 years ago
parent
commit 4de5185629
2 changed files with 19 additions and 19 deletions:
  1. arch/sh/kernel/cpu/sh5/entry.S (+1, -1)
  2. arch/sh/mm/tlbex_64.c (+18, -18)
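
It may help to see the convention change in isolation before the per-file
diffs. The sketch below is a toy model, not kernel code: the function and
parameter names are invented, and only the return values mirror the patch.
The old sh64 helpers returned nonzero when the fast path fixed the fault;
after this commit they follow the usual 0-on-success, nonzero-on-error
convention that the sh32 code already uses.

#include <stdio.h>

/* Toy model of the return-convention change (not kernel code). */

/* Old sh64 convention: nonzero when the fault was fixed, 0 on error. */
static int fast_lookup_old(int resolvable)
{
	if (!resolvable)
		return 0;	/* error path */
	return 1;		/* fault handled */
}

/* New convention, matching sh32: 0 on success, nonzero on error. */
static int fast_lookup_new(int resolvable)
{
	if (!resolvable)
		return 1;	/* error path */
	return 0;		/* fault handled */
}

int main(void)
{
	/* Caller-side tests invert along with the return values. */
	if (fast_lookup_old(1))
		printf("old: handled in the fast path\n");
	if (fast_lookup_new(1) == 0)
		printf("new: handled in the fast path\n");
	return 0;
}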

--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -335,7 +335,7 @@ tlb_miss:
 	/* If the fast path handler fixed the fault, just drop through quickly
 	   to the restore code right away to return to the excepting context.
 	   */
-	beqi/u	r2, 0, tr1
+	bnei/u	r2, 0, tr1
 
 fast_tlb_miss_restore:
 	ld.q	SP, SAVED_TR0, r2
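
The branch sense flips because the meaning of r2, the value returned by
do_fast_page_fault, flips: previously r2 == 0 meant the fast path had
failed, now any nonzero value does, and tr1 still points at the slow-path
handoff. Below is a small C rendering of the new check, as a standalone toy
program whose output strings are invented for illustration:

#include <stdio.h>

/* The flipped branch from the hunk above, rendered in C (illustrative
 * only). r2 models the value handed back by do_fast_page_fault. */
int main(void)
{
	int r2 = 0;	/* under the new convention, 0 = fault fixed */

	if (r2 != 0)	/* bnei/u r2, 0, tr1 */
		printf("branch to tr1: hand the fault to the slow path\n");
	else		/* drop through, as the comment in the hunk says */
		printf("fast_tlb_miss_restore: restore and return\n");

	return 0;
}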

--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -53,23 +53,23 @@ static int handle_vmalloc_fault(struct mm_struct *mm,
 
 	pud = pud_offset(dir, address);
 	if (pud_none_or_clear_bad(pud))
-		return 0;
+		return 1;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		return 0;
+		return 1;
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 
 	if (pte_none(entry) || !pte_present(entry))
-		return 0;
+		return 1;
 	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 0;
+		return 1;
 
 	update_mmu_cache(NULL, address, pte);
 
-	return 1;
+	return 0;
 }
 
 static int handle_tlbmiss(struct mm_struct *mm,
@@ -94,27 +94,27 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	   the next test is necessary.  - RPC */
 	if (address >= (unsigned long) TASK_SIZE)
 		/* upper half - never has page table entries. */
-		return 0;
+		return 1;
 
 	dir = pgd_offset(mm, address);
 	if (pgd_none(*dir) || !pgd_present(*dir))
-		return 0;
+		return 1;
 	if (!pgd_present(*dir))
-		return 0;
+		return 1;
 
 	pud = pud_offset(dir, address);
 	if (pud_none(*pud) || !pud_present(*pud))
-		return 0;
+		return 1;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || !pmd_present(*pmd))
-		return 0;
+		return 1;
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 
 	if (pte_none(entry) || !pte_present(entry))
-		return 0;
+		return 1;
 
 	/*
 	 * If the page doesn't have sufficient protection bits set to
@@ -123,11 +123,11 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	 * handler.
 	 */
 	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 0;
+		return 1;
 
 	update_mmu_cache(NULL, address, pte);
 
-	return 1;
+	return 0;
 }
 
 /*
@@ -214,12 +214,12 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 			 * Process-contexts can never have this address
 			 * range mapped
 			 */
-			if (handle_vmalloc_fault(mm, protection_flags, address))
-				return 1;
+			if (handle_vmalloc_fault(mm, protection_flags, address) == 0)
+				return 0;
 	} else if (!in_interrupt() && mm) {
-		if (handle_tlbmiss(mm, protection_flags, address))
-			return 1;
+		if (handle_tlbmiss(mm, protection_flags, address) == 0)
+			return 0;
 	}
 
-	return 0;
+	return 1;
 }
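
With both the helpers and do_fast_page_fault on the same convention, every
error path collapses to return 1 and success propagates as 0 straight back
to the assembly stub, which is what eases further unification with the sh32
code. The following is a toy model of that propagation, with hypothetical
function names (the _toy suffix marks them as invented):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of how the new convention propagates through the chain
 * (not kernel code): every failed lookup step collapses to "return 1",
 * and the top-level handler forwards success or failure unchanged.
 */
static int handle_tlbmiss_toy(bool pte_present)
{
	if (!pte_present)
		return 1;	/* any failed step: error, take the slow path */
	return 0;		/* translation installed */
}

static int do_fast_page_fault_toy(bool pte_present)
{
	if (handle_tlbmiss_toy(pte_present) == 0)
		return 0;	/* handled entirely in the fast path */
	return 1;		/* punt to the generic fault handler */
}

int main(void)
{
	printf("present: %d\n", do_fast_page_fault_toy(true));		/* 0 */
	printf("missing: %d\n", do_fast_page_fault_toy(false));	/* 1 */
	return 0;
}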