
arm64: mm: use inner-shareable barriers for inner-shareable maintenance

In order to ensure ordering and completion of inner-shareable maintenance
instructions (cache and TLB) on AArch64, we can add the -ish suffix to
the dmb and dsb instructions respectively.

This patch updates our low-level cache and tlb maintenance routines to
use the inner-shareable barrier variants where appropriate.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Will Deacon, 11 years ago
parent
commit dc60b777fc
2 files changed with 4 additions and 4 deletions
  1. arch/arm64/mm/cache.S (+3 -3)
  2. arch/arm64/mm/proc.S (+1 -1)
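
For context: sy barriers act on the full system, while the ish variants only
cover the inner-shareable domain (the set of CPUs running Linux). Since the
cache and TLB maintenance being ordered here is itself inner-shareable, the
weaker ish barriers are sufficient. Below is a minimal C sketch of how such
barriers are typically emitted, in the style of the arm64 inline-asm barrier
macros (the macro and function names here are illustrative, not the exact
kernel API):

/* Sketch only: "opt" selects the barrier's shareability domain and access
 * types, e.g. sy (full system) or ish (inner shareable). */
#define dmb(opt)	asm volatile("dmb " #opt ::: "memory")	/* ordering only        */
#define dsb(opt)	asm volatile("dsb " #opt ::: "memory")	/* waits for completion */
#define isb()		asm volatile("isb" ::: "memory")	/* resync the pipeline  */

static inline void barrier_example(void)
{
	dmb(sy);	/* order earlier accesses against later ones, whole system */
	dsb(ish);	/* also wait for completion, inner-shareable domain only    */
	isb();		/* context synchronisation event                            */
}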

arch/arm64/mm/cache.S  (+3 -3)

@@ -31,7 +31,7 @@
  *	Corrupted registers: x0-x7, x9-x11
  */
 __flush_dcache_all:
-	dsb	sy				// ensure ordering with previous memory accesses
+	dmb	sy				// ensure ordering with previous memory accesses
 	mrs	x0, clidr_el1			// read clidr
 	and	x3, x0, #0x7000000		// extract loc from clidr
 	lsr	x3, x3, #23			// left align loc bit field
@@ -128,7 +128,7 @@ USER(9f, dc	cvau, x4	)		// clean D line to PoU
 	add	x4, x4, x2
 	cmp	x4, x1
 	b.lo	1b
-	dsb	sy
+	dsb	ish
 
 	icache_line_size x2, x3
 	sub	x3, x2, #1
@@ -139,7 +139,7 @@ USER(9f, ic	ivau, x4	)		// invalidate I line PoU
 	cmp	x4, x1
 	b.lo	1b
 9:						// ignore any faulting cache operation
-	dsb	sy
+	dsb	ish
 	isb
 	ret
 ENDPROC(flush_icache_range)
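
The two hunks above sit in the flush_icache_range path: clean each D-cache
line to the Point of Unification, barrier, invalidate the corresponding
I-cache lines, barrier, then isb. A rough C sketch of that sequence follows;
it assumes the kernel has enabled EL0 access to CTR_EL0 and to dc cvau /
ic ivau (as mainline Linux does), and sync_icache_range is a made-up name
for illustration:

#include <stdint.h>

/* Clean D-cache and invalidate I-cache for [start, end) so newly written
 * instructions become visible to instruction fetch. Sketch only. */
static void sync_icache_range(uintptr_t start, uintptr_t end)
{
	uint64_t ctr;
	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));

	uintptr_t dline = 4UL << ((ctr >> 16) & 0xf);	/* smallest D-cache line, bytes */
	uintptr_t iline = 4UL << (ctr & 0xf);		/* smallest I-cache line, bytes */

	for (uintptr_t p = start & ~(dline - 1); p < end; p += dline)
		asm volatile("dc cvau, %0" :: "r" (p) : "memory");	/* clean D line to PoU */

	asm volatile("dsb ish" ::: "memory");	/* make the cleans visible inner-shareable */

	for (uintptr_t p = start & ~(iline - 1); p < end; p += iline)
		asm volatile("ic ivau, %0" :: "r" (p) : "memory");	/* invalidate I line to PoU */

	asm volatile("dsb ish" ::: "memory");	/* wait for the invalidates to complete */
	asm volatile("isb" ::: "memory");	/* then resynchronise this core */
}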

arch/arm64/mm/proc.S  (+1 -1)

@@ -182,7 +182,7 @@ ENDPROC(cpu_do_switch_mm)
 ENTRY(__cpu_setup)
 	ic	iallu				// I+BTB cache invalidate
 	tlbi	vmalle1is			// invalidate I + D TLBs
-	dsb	sy
+	dsb	ish
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
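
This hunk is in __cpu_setup: the preceding tlbi vmalle1is is already an
inner-shareable ("is") operation, so an inner-shareable dsb is enough to
wait for it to complete. A kernel-context sketch of the same idea in C
inline assembly (EL1 only, since tlbi traps at EL0; cpu_setup_tlb_flush is
an illustrative name, not a kernel function):

/* Invalidate the local I-cache and all EL1 TLB entries across the
 * inner-shareable domain, then wait for the broadcast TLBI to finish. */
static inline void cpu_setup_tlb_flush(void)
{
	asm volatile(
	"	ic	iallu\n"	/* I-cache / branch predictor invalidate */
	"	tlbi	vmalle1is\n"	/* invalidate EL1 TLBs, inner-shareable  */
	"	dsb	ish\n"		/* ish matches the -is scope of the TLBI */
	::: "memory");
}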