xtensa: nommu: set up cache and atomctl in initialize_mmu

initialize_mmu sets up the atomctl SR, which is needed for s32c1i to
function correctly even in noMMU configurations. It is also a good place
to set up the caching attributes of physical memory.

Allow enabling INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX in noMMU
configurations so that atomctl and the cache attributes are set up.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
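
For background: s32c1i is the Xtensa conditional-store instruction. The
expected value is written to the SCOMPARE1 special register, and the store
only takes effect if memory still holds that value; the ATOMCTL SR tells the
core how that read-compare-write is carried out for each memory attribute,
which is why it must be programmed even on noMMU cores. Below is a
hand-written sketch of a compare-and-swap built on s32c1i, for illustration
only (the helper name cas_u32 is made up; this is not copied from the kernel
sources):

	static inline unsigned long cas_u32(volatile unsigned long *p,
					    unsigned long expected,
					    unsigned long desired)
	{
		unsigned long result = desired;

		__asm__ __volatile__(
			"	wsr	%[exp], scompare1\n"	/* comparand for s32c1i */
			"	s32c1i	%[res], %[addr], 0\n"	/* conditional store; behaviour governed by ATOMCTL */
			: [res] "+a" (result)
			: [addr] "a" (p), [exp] "a" (expected)
			: "memory");

		return result;	/* previous value of *p; equals 'expected' on success */
	}

Without ATOMCTL programmed for the memory type backing *p, the conditional
store may trap or fail to behave atomically, which is what this change
addresses for noMMU kernels.
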
Max Filippov, 11 years ago
parent
commit 2eabc1800d
2 changed files with 36 additions and 2 deletions
  1. arch/xtensa/Kconfig  (+0 -1)
  2. arch/xtensa/include/asm/initialize_mmu.h  (+36 -1)

+ 0 - 1
arch/xtensa/Kconfig

@@ -191,7 +191,6 @@ config HOTPLUG_CPU
 
 config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
 	bool "Initialize Xtensa MMU inside the Linux kernel code"
-	depends on MMU
 	default y
 	help
 	  Earlier version initialized the MMU in the exception vector
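
With the MMU dependency dropped, the option (still default y) becomes
visible to noMMU kernels as well; a noMMU defconfig can now carry it
explicitly (fragment shown for illustration only):

	CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX=y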

+ 36 - 1
arch/xtensa/include/asm/initialize_mmu.h

@@ -26,8 +26,16 @@
 #include <asm/pgtable.h>
 #include <asm/vectors.h>
 
+#if XCHAL_HAVE_PTP_MMU
 #define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
 #define CA_WRITEBACK	(_PAGE_CA_WB     | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK	(0x4)
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
 
 #ifdef __ASSEMBLY__
 
@@ -75,7 +83,7 @@
 
 	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
 
-	movi	a2, 0x40000006
+	movi	a2, 0x40000000 | XCHAL_SPANNING_WAY
 	idtlb	a2
 	iitlb	a2
 	isync
@@ -153,6 +161,33 @@
 #endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
 	  XCHAL_HAVE_SPANNING_WAY */
 
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+	/* Enable data and instruction cache in the DEFAULT_MEMORY region
+	 * if the processor has DTLB and ITLB.
+	 */
+
+	movi	a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
+	movi	a6, ~_PAGE_ATTRIB_MASK
+	movi	a7, CA_WRITEBACK
+	movi	a8, 0x20000000
+	movi	a9, PLATFORM_DEFAULT_MEM_SIZE
+	j	2f
+1:
+	sub	a9, a9, a8
+2:
+	rdtlb1	a3, a5
+	ritlb1	a4, a5
+	and	a3, a3, a6
+	and	a4, a4, a6
+	or	a3, a3, a7
+	or	a4, a4, a7
+	wdtlb	a3, a5
+	witlb	a4, a5
+	add	a5, a5, a8
+	bltu	a8, a9, 1b
+
+#endif
+
 	.endm
 
 #endif /*__ASSEMBLY__*/
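
The new noMMU block walks the default memory region one 512 MB step at a
time (the 0x20000000 increment in the assembly), clears the attribute bits
of the matching DTLB and ITLB entries and sets CA_WRITEBACK instead. A
rough C rendering of that loop is sketched below; read_dtlb1, read_itlb1,
write_dtlb and write_itlb are hypothetical stand-ins for the rdtlb1,
ritlb1, wdtlb and witlb instructions, not real kernel helpers:

	/* Illustrative only: mirror of the assembly loop above. */
	static void enable_default_memory_caches(void)
	{
		unsigned long vaddr = PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY;
		unsigned long remaining = PLATFORM_DEFAULT_MEM_SIZE;

		for (;;) {
			unsigned long d = read_dtlb1(vaddr);	/* rdtlb1 */
			unsigned long i = read_itlb1(vaddr);	/* ritlb1 */

			/* Replace the cache-attribute bits with write-back. */
			d = (d & ~_PAGE_ATTRIB_MASK) | CA_WRITEBACK;
			i = (i & ~_PAGE_ATTRIB_MASK) | CA_WRITEBACK;

			write_dtlb(d, vaddr);			/* wdtlb */
			write_itlb(i, vaddr);			/* witlb */

			vaddr += 0x20000000;			/* next 512 MB region */
			if (remaining <= 0x20000000)
				break;
			remaining -= 0x20000000;
		}
	}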