arm64: Introduce VA_BITS and translation level options

This patch adds the virtual address space size and the number of translation
table levels to the kernel configuration. It makes it easy to introduce
different MMU options later, such as 4KB + 4 levels, 16KB + 4 levels and
64KB + 3 levels.

The idea is based on the discussion with Catalin Marinas:
http://www.spinics.net/linux/lists/arm-kernel/msg319552.html
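
For illustration, a 64KB-page build that accepts the new defaults would end
up with a .config fragment along these lines (a sketch derived from the
options added below, not an excerpt from a real build):

    # illustrative fragment only, not generated output
    CONFIG_ARM64_64K_PAGES=y
    CONFIG_ARM64_VA_BITS_42=y
    CONFIG_ARM64_VA_BITS=42
    CONFIG_ARM64_2_LEVELS=y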

Signed-off-by: Jungseok Lee <jays.lee@samsung.com>
Reviewed-by: Sungjinn Chung <sungjinn.chung@samsung.com>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>

+ 44 - 1
arch/arm64/Kconfig

@@ -157,14 +157,57 @@ endmenu
 
 menu "Kernel Features"
 
+choice
+	prompt "Page size"
+	default ARM64_4K_PAGES
+	help
+	  Page size (translation granule) configuration.
+
+config ARM64_4K_PAGES
+	bool "4KB"
+	help
+	  This feature enables 4KB pages support.
+
 config ARM64_64K_PAGES
-	bool "Enable 64KB pages support"
+	bool "64KB"
 	help
 	  This feature enables 64KB pages support (4KB by default)
 	  allowing only two levels of page tables and faster TLB
 	  look-up. AArch32 emulation is not available when this feature
 	  is enabled.
 
+endchoice
+
+choice
+	prompt "Virtual address space size"
+	default ARM64_VA_BITS_39 if ARM64_4K_PAGES
+	default ARM64_VA_BITS_42 if ARM64_64K_PAGES
+	help
+	  Allows choosing one of multiple possible virtual address
+	  space sizes. The level of translation table is determined by
+	  a combination of page size and virtual address space size.
+
+config ARM64_VA_BITS_39
+	bool "39-bit"
+	depends on ARM64_4K_PAGES
+
+config ARM64_VA_BITS_42
+	bool "42-bit"
+	depends on ARM64_64K_PAGES
+
+endchoice
+
+config ARM64_VA_BITS
+	int
+	default 39 if ARM64_VA_BITS_39
+	default 42 if ARM64_VA_BITS_42
+
+config ARM64_2_LEVELS
+	def_bool y if ARM64_64K_PAGES && ARM64_VA_BITS_42
+
+config ARM64_3_LEVELS
+	def_bool y if ARM64_4K_PAGES && ARM64_VA_BITS_39
+
 config CPU_BIG_ENDIAN
        bool "Build big-endian kernel"
        help
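
As a quick check of these defaults, the level counts follow from the granule
arithmetic (8-byte descriptors, so 512 entries per 4KB table and 8192 entries
per 64KB table; worked out here for illustration):

    4KB  granule: 39 = 12 (page offset) + 3 * 9   -> 3 levels (ARM64_3_LEVELS)
    64KB granule: 42 = 16 (page offset) + 2 * 13  -> 2 levels (ARM64_2_LEVELS)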

+ 1 - 5
arch/arm64/include/asm/memory.h

@@ -41,11 +41,7 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define VA_BITS			(42)
-#else
-#define VA_BITS			(39)
-#endif
+#define VA_BITS			(CONFIG_ARM64_VA_BITS)
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
 #define MODULES_END		(PAGE_OFFSET)
 #define MODULES_VADDR		(MODULES_END - SZ_64M)
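
Worked out from the macro above (values derived here for illustration, not
quoted from the patch), the two supported configurations place the start of
the kernel linear map at the half-way point of the kernel VA space:

    VA_BITS = 39: 0xffffffffffffffff << 38  ->  PAGE_OFFSET = 0xffffffc000000000
    VA_BITS = 42: 0xffffffffffffffff << 41  ->  PAGE_OFFSET = 0xfffffe0000000000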

+ 1 - 1
arch/arm64/include/asm/page.h

@@ -42,7 +42,7 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_ARM64_64K_PAGES
+#ifdef CONFIG_ARM64_2_LEVELS
 #include <asm/pgtable-2level-types.h>
 #else
 #include <asm/pgtable-3level-types.h>

+ 2 - 2
arch/arm64/include/asm/pgalloc.h

@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()		do { } while (0)
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifndef CONFIG_ARM64_2_LEVELS
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -44,7 +44,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }
 
-#endif	/* CONFIG_ARM64_64K_PAGES */
+#endif	/* CONFIG_ARM64_2_LEVELS */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

+ 1 - 1
arch/arm64/include/asm/pgtable-hwdef.h

@@ -16,7 +16,7 @@
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
-#ifdef CONFIG_ARM64_64K_PAGES
+#ifdef CONFIG_ARM64_2_LEVELS
 #include <asm/pgtable-2level-hwdef.h>
 #else
 #include <asm/pgtable-3level-hwdef.h>

+ 4 - 4
arch/arm64/include/asm/pgtable.h

@@ -47,7 +47,7 @@ extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifndef CONFIG_ARM64_2_LEVELS
 #define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
 #endif
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -323,7 +323,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
  */
 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifndef CONFIG_ARM64_2_LEVELS
 
 #define pud_none(pud)		(!pud_val(pud))
 #define pud_bad(pud)		(!(pud_val(pud) & 2))
@@ -345,7 +345,7 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
 	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
-#endif	/* CONFIG_ARM64_64K_PAGES */
+#endif	/* CONFIG_ARM64_2_LEVELS */
 
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -356,7 +356,7 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
 /* Find an entry in the second-level page table.. */
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifndef CONFIG_ARM64_2_LEVELS
 #define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 {

+ 1 - 1
arch/arm64/include/asm/tlb.h

@@ -91,7 +91,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_remove_page(tlb, pte);
 }
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifndef CONFIG_ARM64_2_LEVELS
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {