
Merge branch 'hyp-boot-mode-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into devel-stable

Russell King, 13 years ago (commit 648f3b6998)

+ 21 - 1
Documentation/arm/Booting

@@ -154,13 +154,33 @@ In either case, the following conditions must be met:
 
 
 - CPU mode
   All forms of interrupts must be disabled (IRQs and FIQs)
-  The CPU must be in SVC mode.  (A special exception exists for Angel)
+
+  For CPUs which do not include the ARM virtualization extensions, the
+  CPU must be in SVC mode.  (A special exception exists for Angel)
+
+  CPUs which include support for the virtualization extensions can be
+  entered in HYP mode in order to enable the kernel to make full use of
+  these extensions.  This is the recommended boot method for such CPUs,
+  unless the virtualization extensions are already in use by a pre-installed
+  hypervisor.
+
+  If the kernel is not entered in HYP mode for any reason, it must be
+  entered in SVC mode.
 
 
 - Caches, MMUs
   The MMU must be off.
   Instruction cache may be on or off.
   Data cache must be off.
 
+  If the kernel is entered in HYP mode, the above requirements apply to
+  the HYP mode configuration in addition to the ordinary PL1 (privileged
+  kernel modes) configuration.  In addition, all traps into the
+  hypervisor must be disabled, and PL1 access must be granted for all
+  peripherals and CPU resources for which this is architecturally
+  possible.  Except for entering in HYP mode, the system configuration
+  should be such that a kernel which does not include support for the
+  virtualization extensions can boot correctly without extra help.
+
 - The boot loader is expected to call the kernel image by jumping
   directly to the first instruction of the kernel image.
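
To make the hand-off concrete, here is a minimal loader-side sketch of the conditions above. It is purely illustrative and not part of this patch (cpsr_read, boot_kernel and kernel_entry_fn are invented names), and it assumes earlier loader code has already turned the MMU and data cache off:

    /* Hypothetical loader-side sketch, not from this patch. */
    typedef void (*kernel_entry_fn)(unsigned long zero, unsigned long arch_id,
                                    unsigned long atags);

    static inline unsigned long cpsr_read(void)
    {
            unsigned long cpsr;
            asm volatile("mrs %0, cpsr" : "=r" (cpsr));
            return cpsr;
    }

    void boot_kernel(kernel_entry_fn entry, unsigned long arch_id,
                     unsigned long atags_ptr)
    {
            unsigned long mode = cpsr_read() & 0x1f;    /* MODE_MASK */

            if (mode != 0x13 && mode != 0x1a)           /* SVC or HYP */
                    for (;;)
                            ;                           /* refuse to boot from any other mode */

            asm volatile("cpsid if");                   /* mask IRQs and FIQs */
            entry(0, arch_id, atags_ptr);               /* r0 = 0, r1 = arch, r2 = atags/dtb */
    }

On entry the kernel then finds r0 = 0, r1 = the machine type number and r2 = the ATAGS or DTB pointer, with the CPU in SVC mode or, on CPUs with the virtualization extensions, HYP mode.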
 
 

+ 1 - 0
arch/arm/boot/compressed/.gitignore

@@ -1,6 +1,7 @@
 ashldi3.S
 font.c
 lib1funcs.S
+hyp-stub.S
 piggy.gzip
 piggy.lzo
 piggy.lzma

+ 8 - 1
arch/arm/boot/compressed/Makefile

@@ -30,6 +30,10 @@ FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c
 OBJS		+= string.o
 CFLAGS_string.o	:= -Os
 
+ifeq ($(CONFIG_ARM_VIRT_EXT),y)
+OBJS		+= hyp-stub.o
+endif
+
 #
 # Architecture dependencies
 #
@@ -126,7 +130,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all
+asflags-y := -Wa,-march=all -DZIMAGE
 
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
@@ -198,3 +202,6 @@ $(obj)/font.c: $(FONTC)
 
 
 $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
 	@sed "$(SEDFLAGS)" < $< > $@
+
+$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
+	$(call cmd,shipped)

+ 64 - 7
arch/arm/boot/compressed/head.S

@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 
 /*
  * Debugging stuff
@@ -132,7 +133,12 @@ start:
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
  THUMB(		.thumb			)
-1:		mov	r7, r1			@ save architecture ID
+1:
+		mrs	r9, cpsr
+#ifdef CONFIG_ARM_VIRT_EXT
+		bl	__hyp_stub_install	@ get into SVC mode, reversibly
+#endif
+		mov	r7, r1			@ save architecture ID
 		mov	r8, r2			@ save atags pointer
 
 #ifndef __ARM_ARCH_2__
@@ -148,9 +154,9 @@ start:
  ARM(		swi	0x123456	)	@ angel_SWI_ARM
  THUMB(		svc	0xab		)	@ angel_SWI_THUMB
 not_angel:
-		mrs	r2, cpsr		@ turn off interrupts to
-		orr	r2, r2, #0xc0		@ prevent angel from running
-		msr	cpsr_c, r2
+		safe_svcmode_maskall r0
+		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
+						@ SPSR
 #else
 		teqp	pc, #0x0c000003		@ turn off interrupts
 #endif
@@ -350,6 +356,20 @@ dtb_check_done:
 		adr	r5, restart
 		bic	r5, r5, #31
 
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+		mrs	r0, spsr
+		and	r0, r0, #MODE_MASK
+		cmp	r0, #HYP_MODE
+		bne	1f
+
+		bl	__hyp_get_vectors
+		sub	r0, r0, r5
+		add	r0, r0, r10
+		bl	__hyp_set_vectors
+1:
+#endif
+
 		sub	r9, r6, r5		@ size to copy
 		add	r9, r9, #31		@ rounded up to a multiple
 		bic	r9, r9, #31		@ ... of 32 bytes
@@ -458,11 +478,29 @@ not_relocated:	mov	r0, #0
 		bl	decompress_kernel
 		bl	cache_clean_flush
 		bl	cache_off
-		mov	r0, #0			@ must be zero
 		mov	r1, r7			@ restore architecture number
 		mov	r2, r8			@ restore atags pointer
- ARM(		mov	pc, r4	)		@ call kernel
- THUMB(		bx	r4	)		@ entry point is always ARM
+
+#ifdef CONFIG_ARM_VIRT_EXT
+		mrs	r0, spsr		@ Get saved CPU boot mode
+		and	r0, r0, #MODE_MASK
+		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
+		bne	__enter_kernel		@ boot kernel directly
+
+		adr	r12, .L__hyp_reentry_vectors_offset
+		ldr	r0, [r12]
+		add	r0, r0, r12
+
+		bl	__hyp_set_vectors
+		__HVC(0)			@ otherwise bounce to hyp mode
+
+		b	.			@ should never be reached
+
+		.align	2
+.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
+#else
+		b	__enter_kernel
+#endif
 
 
 		.align	2
 		.type	LC0, #object
@@ -1196,6 +1234,25 @@ memdump:	mov	r12, r0
 #endif
 
 		.ltorg
+
+#ifdef CONFIG_ARM_VIRT_EXT
+.align 5
+__hyp_reentry_vectors:
+		W(b)	.			@ reset
+		W(b)	.			@ undef
+		W(b)	.			@ svc
+		W(b)	.			@ pabort
+		W(b)	.			@ dabort
+		W(b)	__enter_kernel		@ hyp
+		W(b)	.			@ irq
+		W(b)	.			@ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
+__enter_kernel:
+		mov	r0, #0			@ must be 0
+ ARM(		mov	pc, r4	)		@ call kernel
+ THUMB(		bx	r4	)		@ entry point is always ARM
+
 reloc_code_end:
 
 		.align

+ 28 - 0
arch/arm/include/asm/assembler.h

@@ -22,6 +22,7 @@
 
 
 #include <asm/ptrace.h>
 #include <asm/domain.h>
+#include <asm/opcodes-virt.h>
 
 
 #define IOMEM(x)	(x)
 
@@ -239,6 +240,33 @@
 	.endm
 #endif
 
+/*
+ * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * You cannot return to the original mode.
+ *
+ * Beware, it also clobbers LR.
+ */
+.macro safe_svcmode_maskall reg:req
+	mrs	\reg , cpsr
+	mov	lr , \reg
+	and	lr , lr , #MODE_MASK
+	cmp	lr , #HYP_MODE
+	orr	\reg , \reg , #PSR_A_BIT | PSR_I_BIT | PSR_F_BIT
+	bic	\reg , \reg , #MODE_MASK
+	orr	\reg , \reg , #SVC_MODE
+THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+	msr	spsr_cxsf, \reg
+	adr	lr, BSYM(2f)
+	bne	1f
+	__MSR_ELR_HYP(14)
+	__ERET
+1:	movs	pc, lr
+2:
+.endm
+
 /*
  * STRT/LDRT access macros with ARM and Thumb-2 variants
  */

+ 39 - 0
arch/arm/include/asm/opcodes-virt.h

@@ -0,0 +1,39 @@
+/*
+ * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
+ * Copyright (C) 2012  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __ASM_ARM_OPCODES_VIRT_H
+#define __ASM_ARM_OPCODES_VIRT_H
+
+#include <asm/opcodes.h>
+
+#define __HVC(imm16) __inst_arm_thumb32(				\
+	0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F),	\
+	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
+)
+
+#define __ERET	__inst_arm_thumb32(					\
+	0xE160006E,							\
+	0xF3DE8F00							\
+)
+
+#define __MSR_ELR_HYP(regnum)	__inst_arm_thumb32(			\
+	0xE12EF300 | regnum,						\
+	0xF3808E30 | (regnum << 16)					\
+)
+
+#endif /* ! __ASM_ARM_OPCODES_VIRT_H */
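
The patch only uses these wrappers from assembly (head.S and hyp-stub.S below), but because the non-assembly expansion is a string literal they can also be pasted into inline asm. The following sketch simply mirrors what __hyp_get_vectors does in hyp-stub.S; it is an illustration rather than part of the patch, and the function name is invented:

    #include <asm/opcodes-virt.h>

    /* Hypothetical C-level equivalent of __hyp_get_vectors: HVC #0 with
     * r0 == -1 asks the stub to hand back the current HVBAR. */
    static inline unsigned long hvc_get_vectors(void)
    {
            register unsigned long r0 asm("r0") = (unsigned long)-1;

            asm volatile(__HVC(0) : "+r" (r0) : : "memory");
            return r0;
    }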

+ 166 - 15
arch/arm/include/asm/opcodes.h

@@ -18,6 +18,33 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
 #define ARM_OPCODE_CONDTEST_UNCOND 2
 
 
+/*
+ * Assembler opcode byteswap helpers.
+ * These are only intended for use by this header: don't use them directly,
+ * because they will be suboptimal in most cases.
+ */
+#define ___asm_opcode_swab32(x) (	\
+	  (((x) << 24) & 0xFF000000)	\
+	| (((x) <<  8) & 0x00FF0000)	\
+	| (((x) >>  8) & 0x0000FF00)	\
+	| (((x) >> 24) & 0x000000FF)	\
+)
+#define ___asm_opcode_swab16(x) (	\
+	  (((x) << 8) & 0xFF00)		\
+	| (((x) >> 8) & 0x00FF)		\
+)
+#define ___asm_opcode_swahb32(x) (	\
+	  (((x) << 8) & 0xFF00FF00)	\
+	| (((x) >> 8) & 0x00FF00FF)	\
+)
+#define ___asm_opcode_swahw32(x) (	\
+	  (((x) << 16) & 0xFFFF0000)	\
+	| (((x) >> 16) & 0x0000FFFF)	\
+)
+#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
+#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
+
+
 /*
  * Opcode byteswap helpers
  *
@@ -41,39 +68,163 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
  * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
  * represent any valid Thumb-2 instruction.  For this range,
  * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ *
+ * The ___asm variants are intended only for use by this header, in situations
+ * involving inline assembler.  For .S files, the normal __opcode_*() macros
+ * should do the right thing.
  */
  */
+#ifdef __ASSEMBLY__
 
+#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
+#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
+#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
+#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
+#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
+#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
+
+#else /* ! __ASSEMBLY__ */
 
 
 #include <linux/types.h>
 #include <linux/swab.h>
 
+#define ___opcode_swab32(x) swab32(x)
+#define ___opcode_swab16(x) swab16(x)
+#define ___opcode_swahb32(x) swahb32(x)
+#define ___opcode_swahw32(x) swahw32(x)
+#define ___opcode_identity32(x) ((u32)(x))
+#define ___opcode_identity16(x) ((u16)(x))
+
+#endif /* ! __ASSEMBLY__ */
+
+
 #ifdef CONFIG_CPU_ENDIAN_BE8
 #ifdef CONFIG_CPU_ENDIAN_BE8
-#define __opcode_to_mem_thumb16(x) swab16(x)
-#define __opcode_to_mem_thumb32(x) swahb32(x)
-#else
-#define __opcode_to_mem_arm(x) ((u32)(x))
-#define __opcode_to_mem_thumb16(x) ((u16)(x))
-#define __opcode_to_mem_thumb32(x) swahw32(x)
+
+#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
+#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
+
+#else /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+/*
+ * On BE32 systems, using 32-bit accesses to store Thumb instructions will not
+ * work in all cases, due to alignment constraints.  For now, a correct
+ * version is not provided for BE32.
+ */
+#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
 #endif
 
+#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
+
 #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
 #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
 #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+#endif
 
 
 /* Operations specific to Thumb opcodes */
 
 /* Instruction size checks: */
-#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL)
-#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL)
+#define __opcode_is_thumb32(x) (		\
+	   ((x) & 0xF8000000) == 0xE8000000	\
+	|| ((x) & 0xF0000000) == 0xF0000000	\
+)
+#define __opcode_is_thumb16(x) (					\
+	   ((x) & 0xFFFF0000) == 0					\
+	&& !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000)	\
+)
 
 
 /* Operations to construct or split 32-bit Thumb instructions: */
-#define __opcode_thumb32_first(x) ((u16)((x) >> 16))
-#define __opcode_thumb32_second(x) ((u16)(x))
-#define __opcode_thumb32_compose(first, second) \
-	(((u32)(u16)(first) << 16) | (u32)(u16)(second))
+#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
+#define __opcode_thumb32_second(x) (___opcode_identity16(x))
+#define __opcode_thumb32_compose(first, second) (			\
+	  (___opcode_identity32(___opcode_identity16(first)) << 16)	\
+	| ___opcode_identity32(___opcode_identity16(second))		\
+)
+#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
+#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
+#define ___asm_opcode_thumb32_compose(first, second) (			    \
+	  (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
+	| ___asm_opcode_identity32(___asm_opcode_identity16(second))	    \
+)
 
 
-#endif /* __ASSEMBLY__ */
+/*
+ * Opcode injection helpers
+ *
+ * In rare cases it is necessary to assemble an opcode which the
+ * assembler does not support directly, or which would normally be
+ * rejected because of the CFLAGS or AFLAGS used to build the affected
+ * file.
+ *
+ * Before using these macros, consider carefully whether it is feasible
+ * instead to change the build flags for your file, or whether it really
+ * makes sense to support old assembler versions when building that
+ * particular kernel feature.
+ *
+ * The macros defined here should only be used where there is no viable
+ * alternative.
+ *
+ *
+ * __inst_arm(x): emit the specified ARM opcode
+ * __inst_thumb16(x): emit the specified 16-bit Thumb opcode
+ * __inst_thumb32(x): emit the specified 32-bit Thumb opcode
+ *
+ * __inst_arm_thumb16(arm, thumb): emit either the specified arm or
+ *	16-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ * __inst_arm_thumb32(arm, thumb): emit either the specified arm or
+ *	32-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ *
+ * Note that using these macros directly is poor practice.  Instead, you
+ * should use them to define human-readable wrapper macros to encode the
+ * instructions that you care about.  In code which might run on ARMv7 or
+ * above, you can usually use the __inst_arm_thumb{16,32} macros to
+ * specify the ARM and Thumb alternatives at the same time.  This ensures
+ * that the correct opcode gets emitted depending on the instruction set
+ * used for the kernel build.
+ *
+ * Look at opcodes-virt.h for an example of how to use these macros.
+ */
+#include <linux/stringify.h>
+
+#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
+#define __inst_thumb32(x) ___inst_thumb32(				\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)),	\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x))	\
+)
+#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
+	__inst_thumb16(thumb_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
+	__inst_thumb32(thumb_opcode)
+#else
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#endif
+
+/* Helpers for the helpers.  Don't use these directly. */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+	".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
 
 
 #endif /* __ASM_ARM_OPCODES_H */
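
As a worked illustration of how the new helpers compose (not code from the patch; write_thumb32 and its arguments are hypothetical), a 32-bit Thumb opcode held in canonical form is split into halfwords and each halfword converted to memory order before being written out:

    #include <asm/opcodes.h>

    /* Hypothetical: store a 32-bit Thumb-2 opcode (canonical form, first
     * halfword in bits 31..16) as two halfwords in instruction memory
     * order.  Cache/TLB maintenance is deliberately left out. */
    static void write_thumb32(u16 *addr, u32 opcode)
    {
            if (!__opcode_is_thumb32(opcode))
                    return;

            addr[0] = __opcode_to_mem_thumb16(__opcode_thumb32_first(opcode));
            addr[1] = __opcode_to_mem_thumb16(__opcode_thumb32_second(opcode));
    }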

+ 1 - 0
arch/arm/include/asm/ptrace.h

@@ -44,6 +44,7 @@
 #define IRQ_MODE	0x00000012
 #define SVC_MODE	0x00000013
 #define ABT_MODE	0x00000017
+#define HYP_MODE	0x0000001a
 #define UND_MODE	0x0000001b
 #define SYSTEM_MODE	0x0000001f
 #define MODE32_BIT	0x00000010

+ 69 - 0
arch/arm/include/asm/virt.h

@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU.  The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here (the N flag, as it happens)
+ */
+#define BOOT_CPU_MODE_MISMATCH (1<<31)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+void __hyp_set_vectors(unsigned long phys_vector_base);
+unsigned long __hyp_get_vectors(void);
+#else
+#define __boot_cpu_mode	(SVC_MODE)
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+	return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+		!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! VIRT_H */
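
A hedged usage sketch, not part of the patch (the function name and messages are invented), showing how a later hypervisor component might consult these helpers once all CPUs have been brought up:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <asm/virt.h>

    static int example_hyp_probe(void)
    {
            if (is_hyp_mode_mismatched())
                    pr_warn("example-hyp: CPUs booted in inconsistent modes\n");

            if (!is_hyp_mode_available())
                    return -ENODEV;    /* booted in SVC, or modes inconsistent */

            /* All CPUs entered in HYP mode and the stub is installed;
             * real vectors can now be installed with __hyp_set_vectors(). */
            return 0;
    }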

+ 2 - 0
arch/arm/kernel/Makefile

@@ -82,4 +82,6 @@ head-y			:= head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
+obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
+
 extra-y := $(head-y) vmlinux.lds

+ 11 - 3
arch/arm/kernel/head.S

@@ -83,8 +83,12 @@ ENTRY(stext)
  THUMB(	.thumb			)	@ switch to Thumb now.
  THUMB(1:			)
 
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
-						@ and irqs disabled
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	@ ensure svc mode and all interrupts masked
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
@@ -326,7 +330,11 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?

+ 223 - 0
arch/arm/kernel/hyp-stub.S

@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/virt.h>
+
+#ifndef ZIMAGE
+/*
+ * For the kernel proper, we need to find out the CPU boot mode long after
+ * boot, so we need to store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+.data
+ENTRY(__boot_cpu_mode)
+	.long	0
+.text
+
+	/*
+	 * Save the primary CPU boot mode. Requires 3 scratch registers.
+	 */
+	.macro	store_primary_cpu_mode	reg1, reg2, reg3
+	mrs	\reg1, cpsr
+	and	\reg1, \reg1, #MODE_MASK
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	str	\reg1, [\reg2, \reg3]
+	.endm
+
+	/*
+	 * Compare the current mode with the one saved on the primary CPU.
+	 * If they don't match, record that fact. The Z bit indicates
+	 * if there's a match or not.
+	 * Requires 3 additional scratch registers.
+	 */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	ldr	\reg1, [\reg2, \reg3]
+	cmp	\mode, \reg1		@ matches primary CPU boot mode?
+	orrne	r7, r7, #BOOT_CPU_MODE_MISMATCH
+	strne	r7, [r5, r6]		@ record what happened and give up
+	.endm
+
+#else	/* ZIMAGE */
+
+	.macro	store_primary_cpu_mode	reg1:req, reg2:req, reg3:req
+	.endm
+
+/*
+ * The zImage loader only runs on one CPU, so we don't bother with mult-CPU
+ * consistency checking:
+ */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	cmp	\mode, \mode
+	.endm
+
+#endif /* ZIMAGE */
+
+/*
+ * Hypervisor stub installation functions.
+ *
+ * These must be called with the MMU and D-cache off.
+ * They are not ABI compliant and are only intended to be called from the kernel
+ * entry points in head.S.
+ */
+@ Call this from the primary CPU
+ENTRY(__hyp_stub_install)
+	store_primary_cpu_mode	r4, r5, r6
+ENDPROC(__hyp_stub_install)
+
+	@ fall through...
+
+@ Secondary CPUs should call here
+ENTRY(__hyp_stub_install_secondary)
+	mrs	r4, cpsr
+	and	r4, r4, #MODE_MASK
+
+	/*
+	 * If the secondary has booted with a different mode, give up
+	 * immediately.
+	 */
+	compare_cpu_mode_with_primary	r4, r5, r6, r7
+	bxne	lr
+
+	/*
+	 * Once we have given up on one CPU, we do not try to install the
+	 * stub hypervisor on the remaining ones: because the saved boot mode
+	 * is modified, it can't compare equal to the CPSR mode field any
+	 * more.
+	 *
+	 * Otherwise...
+	 */
+
+	cmp	r4, #HYP_MODE
+	bxne	lr			@ give up if the CPU is not in HYP mode
+
+/*
+ * Configure HSCTLR to set correct exception endianness/instruction set
+ * state etc.
+ * Turn off all traps
+ * Eventually, CPU-specific code might be needed -- assume not for now
+ *
+ * This code relies on the "eret" instruction to synchronize the
+ * various coprocessor accesses.
+ */
+	@ Now install the hypervisor stub:
+	adr	r7, __hyp_stub_vectors
+	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR)
+
+	@ Disable all traps, so we don't get any nasty surprise
+	mov	r7, #0
+	mcr	p15, 4, r7, c1, c1, 0	@ HCR
+	mcr	p15, 4, r7, c1, c1, 2	@ HCPTR
+	mcr	p15, 4, r7, c1, c1, 3	@ HSTR
+
+THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	orr	r7, #(1 << 9)		@ HSCTLR.EE
+#endif
+	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR
+
+	mrc	p15, 4, r7, c1, c1, 1	@ HDCR
+	and	r7, #0x1f		@ Preserve HPMN
+	mcr	p15, 4, r7, c1, c1, 1	@ HDCR
+
+#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
+	@ make CNTP_* and CNTPCT accessible from PL1
+	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
+	lsr	r7, #16
+	and	r7, #0xf
+	cmp	r7, #1
+	bne	1f
+	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
+	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+1:
+#endif
+
+	bic	r7, r4, #MODE_MASK
+	orr	r7, r7, #SVC_MODE
+THUMB(	orr	r7, r7, #PSR_T_BIT	)
+	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
+
+	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
+	__ERET				@ return, switching to SVC mode
+					@ The boot CPU mode is left in r4.
+ENDPROC(__hyp_stub_install_secondary)
+
+__hyp_stub_do_trap:
+	cmp	r0, #-1
+	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
+	mcrne	p15, 4, r0, c12, c0, 0	@ set HVBAR
+	__ERET
+ENDPROC(__hyp_stub_do_trap)
+
+/*
+ * __hyp_set_vectors: Call this after boot to set the initial hypervisor
+ * vectors as part of hypervisor installation.  On an SMP system, this should
+ * be called on each CPU.
+ *
+ * r0 must be the physical address of the new vector table (which must lie in
+ * the bottom 4GB of physical address space).
+ *
+ * r0 must be 32-byte aligned.
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_hyp will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+ENTRY(__hyp_get_vectors)
+	mov	r0, #-1
+ENDPROC(__hyp_get_vectors)
+	@ fall through
+ENTRY(__hyp_set_vectors)
+	__HVC(0)
+	bx	lr
+ENDPROC(__hyp_set_vectors)
+
+#ifndef ZIMAGE
+.align 2
+.L__boot_cpu_mode_offset:
+	.long	__boot_cpu_mode - .
+#endif
+
+.align 5
+__hyp_stub_vectors:
+__hyp_stub_reset:	W(b)	.
+__hyp_stub_und:		W(b)	.
+__hyp_stub_svc:		W(b)	.
+__hyp_stub_pabort:	W(b)	.
+__hyp_stub_dabort:	W(b)	.
+__hyp_stub_trap:	W(b)	__hyp_stub_do_trap
+__hyp_stub_irq:		W(b)	.
+__hyp_stub_fiq:		W(b)	.
+ENDPROC(__hyp_stub_vectors)
+
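
To make the calling convention of __hyp_set_vectors concrete, here is a small sketch (again illustrative, not from the patch; the function names are invented) of switching every online CPU from the stub vectors to a real hypervisor's vectors, as the comment above prescribes for SMP:

    #include <linux/smp.h>
    #include <asm/virt.h>

    /* vectors_phys: physical address of a 32-byte aligned vector table
     * located in the bottom 4GB, prepared by the new hypervisor. */
    static void set_hyp_vectors_on_this_cpu(void *vectors_phys)
    {
            __hyp_set_vectors((unsigned long)vectors_phys);
    }

    static void install_hyp_vectors_everywhere(unsigned long vectors_phys)
    {
            /* Each CPU owns its HVBAR, so the HVC trap has to run locally
             * on every online CPU. */
            on_each_cpu(set_hyp_vectors_on_this_cpu, (void *)vectors_phys, 1);
    }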

+ 20 - 0
arch/arm/kernel/setup.c

@@ -55,6 +55,7 @@
 #include <asm/traps.h>
 #include <asm/unwind.h>
 #include <asm/memblock.h>
+#include <asm/virt.h>
 
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 #include "compat.h"
@@ -937,6 +938,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
+void __init hyp_mode_check(void)
+{
+#ifdef CONFIG_ARM_VIRT_EXT
+	if (is_hyp_mode_available()) {
+		pr_info("CPU: All CPU(s) started in HYP mode.\n");
+		pr_info("CPU: Virtualization extensions available.\n");
+	} else if (is_hyp_mode_mismatched()) {
+		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
+			__boot_cpu_mode & MODE_MASK);
+		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
+	} else
+		pr_info("CPU: All CPU(s) started in SVC mode.\n");
+#endif
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	struct machine_desc *mdesc;
@@ -980,6 +996,10 @@ void __init setup_arch(char **cmdline_p)
 	if (is_smp())
 		smp_init_cpus();
 #endif
+
+	if (!is_smp())
+		hyp_mode_check();
+
 	reserve_crashkernel();
 
 	tcm_init();

+ 3 - 0
arch/arm/kernel/smp.c

@@ -42,6 +42,7 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -290,6 +291,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	       num_online_cpus(),
 	       bogosum / (500000/HZ),
 	       (bogosum / (5000/HZ)) % 100);
+
+	hyp_mode_check();
 }
 
 void __init smp_prepare_boot_cpu(void)

+ 17 - 0
arch/arm/mm/Kconfig

@@ -624,6 +624,23 @@ config ARM_THUMBEE
 	  Say Y here if you have a CPU with the ThumbEE extension and code to
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.
 
+config ARM_VIRT_EXT
+	bool "Native support for the ARM Virtualization Extensions"
+	depends on MMU && CPU_V7
+	help
+	  Enable the kernel to make use of the ARM Virtualization
+	  Extensions to install hypervisors without run-time firmware
+	  assistance.
+
+	  A compliant bootloader is required in order to make maximum
+	  use of this feature.  Refer to Documentation/arm/Booting for
+	  details.
+
+	  It is safe to enable this option even if the kernel is not
+	  booted in HYP mode, the CPU does not support the virtualization
+	  extensions, or the bootloader does not comply with the documented
+	  boot interface.
+
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
 	depends on !CPU_USE_DOMAINS && CPU_V7