
arm64: kernel: Add support for Privileged Access Never

'Privileged Access Never' is a new ARMv8.1 feature which prevents
privileged code from accessing any virtual address where read or write
access is also permitted at EL0.

This patch enables the PAN feature on all CPUs, and modifies the
{get,put}_user helpers to temporarily permit user-space access.

This will catch kernel bugs where user memory is accessed directly.
'Unprivileged loads and stores' using ldtrb et al are unaffected by PAN.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use ALTERNATIVE in asm and tidy up pan_enable check]
Signed-off-by: Will Deacon <will.deacon@arm.com>
James Morse, 10 years ago
commit 338d4f49d6
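
For illustration only (a hypothetical driver snippet, not part of this patch): with PAN live, a direct dereference of a user pointer from kernel code takes a permission fault, while the uaccess helpers keep working because they clear PSTATE.PAN around the access and set it again afterwards.

#include <linux/uaccess.h>

/* Hypothetical example: the raw dereference is exactly the class of bug
 * PAN now turns into a hard fault; get_user() remains the correct way.
 */
static int read_user_flag(int __user *uptr, int *out)
{
	int val;

	val = *uptr;			/* bug: direct user access, faults under PAN */

	if (get_user(val, uptr))	/* ok: helper toggles PSTATE.PAN */
		return -EFAULT;

	*out = val;
	return 0;
}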

+ 14 - 0
arch/arm64/Kconfig

@@ -596,6 +596,20 @@ config FORCE_MAX_ZONEORDER
 	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
 	default "11"
 
+config ARM64_PAN
+	bool "Enable support for Privileged Access Never (PAN)"
+	default y
+	help
+	 Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+	 prevents the kernel or hypervisor from accessing user-space (EL0)
+	 memory directly.
+
+	 Choosing this option will cause any unprotected (not using
+	 copy_to_user et al) memory access to fail with a permission fault.
+
+	 The feature is detected at runtime, and will remain as a 'nop'
+	 instruction if the cpu does not implement the feature.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT

+ 2 - 1
arch/arm64/include/asm/cpufeature.h

@@ -25,8 +25,9 @@
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
 #define ARM64_WORKAROUND_845719			2
 #define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
 
-#define ARM64_NCAPS				4
+#define ARM64_NCAPS				5
 
 #ifndef __ASSEMBLY__
 

+ 8 - 0
arch/arm64/include/asm/futex.h

@@ -20,10 +20,16 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/errno.h>
+#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
 	asm volatile(							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
@@ -39,6 +45,8 @@
 "	.align	3\n"							\
 "	.quad	1b, 4b, 2b, 4b\n"					\
 "	.popsection\n"							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
 	: "memory")

+ 2 - 0
arch/arm64/include/asm/processor.h

@@ -186,4 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
 
 #endif
 
+void cpu_enable_pan(void);
+
 #endif /* __ASM_PROCESSOR_H */

+ 8 - 0
arch/arm64/include/asm/sysreg.h

@@ -20,6 +20,8 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <asm/opcodes.h>
+
 #define SCTLR_EL1_CP15BEN	(0x1 << 5)
 #define SCTLR_EL1_SED		(0x1 << 8)
 
@@ -36,6 +38,12 @@
 #define sys_reg(op0, op1, crn, crm, op2) \
 	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
 
+#define REG_PSTATE_PAN_IMM                     sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN                         (1 << 23)
+
+#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+				     (!!x)<<8 | 0x1f)
+
 #ifdef __ASSEMBLY__
 
 	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
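
A quick way to sanity-check what SET_PSTATE_PAN() expands to is to reproduce the macro arithmetic in a standalone user-space program (a sketch, not kernel code; __inst_arm() only emits the computed word into the instruction stream). The expected results are the two MSR-immediate forms: 0xd500409f for "msr pan, #0" and 0xd500419f for "msr pan, #1".

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the sys_reg()/SET_PSTATE_PAN() macros above. */
#define sys_reg(op0, op1, crn, crm, op2) \
	((((op0) & 3) << 19) | ((op1) << 16) | ((crn) << 12) | \
	 ((crm) << 8) | ((op2) << 5))

#define REG_PSTATE_PAN_IMM	sys_reg(0, 0, 4, 0, 4)

static unsigned int set_pstate_pan(int x)
{
	return 0xd5000000 | REG_PSTATE_PAN_IMM | ((!!x) << 8) | 0x1f;
}

int main(void)
{
	printf("SET_PSTATE_PAN(0) = 0x%08x\n", set_pstate_pan(0));	/* 0xd500409f */
	printf("SET_PSTATE_PAN(1) = 0x%08x\n", set_pstate_pan(1));	/* 0xd500419f */
	return 0;
}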

+ 11 - 0
arch/arm64/include/asm/uaccess.h

@@ -24,7 +24,10 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -131,6 +134,8 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
@@ -148,6 +153,8 @@ do {									\
 		BUILD_BUG();						\
 	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -194,6 +201,8 @@ do {									\
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
@@ -210,6 +219,8 @@ do {									\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __put_user(x, ptr)						\
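
The "nop" default is what makes this safe on pre-v8.1 CPUs: each site above is assembled as a NOP plus an entry in the alternatives table, and early at boot the alternatives code rewrites the NOP into the MSR only where ARM64_HAS_PAN was detected. A simplified conceptual sketch of that patching pass (field and helper layout abbreviated here; this is not the kernel's actual struct alt_instr):

/* Conceptual sketch only: patch each recorded site when its CPU feature
 * is present, otherwise leave the original NOP in place.
 */
struct alt_entry {
	void		*orig;		/* site holding the "nop" */
	const void	*alt;		/* replacement instruction(s) */
	unsigned int	feature;	/* e.g. ARM64_HAS_PAN */
	unsigned int	len;		/* bytes to patch */
};

static void apply_alternatives(struct alt_entry *start, struct alt_entry *end)
{
	struct alt_entry *e;

	for (e = start; e < end; e++) {
		if (!cpus_have_cap(e->feature))
			continue;
		memcpy(e->orig, e->alt, e->len);
		flush_icache_range((unsigned long)e->orig,
				   (unsigned long)e->orig + e->len);
	}
}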

+ 1 - 0
arch/arm64/include/uapi/asm/ptrace.h

@@ -44,6 +44,7 @@
 #define PSR_I_BIT	0x00000080
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
+#define PSR_PAN_BIT	0x00400000
 #define PSR_Q_BIT	0x08000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000

+ 7 - 1
arch/arm64/kernel/armv8_deprecated.c

@@ -14,6 +14,8 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
 #include <asm/sysreg.h>
@@ -280,6 +282,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
  */
 #define __user_swpX_asm(data, addr, res, temp, B)		\
 	__asm__ __volatile__(					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+		    CONFIG_ARM64_PAN)				\
 	"	mov		%w2, %w1\n"			\
 	"0:	ldxr"B"		%w1, [%3]\n"			\
 	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
@@ -295,7 +299,9 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
 	"	.align		3\n"				\
 	"	.quad		0b, 3b\n"			\
 	"	.quad		1b, 3b\n"			\
-	"	.popsection"					\
+	"	.popsection\n"					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+		CONFIG_ARM64_PAN)				\
 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
 	: "memory")

+ 20 - 0
arch/arm64/kernel/cpufeature.c

@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
 
 static bool
 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
@@ -39,6 +40,15 @@ has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
 	return feature_matches(val, entry);
 }
 
+static bool __maybe_unused
+has_id_aa64mmfr1_feature(const struct arm64_cpu_capabilities *entry)
+{
+	u64 val;
+
+	val = read_cpuid(id_aa64mmfr1_el1);
+	return feature_matches(val, entry);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -47,6 +57,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = 24,
 		.min_field_value = 1,
 	},
+#ifdef CONFIG_ARM64_PAN
+	{
+		.desc = "Privileged Access Never",
+		.capability = ARM64_HAS_PAN,
+		.matches = has_id_aa64mmfr1_feature,
+		.field_pos = 20,
+		.min_field_value = 1,
+		.enable = cpu_enable_pan,
+	},
+#endif /* CONFIG_ARM64_PAN */
 	{},
 };
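
The new capability entry keys off ID_AA64MMFR1_EL1: field_pos = 20 selects the four-bit PAN field at bits [23:20], and min_field_value = 1 accepts any implementation that reports the feature. A standalone sketch of the field test (hypothetical sample value; in the kernel the comparison goes through feature_matches() above):

#include <stdint.h>
#include <stdio.h>

/* Extract a four-bit ID register field starting at bit 'pos'. */
static unsigned int id_field(uint64_t reg, unsigned int pos)
{
	return (reg >> pos) & 0xf;
}

int main(void)
{
	uint64_t id_aa64mmfr1 = 0x100000;	/* hypothetical value: PAN field = 1 */
	unsigned int pan = id_field(id_aa64mmfr1, 20);

	printf("PAN %s (ID_AA64MMFR1_EL1.PAN = %u)\n",
	       pan >= 1 ? "supported" : "not supported", pan);
	return 0;
}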
 
 

+ 8 - 0
arch/arm64/lib/clear_user.S

@@ -16,7 +16,11 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 	.text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -48,6 +54,8 @@ USER(9f, strh	wzr, [x0], #2	)
 	b.mi	5f
 USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
 
 

+ 8 - 0
arch/arm64/lib/copy_from_user.S

@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	add	x5, x1, x2			// upper user buffer boundary
 	subs	x2, x2, #16
 	b.mi	1f
@@ -56,6 +62,8 @@ USER(9f, ldrh	w3, [x1], #2	)
 USER(9f, ldrb	w3, [x1]	)
 	strb	w3, [x0]
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_from_user)
 
 

+ 8 - 0
arch/arm64/lib/copy_in_user.S

@@ -17,7 +17,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,6 +34,8 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	add	x5, x0, x2			// upper user buffer boundary
 	subs	x2, x2, #16
 	b.mi	1f
@@ -58,6 +64,8 @@ USER(9f, strh	w3, [x0], #2	)
 USER(9f, ldrb	w3, [x1]	)
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_in_user)
 
 

+ 8 - 0
arch/arm64/lib/copy_to_user.S

@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	add	x5, x0, x2			// upper user buffer boundary
 	subs	x2, x2, #16
 	b.mi	1f
@@ -56,6 +62,8 @@ USER(9f, strh	w3, [x0], #2	)
 	ldrb	w3, [x1]
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_to_user)
 
 

+ 16 - 0
arch/arm64/mm/fault.c

@@ -30,9 +30,11 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -223,6 +225,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
+	/*
+	 * PAN bit set implies the fault happened in kernel space, but not
+	 * in the arch's user access functions.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+		goto no_context;
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -536,3 +545,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 
 	return 0;
 }
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
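
cpu_enable_pan() runs on the CPUs that report the capability (the .enable hook in the table above) and clears SCTLR_EL1.SPAN, bit 23 as defined in sysreg.h. With SPAN clear, the hardware sets PSTATE.PAN automatically on every exception taken to EL1, so the kernel runs with PAN enabled by default and only the explicitly patched uaccess paths turn it off. config_sctlr_el1() is a read-modify-write of SCTLR_EL1; a rough sketch of the equivalent operation (not the kernel's exact implementation):

/* Roughly what config_sctlr_el1(SCTLR_EL1_SPAN, 0) amounts to: clear the
 * SPAN bit in SCTLR_EL1 and write the register back.
 */
static inline void sctlr_el1_clear_span(void)
{
	unsigned long val;

	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
	val &= ~(1UL << 23);		/* SCTLR_EL1_SPAN */
	asm volatile("msr sctlr_el1, %0" : : "r" (val));
}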