
Merge branch 'devel-stable' into for-next

Russell King, 10 years ago
parent
commit
e9f2d6d660

+ 1 - 1
Documentation/arm/memory.txt

@@ -41,7 +41,7 @@ fffe8000	fffeffff	DTCM mapping area for platforms with
 fffe0000	fffe7fff	ITCM mapping area for platforms with
 				ITCM mounted inside the CPU.
 
-ffc00000	ffdfffff	Fixmap mapping region.  Addresses provided
+ffc00000	ffefffff	Fixmap mapping region.  Addresses provided
 				by fix_to_virt() will be located here.
 
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
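Note on the new range (reviewer arithmetic, not part of the patch): the fixmap window grows from ffc00000-ffdfffff (2 MiB) to ffc00000-ffefffff (3 MiB). The fixmap.h hunk below sets FIXADDR_END to 0xfff00000 and FIXADDR_TOP one page below it, so with 4 KiB pages the usable window is 3 MiB minus one page. The region still sits above the PCI I/O mapping at fee00000-feffffff, so the two do not overlap.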

+ 10 - 0
arch/arm/include/asm/cacheflush.h

@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
+
 #endif
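The intended calling pattern for the new helpers shows up later in this commit (ftrace and machine_kexec). A minimal sketch, assuming CONFIG_DEBUG_RODATA=y and a caller that already runs with other CPUs quiesced, e.g. under stop_machine(); example_modify_text() is a hypothetical name:

static int example_modify_text(void *data)
{
	set_kernel_text_rw();	/* temporarily make kernel text/rodata writable */
	/* ... rewrite instructions in place ... */
	set_kernel_text_ro();	/* restore the read-only mapping */
	return 0;
}

/* typically driven as: stop_machine(example_modify_text, NULL, NULL); */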

+ 14 - 17
arch/arm/include/asm/fixmap.h

@@ -2,27 +2,24 @@
 #define _ASM_FIXMAP_H

 #define FIXADDR_START		0xffc00000UL
-#define FIXADDR_TOP		0xffe00000UL
-#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)
+#define FIXADDR_END		0xfff00000UL
+#define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
-#define FIX_KMAP_NR_PTES	(FIXADDR_SIZE >> PAGE_SHIFT)
+#include <asm/kmap_types.h>
 
-#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)
+enum fixed_addresses {
+	FIX_KMAP_BEGIN,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
-extern void __this_fixmap_does_not_exist(void);
+	/* Support writing RO kernel text via kprobes, jump labels, etc. */
+	FIX_TEXT_POKE0,
+	FIX_TEXT_POKE1,
 
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
-	if (idx >= FIX_KMAP_NR_PTES)
-		__this_fixmap_does_not_exist();
-	return __fix_to_virt(idx);
-}
+	__end_of_fixed_addresses
+};
 
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
-	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-	return __virt_to_fix(vaddr);
-}
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
 
 #endif
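With the switch to <asm-generic/fixmap.h>, fix_to_virt()/virt_to_fix() and set_fixmap()/clear_fixmap() now come from the generic header, driven by the enum above. A hedged sketch of what the new FIX_TEXT_POKE0 slot is for (this mirrors patch_map()/patch_unmap() in arch/arm/kernel/patch.c further down; example_text_poke() is a hypothetical name):

static void example_text_poke(void *addr, u32 insn)
{
	void *waddr;

	/* alias the (read-only) text page at a writable fixmap slot */
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(virt_to_page(addr)));
	waddr = (void *)(__fix_to_virt(FIX_TEXT_POKE0) +
			 ((uintptr_t)addr & ~PAGE_MASK));

	*(u32 *)waddr = insn;		/* write through the alias */

	clear_fixmap(FIX_TEXT_POKE0);	/* tear the alias down */
	flush_icache_range((uintptr_t)addr, (uintptr_t)addr + sizeof(insn));
}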

+ 1 - 1
arch/arm/kernel/Makefile

@@ -68,7 +68,7 @@ test-kprobes-objs		+= kprobes-test-arm.o
 endif
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
-obj-$(CONFIG_KGDB)		+= kgdb.o
+obj-$(CONFIG_KGDB)		+= kgdb.o patch.o
 obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
 obj-$(CONFIG_HAVE_TCM)		+= tcm.o
 obj-$(CONFIG_OF)		+= devtree.o

+ 19 - 0
arch/arm/kernel/ftrace.c

@@ -15,6 +15,7 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
 #include <asm/opcodes.h>
@@ -35,6 +36,22 @@
 
 #define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
 
+static int __ftrace_modify_code(void *data)
+{
+	int *command = data;
+
+	set_kernel_text_rw();
+	ftrace_modify_all_code(*command);
+	set_kernel_text_ro();
+
+	return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+	stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
 	return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
 int ftrace_arch_code_modify_post_process(void)
 {
 	set_all_modules_text_ro();
+	/* Make sure any TLB misses during machine stop are cleared. */
+	flush_tlb_all();
 	return 0;
 }
 

+ 1 - 1
arch/arm/kernel/jump_label.c

@@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
 		insn = arm_gen_nop();

 	if (is_static)
-		__patch_text(addr, insn);
+		__patch_text_early(addr, insn);
 	else
 		patch_text(addr, insn);
 }

+ 29 - 0
arch/arm/kernel/kgdb.c

@@ -12,8 +12,12 @@
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
+#include <linux/uaccess.h>
+
 #include <asm/traps.h>
 
+#include "patch.h"
+
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
 	{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
@@ -244,6 +248,31 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+
+	/* patch_text() only supports int-sized breakpoints */
+	BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
+
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+
+	patch_text((void *)bpt->bpt_addr,
+		   *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+
+	return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+	patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+
+	return 0;
+}
+
 /*
  * Register our undef instruction hooks with ARM undef core.
  * We regsiter a hook specifically looking for the KGB break inst

+ 5 - 4
arch/arm/kernel/machine_kexec.c

@@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags;
 
 static atomic_t waiting_for_crash_ipi;
 
+static unsigned long dt_mem;
 /*
  * Provide a dummy crash_notes definition while crash dump arrives to arm.
  * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image)
 			return err;

 		if (be32_to_cpu(header) == OF_DT_HEADER)
-			kexec_boot_atags = current_segment->mem;
+			dt_mem = current_segment->mem;
 	}
 	return 0;
 }
@@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image)
 	reboot_code_buffer = page_address(image->control_code_page);

 	/* Prepare parameters for reboot_code_buffer*/
+	set_kernel_text_rw();
 	kexec_start_address = image->start;
 	kexec_indirection_page = page_list;
 	kexec_mach_type = machine_arch_type;
-	if (!kexec_boot_atags)
-		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
+	kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
+				     + KEXEC_ARM_ATAGS_OFFSET;
 
 	/* copy our kernel relocation code to the control code page */
 	reboot_entry = fncpy(reboot_code_buffer,

+ 73 - 19
arch/arm/kernel/patch.c

@@ -1,8 +1,11 @@
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 #include <linux/kprobes.h>
+#include <linux/mm.h>
 #include <linux/stop_machine.h>

 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 #include <asm/smp_plat.h>
 #include <asm/opcodes.h>
 
@@ -13,21 +16,77 @@ struct patch {
 	unsigned int insn;
 };
 
-void __kprobes __patch_text(void *addr, unsigned int insn)
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+	__acquires(&patch_lock)
+{
+	unsigned int uintaddr = (uintptr_t) addr;
+	bool module = !core_kernel_text(uintaddr);
+	struct page *page;
+
+	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+		page = vmalloc_to_page(addr);
+	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+		page = virt_to_page(addr);
+	else
+		return addr;
+
+	if (flags)
+		spin_lock_irqsave(&patch_lock, *flags);
+	else
+		__acquire(&patch_lock);
+
+	set_fixmap(fixmap, page_to_phys(page));
+
+	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+	__releases(&patch_lock)
+{
+	clear_fixmap(fixmap);
+
+	if (flags)
+		spin_unlock_irqrestore(&patch_lock, *flags);
+	else
+		__release(&patch_lock);
+}
+
+void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
 {
 	bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+	unsigned int uintaddr = (uintptr_t) addr;
+	bool twopage = false;
+	unsigned long flags;
+	void *waddr = addr;
 	int size;
 
+	if (remap)
+		waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
+	else
+		__acquire(&patch_lock);
+
 	if (thumb2 && __opcode_is_thumb16(insn)) {
-		*(u16 *)addr = __opcode_to_mem_thumb16(insn);
+		*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
 		size = sizeof(u16);
-	} else if (thumb2 && ((uintptr_t)addr & 2)) {
+	} else if (thumb2 && (uintaddr & 2)) {
 		u16 first = __opcode_thumb32_first(insn);
 		u16 second = __opcode_thumb32_second(insn);
-		u16 *addrh = addr;
+		u16 *addrh0 = waddr;
+		u16 *addrh1 = waddr + 2;
+
+		twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
+		if (twopage && remap)
+			addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);
+
+		*addrh0 = __opcode_to_mem_thumb16(first);
+		*addrh1 = __opcode_to_mem_thumb16(second);
 
-		addrh[0] = __opcode_to_mem_thumb16(first);
-		addrh[1] = __opcode_to_mem_thumb16(second);
+		if (twopage && addrh1 != addr + 2) {
+			flush_kernel_vmap_range(addrh1, 2);
+			patch_unmap(FIX_TEXT_POKE1, NULL);
+		}
 
 		size = sizeof(u32);
 	} else {
@@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
 		else
 			insn = __opcode_to_mem_arm(insn);
 
-		*(u32 *)addr = insn;
+		*(u32 *)waddr = insn;
 		size = sizeof(u32);
 	}
 
+	if (waddr != addr) {
+		flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+		patch_unmap(FIX_TEXT_POKE0, &flags);
+	} else
+		__release(&patch_lock);
+
 	flush_icache_range((uintptr_t)(addr),
 			   (uintptr_t)(addr) + size);
 }
@@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
 		.insn = insn,
 	};
 
-	if (cache_ops_need_broadcast()) {
-		stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
-	} else {
-		bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
-				      && __opcode_is_thumb32(insn)
-				      && ((uintptr_t)addr & 2);
-
-		if (straddles_word)
-			stop_machine(patch_text_stop_machine, &patch, NULL);
-		else
-			__patch_text(addr, insn);
-	}
+	stop_machine(patch_text_stop_machine, &patch, NULL);
 }
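Caller-facing summary of the reworked patching path (a hedged sketch; example_set_nop() is hypothetical, arm_gen_nop() is the helper the jump_label hunk above already uses):

static void example_set_nop(void *addr, bool early)
{
	unsigned int nop = arm_gen_nop();

	if (early)
		/* boot-time/single-CPU path: write in place, no fixmap alias */
		__patch_text_early(addr, nop);
	else
		/* live path: stop_machine() + FIX_TEXT_POKE0/1 aliases + I-cache flush */
		patch_text(addr, nop);
}

Note that patch_text() now unconditionally goes through stop_machine(); the old cache_ops_need_broadcast()/straddles_word special cases are gone.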

+ 11 - 1
arch/arm/kernel/patch.h

@@ -2,6 +2,16 @@
 #define _ARM_KERNEL_PATCH_H

 void patch_text(void *addr, unsigned int insn);
-void __patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+	__patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+	__patch_text_real(addr, insn, false);
+}
 
 #endif

+ 19 - 0
arch/arm/kernel/vmlinux.lds.S

@@ -8,6 +8,9 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
 	
 #define PROC_INFO							\
 	. = ALIGN(4);							\
@@ -90,6 +93,11 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
+
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
@@ -112,6 +120,9 @@ SECTIONS
 			ARM_CPU_KEEP(PROC_INFO)
 	}
 
+#ifdef CONFIG_DEBUG_RODATA
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
 	RO_DATA(PAGE_SIZE)

 	. = ALIGN(4);
@@ -145,7 +156,11 @@ SECTIONS
 	_etext = .;			/* End of text and rodata section */

 #ifndef CONFIG_XIP_KERNEL
+# ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+# else
 	. = ALIGN(PAGE_SIZE);
+# endif
 	__init_begin = .;
 #endif
 	/*
@@ -218,8 +233,12 @@ SECTIONS
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
 	. = PAGE_OFFSET + TEXT_OFFSET;
+#else
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
 #else
 	. = ALIGN(THREAD_SIZE);
+#endif
 	__init_end = .;
 	__data_loc = .;
 #endif

+ 21 - 0
arch/arm/mm/Kconfig

@@ -1009,3 +1009,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 	help
 	  This option specifies the architecture can support big endian
 	  operation.
+
+config ARM_KERNMEM_PERMS
+	bool "Restrict kernel memory permissions"
+	help
+	  If this is set, kernel memory other than kernel text (and rodata)
+	  will be made non-executable. The tradeoff is that each region is
+	  padded to section-size (1MiB) boundaries (because their permissions
+	  are different and splitting the 1M pages into 4K ones causes TLB
+	  performance problems), wasting memory.
+
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	depends on ARM_KERNMEM_PERMS
+	default y
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable. This creates
+	  another section-size padded region, so it can waste more memory
+	  space while gaining the read-only protections.
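Rough cost estimate (reviewer arithmetic, not from the help text): the vmlinux.lds.S hunks in this commit add section-size alignment before .text, before __init_begin and before __init_end, and DEBUG_RODATA adds one more before the rodata section, so the worst case is roughly one section of padding per boundary: on the order of 3-4 MiB with 1 MiB sections (classic page tables), and about twice that with LPAE's 2 MiB sections.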

+ 8 - 7
arch/arm/mm/highmem.c

@@ -18,19 +18,20 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	set_pte_ext(fixmap_page_table + idx, pte, 0);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(vaddr);
 }

 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-	unsigned long idx = __virt_to_fix(vaddr);
-	return *(fixmap_page_table + idx);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	return *ptep;
 }

 void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
 

+ 147 - 2
arch/arm/mm/init.c

@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -570,7 +571,7 @@ void __init mem_init(void)
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLK(FIXADDR_START, FIXADDR_END),
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+	pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+	/* Make pages tables, etc before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#ifdef CONFIG_DEBUG_RODATA
+	/* Make rodata NX (set RO in ro_perms below). */
+	{
+		.start  = (unsigned long)__start_rodata,
+		.end    = (unsigned long)__init_begin,
+		.mask   = ~PMD_SECT_XN,
+		.prot   = PMD_SECT_XN,
+	},
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+	/* Make kernel code and rodata RX (set RO). */
+	{
+		.start  = (unsigned long)_stext,
+		.end    = (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+		.mask   = ~PMD_SECT_RDONLY,
+		.prot   = PMD_SECT_RDONLY,
+#else
+		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.clear  = PMD_SECT_AP_WRITE,
+#endif
+	},
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. Is only
+ * safe to be called with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				       perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+	set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
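One detail of section_update() above worth spelling out (reviewer note, not from the patch): on classic, non-LPAE ARM the hardware uses 1 MiB section entries but Linux folds them into 2 MiB software PMDs, so pmd_offset() hands back a pointer to a pair of hardware entries and addr & SECTION_SIZE selects the odd one. For example, updating 0xc0100000 (an odd megabyte) modifies pmd[1], while 0xc0200000 starts a new PMD and modifies pmd[0].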

+ 35 - 4
arch/arm/mm/mmu.c

@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -356,6 +357,29 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -1296,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }

 static void __init map_lowmem(void)
@@ -1319,12 +1343,19 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
+			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
 			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			/* This better cover the entire kernel */