
Merge branch 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 kaslr update from Ingo Molnar:
 "This adds kernel module load address randomization"

* 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kaslr: fix module lock ordering problem
  x86, kaslr: randomize module base load address
Linus Torvalds
commit e06df6a7ea

 Documentation/kernel-parameters.txt |  4 ++--
 arch/x86/kernel/module.c            | 46 +++++++++++++++++++++++++++++++++---
 2 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2060,8 +2060,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			IOAPICs that may be present in the system.
 
 	nokaslr		[X86]
-			Disable kernel base offset ASLR (Address Space
-			Layout Randomization) if built into the kernel.
+			Disable kernel and module base offset ASLR (Address
+			Space Layout Randomization) if built into the kernel.
 
 	noautogroup	Disable scheduler automatic task group creation.
 

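With this change the existing nokaslr switch covers both randomizations: booting with it appended to the kernel command line, for example (the image path and root device below are hypothetical), disables kernel base and module base layout randomization together:

	linux /vmlinuz root=/dev/sda1 ro nokaslr
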
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
+#include <linux/random.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -43,13 +44,52 @@ do {							\
 } while (0)
 #endif
 
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+/* Mutex protects the module_load_offset. */
+static DEFINE_MUTEX(module_kaslr_mutex);
+
+static int __init parse_nokaslr(char *p)
+{
+	randomize_modules = 0;
+	return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+	if (randomize_modules) {
+		mutex_lock(&module_kaslr_mutex);
+		/*
+		 * Calculate the module_load_offset the first time this
+		 * code is called. Once calculated it stays the same until
+		 * reboot.
+		 */
+		if (module_load_offset == 0)
+			module_load_offset =
+				(get_random_int() % 1024 + 1) * PAGE_SIZE;
+		mutex_unlock(&module_kaslr_mutex);
+	}
+	return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+	return 0;
+}
+#endif
+
 void *module_alloc(unsigned long size)
 {
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-				NUMA_NO_NODE, __builtin_return_address(0));
+	return __vmalloc_node_range(size, 1,
+				    MODULES_VADDR + get_module_load_offset(),
+				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+				    PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				    __builtin_return_address(0));
 }
 
 #ifdef CONFIG_X86_32
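
With CONFIG_RANDOMIZE_BASE enabled, module_alloc() now starts its search at MODULES_VADDR plus a per-boot random offset: a uniformly chosen multiple of the page size between 1 and 1024 pages, i.e. between 4 KiB and 4 MiB with x86's 4 KiB pages, computed lazily once and reused for every later allocation. A minimal userspace sketch of just that calculation (illustrative only: rand()/srand()/time() stand in for the kernel's get_random_int(), PAGE_SIZE is assumed to be 4096, and get_offset() is a hypothetical mirror of get_module_load_offset()):

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define PAGE_SIZE 4096UL	/* assumed x86 page size */

	static unsigned long module_load_offset;

	/* Mirrors get_module_load_offset(): pick the offset once, keep it. */
	static unsigned long get_offset(void)
	{
		if (module_load_offset == 0)
			module_load_offset =
				((unsigned long)rand() % 1024 + 1) * PAGE_SIZE;
		return module_load_offset;
	}

	int main(void)
	{
		srand((unsigned int)time(NULL));	/* stand-in for get_random_int() */
		printf("offset = 0x%lx (%lu pages)\n",
		       get_offset(), get_offset() / PAGE_SIZE);
		return 0;
	}

In the kernel code the lazy initialization is serialized with a dedicated module_kaslr_mutex rather than a broader module lock; judging by the shortlog, that is what the "fix module lock ordering problem" commit in this pull addresses.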