author    Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 12:34:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 12:34:49 -0700
commit    e06df6a7eae1ab1ef4deb076aeeaed90e948e5c0 (patch)
tree      ff3a95036550939440f5cd2e7d251bb303b337f9 /arch
parent    c0fc3cbac0a6fe40f98c5e5ed5f2df5e291bc94d (diff)
parent    9dd721c6dbfc310f94306902611f86dda87a45fa (diff)
Merge branch 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 kaslr update from Ingo Molnar:
 "This adds kernel module load address randomization"

* 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kaslr: fix module lock ordering problem
  x86, kaslr: randomize module base load address
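For illustration: the randomized offset introduced by this series is computed as (get_random_int() % 1024 + 1) * PAGE_SIZE (see the diff below), i.e. between 1 and 1024 pages, or roughly 10 bits of randomness added to the module area base; booting with "nokaslr" disables it via parse_nokaslr(). The stand-alone sketch below only demonstrates that range and is not part of the patch; the 4096-byte PAGE_SIZE is an assumption (the usual x86 page size).

/*
 * Stand-alone sketch (not part of the patch): prints the range of the
 * module base offset produced by (get_random_int() % 1024 + 1) * PAGE_SIZE.
 * A 4096-byte PAGE_SIZE is assumed, as on x86.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long min = (0UL % 1024 + 1) * PAGE_SIZE;    /* random value 0    */
	unsigned long max = (1023UL % 1024 + 1) * PAGE_SIZE; /* random value 1023 */

	printf("offset range: %#lx .. %#lx (%lu .. %lu pages, ~10 bits)\n",
	       min, max, min / PAGE_SIZE, max / PAGE_SIZE);
	return 0;
}

Running this prints "offset range: 0x1000 .. 0x400000 (1 .. 1024 pages, ~10 bits)".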
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/module.c | 46 ++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 43 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 18be189368b..e69f9882bf9 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
+#include <linux/random.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -43,13 +44,52 @@ do { \
 } while (0)
 #endif
 
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+/* Mutex protects the module_load_offset. */
+static DEFINE_MUTEX(module_kaslr_mutex);
+
+static int __init parse_nokaslr(char *p)
+{
+	randomize_modules = 0;
+	return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+	if (randomize_modules) {
+		mutex_lock(&module_kaslr_mutex);
+		/*
+		 * Calculate the module_load_offset the first time this
+		 * code is called. Once calculated it stays the same until
+		 * reboot.
+		 */
+		if (module_load_offset == 0)
+			module_load_offset =
+				(get_random_int() % 1024 + 1) * PAGE_SIZE;
+		mutex_unlock(&module_kaslr_mutex);
+	}
+	return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+	return 0;
+}
+#endif
+
 void *module_alloc(unsigned long size)
 {
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-				NUMA_NO_NODE, __builtin_return_address(0));
+	return __vmalloc_node_range(size, 1,
+				    MODULES_VADDR + get_module_load_offset(),
+				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+				    PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				    __builtin_return_address(0));
 }
 
 #ifdef CONFIG_X86_32
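In the patch above, get_module_load_offset() computes the offset only once, under its own module_kaslr_mutex, and every later call returns the cached value for the rest of the uptime; the "+ 1" keeps the cached offset nonzero so 0 can serve as the "not yet computed" sentinel. Below is a minimal user-space sketch of that compute-once-under-a-lock pattern; it is not kernel code: pthread_mutex_t stands in for the kernel mutex and rand() for get_random_int().

/*
 * User-space sketch of the pattern used by get_module_load_offset():
 * pick a random, page-aligned, nonzero offset the first time the
 * function is called, then return the cached value on every later call.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages, as on x86 */

static unsigned long load_offset;	/* 0 means "not yet computed" */
static pthread_mutex_t offset_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long get_load_offset(void)
{
	pthread_mutex_lock(&offset_mutex);
	if (load_offset == 0)
		load_offset = (rand() % 1024 + 1) * PAGE_SIZE;
	pthread_mutex_unlock(&offset_mutex);
	return load_offset;
}

int main(void)
{
	srand((unsigned)time(NULL));
	/* Every call after the first returns the same cached offset. */
	printf("offset = %#lx\n", get_load_offset());
	printf("offset = %#lx\n", get_load_offset());
	return 0;
}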