author    Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 12:34:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 12:34:49 -0700
commit    e06df6a7eae1ab1ef4deb076aeeaed90e948e5c0 (patch)
tree      ff3a95036550939440f5cd2e7d251bb303b337f9
parent    c0fc3cbac0a6fe40f98c5e5ed5f2df5e291bc94d (diff)
parent    9dd721c6dbfc310f94306902611f86dda87a45fa (diff)
Merge branch 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 kaslr update from Ingo Molnar:
 "This adds kernel module load address randomization"

* 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kaslr: fix module lock ordering problem
  x86, kaslr: randomize module base load address
-rw-r--r--  Documentation/kernel-parameters.txt   4
-rw-r--r--  arch/x86/kernel/module.c             46
2 files changed, 45 insertions(+), 5 deletions(-)
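
For context on the change below: get_module_load_offset() picks a per-boot offset of (get_random_int() % 1024 + 1) * PAGE_SIZE, i.e. between 1 and 1024 pages (4 KiB to 4 MiB with x86's 4 KiB pages), and module_alloc() then starts its vmalloc range at MODULES_VADDR + offset instead of MODULES_VADDR. Here is a minimal userspace sketch of that offset arithmetic only, with rand() standing in for the kernel's get_random_int() and a hard-coded 4 KiB PAGE_SIZE:

	/* offset_sketch.c - illustrative only; rand() stands in for get_random_int() */
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define PAGE_SIZE 4096UL   /* x86 page size assumed by this sketch */

	int main(void)
	{
		unsigned long offset;

		srand((unsigned int)time(NULL));   /* stand-in for kernel randomness */

		/* Same arithmetic as the patch: 1..1024 pages above MODULES_VADDR. */
		offset = ((unsigned long)rand() % 1024 + 1) * PAGE_SIZE;

		printf("module load offset: %lu bytes (%lu pages)\n",
		       offset, offset / PAGE_SIZE);
		return 0;
	}

In the patch itself the offset is computed once, under module_kaslr_mutex, on the first module load and then reused unchanged until reboot.
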
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 06600cc9a26..67755ea834a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2060,8 +2060,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
IOAPICs that may be present in the system.
nokaslr [X86]
- Disable kernel base offset ASLR (Address Space
- Layout Randomization) if built into the kernel.
+ Disable kernel and module base offset ASLR (Address
+ Space Layout Randomization) if built into the kernel.
noautogroup Disable scheduler automatic task group creation.
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 18be189368b..e69f9882bf9 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
+#include <linux/random.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -43,13 +44,52 @@ do { \
} while (0)
#endif
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+/* Mutex protects the module_load_offset. */
+static DEFINE_MUTEX(module_kaslr_mutex);
+
+static int __init parse_nokaslr(char *p)
+{
+ randomize_modules = 0;
+ return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+ if (randomize_modules) {
+ mutex_lock(&module_kaslr_mutex);
+ /*
+ * Calculate the module_load_offset the first time this
+ * code is called. Once calculated it stays the same until
+ * reboot.
+ */
+ if (module_load_offset == 0)
+ module_load_offset =
+ (get_random_int() % 1024 + 1) * PAGE_SIZE;
+ mutex_unlock(&module_kaslr_mutex);
+ }
+ return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+ return 0;
+}
+#endif
+
void *module_alloc(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
- NUMA_NO_NODE, __builtin_return_address(0));
+ return __vmalloc_node_range(size, 1,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+ PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
#ifdef CONFIG_X86_32
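
As the Documentation/kernel-parameters.txt hunk above notes, booting with nokaslr now disables both the kernel base and the module base randomization. An illustrative GRUB-style boot entry (kernel image path and root device are placeholders):

	linux /boot/vmlinuz root=/dev/sda1 ro nokaslr
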