Diffstat (limited to 'arch/x86_64')
 arch/x86_64/Kconfig.debug    | 10 ++++++++++
 arch/x86_64/ia32/.gitignore  |  1 -
 arch/x86_64/ia32/ia32entry.S |  2 +-
 arch/x86_64/kernel/process.c |  5 +++--
 arch/x86_64/kernel/syscall.c |  2 +-
 arch/x86_64/mm/init.c        | 23 +++++++++++++++++++++++
 arch/x86_64/mm/pageattr.c    |  9 +++++++--
 7 files changed, 45 insertions(+), 7 deletions(-)
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index e2c6e64a85e..fcb06a50fdd 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,6 +9,16 @@ config INIT_DEBUG
Fill __init and __initdata at the end of boot. This helps debugging
illegal uses of __init and __initdata after initialization.
+config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ depends on DEBUG_KERNEL
+ help
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const data.
+ This option may have a slight performance impact because a portion
+ of the kernel code won't be covered by a 2MB TLB anymore.
+ If in doubt, say "N".
+
config IOMMU_DEBUG
depends on GART_IOMMU && DEBUG_KERNEL
bool "Enable IOMMU debugging"
diff --git a/arch/x86_64/ia32/.gitignore b/arch/x86_64/ia32/.gitignore
deleted file mode 100644
index 48ab174fe5f..00000000000
--- a/arch/x86_64/ia32/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vsyscall*.so
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index e0eb0c712fe..df0773c9bdb 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -341,7 +341,7 @@ ENTRY(ia32_ptregs_common)
jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC
- .data
+ .section .rodata,"a"
.align 8
.globl ia32_sys_call_table
ia32_sys_call_table:
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7519fc520eb..3060ed97b75 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
DECLARE_PER_CPU(int, cpu_state);
#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
+/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
idle_task_exit();
@@ -166,8 +166,9 @@ static inline void play_dead(void)
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
+ local_irq_disable();
while (1)
- safe_halt();
+ halt();
}
#else
static inline void play_dead(void)
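
The play_dead() change is easier to follow next to the two halt primitives it switches between: safe_halt() re-enables interrupts before halting, so a "dead" CPU would keep waking up on every interrupt, while halt() leaves the interrupt flag alone, so with local_irq_disable() called first the offlined CPU stays parked until it is physically removed or reset. A sketch of the two helpers as conventionally defined for x86-64 (paraphrased from memory of <asm/system.h> of that era, not taken from this diff):

/* Enable interrupts and halt: the CPU wakes on the next interrupt. */
static inline void safe_halt(void)
{
        __asm__ __volatile__("sti; hlt" : : : "memory");
}

/*
 * Halt without touching the interrupt flag: with IRQs already off,
 * only an NMI or INIT/reset brings the CPU back.
 */
static inline void halt(void)
{
        __asm__ __volatile__("hlt" : : : "memory");
}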
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index e263685f864..7c176b3edde 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -19,7 +19,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
-sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
/* Smells like a like a compiler bug -- it doesn't work when the & below is removed. */
[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm-x86_64/unistd.h>
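
Making sys_call_table const moves it into .rodata (matching the ia32entry.S change), so DEBUG_RODATA can cover it. The initializer relies on GCC's range-designator extension: every slot defaults to sys_ni_syscall, then the entries pulled in from unistd.h override individual slots. A self-contained sketch of the same construct with invented handler names (nothing here is kernel API):

#include <stdio.h>

#define NR_MAX 7

static void not_implemented(void) { puts("ENOSYS"); }
static void do_read(void)         { puts("read"); }
static void do_write(void)        { puts("write"); }

typedef void (*call_ptr_t)(void);

/* const puts the table in .rodata, like the patched sys_call_table. */
static const call_ptr_t call_table[NR_MAX + 1] = {
        [0 ... NR_MAX] = &not_implemented,      /* default every slot */
        [0] = &do_read,                         /* then override a few */
        [1] = &do_write,
};

int main(void)
{
        call_table[0]();        /* read */
        call_table[5]();        /* ENOSYS */
        return 0;
}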
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index c016dfe8478..1faae5fc1c0 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -498,6 +498,29 @@ void free_initmem(void)
printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}
+#ifdef CONFIG_DEBUG_RODATA
+
+extern char __start_rodata, __end_rodata;
+void mark_rodata_ro(void)
+{
+ unsigned long addr = (unsigned long)&__start_rodata;
+
+ for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+
+ printk ("Write protecting the kernel read-only data: %luk\n",
+ (&__end_rodata - &__start_rodata) >> 10);
+
+ /*
+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
+ * We do this after the printk so that if something went wrong in the
+ * change, the printk gets out at least to give a better debug hint
+ * of who is the culprit.
+ */
+ global_flush_tlb();
+}
+#endif
+
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
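
The mark_rodata_ro() added above depends on two things that are not part of this diff: the linker script must export __start_rodata and __end_rodata around the .rodata output section, and the generic boot path must call the function once late in boot. The call site below is therefore only an approximation of the companion change, with the placement assumed rather than taken from this patch:

        /* init/main.c, late in init() -- placement approximate */
        free_initmem();
#ifdef CONFIG_DEBUG_RODATA
        mark_rodata_ro();       /* from here on, stores to .rodata fault */
#endif
        system_state = SYSTEM_RUNNING;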
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe9eeb..35f1f1aab06 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
pte_t *kpte;
struct page *kpte_page;
unsigned kpte_flags;
+ pgprot_t ref_prot2;
kpte = lookup_address(address);
if (!kpte) return 0;
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
* split_large_page will take the reference for this change_page_attr
* on the split page.
*/
- struct page *split = split_large_page(address, prot, ref_prot);
+
+ struct page *split;
+ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+
+ split = split_large_page(address, prot, ref_prot2);
if (!split)
return -ENOMEM;
- set_pte(kpte,mk_pte(split, ref_prot));
+ set_pte(kpte,mk_pte(split, ref_prot2));
kpte_page = split;
}
get_page(kpte_page);
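
The pageattr.c hunk changes which protections the 4k PTEs inherit when a 2MB kernel mapping is split: rather than the caller-supplied ref_prot, they now take the large page's own protections with the PSE (page-size) bit cleared, since that bit only has meaning in the huge-page entry and must not leak into the 4k PTEs. A standalone sketch of just that bit manipulation, with a reduced set of flags and local names used purely for illustration:

#include <stdio.h>

#define _PAGE_PRESENT   0x001UL
#define _PAGE_RW        0x002UL
#define _PAGE_BIT_PSE   7
#define _PAGE_PSE       (1UL << _PAGE_BIT_PSE)  /* set in a PMD mapping a 2MB page */

int main(void)
{
        /* Protections of an existing 2MB kernel mapping (illustrative subset). */
        unsigned long large_prot = _PAGE_PRESENT | _PAGE_RW | _PAGE_PSE;

        /* What the split's 4k PTEs should inherit: the same bits with PSE
         * cleared, which is what the ref_prot2 computation in the hunk does. */
        unsigned long ref_prot2 = large_prot & ~(1UL << _PAGE_BIT_PSE);

        printf("2MB mapping prot: %#lx\n", large_prot);
        printf("4k PTE prot:      %#lx\n", ref_prot2);
        return 0;
}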