author    Russell King <rmk+kernel@arm.linux.org.uk>  2014-11-03 10:12:13 +0000
committer Russell King <rmk+kernel@arm.linux.org.uk>  2014-11-03 10:12:13 +0000
commit    06e944b8e5fc4bec83f102f98c1ee4f972f5f072 (patch)
tree      d53b1c3ca270f49f1cae63bbe117cc8587e51510
parent    f114040e3ea6e07372334ade75d1ee0775c355e1 (diff)
parent    80d6b0c2eed2a504f6740cd1f5ea76dc50abfc4d (diff)
Merge tag 'ronx-next' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into devel-stable
generic fixmaps
ARM support for CONFIG_DEBUG_RODATA
-rw-r--r--  Documentation/arm/memory.txt       |   2
-rw-r--r--  arch/arm/include/asm/cacheflush.h  |  10
-rw-r--r--  arch/arm/include/asm/fixmap.h      |  31
-rw-r--r--  arch/arm/kernel/Makefile           |   2
-rw-r--r--  arch/arm/kernel/ftrace.c           |  19
-rw-r--r--  arch/arm/kernel/jump_label.c       |   2
-rw-r--r--  arch/arm/kernel/kgdb.c             |  29
-rw-r--r--  arch/arm/kernel/machine_kexec.c    |   9
-rw-r--r--  arch/arm/kernel/patch.c            |  92
-rw-r--r--  arch/arm/kernel/patch.h            |  12
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S      |  19
-rw-r--r--  arch/arm/mm/Kconfig                |  21
-rw-r--r--  arch/arm/mm/highmem.c              |  15
-rw-r--r--  arch/arm/mm/init.c                 | 149
-rw-r--r--  arch/arm/mm/mmu.c                  |  39
15 files changed, 394 insertions, 57 deletions
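
The mechanical heart of the series is a text-poking pattern: the page holding a read-only kernel instruction is temporarily aliased through a dedicated fixmap slot, written through the writable alias, then unmapped. A condensed sketch of the idea, assuming a single int-sized, page-interior write (the function name is illustrative; the full version, with locking and the page-straddling case, is in arch/arm/kernel/patch.c below):

	#include <linux/mm.h>		/* virt_to_page(), page_to_phys() */
	#include <asm/fixmap.h>		/* set_fixmap(), clear_fixmap() */
	#include <asm/cacheflush.h>	/* flush_icache_range() */

	static void poke_ro_text(void *addr, unsigned int insn)
	{
		struct page *page = virt_to_page(addr);
		unsigned long offset = (uintptr_t)addr & ~PAGE_MASK;
		unsigned int *alias;

		/* Install a writable alias of the read-only page. */
		set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
		alias = (void *)(__fix_to_virt(FIX_TEXT_POKE0) + offset);

		*alias = insn;			/* write through the alias */

		clear_fixmap(FIX_TEXT_POKE0);
		flush_icache_range((uintptr_t)addr,
				   (uintptr_t)addr + sizeof(insn));
	}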
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 38dc06d0a79..4178ebda6e6 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -41,7 +41,7 @@ fffe8000 fffeffff DTCM mapping area for platforms with
fffe0000 fffe7fff ITCM mapping area for platforms with
ITCM mounted inside the CPU.
-ffc00000 ffdfffff Fixmap mapping region. Addresses provided
+ffc00000 ffefffff Fixmap mapping region. Addresses provided
by fix_to_virt() will be located here.
fee00000 feffffff Mapping of PCI I/O space. This is a static
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0b..2d46862e7be 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+
#endif
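
The new set_kernel_text_rw()/set_kernel_text_ro() prototypes are meant to bracket bulk text modification: drop the protection, patch, restore it, normally from a stop_machine() callback so no other CPU executes the text during its writable window. A minimal caller sketch (this mirrors the ftrace.c change below; the function name is illustrative):

	#include <linux/stop_machine.h>
	#include <asm/cacheflush.h>

	static int modify_kernel_text(void *data)
	{
		set_kernel_text_rw();	/* kernel .text becomes writable */
		/* ... rewrite instructions here ... */
		set_kernel_text_ro();	/* restore read-only protection */
		return 0;
	}

	/* All other CPUs spin while the callback runs: */
	/* stop_machine(modify_kernel_text, NULL, NULL); */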
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 74124b0d0d7..0415eae1df2 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,27 +2,24 @@
#define _ASM_FIXMAP_H
#define FIXADDR_START 0xffc00000UL
-#define FIXADDR_TOP 0xffe00000UL
-#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
+#define FIXADDR_END 0xfff00000UL
+#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
-#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
+#include <asm/kmap_types.h>
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
+enum fixed_addresses {
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
-extern void __this_fixmap_does_not_exist(void);
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
+ FIX_TEXT_POKE1,
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- if (idx >= FIX_KMAP_NR_PTES)
- __this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
-}
+ __end_of_fixed_addresses
+};
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
#endif
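
For scale, assuming 4 KiB pages: the fixmap window 0xffc00000-0xfff00000 is 3 MiB, i.e. 768 page slots, and <asm-generic/fixmap.h> resolves indices downward from the top:

	/* __fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT),       */
	/* so slot 0 sits at 0xffeff000 and slots grow toward 0xffc00000. */
	/* With ARM's KM_TYPE_NR == 16 and, say, NR_CPUS == 4:            */
	/*   FIX_KMAP_END   = 16 * 4 - 1 = 63                             */
	/*   FIX_TEXT_POKE0 = 64, FIX_TEXT_POKE1 = 65                     */
	/* comfortably inside the 768 slots; __set_fixmap() in mm/mmu.c   */
	/* (below) enforces the bound with a BUILD_BUG_ON().              */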
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 38ddd9f83d0..70b73076633 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -67,7 +67,7 @@ test-kprobes-objs += kprobes-test-arm.o
endif
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
-obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB) += kgdb.o patch.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_OF) += devtree.o
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4..b8c75e45a95 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/opcodes.h>
@@ -35,6 +36,22 @@
#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
+static int __ftrace_modify_code(void *data)
+{
+ int *command = data;
+
+ set_kernel_text_rw();
+ ftrace_modify_all_code(*command);
+ set_kernel_text_ro();
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
int ftrace_arch_code_modify_post_process(void)
{
set_all_modules_text_ro();
+ /* Make sure any TLB misses during machine stop are cleared. */
+ flush_tlb_all();
return 0;
}
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index 4ce4f789446..afeeb9ea6f4 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
insn = arm_gen_nop();
if (is_static)
- __patch_text(addr, insn);
+ __patch_text_early(addr, insn);
else
patch_text(addr, insn);
}
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a74b53c1b7d..07db2f8a1b4 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -12,8 +12,12 @@
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/uaccess.h>
+
#include <asm/traps.h>
+#include "patch.h"
+
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
@@ -244,6 +248,31 @@ void kgdb_arch_exit(void)
unregister_die_notifier(&kgdb_notifier);
}
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+
+ /* patch_text() only supports int-sized breakpoints */
+ BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
+
+ err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+
+ patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+
+ return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+ patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+
+ return 0;
+}
+
/*
* Register our undef instruction hooks with ARM undef core.
 * We register a hook specifically looking for the KGDB break inst
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8cf0996aa1a..4423a565ef6 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
+static unsigned long dt_mem;
/*
 * Provide a dummy crash_notes definition until crash dump support arrives for ARM.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image)
return err;
if (be32_to_cpu(header) == OF_DT_HEADER)
- kexec_boot_atags = current_segment->mem;
+ dt_mem = current_segment->mem;
}
return 0;
}
@@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
/* Prepare parameters for reboot_code_buffer */
+ set_kernel_text_rw();
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
- if (!kexec_boot_atags)
- kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
+ kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
+ + KEXEC_ARM_ATAGS_OFFSET;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
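
The new kexec_boot_atags assignment above uses the GNU `?:` shorthand, where `a ?: b` evaluates `a` once and falls back to `b` if it is zero. Spelled out, it is equivalent to:

	if (dt_mem)			/* a DT blob was found in a segment */
		kexec_boot_atags = dt_mem;
	else				/* fall back to the ATAGS convention */
		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET
				   + KEXEC_ARM_ATAGS_OFFSET;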
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 07314af4773..5038960e3c5 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -1,8 +1,11 @@
#include <linux/kernel.h>
+#include <linux/spinlock.h>
#include <linux/kprobes.h>
+#include <linux/mm.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
@@ -13,21 +16,77 @@ struct patch {
unsigned int insn;
};
-void __kprobes __patch_text(void *addr, unsigned int insn)
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ __acquires(&patch_lock)
+{
+ unsigned int uintaddr = (uintptr_t) addr;
+ bool module = !core_kernel_text(uintaddr);
+ struct page *page;
+
+ if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+ page = vmalloc_to_page(addr);
+ else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+ page = virt_to_page(addr);
+ else
+ return addr;
+
+ if (flags)
+ spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
+
+ set_fixmap(fixmap, page_to_phys(page));
+
+ return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ __releases(&patch_lock)
+{
+ clear_fixmap(fixmap);
+
+ if (flags)
+ spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+}
+
+void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
{
bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+ unsigned int uintaddr = (uintptr_t) addr;
+ bool twopage = false;
+ unsigned long flags;
+ void *waddr = addr;
int size;
+ if (remap)
+ waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
+ else
+ __acquire(&patch_lock);
+
if (thumb2 && __opcode_is_thumb16(insn)) {
- *(u16 *)addr = __opcode_to_mem_thumb16(insn);
+ *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
size = sizeof(u16);
- } else if (thumb2 && ((uintptr_t)addr & 2)) {
+ } else if (thumb2 && (uintaddr & 2)) {
u16 first = __opcode_thumb32_first(insn);
u16 second = __opcode_thumb32_second(insn);
- u16 *addrh = addr;
+ u16 *addrh0 = waddr;
+ u16 *addrh1 = waddr + 2;
+
+ twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
+ if (twopage && remap)
+ addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);
+
- addrh[0] = __opcode_to_mem_thumb16(first);
- addrh[1] = __opcode_to_mem_thumb16(second);
+ *addrh0 = __opcode_to_mem_thumb16(first);
+ *addrh1 = __opcode_to_mem_thumb16(second);
+ if (twopage && addrh1 != addr + 2) {
+ flush_kernel_vmap_range(addrh1, 2);
+ patch_unmap(FIX_TEXT_POKE1, NULL);
+ }
size = sizeof(u32);
} else {
@@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
else
insn = __opcode_to_mem_arm(insn);
- *(u32 *)addr = insn;
+ *(u32 *)waddr = insn;
size = sizeof(u32);
}
+ if (waddr != addr) {
+ flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+ patch_unmap(FIX_TEXT_POKE0, &flags);
+ } else
+ __release(&patch_lock);
+
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
}
@@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
.insn = insn,
};
- if (cache_ops_need_broadcast()) {
- stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
- } else {
- bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
- && __opcode_is_thumb32(insn)
- && ((uintptr_t)addr & 2);
-
- if (straddles_word)
- stop_machine(patch_text_stop_machine, &patch, NULL);
- else
- __patch_text(addr, insn);
- }
+ stop_machine(patch_text_stop_machine, &patch, NULL);
}
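
A worked example of the two-page case in __patch_text_real(), assuming 4 KiB pages and a hypothetical address: patching a 32-bit Thumb-2 instruction at 0xc0100ffe gives (uintaddr & ~PAGE_MASK) == 0xffe == PAGE_SIZE - 2, so the first halfword occupies the last two bytes of one page and the second halfword the first two bytes of the next. patch_map() therefore pins the first page at FIX_TEXT_POKE0 and the second at FIX_TEXT_POKE1, and each 16-bit store goes through its own alias before both fixmap slots are torn down again.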
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
index b4731f2dac3..77e054c2f6c 100644
--- a/arch/arm/kernel/patch.h
+++ b/arch/arm/kernel/patch.h
@@ -2,6 +2,16 @@
#define _ARM_KERNEL_PATCH_H
void patch_text(void *addr, unsigned int insn);
-void __patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, false);
+}
#endif
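
The two wrappers select how __patch_text_real() reaches the text: __patch_text() remaps through the fixmap, for use once kernel text may be read-only, while __patch_text_early() writes directly, for callers that run before the protections exist, such as the static-key initialization in jump_label.c above:

	/* boot-time, text still writable: */
	__patch_text_early(addr, insn);
	/* after text is read-only: write via a FIX_TEXT_POKE0 alias: */
	__patch_text(addr, insn);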
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8e95aa47457..b31aa73e807 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
#define PROC_INFO \
. = ALIGN(4); \
@@ -90,6 +93,11 @@ SECTIONS
_text = .;
HEAD_TEXT
}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -112,6 +120,9 @@ SECTIONS
ARM_CPU_KEEP(PROC_INFO)
}
+#ifdef CONFIG_DEBUG_RODATA
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
@@ -145,7 +156,11 @@ SECTIONS
_etext = .; /* End of text and rodata section */
#ifndef CONFIG_XIP_KERNEL
+# ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+# else
. = ALIGN(PAGE_SIZE);
+# endif
__init_begin = .;
#endif
/*
@@ -219,7 +234,11 @@ SECTIONS
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;
#else
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+#else
. = ALIGN(THREAD_SIZE);
+#endif
__init_end = .;
__data_loc = .;
#endif
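
For reference, on classic (short-descriptor) ARM page tables SECTION_SHIFT is 20, so ALIGN(1 << SECTION_SHIFT) rounds each of these boundaries up to the next 1 MiB (2 MiB under LPAE, where SECTION_SHIFT is 21); for example, an _etext of 0xc06f4000 would be pushed to 0xc0700000. Keeping head, text/rodata, and init in distinct whole sections is what allows mm/init.c (below) to flip permissions by rewriting first-level section entries rather than splitting them into 4 KiB pages.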
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ae69809a9e4..c9cd9c5bf1e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1008,3 +1008,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
help
This option specifies the architecture can support big endian
operation.
+
+config ARM_KERNMEM_PERMS
+ bool "Restrict kernel memory permissions"
+ help
+ If this is set, kernel memory other than kernel text (and rodata)
+ will be made non-executable. The tradeoff is that each region is
+ padded to section-size (1MiB) boundaries (because their permissions
+ are different and splitting the 1M pages into 4K ones causes TLB
+ performance problems), wasting memory.
+
+config DEBUG_RODATA
+ bool "Make kernel text and rodata read-only"
+ depends on ARM_KERNMEM_PERMS
+ default y
+ help
+ If this is set, kernel text and rodata will be made read-only. This
+ is to help catch accidental or malicious attempts to change the
+ kernel's executable code. It additionally splits rodata from kernel
+ text so that rodata can be made explicitly non-executable. This
+ creates another section-size padded region, so it can waste more
+ memory while gaining the read-only protections.
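
A rough worst-case bound on that padding, assuming 1 MiB sections: ARM_KERNMEM_PERMS section-aligns three boundaries in the linker script above (after the head text, at __init_begin, and at __init_end) and DEBUG_RODATA adds a fourth before the rodata, each wasting at most a bit under 1 MiB, so the total overhead is bounded by roughly 4 MiB, less what the previous PAGE_SIZE/THREAD_SIZE alignment already cost.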
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 45aeaaca905..81061987ac4 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,19 +18,20 @@
#include <asm/tlbflush.h>
#include "mm.h"
-pte_t *fixmap_page_table;
-
static inline void set_fixmap_pte(int idx, pte_t pte)
{
unsigned long vaddr = __fix_to_virt(idx);
- set_pte_ext(fixmap_page_table + idx, pte, 0);
+ pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+ set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
static inline pte_t get_fixmap_pte(unsigned long vaddr)
{
- unsigned long idx = __virt_to_fix(vaddr);
- return *(fixmap_page_table + idx);
+ pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+ return *ptep;
}
void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
* With debugging enabled, kunmap_atomic forces that entry to 0.
* Make sure it was indeed properly unmapped.
*/
- BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
/*
* When debugging is off, kunmap_atomic leaves the previous mapping
@@ -134,7 +135,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
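
The kmap slot arithmetic is unchanged; only the address lookup now goes through the generic, downward-growing fixmap. A worked example, assuming KM_TYPE_NR == 16 and 4 KiB pages, inside kmap_atomic() on CPU 2 with a pushed slot type of 3:

	idx   = 3 + 16 * 2;		/* = 35                           */
	vaddr = __fix_to_virt(35);	/* = FIXADDR_TOP - 35 * PAGE_SIZE */
					/* = 0xffeff000 - 0x23000         */
					/* = 0xffedc000                   */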
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 92bba32d923..67b15426b9c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -570,7 +571,7 @@ void __init mem_init(void)
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
- MLK(FIXADDR_START, FIXADDR_TOP),
+ MLK(FIXADDR_START, FIXADDR_END),
MLM(VMALLOC_START, VMALLOC_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
}
}
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+ unsigned long start;
+ unsigned long end;
+ pmdval_t mask;
+ pmdval_t prot;
+ pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+ /* Make page tables, etc. before _stext RW (set NX). */
+ {
+ .start = PAGE_OFFSET,
+ .end = (unsigned long)_stext,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+ /* Make init RW (set NX). */
+ {
+ .start = (unsigned long)__init_begin,
+ .end = (unsigned long)_sdata,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+#ifdef CONFIG_DEBUG_RODATA
+ /* Make rodata NX (set RO in ro_perms below). */
+ {
+ .start = (unsigned long)__start_rodata,
+ .end = (unsigned long)__init_begin,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+ /* Make kernel code and rodata RX (set RO). */
+ {
+ .start = (unsigned long)_stext,
+ .end = (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+ .mask = ~PMD_SECT_RDONLY,
+ .prot = PMD_SECT_RDONLY,
+#else
+ .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+ .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+ .clear = PMD_SECT_AP_WRITE,
+#endif
+ },
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. It is
+ * only safe to call with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+ pmdval_t prot)
+{
+ struct mm_struct *mm;
+ pmd_t *pmd;
+
+ mm = current->active_mm;
+ pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+ pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+ if (addr & SECTION_SIZE)
+ pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+ else
+ pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+ flush_pmd_entry(pmd);
+ local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return false;
+
+ return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field) { \
+ size_t i; \
+ unsigned long addr; \
+ \
+ if (!arch_has_strict_perms()) \
+ return; \
+ \
+ for (i = 0; i < ARRAY_SIZE(perms); i++) { \
+ if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
+ !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
+ pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+ perms[i].start, perms[i].end, \
+ SECTION_SIZE); \
+ continue; \
+ } \
+ \
+ for (addr = perms[i].start; \
+ addr < perms[i].end; \
+ addr += SECTION_SIZE) \
+ section_update(addr, perms[i].mask, \
+ perms[i].field); \
+ } \
+}
+
+static inline void fix_kernmem_perms(void)
+{
+ set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+ set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+ set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
+}
+
+void free_initmem(void)
+{
+ fix_kernmem_perms();
+ free_tcmmem();
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
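
A note on section_update() above for the classic (non-LPAE) case: ARM manages first-level entries in pairs covering 2 MiB, so the pmd pointer spans two 1 MiB section entries and (addr & SECTION_SIZE) selects the half to rewrite; for a hypothetical addr of 0xc0100000 bit 20 is set and pmd[1] changes, while at 0xc0200000 it is clear and pmd[0] changes. Since only current->active_mm's copy of the tables is touched, these helpers are only used with preemption disabled, e.g. from the stop_machine() callbacks in the ftrace path.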
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1..a7b12cb21e8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -393,6 +394,29 @@ SET_MEMORY_FN(x, pte_set_x)
SET_MEMORY_FN(nx, pte_set_nx)
/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+ unsigned long vaddr = __fix_to_virt(idx);
+ pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+ /* Make sure fixmap region does not exceed available allocation. */
+ BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+ FIXADDR_END);
+ BUG_ON(idx >= __end_of_fixed_addresses);
+
+ if (pgprot_val(prot))
+ set_pte_at(NULL, vaddr, pte,
+ pfn_pte(phys >> PAGE_SHIFT, prot));
+ else
+ pte_clear(NULL, vaddr, pte);
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
+/*
* Adjust the PMD section entries according to the CPU in use.
*/
static void __init build_mem_type_table(void)
@@ -1326,10 +1350,10 @@ static void __init kmap_init(void)
#ifdef CONFIG_HIGHMEM
pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
- fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
- FIXADDR_START, _PAGE_KERNEL_TABLE);
#endif
+
+ early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+ _PAGE_KERNEL_TABLE);
}
static void __init map_lowmem(void)
@@ -1349,13 +1373,20 @@ static void __init map_lowmem(void)
if (start >= end)
break;
- if (end < kernel_x_start || start >= kernel_x_end) {
+ if (end < kernel_x_start) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
+ } else if (start >= kernel_x_end) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_RW;
+
+ create_mapping(&map);
} else {
/* This better cover the entire kernel */
if (start < kernel_x_start) {