Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/mmu.c	24
1 files changed, 24 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7fa0966cd15..bdf5c94f7c3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -393,6 +394,29 @@ SET_MEMORY_FN(x, pte_set_x)
SET_MEMORY_FN(nx, pte_set_nx)

/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
+/*
* Adjust the PMD section entries according to the CPU in use.
*/
static void __init build_mem_type_table(void)
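
For context, here is a minimal usage sketch of the new helper (not part of the patch above). It assumes the generic set_fixmap()/clear_fixmap() wrappers from include/asm-generic/fixmap.h are reachable through <asm/fixmap.h>; FIX_EXAMPLE and write_via_fixmap() are hypothetical names used only for illustration, and phys is assumed to be page-aligned. Preemption is disabled around the mapping, as the comment above __set_fixmap() requires, so the purely local TLB flush is sufficient.

#include <linux/preempt.h>
#include <linux/types.h>
#include <asm/fixmap.h>

/*
 * Sketch only: FIX_EXAMPLE is a made-up slot in enum fixed_addresses,
 * not a real index.  'offset' must stay within the mapped page.
 */
static void write_via_fixmap(phys_addr_t phys, unsigned long offset, u32 val)
{
	unsigned long vaddr;

	preempt_disable();		/* keep the mapping strictly per-CPU */

	set_fixmap(FIX_EXAMPLE, phys);	/* __set_fixmap(..., FIXMAP_PAGE_NORMAL) */
	vaddr = fix_to_virt(FIX_EXAMPLE);

	*(u32 *)(vaddr + offset) = val;

	clear_fixmap(FIX_EXAMPLE);	/* pgprot value 0, so the PTE is cleared */
	preempt_enable();
}

Because the mapping is short-lived and only ever visible to the local CPU, local_flush_tlb_kernel_range() is enough and no TLB flush broadcast to other CPUs is needed, which is the point of the comment added above __set_fixmap().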