Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig       |  6
 arch/arm/mm/abort-ev6.S   |  5
 arch/arm/mm/alignment.c   |  9
 arch/arm/mm/dma-mapping.c | 98
 arch/arm/mm/extable.c     |  7
 arch/arm/mm/fault-armv.c  |  6
 arch/arm/mm/idmap.c       |  8
 arch/arm/mm/init.c        | 28
 arch/arm/mm/mm.h          |  2
 arch/arm/mm/mmap.c        |  6
 arch/arm/mm/mmu.c         | 82
 arch/arm/mm/nommu.c       |  9
 arch/arm/mm/proc-v6.S     |  4
 arch/arm/mm/proc-v7.S     |  4
 14 files changed, 211 insertions(+), 63 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cd2c88e7a8f..1f8fed94c2a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -952,3 +952,9 @@ config ARCH_HAS_BARRIERS
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+ bool
+ help
+ This option specifies the architecture can support big endian
+ operation.
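
ARCH_SUPPORTS_BIG_ENDIAN is a silent symbol, so it only takes effect when a platform selects it. A hypothetical Kconfig snippet (platform name invented) showing how a BE8-capable SoC would opt in:

config ARCH_EXAMPLE_SOC
	bool "Example big-endian capable SoC"
	select ARCH_SUPPORTS_BIG_ENDIAN
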
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 80741992a9f..3815a8262af 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
bne do_DataAbort
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r3, r3
-#endif
+ ARM_BE8(rev r3, r3)
+
do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
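
ARM_BE8() replaces the open-coded CONFIG_CPU_ENDIAN_BE8 #ifdefs: it emits its argument only on big-endian (BE8) builds. A sketch of the helper, assuming it sits with the other assembler helpers in arch/arm/include/asm/assembler.h (exact placement may differ):

#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code		/* emit only on BE8 kernels */
#else
#define ARM_BE8(code...)			/* compiled out on LE kernels */
#endif
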
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b8907..924036473b1 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
+#include <asm/opcodes.h>
#include "fault.h"
@@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = probe_kernel_address(ptr + 1, tinst2);
- instr = (tinstr << 16) | tinst2;
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
- } else
+ } else {
fault = probe_kernel_address(instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
if (fault) {
type = TYPE_FAULT;
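
The helpers from <asm/opcodes.h> convert instruction words read from memory into the kernel's canonical opcode representation, so the alignment decoder keeps working on BE8 kernels, where instructions are stored little-endian but data loads are big-endian. A rough sketch of their BE8 behaviour (the real helpers are endian-aware macros; on little-endian kernels the conversions are identity operations):

#include <linux/swab.h>
#include <linux/types.h>

/* Sketch only: approximate BE8 behaviour of the asm/opcodes.h helpers. */
static inline u32 sketch_mem_to_opcode_arm(u32 x)     { return swab32(x); }
static inline u16 sketch_mem_to_opcode_thumb16(u16 x) { return swab16(x); }
static inline u32 sketch_opcode_thumb32_compose(u16 first, u16 second)
{
	return ((u32)first << 16) | second;	/* first halfword forms the high half */
}
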
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a847171..79f8b39801a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
static u64 get_coherent_dma_mask(struct device *dev)
{
- u64 mask = (u64)arm_dma_limit;
+ u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- if ((~mask) & (u64)arm_dma_limit) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (u64)arm_dma_limit);
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then fail the
+ * allocation.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ return 0;
+ }
+
+ /*
+ * Now check that the mask, when translated to a PFN,
+ * fits within the allowable addresses which we can
+ * allocate.
+ */
+ if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ arm_dma_pfn_limit + 1);
return 0;
}
}
@@ -687,7 +707,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -700,7 +720,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- if (mask < (u64)arm_dma_limit)
+ unsigned long limit;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+ return 0;
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ limit = dma_to_pfn(dev, mask);
+
+ if (limit < arm_dma_pfn_limit)
return 0;
+
return 1;
}
EXPORT_SYMBOL(dma_supported);
@@ -1232,7 +1271,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
break;
len = (j - i) << PAGE_SHIFT;
- ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ ret = iommu_map(mapping->domain, iova, phys, len,
+ IOMMU_READ|IOMMU_WRITE);
if (ret < 0)
goto fail;
iova += len;
@@ -1431,6 +1471,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+ int prot;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ prot = IOMMU_READ | IOMMU_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ prot = IOMMU_READ;
+ break;
+ case DMA_FROM_DEVICE:
+ prot = IOMMU_WRITE;
+ break;
+ default:
+ prot = 0;
+ }
+
+ return prot;
+}
+
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
@@ -1444,6 +1505,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int ret = 0;
unsigned int count;
struct scatterlist *s;
+ int prot;
size = PAGE_ALIGN(size);
*handle = DMA_ERROR_CODE;
@@ -1460,7 +1522,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
- ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ prot = __dma_direction_to_prot(dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
goto fail;
count += len >> PAGE_SHIFT;
@@ -1665,19 +1729,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- switch (dir) {
- case DMA_BIDIRECTIONAL:
- prot = IOMMU_READ | IOMMU_WRITE;
- break;
- case DMA_TO_DEVICE:
- prot = IOMMU_READ;
- break;
- case DMA_FROM_DEVICE:
- prot = IOMMU_WRITE;
- break;
- default:
- prot = 0;
- }
+ prot = __dma_direction_to_prot(dir);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)
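
The reworked mask handling checks two things: that a wide coherent mask can be represented in dma_addr_t at all, and that the mask, translated to a page frame number, covers the DMA zone. As a worked example, assuming 4K pages, a 1:1 DMA-to-physical translation and a 256MB DMA zone starting at physical 0: arm_dma_limit = 0x0fffffff, hence arm_dma_pfn_limit = 0xffff; a driver that sets a coherent mask of DMA_BIT_MASK(24) yields dma_to_pfn(dev, mask) = 0xfff, which is below arm_dma_pfn_limit, so get_coherent_dma_mask() and dma_supported() reject it, as the old arm_dma_limit comparison did, while the PFN-based form also stays correct when the mask is wider than dma_addr_t.
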
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9d285626bc7..312e15e6d00 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -9,8 +9,13 @@ int fixup_exception(struct pt_regs *regs)
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
- if (fixup)
+ if (fixup) {
regs->ARM_pc = fixup->fixup;
+#ifdef CONFIG_THUMB2_KERNEL
+ /* Clear the IT state to avoid nasty surprises in the fixup */
+ regs->ARM_cpsr &= ~PSR_IT_MASK;
+#endif
+ }
return fixup != NULL;
}
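
Clearing the IT bits matters because a Thumb-2 kernel can fault inside an IT block; without this, the stale if-then state would be applied to the first instructions of the fixup code. PSR_IT_MASK covers the CPSR's split IT field, roughly as follows (the canonical definition lives in asm/ptrace.h):

#define PSR_IT_MASK	0x0600fc00	/* IT[1:0] in CPSR bits 26:25, IT[7:2] in bits 15:10 */
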
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2a5907b5c8d..ff379ac115d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -65,7 +65,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
/*
* If we are using split PTE locks, then we need to take the page
* lock here. Otherwise we are using shared mm->page_table_lock
@@ -84,10 +84,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
{
spin_unlock(ptl);
}
-#else /* !USE_SPLIT_PTLOCKS */
+#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 83cb3ac2709..8e0e52eb76b 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -10,6 +10,7 @@
#include <asm/system_info.h>
pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
unsigned long addr, end;
unsigned long next;
- addr = virt_to_phys(text_start);
- end = virt_to_phys(text_end);
+ addr = virt_to_idmap(text_start);
+ end = virt_to_idmap(text_end);
+ pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
@@ -90,8 +92,6 @@ static int __init init_static_idmap(void)
if (!idmap_pgd)
return -ENOMEM;
- pr_info("Setting up static identity map for 0x%p - 0x%p\n",
- __idmap_text_start, __idmap_text_end);
identity_mapping_add(idmap_pgd, __idmap_text_start,
__idmap_text_end, 0);
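
virt_to_idmap() lets a platform supply an alternative physical alias for the identity map, which matters on LPAE machines whose normal physical addresses sit above 4GB and therefore cannot be identity-mapped. A minimal sketch of the expected fallback, assuming the arch_virt_to_idmap hook added above is consulted first (helper name suffixed to mark it as illustrative):

static inline phys_addr_t virt_to_idmap_sketch(unsigned long x)
{
	if (arch_virt_to_idmap)
		return arch_virt_to_idmap(x);	/* platform-provided alias */
	return virt_to_phys((void *)x);		/* default 1:1 behaviour */
}
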
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57..3e8f106ee5f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,7 +17,6 @@
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
@@ -77,14 +76,6 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- phys_initrd_start = start;
- phys_initrd_size = end - start;
-}
-#endif /* CONFIG_OF_FLATTREE */
-
/*
* This keeps memory configuration data used by a couple memory
* initialization functions, as well as show_mem() for the skipping
@@ -218,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
@@ -240,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
+ arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
@@ -351,6 +344,11 @@ void __init arm_memblock_init(struct meminfo *mi,
memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
+ /* FDT scan will populate initrd_start */
+ if (initrd_start) {
+ phys_initrd_start = __virt_to_phys(initrd_start);
+ phys_initrd_size = initrd_end - initrd_start;
+ }
if (phys_initrd_size &&
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
@@ -379,8 +377,6 @@ void __init arm_memblock_init(struct meminfo *mi,
if (mdesc->reserve)
mdesc->reserve();
- early_init_dt_scan_reserved_mem();
-
/*
* reserve memory for DMA contigouos allocations,
* must come from DMA area inside low memory
@@ -424,12 +420,10 @@ void __init bootmem_init(void)
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
- *
- * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
- * the system, not the maximum PFN.
*/
- max_low_pfn = max_low - PHYS_PFN_OFFSET;
- max_pfn = max_high - PHYS_PFN_OFFSET;
+ min_low_pfn = min;
+ max_low_pfn = max_low;
+ max_pfn = max_high;
}
/*
@@ -535,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ unsigned long max_low = max_low_pfn;
struct memblock_region *mem, *res;
/* set highmem page free */
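
The bootmem_init() change switches min_low_pfn, max_low_pfn and max_pfn from "pages above PHYS_PFN_OFFSET" to absolute page frame numbers. As a worked example, assuming 4K pages, PHYS_OFFSET = 0x80000000 (so PHYS_PFN_OFFSET = 0x80000) and 512MB of lowmem: the old code set max_low_pfn = 0x20000, the new code sets it to 0xa0000, and free_highpages() drops its PHYS_PFN_OFFSET adjustment so it keeps comparing memblock regions against an absolute PFN.
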
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0..d5a982d15a8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 0c6356255fe..d27158c38eb 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
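
The old /dev/mem bound hard-coded pfn 0x00100000, i.e. 4GB with 4K pages; the new check derives the limit from PHYS_MASK. Assuming an LPAE build with PHYS_MASK_SHIFT = 40, PHYS_MASK >> PAGE_SHIFT is 0x0fffffff, so mappings may extend up to pfn 0x10000000 (a 1TB physical range), while non-LPAE builds, whose PHYS_MASK is ~0UL, keep the effective 4GB limit.
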
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17eeb59b..78eeeca78f5 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,8 @@
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
}
}
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+ pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+ unsigned long map_start, map_end;
+ pgd_t *pgd0, *pgdk;
+ pud_t *pud0, *pudk, *pud_start;
+ pmd_t *pmd0, *pmdk;
+ phys_addr_t phys;
+ int i;
+
+ if (!(mdesc->init_meminfo))
+ return;
+
+ /* remap kernel code and data */
+ map_start = init_mm.start_code;
+ map_end = init_mm.brk;
+
+ /* get a handle on things... */
+ pgd0 = pgd_offset_k(0);
+ pud_start = pud0 = pud_offset(pgd0, 0);
+ pmd0 = pmd_offset(pud0, 0);
+
+ pgdk = pgd_offset_k(map_start);
+ pudk = pud_offset(pgdk, map_start);
+ pmdk = pmd_offset(pudk, map_start);
+
+ mdesc->init_meminfo();
+
+ /* Run the patch stub to update the constants */
+ fixup_pv_table(&__pv_table_begin,
+ (&__pv_table_end - &__pv_table_begin) << 2);
+
+ /*
+ * Cache cleaning operations for self-modifying code
+ * We should clean the entries by MVA but running a
+ * for loop over every pv_table entry pointer would
+ * just complicate the code.
+ */
+ flush_cache_louis();
+ dsb();
+ isb();
+
+ /* remap level 1 table */
+ for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+ set_pud(pud0,
+ __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+ pmd0 += PTRS_PER_PMD;
+ }
+
+ /* remap pmds for kernel mapping */
+ phys = __pa(map_start) & PMD_MASK;
+ do {
+ *pmdk++ = __pmd(phys | pmdprot);
+ phys += PMD_SIZE;
+ } while (phys < map_end);
+
+ flush_cache_all();
+ cpu_switch_mm(pgd0, &init_mm);
+ cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+ local_flush_bp_all();
+ local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+ if (mdesc->init_meminfo)
+ mdesc->init_meminfo();
+}
+
+#endif
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
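
early_paging_init() only does real work for machines that provide an init_meminfo hook in their machine_desc: the hook adjusts the platform's view of physical memory, after which the function re-patches the phys-to-virt tables and rebuilds the level 1 entries at the new offset (TI Keystone 2 is the kind of platform this was written for). A hypothetical board hook, with all board specifics invented and only the .init_meminfo field taken from this patch:

static void __init example_board_init_meminfo(void)
{
	/*
	 * Called from early_paging_init(): switch the platform's physical
	 * offset to its >4GB RAM alias here, before the pv tables are
	 * re-patched and the TTBRs are reloaded.
	 */
}

DT_MACHINE_START(EXAMPLE_BOARD, "Hypothetical LPAE board")
	/* ... other fields (.dt_compat etc.) omitted ... */
	.init_meminfo	= example_board_init_meminfo,
MACHINE_END
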
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 34d4ab217ba..5c668b7a31f 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -296,6 +296,15 @@ void __init sanity_check_meminfo(void)
}
/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+ struct proc_info_list *procinfo)
+{
+}
+
+/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1128064fddc..45dc29f85d5 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -220,9 +220,7 @@ __v6_setup:
#endif /* CONFIG_MMU */
adr r5, v6_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c63d9bdee51..60920f62fdf 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -367,9 +367,7 @@ __v7_setup:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"