author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 12:06:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 12:06:03 -0700
commit		e4f30545a23b6e22fb347a1ad034ce1aa280209c (patch)
tree		415db60ace3ab89fe150bca797c690ddaf021432
parent		d586c86d50cefa0897a51a2dbc714060ccedae76 (diff)
parent		ebf81a938dade3b450eb11c57fa744cfac4b523f (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull second set of arm64 updates from Catalin Marinas:
"A second pull request for this merging window, mainly with fixes and
docs clarification:
- Documentation clarification on CPU topology and booting
requirements
- Additional cache flushing during boot (needed in the presence of
external caches or under virtualisation)
- DMA range invalidation fix for non cache line aligned buffers
- Build failure fix with !COMPAT
- Kconfig update for STRICT_DEVMEM"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: Fix DMA range invalidation for cache line unaligned buffers
arm64: Add missing Kconfig for CONFIG_STRICT_DEVMEM
arm64: fix !CONFIG_COMPAT build failures
Revert "arm64: virt: ensure visibility of __boot_cpu_mode"
arm64: Relax the kernel cache requirements for boot
arm64: Update the TCR_EL1 translation granule definitions for 16K pages
ARM: topology: Make it clear that all CPUs need to be described
-rw-r--r--	Documentation/arm64/booting.txt				10
-rw-r--r--	Documentation/devicetree/bindings/arm/topology.txt	7
-rw-r--r--	arch/arm64/Kconfig.debug				14
-rw-r--r--	arch/arm64/include/asm/pgtable-hwdef.h			6
-rw-r--r--	arch/arm64/include/asm/virt.h				13
-rw-r--r--	arch/arm64/kernel/head.S				30
-rw-r--r--	arch/arm64/kernel/perf_event.c				4
-rw-r--r--	arch/arm64/kernel/perf_regs.c				2
-rw-r--r--	arch/arm64/mm/cache.S					24
-rw-r--r--	arch/arm64/mm/proc.S					25
10 files changed, 99 insertions, 36 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index a9691cc48fe..beb754e87c6 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -111,8 +111,14 @@ Before jumping into the kernel, the following conditions must be met:
 - Caches, MMUs
   The MMU must be off.
   Instruction cache may be on or off.
-  Data cache must be off and invalidated.
-  External caches (if present) must be configured and disabled.
+  The address range corresponding to the loaded kernel image must be
+  cleaned to the PoC. In the presence of a system cache or other
+  coherent masters with caches enabled, this will typically require
+  cache maintenance by VA rather than set/way operations.
+  System caches which respect the architected cache maintenance by VA
+  operations must be configured and may be enabled.
+  System caches which do not respect architected cache maintenance by VA
+  operations (not recommended) must be configured and disabled.

 - Architected timers
   CNTFRQ must be programmed with the timer frequency and CNTVOFF must
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index 4aa20e7a424..1061faf5f60 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -75,9 +75,10 @@ The cpu-map node can only contain three types of child nodes:

 whose bindings are described in paragraph 3.

-The nodes describing the CPU topology (cluster/core/thread) can only be
-defined within the cpu-map node.
-Any other configuration is consider invalid and therefore must be ignored.
+The nodes describing the CPU topology (cluster/core/thread) can only
+be defined within the cpu-map node and every core/thread in the system
+must be defined within the topology. Any other configuration is
+invalid and therefore must be ignored.

 ===========================================
 2.1 - cpu-map child nodes naming convention
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 835c559786b..d10ec334c93 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,6 +6,20 @@ config FRAME_POINTER
 	bool
 	default y

+config STRICT_DEVMEM
+	bool "Filter access to /dev/mem"
+	depends on MMU
+	help
+	  If this option is disabled, you allow userspace (root) access to all
+	  of memory, including kernel and userspace memory. Accidental
+	  access to this is obviously disastrous, but specific access can
+	  be used by people debugging the kernel.
+
+	  If this option is switched on, the /dev/mem file only allows
+	  userspace access to memory mapped peripherals.
+
+	  If in doubt, say Y.
+
 config EARLY_PRINTK
 	bool "Early printk support"
 	default y
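The booting.txt change above is worth spelling out: with a system cache or other coherent masters present, set/way operations only act on the local caches, so a loader has to clean the image range to the PoC line by line, by VA. A minimal illustrative sketch of such a loop in C (not part of this patch; the function name is hypothetical and a fixed line size is assumed, where a real loader would derive it from CTR_EL0):

#include <stdint.h>

#define CACHE_LINE_SIZE	64	/* assumed D-cache line size */

/* Clean a loaded image range to the PoC, line by line, by VA. */
static void clean_image_to_poc(uintptr_t start, uintptr_t end)
{
	uintptr_t addr = start & ~(uintptr_t)(CACHE_LINE_SIZE - 1);

	for (; addr < end; addr += CACHE_LINE_SIZE)
		asm volatile("dc cvac, %0" : : "r" (addr) : "memory");

	asm volatile("dsb sy" : : : "memory");	/* complete the maintenance */
}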
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index f7af66b54cb..5fc8a66c392 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -120,8 +120,12 @@
 #define TCR_ORGN_WBnWA		((UL(3) << 10) | (UL(3) << 26))
 #define TCR_ORGN_MASK		((UL(3) << 10) | (UL(3) << 26))
 #define TCR_SHARED		((UL(3) << 12) | (UL(3) << 28))
+#define TCR_TG0_4K		(UL(0) << 14)
 #define TCR_TG0_64K		(UL(1) << 14)
-#define TCR_TG1_64K		(UL(1) << 30)
+#define TCR_TG0_16K		(UL(2) << 14)
+#define TCR_TG1_16K		(UL(1) << 30)
+#define TCR_TG1_4K		(UL(2) << 30)
+#define TCR_TG1_64K		(UL(3) << 30)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 130e2be952c..215ad4649dd 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -22,7 +22,6 @@
 #define BOOT_CPU_MODE_EL2	(0xe12)

 #ifndef __ASSEMBLY__
-#include <asm/cacheflush.h>

 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -38,20 +37,9 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 phys_addr_t __hyp_get_vectors(void);

-static inline void sync_boot_mode(void)
-{
-	/*
-	 * As secondaries write to __boot_cpu_mode with caches disabled, we
-	 * must flush the corresponding cache entries to ensure the visibility
-	 * of their writes.
-	 */
-	__flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
-}
-
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
-	sync_boot_mode();
 	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
 		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
 }
@@ -59,7 +47,6 @@ static inline bool is_hyp_mode_available(void)
 /* Check if the bootloader has booted CPUs in different modes */
 static inline bool is_hyp_mode_mismatched(void)
 {
-	sync_boot_mode();
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 1fe5d8d2bdf..0fd56500077 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 #include <asm/cputype.h>
 #include <asm/memory.h>
 #include <asm/thread_info.h>
@@ -229,7 +230,11 @@ ENTRY(set_cpu_boot_mode_flag)
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4
-1:	str	w20, [x1]			// This CPU has booted in EL1
+1:	dc	cvac, x1			// Clean potentially dirty cache line
+	dsb	sy
+	str	w20, [x1]			// This CPU has booted in EL1
+	dc	civac, x1			// Clean&invalidate potentially stale cache line
+	dsb	sy
 	ret
 ENDPROC(set_cpu_boot_mode_flag)

@@ -240,8 +245,9 @@ ENDPROC(set_cpu_boot_mode_flag)
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
-	.pushsection	.data
+	.pushsection	.data..cacheline_aligned
 ENTRY(__boot_cpu_mode)
+	.align	L1_CACHE_SHIFT
 	.long	BOOT_CPU_MODE_EL2
 	.long	0
 	.popsection
@@ -408,6 +414,15 @@ ENDPROC(__calc_phys_offset)
 */
 __create_page_tables:
 	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	mov	x27, lr
+
+	/*
+	 * Invalidate the idmap and swapper page tables to avoid potential
+	 * dirty cache lines being evicted.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range

 	/*
	 * Clear the idmap and swapper page tables.
@@ -467,6 +482,17 @@ __create_page_tables:
 	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
 	add	x0, x26, #2 * PAGE_SIZE		// section table address
 	create_pgd_entry x26, x0, x5, x6, x7
+
+	/*
+	 * Since the page tables have been populated with non-cacheable
+	 * accesses (MMU disabled), invalidate the idmap and swapper page
+	 * tables again to remove any speculatively loaded cache lines.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
+
+	mov	lr, x27
 	ret
 ENDPROC(__create_page_tables)
 	.ltorg
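The set_cpu_boot_mode_flag sequence above is the writer-side half of the __boot_cpu_mode protocol that made the virt.h revert safe: a CPU writing the flag with its MMU and caches off must first clean any dirty line another CPU may hold, then push its own uncached store out so cached readers re-fetch it. Roughly, in C with inline assembly (an illustrative sketch only, with a hypothetical helper name; the kernel keeps this in assembly because it runs before the MMU is up):

#include <stdint.h>

/* Publish a value through an uncached store to a flag that other CPUs
 * read through their (enabled) data caches. */
static void publish_boot_flag(volatile uint32_t *flag, uint32_t mode)
{
	/* Clean a potentially dirty line so it cannot be evicted on top
	 * of the uncached store below. */
	asm volatile("dc cvac, %0" : : "r" (flag) : "memory");
	asm volatile("dsb sy" : : : "memory");

	*flag = mode;			/* store bypasses the caches (MMU off) */

	/* Clean+invalidate the potentially stale line so cached readers
	 * fetch the new value from memory. */
	asm volatile("dc civac, %0" : : "r" (flag) : "memory");
	asm volatile("dsb sy" : : : "memory");
}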
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index e868c72a793..baf5afb7e6a 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1386,6 +1386,7 @@ user_backtrace(struct frame_tail __user *tail,
 	return buftail.fp;
 }

+#ifdef CONFIG_COMPAT
 /*
  * The registers we're interested in are at the end of the variable
  * length saved register structure. The fp points at the end of this
@@ -1430,6 +1431,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,

 	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
 }
+#endif /* CONFIG_COMPAT */

 void perf_callchain_user(struct perf_callchain_entry *entry,
 			 struct pt_regs *regs)
@@ -1451,6 +1453,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 		    tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {
+#ifdef CONFIG_COMPAT
 		/* AARCH32 compat mode */
 		struct compat_frame_tail __user *tail;

@@ -1459,6 +1462,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
 		       tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
+#endif
 	}
 }
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index f2d6f0a36d6..422ebd63b61 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -2,6 +2,8 @@
 #include <linux/kernel.h>
 #include <linux/perf_event.h>
 #include <linux/bug.h>
+
+#include <asm/compat.h>
 #include <asm/perf_regs.h>
 #include <asm/ptrace.h>
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index c46f48b33c1..fda756875fa 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -168,6 +168,14 @@ ENTRY(__flush_dcache_area)
 ENDPROC(__flush_dcache_area)

 /*
+ * __inval_cache_range(start, end)
+ * - start   - start address of region
+ * - end     - end address of region
+ */
+ENTRY(__inval_cache_range)
+	/* FALLTHROUGH */
+
+/*
  * __dma_inv_range(start, end)
  * - start   - virtual start address of region
  * - end     - virtual end address of region
@@ -175,14 +183,22 @@ ENDPROC(__flush_dcache_area)
 __dma_inv_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
-	bic	x0, x0, x3
+	tst	x1, x3				// end cache line aligned?
 	bic	x1, x1, x3
-1:	dc	ivac, x0			// invalidate D / U line
-	add	x0, x0, x2
+	b.eq	1f
+	dc	civac, x1			// clean & invalidate D / U line
+1:	tst	x0, x3				// start cache line aligned?
+	bic	x0, x0, x3
+	b.eq	2f
+	dc	civac, x0			// clean & invalidate D / U line
+	b	3f
+2:	dc	ivac, x0			// invalidate D / U line
+3:	add	x0, x0, x2
 	cmp	x0, x1
-	b.lo	1b
+	b.lo	2b
 	dsb	sy
 	ret
+ENDPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)

 /*
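The core of the DMA fix is visible in __dma_inv_range above: when a buffer's start or end is not cache line aligned, its edge lines share data with neighbouring owners, and plainly invalidating them would discard a neighbour's (possibly dirty) data. The edge lines are therefore cleaned and invalidated, while whole interior lines can simply be invalidated. The same logic in C, as an illustrative sketch with a fixed line size (the real code reads the line size via dcache_line_size, i.e. from CTR_EL0):

#include <stdint.h>

#define LINE	64	/* assumed cache line size */

static void dma_inv_range(uintptr_t start, uintptr_t end)
{
	const uintptr_t mask = LINE - 1;
	uintptr_t aligned_end = end & ~mask;
	uintptr_t addr = start & ~mask;

	/* Partial line at the end: clean+invalidate to preserve the
	 * neighbouring data beyond the buffer. */
	if (end & mask)
		asm volatile("dc civac, %0" : : "r" (aligned_end) : "memory");

	/* Partial line at the start: same treatment, then step past it. */
	if (start & mask) {
		asm volatile("dc civac, %0" : : "r" (addr) : "memory");
		addr += LINE;
	}

	/* Whole lines inside the buffer can simply be invalidated. */
	for (; addr < aligned_end; addr += LINE)
		asm volatile("dc ivac, %0" : : "r" (addr) : "memory");

	asm volatile("dsb sy" : : : "memory");	/* complete the maintenance */
}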
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e085ee6ef4e..9042aff5e9e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -28,14 +28,21 @@

 #include "proc-macros.S"

-#ifndef CONFIG_SMP
-/* PTWs cacheable, inner/outer WBWA not shareable */
-#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#ifdef CONFIG_ARM64_64K_PAGES
+#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
+#else
+#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
+#endif
+
+#ifdef CONFIG_SMP
+#define TCR_SMP_FLAGS	TCR_SHARED
 #else
-/* PTWs cacheable, inner/outer WBWA shareable */
-#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
+#define TCR_SMP_FLAGS	0
 #endif

+/* PTWs cacheable, inner/outer WBWA */
+#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
+
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))

 /*
@@ -209,18 +216,14 @@ ENTRY(__cpu_setup)
	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
	 * both user and kernel.
	 */
-	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
-		      TCR_ASID16 | TCR_TBI0 | (1 << 31)
+	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+		      TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
 	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
	 * TCR_EL1.
	 */
 	mrs	x9, ID_AA64MMFR0_EL1
 	bfi	x10, x9, #32, #3
-#ifdef CONFIG_ARM64_64K_PAGES
-	orr	x10, x10, TCR_TG0_64K
-	orr	x10, x10, TCR_TG1_64K
-#endif
 	msr	tcr_el1, x10
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
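One detail worth noting in the __cpu_setup hunk: the bare (1 << 31) that the old code OR'd in unconditionally was really the hidden upper half of the TG1 granule field (bits [31:30]), since TG1 encodes 4K as 2 and 64K as 3 while TG0 uses 0 and 1; the new TCR_TG0_* and TCR_TG1_* definitions make those field values explicit. A C sketch of how the reworked flag groups compose a TCR_EL1 value (the helper function is hypothetical; the macro values are taken from pgtable-hwdef.h above):

#include <stdint.h>

#define UL(x)		x##UL
#define TCR_TG0_4K	(UL(0) << 14)	/* TG0: TTBR0 granule */
#define TCR_TG0_64K	(UL(1) << 14)
#define TCR_TG1_4K	(UL(2) << 30)	/* TG1: TTBR1 granule, different encoding */
#define TCR_TG1_64K	(UL(3) << 30)
#define TCR_SHARED	((UL(3) << 12) | (UL(3) << 28))
#define TCR_ASID16	(UL(1) << 36)
#define TCR_TBI0	(UL(1) << 37)

/* Compose TCR_EL1 from independent flag groups, mirroring the
 * TCR_TG_FLAGS / TCR_SMP_FLAGS / TCR_CACHE_FLAGS split above. */
static uint64_t make_tcr(uint64_t txsz, uint64_t cache_flags,
			 int use_64k_pages, int smp)
{
	uint64_t tcr = txsz | cache_flags | TCR_ASID16 | TCR_TBI0;

	/* Granule size for both TTBR0 (TG0) and TTBR1 (TG1) walks. */
	tcr |= use_64k_pages ? (TCR_TG0_64K | TCR_TG1_64K)
			     : (TCR_TG0_4K | TCR_TG1_4K);

	if (smp)
		tcr |= TCR_SHARED;	/* page-table walks shareable on SMP */

	return tcr;
}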