From a3a80544acb3dfa97d43b8eee1332fe1fca7fe51 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 5 Aug 2014 10:25:55 +0100
Subject: arm64: fix typo in I-cache policy detection

This removes an unfortunately placed semi-colon resulting in all
instruction caches being classified as AIVIVT.

Signed-off-by: Ard Biesheuvel
Acked-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpuinfo.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index f798f66634a..17716962302 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -49,7 +49,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 
 	if (l1ip != ICACHE_POLICY_PIPT)
 		set_bit(ICACHEF_ALIASING, &__icache_flags);
-	if (l1ip == ICACHE_POLICY_AIVIVT);
+	if (l1ip == ICACHE_POLICY_AIVIVT)
 		set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
 	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
--
cgit v1.2.3-70-g09d2

From 4190312beb2acfb7bfb1bb971e24a759aa96b0e8 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 13 Aug 2014 18:53:03 +0100
Subject: arm64: align randomized TEXT_OFFSET on 4 kB boundary

When booting via UEFI, the kernel Image is loaded at a 4 kB boundary and
the embedded EFI stub is executed in place. The EFI stub relocates the
Image to reside TEXT_OFFSET bytes above a 2 MB boundary, and jumps into
the kernel proper.

In AArch64, PC relative symbol references are emitted using adrp/add or
adrp/ldr pairs, where the offset into a 4 kB page is resolved using a
separate :lo12: relocation. This implicitly assumes that the code will
always be executed at the same relative offset with respect to a 4 kB
boundary, or the references will point to the wrong address.

This means we should link the kernel at a 4 kB aligned base address in
order to remain compatible with the base address the UEFI loader uses
when doing the initial load of Image. So update the code that generates
TEXT_OFFSET to choose a multiple of 4 kB.

At the same time, update the code so it chooses from the interval
[0..2MB) as the author originally intended.

Reviewed-by: Mark Rutland
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/Makefile      | 2 +-
 arch/arm64/kernel/head.S | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 57833546bf0..2df5e5daeeb 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -39,7 +39,7 @@ head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
 else
 TEXT_OFFSET := 0x00080000
 endif

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 144f10567f8..bed028364a9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -38,11 +38,11 @@
 
 #define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
 
-#if (TEXT_OFFSET & 0xf) != 0
-#error TEXT_OFFSET must be at least 16B aligned
-#elif (PAGE_OFFSET & 0xfffff) != 0
+#if (TEXT_OFFSET & 0xfff) != 0
+#error TEXT_OFFSET must be at least 4KB aligned
+#elif (PAGE_OFFSET & 0x1fffff) != 0
 #error PAGE_OFFSET must be at least 2MB aligned
-#elif TEXT_OFFSET > 0xfffff
+#elif TEXT_OFFSET > 0x1fffff
 #error TEXT_OFFSET must be less than 2MB
 #endif
--
cgit v1.2.3-70-g09d2
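As an illustration of the constraint this patch enforces (not part of the commit), the new awk expression amounts to picking a random multiple of 0x1000 below 0x200000, which is exactly what the updated #error checks in head.S accept. A minimal userspace C sketch of the same computation:

	/* Hypothetical sketch: mirrors the awk one-liner above, not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>
	#include <assert.h>

	int main(void)
	{
		srand(time(NULL));

		/* A random multiple of 4 kB in the interval [0, 2 MB). */
		unsigned long text_offset = (unsigned long)(rand() % 512) * 0x1000;

		/* The conditions head.S now enforces at build time. */
		assert((text_offset & 0xfff) == 0);   /* 4 kB aligned   */
		assert(text_offset < 0x200000);       /* less than 2 MB */

		printf("TEXT_OFFSET := 0x%06lx\n", text_offset);
		return 0;
	}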
From 86c8b27a01cf6c16fc159ade223cb2ccc70dc4b5 Mon Sep 17 00:00:00 2001
From: Leif Lindholm
Date: Mon, 28 Jul 2014 19:03:03 +0100
Subject: arm64: ignore DT memreserve entries when booting in UEFI mode

UEFI provides its own method for marking regions to reserve, via the
memory map which is also used to initialise memblock. So when using the
UEFI memory map, ignore any memreserve entries present in the DT.

Reported-by: Mark Rutland
Reviewed-by: Mark Rutland
Acked-by: Catalin Marinas
Signed-off-by: Leif Lindholm
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/efi.c | 2 ++
 arch/arm64/mm/init.c    | 4 +++-
 2 files changed, 5 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e72f3100958..24f0c6fb61d 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -188,6 +188,8 @@ static __init void reserve_regions(void)
 		if (uefi_debug)
 			pr_cont("\n");
 	}
+
+	set_bit(EFI_MEMMAP, &efi.flags);
 }

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5b4526ee3a0..5472c240187 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -148,7 +149,8 @@ void __init arm64_memblock_init(void)
 		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-	early_init_fdt_scan_reserved_mem();
+	if (!efi_enabled(EFI_MEMMAP))
+		early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
--
cgit v1.2.3-70-g09d2

From 44b375070fa3defa6bf1010bd0e4e64f7986bfc4 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Aug 2014 22:05:45 +0100
Subject: Revert "arm64: Do not invoke audit_syscall_* functions if !CONFIG_AUDIT_SYSCALL"

For some reason, the audit patches didn't make it out of -next this
merge window, so revert our temporary hack and let the audit guys deal
with fixing up -next.

This reverts commit 2a8f45b040bcb9b2ad2845f061499d1b6f41cc7b.

Signed-off-by: Will Deacon
---
 arch/arm64/kernel/ptrace.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 0310811bd77..70526cfda05 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1115,19 +1115,15 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, regs->syscallno);
 
-#ifdef CONFIG_AUDITSYSCALL
 	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
 			    regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
-#endif
 
 	return regs->syscallno;
 }
 
 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
-#ifdef CONFIG_AUDITSYSCALL
 	audit_syscall_exit(regs);
-#endif
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_exit(regs, regs_return_value(regs));
--
cgit v1.2.3-70-g09d2
From 6a7519e81321343165f89abb8b616df186d3e57a Mon Sep 17 00:00:00 2001
From: Semen Protsenko
Date: Fri, 15 Aug 2014 16:22:44 +0300
Subject: efi/arm64: Store Runtime Services revision

The "efi" global data structure contains a "runtime_version" field which
must be assigned in order to use it later in Runtime Services virtual
calls (virt_efi_* functions). Before this patch, "runtime_version" was
unassigned (0), so each Runtime Services virtual call that checks the
revision would fail.

Signed-off-by: Semen Protsenko
Acked-by: Ard Biesheuvel
Cc:
Signed-off-by: Matt Fleming
---
 arch/arm64/kernel/efi.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e72f3100958..5dbb7bd3b83 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -463,6 +463,8 @@ static int __init arm64_enter_virtual_mode(void)
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
+	efi.runtime_version = efi.systab->hdr.revision;
+
 	return 0;
 
 err_unmap:
--
cgit v1.2.3-70-g09d2

From 5843be2279d7a91ef48c20ac31715d1eb9607a84 Mon Sep 17 00:00:00 2001
From: Geoff Levand
Date: Fri, 22 Aug 2014 20:49:16 +0100
Subject: arm64: Remove unused variable in head.S

Remove an unused local variable from head.S. It seems this was never
used even from the initial commit 9703d9d7f77ce129621f7d80a844822e2daa7008
(arm64: Kernel booting and initialisation), and is left over from a
previous implementation of __calc_phys_offset.

Signed-off-by: Geoff Levand
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index bed028364a9..87306905622 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -373,10 +373,6 @@ ENTRY(__boot_cpu_mode)
 	.long	0
 	.popsection
 
-	.align 3
-2:	.quad	.
-	.quad	PAGE_OFFSET
-
 #ifdef CONFIG_SMP
 	.align 3
 1:	.quad	.
--
cgit v1.2.3-70-g09d2

From 27d7ff273c2aad37b28f6ff0cab2cfa35b51e648 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 22 Aug 2014 14:13:24 +0100
Subject: arm64: ptrace: fix compat hardware watchpoint reporting

I'm not sure what I was on when I wrote this, but when iterating over
the hardware watchpoint array (hbp_watch_array), our index is off by
ARM_MAX_BRP, so we walk off the end of our thread_struct...

... except, a dodgy condition in the loop means that it never executes
at all (bp cannot be NULL).

This patch fixes the code so that we remove the bp check and use the
correct index for accessing the watchpoint structures.

Cc:
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/hw_breakpoint.h | 1 -
 arch/arm64/kernel/ptrace.c             | 3 ++-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index d064047612b..52b484b6aa1 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
  */
 #define ARM_MAX_BRP		16
 #define ARM_MAX_WRP		16
-#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
 
 /* Virtual debug register bases. */
 #define AARCH64_DBG_REG_BVR	0

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 70526cfda05..2ac99887800 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -87,7 +87,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
 			break;
 		}
 	}
-	for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+	for (i = 0; i < ARM_MAX_WRP; ++i) {
 		if (current->thread.debug.hbp_watch[i] == bp) {
 			info.si_errno = -((i << 1) + 1);
 			break;
--
cgit v1.2.3-70-g09d2
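To picture the indexing bug described above (an illustrative model only, not kernel code: the two array names, the array sizes and the si_errno encoding are taken from the patch, everything else is made up), compat breakpoints and watchpoints live in two separate fixed-size arrays, so the watchpoint loop has to start from index 0 rather than ARM_MAX_BRP:

	/* Hypothetical stand-alone model of the fixed loop. */
	#include <stdio.h>
	#include <stddef.h>

	#define ARM_MAX_BRP 16
	#define ARM_MAX_WRP 16

	struct debug_info {
		void *hbp_break[ARM_MAX_BRP];   /* breakpoint slots */
		void *hbp_watch[ARM_MAX_WRP];   /* watchpoint slots */
	};

	int main(void)
	{
		struct debug_info dbg = { {0}, {0} };
		void *bp = &dbg;                /* pretend this event is watchpoint 3 */
		dbg.hbp_watch[3] = bp;

		/*
		 * Indexing hbp_watch with i in [ARM_MAX_BRP, ARM_MAX_BRP + ARM_MAX_WRP)
		 * would read past the end of the array; the fixed loop starts at 0.
		 */
		for (int i = 0; i < ARM_MAX_WRP; ++i) {
			if (dbg.hbp_watch[i] == bp) {
				printf("watchpoint %d -> si_errno %d\n", i, -((i << 1) + 1));
				break;
			}
		}
		return 0;
	}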
From 85487edd252fa04718dcd735bc0f41213bbb9546 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 22 Aug 2014 14:20:24 +0100
Subject: arm64: ptrace: fix compat reg getter/setter return values

copy_{to,from}_user return the number of bytes remaining on failure, not
an error code.

This patch returns -EFAULT when the copy operation didn't complete,
rather than expose the number of bytes not copied directly to userspace.

Signed-off-by: Will Deacon
---
 arch/arm64/kernel/ptrace.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 2ac99887800..fe63ac5e9bf 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -663,8 +663,10 @@ static int compat_gpr_get(struct task_struct *target,
 			kbuf += sizeof(reg);
 		} else {
 			ret = copy_to_user(ubuf, &reg, sizeof(reg));
-			if (ret)
+			if (ret) {
+				ret = -EFAULT;
 				break;
+			}
 
 			ubuf += sizeof(reg);
 		}
@@ -702,8 +704,10 @@ static int compat_gpr_set(struct task_struct *target,
 			kbuf += sizeof(reg);
 		} else {
 			ret = copy_from_user(&reg, ubuf, sizeof(reg));
-			if (ret)
-				return ret;
+			if (ret) {
+				ret = -EFAULT;
+				break;
+			}
 
 			ubuf += sizeof(reg);
 		}
--
cgit v1.2.3-70-g09d2
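The fix above relies on a convention that is easy to get wrong: copy_to_user()/copy_from_user() return 0 on success and the number of bytes left uncopied on failure, not a negative errno, so the caller has to translate a non-zero return itself. A small sketch of the idiom, using a stand-in copy routine since this is not kernel code:

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>

	/*
	 * Stand-in with the same contract as copy_to_user(): returns the number
	 * of bytes that could NOT be copied, i.e. 0 on success.
	 */
	static unsigned long fake_copy_to_user(void *dst, const void *src, unsigned long n)
	{
		memcpy(dst, src, n);            /* always succeeds in this toy model */
		return 0;
	}

	static int export_reg(void *ubuf, unsigned long reg)
	{
		/* Translate "bytes remaining" into -EFAULT instead of returning it raw. */
		if (fake_copy_to_user(ubuf, &reg, sizeof(reg)))
			return -EFAULT;
		return 0;
	}

	int main(void)
	{
		unsigned long dst;
		printf("export_reg() -> %d\n", export_reg(&dst, 0x1234UL));
		return 0;
	}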
From 5b75a6af11357813a7eeb4a29d0261adbbfab556 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 22 Aug 2014 14:25:21 +0100
Subject: arm64: perf: don't rely on layout of pt_regs when grabbing sp or pc

The current perf_regs code relies on sp and pc sitting just off the end
of the pt_regs->regs array. This is ugly and fragile, so this patch
checks for these registers explicitly and returns the appropriate field.

Acked-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/perf_regs.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 422ebd63b61..6762ad70558 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -24,6 +24,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 		return regs->compat_lr;
 	}
 
+	if ((u32)idx == PERF_REG_ARM64_SP)
+		return regs->sp;
+
+	if ((u32)idx == PERF_REG_ARM64_PC)
+		return regs->pc;
+
 	return regs->regs[idx];
 }
--
cgit v1.2.3-70-g09d2

From 7c68a9cc040216c902f93f9c80305df55d9beff7 Mon Sep 17 00:00:00 2001
From: Leo Yan
Date: Mon, 1 Sep 2014 11:09:51 +0800
Subject: arm64: fix bug for reloading FPSIMD state after cpu power off

arm64 now defers reloading FPSIMD state, but this optimization also
introduces a bug after a cpu resumes from low power mode.

Once a cpu has been powered off, software needs to set that cpu's
fpsimd_last_state to NULL to force a reload of the FPSIMD state for the
thread. Otherwise it is possible that both the task's fpsimd_state.cpu
field contains the id of the current cpu and the cpu's fpsimd_last_state
per-cpu variable points to the task's fpsimd_state, in which case the
kernel will skip reloading the context on the return to userland.

Acked-by: Ard Biesheuvel
Reviewed-by: Catalin Marinas
Signed-off-by: Leo Yan
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/fpsimd.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ad8aebb1cde..3dca15634e6 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -270,6 +270,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
 	case CPU_PM_ENTER:
 		if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
 			fpsimd_save_state(&current->thread.fpsimd_state);
+		this_cpu_write(fpsimd_last_state, NULL);
 		break;
 	case CPU_PM_EXIT:
 		if (current->mm)
--
cgit v1.2.3-70-g09d2
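To restate the scenario above in a compact form (a simplified, single-cpu model; the field and variable names follow the commit message, the rest is illustrative only): the lazy-restore fast path skips the reload only when both links are still intact, which is why clearing the per-cpu pointer on CPU_PM_ENTER forces a reload after power-off.

	#include <stdio.h>
	#include <stddef.h>

	struct fpsimd_state {
		unsigned int cpu;               /* cpu that last had this state loaded */
	};

	static struct fpsimd_state *fpsimd_last_state;  /* per-cpu in the real kernel */

	static int can_skip_reload(const struct fpsimd_state *st, unsigned int this_cpu)
	{
		/* Both conditions from the commit message must hold. */
		return st->cpu == this_cpu && fpsimd_last_state == st;
	}

	int main(void)
	{
		struct fpsimd_state task_state = { .cpu = 0 };

		fpsimd_last_state = &task_state;
		printf("before power-off: skip reload = %d\n", can_skip_reload(&task_state, 0));

		/* The fix: clear the per-cpu pointer when the cpu is powered off. */
		fpsimd_last_state = NULL;
		printf("after power-off:  skip reload = %d\n", can_skip_reload(&task_state, 0));

		return 0;
	}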
From 5e39977edf6500fd12f169e6c458d33b0ef62feb Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 1 Sep 2014 15:47:19 +0100
Subject: Revert "arm64: cpuinfo: print info for all CPUs"

It turns out that vendors are relying on the format of /proc/cpuinfo,
and we've even spotted out-of-tree hacks attempting to make it look
identical to the format used by arch/arm/. That means we can't afford
to churn this interface in mainline, so revert the recent reformatting
of the file for arm64 pending discussions on the list to find out what
people actually want.

This reverts commit d7a49086f263164a2c4c178eb76412d48cd671d7.

Acked-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/setup.c | 40 ++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f6f0ccf35ae..edb146d0185 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -78,6 +78,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
+static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -309,6 +310,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
 		while (true)
 			cpu_relax();
 	}
+
+	machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
@@ -447,21 +450,10 @@ static int c_show(struct seq_file *m, void *v)
 {
 	int i;
 
-	/*
-	 * Dump out the common processor features in a single line. Userspace
-	 * should read the hwcaps with getauxval(AT_HWCAP) rather than
-	 * attempting to parse this.
-	 */
-	seq_puts(m, "features\t:");
-	for (i = 0; hwcap_str[i]; i++)
-		if (elf_hwcap & (1 << i))
-			seq_printf(m, " %s", hwcap_str[i]);
-	seq_puts(m, "\n\n");
+	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+		   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
 
 	for_each_online_cpu(i) {
-		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
-		u32 midr = cpuinfo->reg_midr;
-
 		/*
 		 * glibc reads /proc/cpuinfo to determine the number of
 		 * online processors, looking for lines beginning with
@@ -470,13 +462,25 @@ static int c_show(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
 #endif
-		seq_printf(m, "implementer\t: 0x%02x\n",
-			   MIDR_IMPLEMENTOR(midr));
-		seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
-		seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
-		seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
 	}
 
+	/* dump out the processor features */
+	seq_puts(m, "Features\t: ");
+
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, "%s ", hwcap_str[i]);
+
+	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+	seq_printf(m, "CPU architecture: AArch64\n");
+	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+	seq_puts(m, "\n");
+
+	seq_printf(m, "Hardware\t: %s\n", machine_name);
+
 	return 0;
 }
--
cgit v1.2.3-70-g09d2