Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/Kconfig                    | 26
-rw-r--r--  arch/tile/include/asm/atomic.h       | 21
-rw-r--r--  arch/tile/include/asm/hugetlb.h      |  1
-rw-r--r--  arch/tile/include/asm/irqflags.h     | 10
-rw-r--r--  arch/tile/include/asm/ptrace.h       |  3
-rw-r--r--  arch/tile/include/asm/syscall.h      |  6
-rw-r--r--  arch/tile/include/asm/syscalls.h     |  6
-rw-r--r--  arch/tile/include/asm/thread_info.h  | 10
-rw-r--r--  arch/tile/include/asm/uaccess.h      |  7
-rw-r--r--  arch/tile/include/uapi/asm/unistd.h  |  2
-rw-r--r--  arch/tile/kernel/compat.c            |  6
-rw-r--r--  arch/tile/kernel/early_printk.c      | 27
-rw-r--r--  arch/tile/kernel/intvec_32.S         | 10
-rw-r--r--  arch/tile/kernel/intvec_64.S         | 24
-rw-r--r--  arch/tile/kernel/process.c           | 68
-rw-r--r--  arch/tile/kernel/ptrace.c            | 39
-rw-r--r--  arch/tile/kernel/setup.c             | 25
-rw-r--r--  arch/tile/kernel/smpboot.c           |  4
-rw-r--r--  arch/tile/kernel/time.c              |  6
-rw-r--r--  arch/tile/lib/uaccess.c              |  8
-rw-r--r--  arch/tile/mm/pgtable.c               |  7
21 files changed, 139 insertions(+), 177 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 25877aebc68..5b6a40dd555 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -16,12 +16,15 @@ config TILE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
- select HAVE_SYSCALL_WRAPPERS if TILEGX
select VIRT_TO_BUS
select SYS_HYPERVISOR
+ select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_SYSCALL_TRACEPOINTS
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -40,9 +43,6 @@ config MMU
config GENERIC_CSUM
def_bool y
-config SEMAPHORE_SLEEPERS
- def_bool y
-
config HAVE_ARCH_ALLOC_REMAP
def_bool y
@@ -67,12 +67,6 @@ config HUGETLB_SUPER_PAGES
config RWSEM_GENERIC_SPINLOCK
def_bool y
-# We have a very flat architecture from a migration point of view,
-# so save boot time by presetting this (particularly useful on tile-sim).
-config DEFAULT_MIGRATION_COST
- int
- default "10000000"
-
# We only support gcc 4.4 and above, so this should work.
config ARCH_SUPPORTS_OPTIMIZED_INLINING
def_bool y
@@ -114,13 +108,6 @@ config STRICT_DEVMEM
config SMP
def_bool y
-# Allow checking for compile-time determined overflow errors in
-# copy_from_user(). There are still unprovable places in the
-# generic code as of 2.6.34, so this option is not really compatible
-# with -Werror, which is more useful in general.
-config DEBUG_COPY_FROM_USER
- def_bool n
-
config HVC_TILE
depends on TTY
select HVC_DRIVER
@@ -420,11 +407,6 @@ endmenu
menu "Executable file formats"
-# only elf supported
-config KCORE_ELF
- def_bool y
- depends on PROC_FS
-
source "fs/Kconfig.binfmt"
endmenu
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index f2461429a4a..e71387ab20c 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -131,4 +131,25 @@ static inline int atomic_read(const atomic_t *v)
#include <asm/atomic_64.h>
#endif
+#ifndef __ASSEMBLY__
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_TILE_ATOMIC_H */
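For reference, the new atomic64_dec_if_positive() above is the standard compare-and-exchange retry pattern: read the counter, compute the decremented value, bail out if it would go negative, and retry if another CPU changed the counter in the meantime. A minimal userspace sketch of the same pattern, using GCC's __atomic builtins rather than the kernel API (names here are illustrative only, not part of the patch):

#include <stdio.h>

static long long dec_if_positive(long long *v)
{
	long long c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		long long dec = c - 1;
		if (dec < 0)
			return dec;	/* would go negative: leave *v alone */
		/* On failure, c is refreshed with the current value of *v. */
		if (__atomic_compare_exchange_n(v, &c, dec, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return dec;	/* we won the race: *v is now dec */
	}
}

int main(void)
{
	long long count = 2;

	while (dec_if_positive(&count) >= 0)
		printf("acquired, count is now %lld\n", count);
	printf("exhausted\n");
	return 0;
}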
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index 0f885af2b62..3257733003f 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -16,6 +16,7 @@
#define _ASM_TILE_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 241c0bb60b1..c96f9bbb760 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -40,7 +40,15 @@
#include <asm/percpu.h>
#include <arch/spr_def.h>
-/* Set and clear kernel interrupt masks. */
+/*
+ * Set and clear kernel interrupt masks.
+ *
+ * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
+ * clobber. We rely on it being equivalent to a compiler barrier in
+ * this code since arch_local_irq_save() and friends must act as
+ * compiler barriers. This compiler semantic is baked into enough
+ * places that the compiler will maintain it going forward.
+ */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
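The NOTE added above relies on __insn_mtspr() acting as a compiler barrier. A rough illustration of what that property means, sketched with GNU C inline asm rather than the tile builtin (the actual SPR write is omitted; this is only the barrier aspect):

static inline void mtspr_barrier_stub(unsigned long val)
{
	/*
	 * The "memory" clobber forbids the compiler from caching memory
	 * values in registers across this point or from reordering loads
	 * and stores around it -- the property arch_local_irq_save() and
	 * friends need so that accesses stay inside the protected region.
	 */
	asm volatile("" : : "r" (val) : "memory");
}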
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 2e83fc1b946..fd412260aff 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -44,7 +44,8 @@ typedef unsigned long pt_reg_t;
struct pt_regs *get_pt_regs(struct pt_regs *);
/* Trace the current syscall. */
-extern void do_syscall_trace(void);
+extern int do_syscall_trace_enter(struct pt_regs *regs);
+extern void do_syscall_trace_exit(struct pt_regs *regs);
#define arch_has_single_step() (1)
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
index d35e0dcb67b..9644b88f133 100644
--- a/arch/tile/include/asm/syscall.h
+++ b/arch/tile/include/asm/syscall.h
@@ -22,6 +22,12 @@
#include <linux/err.h>
#include <arch/abi.h>
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+#ifdef CONFIG_COMPAT
+extern void *compat_sys_call_table[];
+#endif
+
/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.
* This importantly ignores the high bits on 64-bit, so comparisons
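The comment above (cut off at the hunk boundary) is about sign and width: only the low 32 bits of the saved register are meaningful, so returning an int makes comparisons such as "== -1" behave even when the high bits of the 64-bit register hold stale data. A small standalone illustration of the difference (the register value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical 64-bit register image whose high bits are stale. */
	uint64_t orig_r0 = 0xdeadbeef00000000ULL | (uint32_t)-1;

	/* Comparing all 64 bits does not match -1 ... */
	printf("64-bit compare: %d\n", (int64_t)orig_r0 == -1);	/* 0 */

	/* ... but truncating to int (the low 32 bits) matches. */
	printf("32-bit compare: %d\n", (int)orig_r0 == -1);	/* 1 */
	return 0;
}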
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 78886e2417a..07b298450ef 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -24,12 +24,6 @@
#include <linux/types.h>
#include <linux/compat.h>
-/* The array of function pointers for syscalls. */
-extern void *sys_call_table[];
-#ifdef CONFIG_COMPAT
-extern void *compat_sys_call_table[];
-#endif
-
/*
* Note that by convention, any syscall which requires the current
* register set takes an additional "struct pt_regs *" pointer; a
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index e9c670d7a7f..d1733dee98a 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -124,6 +124,7 @@ extern void _cpu_idle(void);
#define TIF_SECCOMP 6 /* secure computing */
#define TIF_MEMDIE 7 /* OOM killer at work */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
+#define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -134,12 +135,19 @@ extern void _cpu_idle(void);
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
_TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
+/* Work to do at syscall entry. */
+#define _TIF_SYSCALL_ENTRY_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
+
+/* Work to do at syscall exit. */
+#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
+
/*
* Thread-synchronous status.
*
@@ -153,8 +161,6 @@ extern void _cpu_idle(void);
#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
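The point of the new _TIF_SYSCALL_ENTRY_WORK / _TIF_SYSCALL_EXIT_WORK masks is that the syscall path only has to test one combined mask to decide whether any tracing work is pending. In C terms, the test performed by the assembly paths below amounts to something like this sketch (TIF_SYSCALL_TRACEPOINT as in the hunk above; the other bit number and the flags value are illustrative):

#include <stdio.h>

#define TIF_SYSCALL_TRACE	4	/* ptrace syscall tracing */
#define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */

#define _TIF_SYSCALL_TRACE	(1u << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_TRACEPOINT	(1u << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_ENTRY_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

int main(void)
{
	unsigned int flags = _TIF_SYSCALL_TRACEPOINT;	/* e.g. tracepoints only */

	/* One AND against the combined mask covers both flag bits. */
	if (flags & _TIF_SYSCALL_ENTRY_WORK)
		printf("slow path: call do_syscall_trace_enter()\n");
	else
		printf("fast path: dispatch the syscall directly\n");
	return 0;
}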
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 9ab078a4605..8a082bc6bca 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -395,7 +395,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return n;
}
-#ifdef CONFIG_DEBUG_COPY_FROM_USER
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+/*
+ * There are still unprovable places in the generic code as of 2.6.34, so this
+ * option is not really compatible with -Werror, which is more useful in
+ * general.
+ */
extern void copy_from_user_overflow(void)
__compiletime_warning("copy_from_user() size is not provably correct");
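The __compiletime_warning() annotation works because the compiler only diagnoses calls to the annotated function that survive optimization, i.e. calls it cannot prove are unreachable. A standalone sketch of the same idea using plain GCC attributes (all names here are made up for illustration, not the kernel's):

#include <stdio.h>
#include <string.h>

void copy_overflow(void)
	__attribute__((noinline, warning("copy size is not provably correct")));

void copy_overflow(void)
{
	fprintf(stderr, "buffer overflow detected!\n");
}

static inline void checked_copy(void *dst, const void *src, unsigned long n)
{
	unsigned long dst_size = __builtin_object_size(dst, 0);

	/*
	 * If n is a compile-time constant that exceeds the known size of
	 * dst, this call is not optimized away and the warning fires.
	 */
	if (__builtin_constant_p(n) && dst_size != (unsigned long)-1 &&
	    n > dst_size)
		copy_overflow();
	memcpy(dst, src, n);
}

int main(void)
{
	char buf[8];

	checked_copy(buf, "hello", 6);		/* fits, no warning */
	/* checked_copy(buf, "far too long", 13); -- should warn at -O2 */
	return 0;
}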
diff --git a/arch/tile/include/uapi/asm/unistd.h b/arch/tile/include/uapi/asm/unistd.h
index cd7b6dd9d47..3866397aaf5 100644
--- a/arch/tile/include/uapi/asm/unistd.h
+++ b/arch/tile/include/uapi/asm/unistd.h
@@ -20,6 +20,8 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
+#define NR_syscalls __NR_syscalls
+
/* Additional Tilera-specific syscalls. */
#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 6ea4cdb3c6a..ed378416b86 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -56,12 +56,6 @@ COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
- char __user *, buf, size_t, len)
-{
- return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
-}
-
COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
u32, offset_lo, u32, offset_hi,
u32, nbytes_lo, u32, nbytes_hi)
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index afb9c9a0d88..34d72a151bf 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/printk.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>
@@ -33,25 +34,8 @@ static struct console early_hv_console = {
};
/* Direct interface for emergencies */
-static struct console *early_console = &early_hv_console;
-static int early_console_initialized;
static int early_console_complete;
-static void early_vprintk(const char *fmt, va_list ap)
-{
- char buf[512];
- int n = vscnprintf(buf, sizeof(buf), fmt, ap);
- early_console->write(early_console, buf, n);
-}
-
-void early_printk(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- early_vprintk(fmt, ap);
- va_end(ap);
-}
-
void early_panic(const char *fmt, ...)
{
va_list ap;
@@ -69,14 +53,13 @@ static int __initdata keep_early;
static int __init setup_early_printk(char *str)
{
- if (early_console_initialized)
+ if (early_console)
return 1;
if (str != NULL && strncmp(str, "keep", 4) == 0)
keep_early = 1;
early_console = &early_hv_console;
- early_console_initialized = 1;
register_console(early_console);
return 0;
@@ -85,12 +68,12 @@ static int __init setup_early_printk(char *str)
void __init disable_early_printk(void)
{
early_console_complete = 1;
- if (!early_console_initialized || !early_console)
+ if (!early_console)
return;
if (!keep_early) {
early_printk("disabling early console\n");
unregister_console(early_console);
- early_console_initialized = 0;
+ early_console = NULL;
} else {
early_printk("keeping early console\n");
}
@@ -98,7 +81,7 @@ void __init disable_early_printk(void)
void warn_early_printk(void)
{
- if (early_console_complete || early_console_initialized)
+ if (early_console_complete || early_console)
return;
early_printk("\
Machine shutting down before console output is fully initialized.\n\
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index f212bf7cea8..cb52d66343e 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1201,7 +1201,10 @@ handle_syscall:
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
bzt r30, .Lrestore_syscall_regs
- jal do_syscall_trace
+ {
+ PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+ jal do_syscall_trace_enter
+ }
FEEDBACK_REENTER(handle_syscall)
/*
@@ -1252,7 +1255,10 @@ handle_syscall:
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
bzt r30, 1f
- jal do_syscall_trace
+ {
+ PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+ jal do_syscall_trace_exit
+ }
FEEDBACK_REENTER(handle_syscall)
1: {
movei r30, 0 /* not an NMI */
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 4ea08090265..85d48395702 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1000,13 +1000,19 @@ handle_syscall:
/* Trace syscalls, if requested. */
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
- ld r30, r31
- andi r30, r30, _TIF_SYSCALL_TRACE
+ {
+ ld r30, r31
+ moveli r32, _TIF_SYSCALL_ENTRY_WORK
+ }
+ and r30, r30, r32
{
addi r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
beqzt r30, .Lrestore_syscall_regs
}
- jal do_syscall_trace
+ {
+ PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+ jal do_syscall_trace_enter
+ }
FEEDBACK_REENTER(handle_syscall)
/*
@@ -1071,13 +1077,19 @@ handle_syscall:
FEEDBACK_REENTER(handle_syscall)
/* Do syscall trace again, if requested. */
- ld r30, r31
- andi r0, r30, _TIF_SYSCALL_TRACE
+ {
+ ld r30, r31
+ moveli r32, _TIF_SYSCALL_EXIT_WORK
+ }
+ and r0, r30, r32
{
andi r0, r30, _TIF_SINGLESTEP
beqzt r0, 1f
}
- jal do_syscall_trace
+ {
+ PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
+ jal do_syscall_trace_exit
+ }
FEEDBACK_REENTER(handle_syscall)
andi r0, r30, _TIF_SINGLESTEP
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index caf93ae1179..8ac304484f9 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -40,13 +40,11 @@
#include <arch/abi.h>
#include <arch/sim_def.h>
-
/*
* Use the (x86) "idle=poll" option to prefer low latency when leaving the
* idle loop over low power while in the idle loop, e.g. if we have
* one thread per core and we want to get threads out of futex waits fast.
*/
-static int no_idle_nap;
static int __init idle_setup(char *str)
{
if (!str)
@@ -54,64 +52,19 @@ static int __init idle_setup(char *str)
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads.\n");
- no_idle_nap = 1;
- } else if (!strcmp(str, "halt"))
- no_idle_nap = 0;
- else
- return -1;
-
- return 0;
+ cpu_idle_poll_ctrl(true);
+ return 0;
+ } else if (!strcmp(str, "halt")) {
+ return 0;
+ }
+ return -1;
}
early_param("idle", idle_setup);
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- int cpu = smp_processor_id();
-
-
- current_thread_info()->status |= TS_POLLING;
-
- if (no_idle_nap) {
- while (1) {
- while (!need_resched())
- cpu_relax();
- schedule();
- }
- }
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched()) {
- if (cpu_is_offline(cpu))
- BUG(); /* no HOTPLUG_CPU */
-
- local_irq_disable();
- __get_cpu_var(irq_stat).idle_timestamp = jiffies;
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
-
- if (!need_resched())
- _cpu_idle();
- else
- local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ _cpu_idle();
}
/*
@@ -620,8 +573,7 @@ void show_regs(struct pt_regs *regs)
int i;
pr_err("\n");
- pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
- tsk->pid, tsk->comm, smp_processor_id());
+ show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
for (i = 0; i < 51; i += 3)
pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9835312d5a9..0f83ed4602b 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -21,9 +21,13 @@
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/elf.h>
+#include <linux/tracehook.h>
#include <asm/traps.h>
#include <arch/chip.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
void user_enable_single_step(struct task_struct *child)
{
set_tsk_thread_flag(child, TIF_SINGLESTEP);
@@ -246,29 +250,26 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
#endif
-void do_syscall_trace(void)
+int do_syscall_trace_enter(struct pt_regs *regs)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ if (tracehook_report_syscall_entry(regs))
+ regs->regs[TREG_SYSCALL_NR] = -1;
+ }
- if (!(current->ptrace & PT_PTRACED))
- return;
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
- /*
- * The 0x80 provides a way for the tracing parent to distinguish
- * between a syscall stop and SIGTRAP delivery
- */
- ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+ return regs->regs[TREG_SYSCALL_NR];
+}
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+void do_syscall_trace_exit(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs->regs[0]);
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index d1e15f7b59c..7a5aa1a7864 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
#ifdef CONFIG_BLK_DEV_INITRD
-/*
- * Note that the kernel can potentially support other compression
- * techniques than gz, though we don't do so by default. If we ever
- * decide to do so we can either look for other filename extensions,
- * or just allow a file with this name to be compressed with an
- * arbitrary compressor (somewhat counterintuitively).
- */
static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
+static char __initdata initramfs_file[128] = "initramfs";
static int __init setup_initramfs_file(char *str)
{
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
early_param("initramfs_file", setup_initramfs_file);
/*
- * We look for an "initramfs.cpio.gz" file in the hvfs.
- * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs.
+ * We look for a file called "initramfs" in the hvfs. If there is one, we
+ * allocate some memory for it and it will be unpacked to the initramfs.
+ * If it's compressed, the initrd code will uncompress it first.
*/
static void __init load_hv_initrd(void)
{
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
if (fd == HV_ENOENT) {
- if (set_initramfs_file)
+ if (set_initramfs_file) {
pr_warning("No such hvfs initramfs file '%s'\n",
initramfs_file);
- return;
+ return;
+ } else {
+ /* Try old backwards-compatible name. */
+ fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
+ if (fd == HV_ENOENT)
+ return;
+ }
}
BUG_ON(fd < 0);
stat = hv_fs_fstat(fd);
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index e686c5ac90b..44bab29bf2f 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -207,9 +207,7 @@ void __cpuinit online_secondary(void)
/* Set up tile-timer clock-event device on this cpu */
setup_tile_timer();
- preempt_enable();
-
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index f6f50f2a5e3..5ac397ec698 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -230,6 +230,10 @@ int setup_profiling_timer(unsigned int multiplier)
*/
cycles_t ns2cycles(unsigned long nsecs)
{
- struct clock_event_device *dev = &__get_cpu_var(tile_timer);
+ /*
+ * We do not have to disable preemption here as each core has the same
+ * clock frequency.
+ */
+ struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
return ((u64)nsecs * dev->mult) >> dev->shift;
}
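The switch to __raw_get_cpu_var() is safe because every core runs the timer at the same frequency, so it does not matter which CPU's clock_event_device supplies the mult/shift pair. The conversion itself is ordinary fixed-point arithmetic, cycles = (ns * mult) >> shift; a small standalone example with made-up values for a 100 MHz clock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * clockevents encodes cycles-per-nanosecond as a fixed-point
	 * fraction: cycles = (ns * mult) >> shift.
	 */
	uint32_t mult = 429496730;	/* hypothetical: ~0.1 cycles/ns << 32 */
	uint32_t shift = 32;

	unsigned long nsecs = 1000;	/* 1 microsecond */
	uint64_t cycles = ((uint64_t)nsecs * mult) >> shift;

	printf("%lu ns -> %llu cycles\n", nsecs,
	       (unsigned long long)cycles);	/* ~100 cycles at 100 MHz */
	return 0;
}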
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c
index f8d398c9ee7..030abe3ee4f 100644
--- a/arch/tile/lib/uaccess.c
+++ b/arch/tile/lib/uaccess.c
@@ -22,11 +22,3 @@ int __range_ok(unsigned long addr, unsigned long size)
is_arch_mappable_range(addr, size));
}
EXPORT_SYMBOL(__range_ok);
-
-#ifdef CONFIG_DEBUG_COPY_FROM_USER
-void copy_from_user_overflow(void)
-{
- WARN(1, "Buffer overflow detected!\n");
-}
-EXPORT_SYMBOL(copy_from_user_overflow);
-#endif
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index b3b4972c245..dfd63ce8732 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
- read_lock(&vmlist_lock);
- for (p = vmlist; p; p = p->next) {
- if (p->addr == addr)
- break;
- }
- read_unlock(&vmlist_lock);
+ p = find_vm_area((void *)addr);
if (!p) {
pr_err("iounmap: bad address %p\n", addr);