Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     |  4
-rw-r--r--  arch/powerpc/kernel/entry_32.S        |  5
-rw-r--r--  arch/powerpc/kernel/entry_64.S        |  8
-rw-r--r--  arch/powerpc/kernel/epapr_hcalls.S    | 28
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c  | 11
-rw-r--r--  arch/powerpc/kernel/kvm.c             |  2
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c   |  2
-rw-r--r--  arch/powerpc/kernel/of_platform.c     |  2
-rw-r--r--  arch/powerpc/kernel/pci-common.c      |  4
-rw-r--r--  arch/powerpc/kernel/pci_64.c          |  4
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c       |  5
-rw-r--r--  arch/powerpc/kernel/process.c         | 64
-rw-r--r--  arch/powerpc/kernel/signal.c          |  4
-rw-r--r--  arch/powerpc/kernel/signal_64.c       |  2
-rw-r--r--  arch/powerpc/kernel/smp.c             | 46
-rw-r--r--  arch/powerpc/kernel/sysfs.c           |  4
-rw-r--r--  arch/powerpc/kernel/time.c            | 20
-rw-r--r--  arch/powerpc/kernel/uprobes.c         |  6
18 files changed, 121 insertions(+), 100 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7523539cfe9..4e23ba2f3ca 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -441,8 +441,7 @@ int main(void)
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
- DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
- DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+ DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
@@ -470,7 +469,6 @@ int main(void)
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
- DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
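The asm-offsets.c hunks only retire the stale KVM_ONLINE_CPUS/KVM_LAST_VCPU/VCPU_LAST_CPU constants and add KVM_NEED_FLUSH; the file itself just mirrors C struct layouts into constants that assembly code can use. Below is a minimal user-space sketch of that offsetof() technique, with a made-up struct and names -- the kernel's real mechanism additionally pipes compiler output through sed at build time.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for a kernel structure; fields are illustrative only. */
struct vcpu_example {
	unsigned long fault_dsisr;
	unsigned long fault_dar;
	unsigned long last_inst;
};

/* Emit an assembler-friendly constant for a struct offset. */
#define DEFINE(sym, val) \
	printf("#define %-20s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(VCPU_FAULT_DSISR, offsetof(struct vcpu_example, fault_dsisr));
	DEFINE(VCPU_FAULT_DAR,   offsetof(struct vcpu_example, fault_dar));
	DEFINE(VCPU_LAST_INST,   offsetof(struct vcpu_example, last_inst));
	return 0;
}
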
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 9499385676e..d22e73e4618 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -444,11 +444,6 @@ ret_from_kernel_thread:
PPC440EP_ERR42
blrl
li r3,0
- b do_exit # no return
-
- .globl __ret_from_kernel_execve
-__ret_from_kernel_execve:
- addi r1,r3,-STACK_FRAME_OVERHEAD
b ret_from_syscall
/* Traced system call support */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 56e0ff0878b..e9a906c2723 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -373,17 +373,11 @@ _GLOBAL(ret_from_fork)
_GLOBAL(ret_from_kernel_thread)
bl .schedule_tail
REST_NVGPRS(r1)
- REST_GPR(2,r1)
+ ld r14, 0(r14)
mtlr r14
mr r3,r15
blrl
li r3,0
- b .do_exit # no return
-
-_GLOBAL(__ret_from_kernel_execve)
- addi r1,r3,-STACK_FRAME_OVERHEAD
- li r10,1
- std r10,SOFTE(r1)
b syscall_exit
.section ".toc","aw"
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 697b390ebfd..62c0dc23782 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -8,13 +8,41 @@
*/
#include <linux/threads.h>
+#include <asm/epapr_hcalls.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
+/* epapr_ev_idle() was derived from e500_idle() */
+_GLOBAL(epapr_ev_idle)
+ CURRENT_THREAD_INFO(r3, r1)
+ PPC_LL r4, TI_LOCAL_FLAGS(r3) /* set napping bit */
+ ori r4, r4,_TLF_NAPPING /* so when we take an exception */
+ PPC_STL r4, TI_LOCAL_FLAGS(r3) /* it will return to our caller */
+
+ wrteei 1
+
+idle_loop:
+ LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+ li r3, -1
+ nop
+ nop
+ nop
+
+ /*
+ * Guard against spurious wakeups from a hypervisor --
+ * only interrupt will cause us to return to LR due to
+ * _TLF_NAPPING.
+ */
+ b idle_loop
+
/* Hypercall entry point. Will be patched with device tree instructions. */
.global epapr_hypercall_start
epapr_hypercall_start:
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 028aeae370b..f3eab8594d9 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -21,6 +21,10 @@
#include <asm/epapr_hcalls.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
+#include <asm/machdep.h>
+
+extern void epapr_ev_idle(void);
+extern u32 epapr_ev_idle_start[];
bool epapr_paravirt_enabled;
@@ -41,8 +45,13 @@ static int __init epapr_paravirt_init(void)
if (len % 4 || len > (4 * 4))
return -ENODEV;
- for (i = 0; i < (len / 4); i++)
+ for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
+ patch_instruction(epapr_ev_idle_start + i, insts[i]);
+ }
+
+ if (of_get_property(hyper_node, "has-idle", NULL))
+ ppc_md.power_save = epapr_ev_idle;
epapr_paravirt_enabled = true;
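These two files add an ePAPR idle hypercall stub: the instruction sequence the hypervisor publishes in the device tree is now patched into epapr_ev_idle_start as well as epapr_hypercall_start, and ppc_md.power_save is pointed at epapr_ev_idle when the hypervisor node advertises "has-idle". A hedged user-space analogue of that copy-into-a-patchable-stub loop, using plain arrays in place of patch_instruction() and the device-tree property:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins: in the kernel, insts[] comes from the device
 * tree and the targets are the nop slots in epapr_hypercall_start and
 * epapr_ev_idle_start, written with patch_instruction(). */
#define MAX_INSTS 4

static uint32_t hypercall_stub[MAX_INSTS];
static uint32_t ev_idle_stub[MAX_INSTS];

static int patch_stubs(const uint32_t *insts, size_t len_bytes)
{
	size_t i, n = len_bytes / 4;

	if (len_bytes % 4 || len_bytes > 4 * MAX_INSTS)
		return -1;		/* mirrors the -ENODEV check */

	for (i = 0; i < n; i++) {
		hypercall_stub[i] = insts[i];	/* patch_instruction(...) */
		ev_idle_stub[i] = insts[i];
	}
	return 0;
}

int main(void)
{
	/* Fake "instructions" just to exercise the copy loop. */
	const uint32_t insts[MAX_INSTS] = { 0x1, 0x2, 0x3, 0x4 };

	if (patch_stubs(insts, sizeof(insts)) == 0)
		printf("patched %zu slots\n", sizeof(insts) / 4);
	return 0;
}
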
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 867db1de894..a61b133c4f9 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -419,7 +419,7 @@ static void kvm_map_magic_page(void *data)
in[0] = KVM_MAGIC_PAGE;
in[1] = KVM_MAGIC_PAGE;
- kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+ kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
*features = out[0];
}
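The kvm.c change only swaps the open-coded HC_VENDOR_KVM | ... expression for the KVM_HCALL_TOKEN() helper. An ePAPR-style token packs a vendor id and a call number into one word; the sketch below shows that packing with placeholder values, not the real ids from asm/epapr_hcalls.h.

#include <stdio.h>

/* Placeholder vendor ids and call numbers for illustration only. */
#define DEMO_VENDOR_EPAPR	1u
#define DEMO_VENDOR_KVM		2u
#define DEMO_EV_IDLE		16u
#define DEMO_MAP_MAGIC_PAGE	1u

/* Token = vendor id in the upper half, hypercall number in the lower. */
#define DEMO_HCALL_TOKEN(vendor, num) \
	(((unsigned int)(vendor) << 16) | (unsigned int)(num))

int main(void)
{
	printf("idle token:           0x%08x\n",
	       DEMO_HCALL_TOKEN(DEMO_VENDOR_EPAPR, DEMO_EV_IDLE));
	printf("map-magic-page token: 0x%08x\n",
	       DEMO_HCALL_TOKEN(DEMO_VENDOR_KVM, DEMO_MAP_MAGIC_PAGE));
	return 0;
}
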
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index bedd12e1cfb..0733b05eb85 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -387,7 +387,7 @@ void __init find_legacy_serial_ports(void)
of_node_put(parent);
continue;
}
- /* Check for known pciclass, and also check wether we have
+ /* Check for known pciclass, and also check whether we have
* a device with child nodes for ports or not
*/
if (of_device_is_compatible(np, "pciclass,0700") ||
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 2049f2d00ff..9db8ec07ec9 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -82,7 +82,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
return -ENXIO;
/* Claim resources. This might need some rework as well depending
- * wether we are doing probe-only or not, like assigning unassigned
+ * whether we are doing probe-only or not, like assigning unassigned
* resources etc...
*/
pcibios_claim_one_bus(phb->bus);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 7f94f760dd0..abc0d085699 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1428,8 +1428,6 @@ void __init pcibios_resource_survey(void)
ppc_md.pcibios_fixup();
}
-#ifdef CONFIG_HOTPLUG
-
/* This is used by the PCI hotplug driver to allocate resource
* of newly plugged busses. We can try to consolidate with the
* rest of the code later, for now, keep it as-is as our main
@@ -1488,8 +1486,6 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
-#endif /* CONFIG_HOTPLUG */
-
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
if (ppc_md.pcibios_enable_device_hook)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 4ff190ff24a..2cbe6768fdd 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -74,8 +74,6 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-#ifdef CONFIG_HOTPLUG
-
int pcibios_unmap_io_space(struct pci_bus *bus)
{
struct pci_controller *hose;
@@ -124,8 +122,6 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
-#endif /* CONFIG_HOTPLUG */
-
static int __devinit pcibios_map_phb_io_space(struct pci_controller *hose)
{
struct vm_struct *area;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 19e4288d848..78b8766fd79 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -43,6 +43,7 @@
#include <asm/dcr.h>
#include <asm/ftrace.h>
#include <asm/switch_to.h>
+#include <asm/epapr_hcalls.h>
#ifdef CONFIG_PPC32
extern void transfer_to_handler(void);
@@ -191,3 +192,7 @@ EXPORT_SYMBOL(__arch_hweight64);
#ifdef CONFIG_PPC_BOOK3S_64
EXPORT_SYMBOL_GPL(mmu_psize_defs);
#endif
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+EXPORT_SYMBOL(epapr_hypercall_start);
+#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ba48233500f..81430674e71 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -733,8 +733,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p,
- struct pt_regs *regs)
+ unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
@@ -745,25 +744,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
- if (!regs) {
- /* for kernel thread, set `current' and stackptr in new task */
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
+ childregs->gpr[14] = usp; /* function */
#ifdef CONFIG_PPC64
- childregs->gpr[14] = *(unsigned long *)usp;
- childregs->gpr[2] = ((unsigned long *)usp)[1],
clear_tsk_thread_flag(p, TIF_32BIT);
-#else
- childregs->gpr[14] = usp; /* function */
- childregs->gpr[2] = (unsigned long) p;
+ childregs->softe = 1;
#endif
childregs->gpr[15] = arg;
p->thread.regs = NULL; /* no user register state */
+ ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
} else {
+ struct pt_regs *regs = current_pt_regs();
CHECK_FULL_REGS(regs);
*childregs = *regs;
- childregs->gpr[1] = usp;
+ if (usp)
+ childregs->gpr[1] = usp;
p->thread.regs = childregs;
childregs->gpr[3] = 0; /* Result from fork() */
if (clone_flags & CLONE_SETTLS) {
@@ -1027,51 +1026,6 @@ int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
-#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
-
-int sys_clone(unsigned long clone_flags, unsigned long usp,
- int __user *parent_tidp, void __user *child_threadptr,
- int __user *child_tidp, int p6,
- struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
- if (usp == 0)
- usp = regs->gpr[1]; /* stack pointer for child */
-#ifdef CONFIG_PPC64
- if (is_32bit_task()) {
- parent_tidp = TRUNC_PTR(parent_tidp);
- child_tidp = TRUNC_PTR(child_tidp);
- }
-#endif
- return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
-}
-
-int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
- unsigned long p4, unsigned long p5, unsigned long p6,
- struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
- return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
-}
-
-int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
- unsigned long p4, unsigned long p5, unsigned long p6,
- struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
- regs, 0, NULL, NULL);
-}
-
-void __ret_from_kernel_execve(struct pt_regs *normal)
-__noreturn;
-
-void ret_from_kernel_execve(struct pt_regs *normal)
-{
- set_thread_flag(TIF_RESTOREALL);
- __ret_from_kernel_execve(normal);
-}
-
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
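The process.c rework drops the pt_regs argument from copy_thread() (the parent frame is now fetched with current_pt_regs()) and deletes the PPC-private sys_clone/sys_fork/sys_vfork wrappers. The two cases copy_thread() distinguishes -- a kernel thread that stashes its function and argument in non-volatile registers, versus a user fork that copies the parent's register frame -- are sketched very roughly below with invented demo_* types; this is not the kernel code.

#include <stdio.h>
#include <string.h>

/* Grossly simplified stand-ins for pt_regs and task state. */
struct demo_regs {
	unsigned long gpr[32];
};

struct demo_task {
	int kthread;			/* models PF_KTHREAD */
	struct demo_regs childregs;
};

static void demo_copy_thread(struct demo_task *p, unsigned long fn,
			     unsigned long arg,
			     const struct demo_regs *parent_regs)
{
	if (p->kthread) {
		/* Kernel thread: no user state; stash fn/arg in r14/r15
		 * for ret_from_kernel_thread to pick up. */
		memset(&p->childregs, 0, sizeof(p->childregs));
		p->childregs.gpr[14] = fn;
		p->childregs.gpr[15] = arg;
	} else {
		/* User fork: start from the parent's frame and make the
		 * child see 0 as the fork() return value in r3. */
		p->childregs = *parent_regs;
		p->childregs.gpr[3] = 0;
	}
}

int main(void)
{
	struct demo_regs parent = { .gpr = { [3] = 12345 } };
	struct demo_task kthread = { .kthread = 1 };
	struct demo_task child = { .kthread = 0 };

	demo_copy_thread(&kthread, 0xc000, 0x10, &parent);
	demo_copy_thread(&child, 0, 0, &parent);
	printf("kthread fn=r14=%#lx arg=r15=%#lx, child r3=%lu\n",
	       kthread.childregs.gpr[14], kthread.childregs.gpr[15],
	       child.childregs.gpr[3]);
	return 0;
}
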
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index a2dc75793bd..3b997118df5 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -158,10 +158,8 @@ static int do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
- if (thread_info_flags & _TIF_UPROBE) {
- clear_thread_flag(TIF_UPROBE);
+ if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
- }
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d183f8719a5..1ca045d4432 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -83,7 +83,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
* the context). This is very important because we must ensure we
* don't lose the VRSAVE content that may have been set prior to
* the process doing its first vector operation
- * Userland shall check AT_HWCAP to know wether it can rely on the
+ * Userland shall check AT_HWCAP to know whether it can rely on the
* v_regs pointer or not
*/
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2b952b5386f..e5b133ebd8a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,6 +427,45 @@ int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
+
+static atomic_t secondary_inhibit_count;
+
+/*
+ * Don't allow secondary CPU threads to come online
+ */
+void inhibit_secondary_onlining(void)
+{
+ /*
+ * This makes secondary_inhibit_count stable during cpu
+ * online/offline operations.
+ */
+ get_online_cpus();
+
+ atomic_inc(&secondary_inhibit_count);
+ put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);
+
+/*
+ * Allow secondary CPU threads to come online again
+ */
+void uninhibit_secondary_onlining(void)
+{
+ get_online_cpus();
+ atomic_dec(&secondary_inhibit_count);
+ put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);
+
+static int secondaries_inhibited(void)
+{
+ return atomic_read(&secondary_inhibit_count);
+}
+
+#else /* HOTPLUG_CPU */
+
+#define secondaries_inhibited() 0
+
#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
@@ -445,6 +484,13 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
+ /*
+ * Don't allow secondary threads to come online if inhibited
+ */
+ if (threads_per_core > 1 && secondaries_inhibited() &&
+ cpu % threads_per_core != 0)
+ return -EBUSY;
+
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
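The new inhibit/uninhibit pair in smp.c is an atomic counter taken and released under get_online_cpus(), which __cpu_up() consults so it can refuse to online secondary threads of a core while some caller needs them kept offline. The gating pattern, reduced to a hedged user-space sketch with C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for secondary_inhibit_count; the kernel version additionally
 * brackets updates with get_online_cpus()/put_online_cpus() so the count
 * stays stable across hotplug operations. */
static atomic_int inhibit_count;

static void inhibit_secondary_onlining(void)   { atomic_fetch_add(&inhibit_count, 1); }
static void uninhibit_secondary_onlining(void) { atomic_fetch_sub(&inhibit_count, 1); }
static int  secondaries_inhibited(void)        { return atomic_load(&inhibit_count); }

/* Models the __cpu_up() check: refuse non-primary threads of a core
 * while onlining is inhibited. */
static int demo_cpu_up(int cpu, int threads_per_core)
{
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -1;	/* -EBUSY in the kernel */
	return 0;
}

int main(void)
{
	inhibit_secondary_onlining();
	printf("cpu 1 while inhibited: %d\n", demo_cpu_up(1, 4));
	uninhibit_secondary_onlining();
	printf("cpu 1 after uninhibit: %d\n", demo_cpu_up(1, 4));
	return 0;
}
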
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index cf357a059dd..3ce1f864c2d 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -607,7 +607,7 @@ static void register_nodes(void)
int sysfs_add_device_to_node(struct device *dev, int nid)
{
- struct node *node = &node_devices[nid];
+ struct node *node = node_devices[nid];
return sysfs_create_link(&node->dev.kobj, &dev->kobj,
kobject_name(&dev->kobj));
}
@@ -615,7 +615,7 @@ EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
void sysfs_remove_device_from_node(struct device *dev, int nid)
{
- struct node *node = &node_devices[nid];
+ struct node *node = node_devices[nid];
sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
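The sysfs.c change tracks node_devices becoming an array of pointers rather than an array of structs, which is why the '&' disappears from the indexing. The difference, in a small standalone sketch with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct demo_node { int nid; };

#define MAX_NODES 4

/* Old layout: array of structs -- element nid is &old_nodes[nid]. */
static struct demo_node old_nodes[MAX_NODES];

/* New layout: array of pointers -- element nid is new_nodes[nid] itself. */
static struct demo_node *new_nodes[MAX_NODES];

int main(void)
{
	int nid = 2;
	struct demo_node *a, *b;

	old_nodes[nid].nid = nid;
	new_nodes[nid] = malloc(sizeof(*new_nodes[nid]));
	new_nodes[nid]->nid = nid;

	a = &old_nodes[nid];	/* old code: take the address of the slot */
	b = new_nodes[nid];	/* new code: the slot already is a pointer */

	printf("old nid=%d, new nid=%d\n", a->nid, b->nid);
	free(new_nodes[nid]);
	return 0;
}
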
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ce4cb772dc7..b3b14352b05 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -297,6 +297,8 @@ static u64 vtime_delta(struct task_struct *tsk,
u64 now, nowscaled, deltascaled;
u64 udelta, delta, user_scaled;
+ WARN_ON_ONCE(!irqs_disabled());
+
now = mftb();
nowscaled = read_spurr(now);
get_paca()->system_time += now - get_paca()->starttime;
@@ -355,15 +357,15 @@ void vtime_account_idle(struct task_struct *tsk)
}
/*
- * Transfer the user and system times accumulated in the paca
- * by the exception entry and exit code to the generic process
- * user and system time records.
+ * Transfer the user time accumulated in the paca
+ * by the exception entry and exit code to the generic
+ * process user time records.
* Must be called with interrupts disabled.
- * Assumes that vtime_account() has been called recently
- * (i.e. since the last entry from usermode) so that
+ * Assumes that vtime_account_system/idle() has been called
+ * recently (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
-void account_process_tick(struct task_struct *tsk, int user_tick)
+void vtime_account_user(struct task_struct *tsk)
{
cputime_t utime, utimescaled;
@@ -375,12 +377,6 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
account_user_time(tsk, utime, utimescaled);
}
-void vtime_task_switch(struct task_struct *prev)
-{
- vtime_account(prev);
- account_process_tick(prev, 0);
-}
-
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif
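In time.c, account_process_tick() is renamed to vtime_account_user() and the local vtime_task_switch() wrapper goes away: the user time that the exception entry/exit code accumulates in the paca is drained into the task's accounting when requested. A rough user-space analogue of that accumulate-then-flush pattern, with invented demo_* names:

#include <stdio.h>

/* Stand-ins for the per-CPU (paca) accumulator and the per-task record. */
struct demo_paca { unsigned long long user_time; };
struct demo_task { unsigned long long utime; };

static struct demo_paca paca;

/* Exception entry/exit would bump paca.user_time incrementally... */
static void demo_accumulate_user_time(unsigned long long delta)
{
	paca.user_time += delta;
}

/* ...and vtime_account_user() drains the accumulator into the task. */
static void demo_vtime_account_user(struct demo_task *tsk)
{
	tsk->utime += paca.user_time;
	paca.user_time = 0;
}

int main(void)
{
	struct demo_task tsk = { 0 };

	demo_accumulate_user_time(120);
	demo_accumulate_user_time(80);
	demo_vtime_account_user(&tsk);
	printf("task utime now %llu ticks, paca drained to %llu\n",
	       tsk.utime, paca.user_time);
	return 0;
}
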
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index d2d46d1014f..bc77834dbf4 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -64,6 +64,8 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
regs->nip = current->utask->xol_vaddr;
+
+ user_enable_single_step(current);
return 0;
}
@@ -119,6 +121,8 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* to be executed.
*/
regs->nip = utask->vaddr + MAX_UINSN_BYTES;
+
+ user_disable_single_step(current);
return 0;
}
@@ -162,6 +166,8 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
}
/*
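The uprobes.c hunks make single-stepping explicit: arch_uprobe_pre_xol() enables it before executing the copied instruction out of line, and both the post-XOL and abort paths disable it again. That enable/disable pairing can be modelled with the hedged stub sketch below; the real calls are the user_enable_single_step()/user_disable_single_step() ptrace-level primitives, which the stubs here merely imitate with a flag.

#include <stdbool.h>
#include <stdio.h>

/* Stubs tracking state so the pairing is visible. */
static bool single_stepping;

static void demo_enable_single_step(void)  { single_stepping = true; }
static void demo_disable_single_step(void) { single_stepping = false; }

static void demo_pre_xol(void)
{
	/* Redirect the instruction pointer to the out-of-line slot, then... */
	demo_enable_single_step();
}

static void demo_post_xol(bool aborted)
{
	/* ...restore the instruction pointer and always drop the step flag,
	 * whether the single-stepped instruction completed or was aborted. */
	demo_disable_single_step();
	printf("xol %s, single_stepping=%d\n",
	       aborted ? "aborted" : "completed", single_stepping);
}

int main(void)
{
	demo_pre_xol();
	demo_post_xol(false);
	demo_pre_xol();
	demo_post_xol(true);
	return 0;
}
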