author    | Robert Richter <robert.richter@amd.com> | 2010-10-15 12:45:00 +0200
committer | Robert Richter <robert.richter@amd.com> | 2010-10-15 12:45:00 +0200
commit    | 6268464b370e234e0255330190f9bd5d19386ad7 (patch)
tree      | 5742641092ce64227dd2086d78baaede57da1f80 /arch/alpha/kernel
parent    | 7df01d96b295e400167e78061b81d4c91630b12d (diff)
parent    | 0fdf13606b67f830559abdaad15980c7f4f05ec4 (diff)
Merge remote branch 'tip/perf/core' into oprofile/core
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c
Diffstat (limited to 'arch/alpha/kernel')
-rw-r--r-- | arch/alpha/kernel/entry.S      |  20
-rw-r--r-- | arch/alpha/kernel/perf_event.c | 128
-rw-r--r-- | arch/alpha/kernel/process.c    |   2
-rw-r--r-- | arch/alpha/kernel/signal.c     |  56
-rw-r--r-- | arch/alpha/kernel/systbls.S    |   2
5 files changed, 110 insertions, 98 deletions
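
The perf_event.c changes below come from the tip/perf/core side of the merge: the old per-event enable/disable/unthrottle callbacks and the global hw_perf_event_init()/hw_perf_enable()/hw_perf_disable() entry points are replaced by a struct pmu with event_init, add/del, start/stop, pmu_enable/pmu_disable and read methods, registered through perf_pmu_register(). The add/start/stop/del callbacks keep the per-event PERF_HES_STOPPED and PERF_HES_UPTODATE state bits consistent. The toy userspace model below sketches that state machine under the flag semantics visible in the diff; the toy_* names and the count-folding arithmetic are illustrative assumptions, not kernel code.

/*
 * Toy model (plain userspace C, not kernel code) of the PERF_HES_*
 * bookkeeping that the new alpha_pmu_start()/alpha_pmu_stop()
 * callbacks maintain.  Flag names mirror the diff; the struct and
 * the count-folding are illustrative assumptions.
 */
#include <assert.h>
#include <stdio.h>

#define PERF_HES_STOPPED	0x01	/* counter is not counting */
#define PERF_HES_UPTODATE	0x02	/* event count reflects the hardware */
#define PERF_EF_RELOAD		0x02	/* ->start(): reprogram the period */
#define PERF_EF_UPDATE		0x04	/* ->stop(): fold hw value into count */

struct toy_event { int state; long count, hw; };

static void toy_stop(struct toy_event *e, int flags)
{
	if (!(e->state & PERF_HES_STOPPED))
		e->state |= PERF_HES_STOPPED;	/* halt counting */

	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE)) {
		e->count += e->hw;		/* fold hardware value */
		e->hw = 0;
		e->state |= PERF_HES_UPTODATE;
	}
}

static void toy_start(struct toy_event *e, int flags)
{
	assert(e->state & PERF_HES_STOPPED);	/* mirrors the WARN_ON_ONCE */

	if (flags & PERF_EF_RELOAD)		/* a reload needs a folded count */
		assert(e->state & PERF_HES_UPTODATE);

	e->state = 0;				/* counting again */
}

int main(void)
{
	struct toy_event e = { PERF_HES_STOPPED | PERF_HES_UPTODATE, 0, 0 };

	toy_start(&e, PERF_EF_RELOAD);
	e.hw = 42;				/* pretend the PMC ticked */
	toy_stop(&e, PERF_EF_UPDATE);
	printf("count=%ld state=%#x\n", e.count, e.state);	/* count=42 state=0x3 */
	return 0;
}

The point of the split is visible further down in the diff: the overflow interrupt handler can now throttle a noisy counter with alpha_pmu_stop(event, 0) instead of poking cpuc->idx_mask directly, while add/del remain the only places that do counter scheduling.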
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index ab1ee0ab082..6d159cee5f2 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
 	ldq	$20, HAE_REG($19);	\
 	stq	$21, HAE_CACHE($19);	\
 	stq	$21, 0($20);	\
-	ldq	$0, 0($sp);	\
-	ldq	$1, 8($sp);	\
 99:;	\
 	ldq	$19, 72($sp);	\
 	ldq	$20, 80($sp);	\
@@ -316,7 +314,7 @@ ret_from_sys_call:
 	cmovne	$26, 0, $19	/* $19 = 0 => non-restartable */
 	ldq	$0, SP_OFF($sp)
 	and	$0, 8, $0
-	beq	$0, restore_all
+	beq	$0, ret_to_kernel
 ret_to_user:
 	/* Make sure need_resched and sigpending don't change between
	   sampling and the rti.  */
@@ -329,6 +327,11 @@ restore_all:
 	RESTORE_ALL
 	call_pal PAL_rti

+ret_to_kernel:
+	lda	$16, 7
+	call_pal PAL_swpipl
+	br restore_all
+
 	.align 3
 $syscall_error:
 	/*
@@ -657,7 +660,7 @@ kernel_thread:
 	/* We don't actually care for a3 success widgetry in the kernel.
	   Not for positive errno values.  */
 	stq	$0, 0($sp)		/* $0 */
-	br	restore_all
+	br	ret_to_kernel
 .end kernel_thread

 /*
@@ -912,15 +915,6 @@ sys_execve:
 .end sys_execve

 	.align	4
-	.globl	osf_sigprocmask
-	.ent	osf_sigprocmask
-osf_sigprocmask:
-	.prologue 0
-	mov	$sp, $18
-	jmp	$31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
-	.align	4
 	.globl	alpha_ni_syscall
 	.ent	alpha_ni_syscall
 alpha_ni_syscall:
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f58c8..1cc49683fb6 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
 					     new_raw_count) != prev_raw_count)
 		goto again;

-	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;

-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}

-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
-	unsigned long flags;
+	unsigned long irq_flags;

 	/*
 	 * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);

 	/* Default to error to be returned */
 	ret = -EAGAIN;

@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}

-	local_irq_restore(flags);
-	perf_enable();
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);

 	return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int j;

-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);

 	for (j = 0; j < cpuc->n_events; j++) {
 		if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
 		}
 	}

-	local_irq_restore(flags);
-	perf_enable();
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 }


@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }


-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }


@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }

-static const struct pmu pmu = {
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;

+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;

 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);

-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
-
-

 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */

-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

@@ -713,6 +745,17 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }

+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
+	.read		= alpha_pmu_read,
+};
+

 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
-			cpuc->idx_mask &= ~(1UL<<idx);
+			alpha_pmu_stop(event, 0);
 		}
 	}
 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)

 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
+
+	perf_pmu_register(&pmu);
 }

diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba308ea..3ec35066f1d 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
 	dest[27] = pt->r27;
 	dest[28] = pt->r28;
 	dest[29] = pt->gp;
-	dest[30] = rdusp();
+	dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
 	dest[31] = pt->pc;

 	/* Once upon a time this was the PS value.  Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0f6b51ae865..6f7feb5db27 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-		struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-	unsigned long oldmask = -EINVAL;
-
-	if ((unsigned long)how-1 <= 2) {
-		long sign = how-2;	/* -1 .. 1 */
-		unsigned long block, unblock;
-
-		newmask &= _BLOCKABLE;
-		spin_lock_irq(&current->sighand->siglock);
-		oldmask = current->blocked.sig[0];
-
-		unblock = oldmask & ~newmask;
-		block = oldmask | newmask;
-		if (!sign)
-			block = unblock;
-		if (sign <= 0)
-			newmask = block;
-		if (_NSIG_WORDS > 1 && sign > 0)
-			sigemptyset(&current->blocked);
-		current->blocked.sig[0] = newmask;
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-
-		regs->r0 = 0;		/* special no error return */
+	sigset_t oldmask;
+	sigset_t mask;
+	unsigned long res;
+
+	siginitset(&mask, newmask & _BLOCKABLE);
+	res = sigprocmask(how, &mask, &oldmask);
+	if (!res) {
+		force_successful_syscall_return();
+		res = oldmask.sig[0];
 	}
-	return oldmask;
+	return res;
 }

 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
 		old_sigset_t mask;
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 		new_ka.ka_restorer = NULL;
 	}
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
 	if (!ret && oact) {
 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}

 	return ret;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index ce594ef533c..a6a1de9db16 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
 	.quad sys_open				/* 45 */
 	.quad alpha_ni_syscall
 	.quad sys_getxgid
-	.quad osf_sigprocmask
+	.quad sys_osf_sigprocmask
 	.quad alpha_ni_syscall
 	.quad alpha_ni_syscall			/* 50 */
 	.quad sys_acct
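
In the signal.c hunk above, the hand-rolled conditional-move arithmetic in sys_osf_sigprocmask gives way to siginitset() plus the generic sigprocmask() helper; this works because alpha's SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK values (1, 2, 3) already match the OSF/1 "how" encoding. The userspace check below sketches why the two formulations agree; everything in it is an illustrative assumption (the _BLOCKABLE filter and the _NSIG_WORDS > 1 upper-word clearing on SIG_SETMASK are left out), not the kernel code itself.

/*
 * Userspace check (not kernel code) that the deleted conditional-move
 * arithmetic and a sigprocmask()-style switch compute the same mask
 * for the three OSF/1 "how" values.
 */
#include <assert.h>
#include <stdio.h>

enum { OSF_SIG_BLOCK = 1, OSF_SIG_UNBLOCK = 2, OSF_SIG_SETMASK = 3 };

/* The removed code's trick, transcribed from the diff. */
static unsigned long old_way(int how, unsigned long blocked,
			     unsigned long newmask)
{
	long sign = how - 2;			/* -1 .. 1 */
	unsigned long unblock = blocked & ~newmask;
	unsigned long block = blocked | newmask;

	if (!sign)
		block = unblock;
	if (sign <= 0)
		newmask = block;
	return newmask;				/* the new blocked mask */
}

/* What delegating to the generic sigprocmask() helper amounts to. */
static unsigned long new_way(int how, unsigned long blocked,
			     unsigned long newmask)
{
	switch (how) {
	case OSF_SIG_BLOCK:	return blocked | newmask;
	case OSF_SIG_UNBLOCK:	return blocked & ~newmask;
	case OSF_SIG_SETMASK:	return newmask;
	}
	return blocked;				/* sigprocmask() would return -EINVAL */
}

int main(void)
{
	for (int how = OSF_SIG_BLOCK; how <= OSF_SIG_SETMASK; how++)
		assert(old_way(how, 0xf0f0, 0x0ff0) ==
		       new_way(how, 0xf0f0, 0x0ff0));
	puts("old and new mask logic agree");
	return 0;
}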