Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/Kconfig                     |   4
-rw-r--r--  arch/alpha/include/asm/dma-mapping.h   |   2
-rw-r--r--  arch/alpha/include/asm/hw_irq.h        |   1
-rw-r--r--  arch/alpha/include/asm/ioctls.h        |   4
-rw-r--r--  arch/alpha/include/asm/local64.h       |   1
-rw-r--r--  arch/alpha/include/asm/md.h            |  13
-rw-r--r--  arch/alpha/include/asm/perf_event.h    |   8
-rw-r--r--  arch/alpha/include/asm/scatterlist.h   |   2
-rw-r--r--  arch/alpha/include/asm/termbits.h      |   1
-rw-r--r--  arch/alpha/include/asm/wrperfmon.h     |  93
-rw-r--r--  arch/alpha/kernel/Makefile             |   1
-rw-r--r--  arch/alpha/kernel/irq.c                |   7
-rw-r--r--  arch/alpha/kernel/irq_alpha.c          |   3
-rw-r--r--  arch/alpha/kernel/osf_sys.c            |  14
-rw-r--r--  arch/alpha/kernel/perf_event.c         | 842
-rw-r--r--  arch/alpha/kernel/process.c            |   2
-rw-r--r--  arch/alpha/kernel/time.c               |  26
17 files changed, 990 insertions, 34 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 3e2e540a0f2..b9647bb66d1 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -47,10 +47,6 @@ config GENERIC_CALIBRATE_DELAY bool default y -config GENERIC_TIME - bool - default y - config GENERIC_CMOS_UPDATE def_bool y diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 1bce8169733..4567aca6fdd 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -41,9 +41,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) #define dma_cache_sync(dev, va, size, dir) ((void)0) -#define dma_get_cache_alignment() L1_CACHE_BYTES #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/alpha/include/asm/hw_irq.h b/arch/alpha/include/asm/hw_irq.h index a37db0f9509..5050ac81cd9 100644 --- a/arch/alpha/include/asm/hw_irq.h +++ b/arch/alpha/include/asm/hw_irq.h @@ -3,6 +3,7 @@ extern volatile unsigned long irq_err_count; +DECLARE_PER_CPU(unsigned long, irq_pmi_count); #ifdef CONFIG_ALPHA_GENERIC #define ACTUAL_NR_IRQS alpha_mv.nr_irqs diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h index 67bb9f6fdbe..59617c3c2be 100644 --- a/arch/alpha/include/asm/ioctls.h +++ b/arch/alpha/include/asm/ioctls.h @@ -80,6 +80,7 @@ # define TIOCPKT_START 8 # define TIOCPKT_NOSTOP 16 # define TIOCPKT_DOSTOP 32 +# define TIOCPKT_IOCTL 64 #define TIOCNOTTY 0x5422 @@ -91,6 +92,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 @@ -106,7 +108,5 @@ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ #endif /* _ASM_ALPHA_IOCTLS_H */ diff --git a/arch/alpha/include/asm/local64.h b/arch/alpha/include/asm/local64.h new file mode 100644 index 00000000000..36c93b5cc23 --- /dev/null +++ b/arch/alpha/include/asm/local64.h @@ -0,0 +1 @@ +#include <asm-generic/local64.h> diff --git a/arch/alpha/include/asm/md.h b/arch/alpha/include/asm/md.h deleted file mode 100644 index 6c9b8222a4f..00000000000 --- a/arch/alpha/include/asm/md.h +++ /dev/null @@ -1,13 +0,0 @@ -/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $ - * md.h: High speed xor_block operation for RAID4/5 - * - */ - -#ifndef __ASM_MD_H -#define __ASM_MD_H - -/* #define HAVE_ARCH_XORBLOCK */ - -#define MD_XORBLOCK_ALIGNMENT sizeof(long) - -#endif /* __ASM_MD_H */ diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h index 3bef8522017..4157cd3c44a 100644 --- a/arch/alpha/include/asm/perf_event.h +++ b/arch/alpha/include/asm/perf_event.h @@ -2,8 +2,14 @@ #define __ASM_ALPHA_PERF_EVENT_H /* Alpha only supports software events through this interface. 
*/ -static inline void set_perf_event_pending(void) { } +extern void set_perf_event_pending(void); #define PERF_EVENT_INDEX_OFFSET 0 +#ifdef CONFIG_PERF_EVENTS +extern void init_hw_perf_events(void); +#else +static inline void init_hw_perf_events(void) { } +#endif + #endif /* __ASM_ALPHA_PERF_EVENT_H */ diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h index 5728c52a741..017d7471c3c 100644 --- a/arch/alpha/include/asm/scatterlist.h +++ b/arch/alpha/include/asm/scatterlist.h @@ -3,6 +3,4 @@ #include <asm-generic/scatterlist.h> -#define ISA_DMA_THRESHOLD (~0UL) - #endif /* !(_ALPHA_SCATTERLIST_H) */ diff --git a/arch/alpha/include/asm/termbits.h b/arch/alpha/include/asm/termbits.h index ad854a4a3af..879dd358992 100644 --- a/arch/alpha/include/asm/termbits.h +++ b/arch/alpha/include/asm/termbits.h @@ -180,6 +180,7 @@ struct ktermios { #define FLUSHO 0x00800000 #define PENDIN 0x20000000 #define IEXTEN 0x00000400 +#define EXTPROC 0x10000000 /* Values for the ACTION argument to `tcflow'. */ #define TCOOFF 0 diff --git a/arch/alpha/include/asm/wrperfmon.h b/arch/alpha/include/asm/wrperfmon.h new file mode 100644 index 00000000000..319bf6788d8 --- /dev/null +++ b/arch/alpha/include/asm/wrperfmon.h @@ -0,0 +1,93 @@ +/* + * Definitions for use with the Alpha wrperfmon PAL call. + */ + +#ifndef __ALPHA_WRPERFMON_H +#define __ALPHA_WRPERFMON_H + +/* Following commands are implemented on all CPUs */ +#define PERFMON_CMD_DISABLE 0 +#define PERFMON_CMD_ENABLE 1 +#define PERFMON_CMD_DESIRED_EVENTS 2 +#define PERFMON_CMD_LOGGING_OPTIONS 3 +/* Following commands on EV5/EV56/PCA56 only */ +#define PERFMON_CMD_INT_FREQ 4 +#define PERFMON_CMD_ENABLE_CLEAR 7 +/* Following commands are on EV5 and better CPUs */ +#define PERFMON_CMD_READ 5 +#define PERFMON_CMD_WRITE 6 +/* Following command are on EV6 and better CPUs */ +#define PERFMON_CMD_ENABLE_WRITE 7 +/* Following command are on EV67 and better CPUs */ +#define PERFMON_CMD_I_STAT 8 +#define PERFMON_CMD_PMPC 9 + + +/* EV5/EV56/PCA56 Counters */ +#define EV5_PCTR_0 (1UL<<0) +#define EV5_PCTR_1 (1UL<<1) +#define EV5_PCTR_2 (1UL<<2) + +#define EV5_PCTR_0_COUNT_SHIFT 48 +#define EV5_PCTR_1_COUNT_SHIFT 32 +#define EV5_PCTR_2_COUNT_SHIFT 16 + +#define EV5_PCTR_0_COUNT_MASK 0xffffUL +#define EV5_PCTR_1_COUNT_MASK 0xffffUL +#define EV5_PCTR_2_COUNT_MASK 0x3fffUL + +/* EV6 Counters */ +#define EV6_PCTR_0 (1UL<<0) +#define EV6_PCTR_1 (1UL<<1) + +#define EV6_PCTR_0_COUNT_SHIFT 28 +#define EV6_PCTR_1_COUNT_SHIFT 6 + +#define EV6_PCTR_0_COUNT_MASK 0xfffffUL +#define EV6_PCTR_1_COUNT_MASK 0xfffffUL + +/* EV67 (and subsequent) counters */ +#define EV67_PCTR_0 (1UL<<0) +#define EV67_PCTR_1 (1UL<<1) + +#define EV67_PCTR_0_COUNT_SHIFT 28 +#define EV67_PCTR_1_COUNT_SHIFT 6 + +#define EV67_PCTR_0_COUNT_MASK 0xfffffUL +#define EV67_PCTR_1_COUNT_MASK 0xfffffUL + + +/* + * The Alpha Architecure Handbook, vers. 4 (1998) appears to have a misprint + * in Table E-23 regarding the bits that set the event PCTR 1 counts. + * Hopefully what we have here is correct. 
+ */ +#define EV6_PCTR_0_EVENT_MASK 0x10UL +#define EV6_PCTR_1_EVENT_MASK 0x0fUL + +/* EV6 Events */ +#define EV6_PCTR_0_CYCLES (0UL << 4) +#define EV6_PCTR_0_INSTRUCTIONS (1UL << 4) + +#define EV6_PCTR_1_CYCLES 0 +#define EV6_PCTR_1_BRANCHES 1 +#define EV6_PCTR_1_BRANCH_MISPREDICTS 2 +#define EV6_PCTR_1_DTB_SINGLE_MISSES 3 +#define EV6_PCTR_1_DTB_DOUBLE_MISSES 4 +#define EV6_PCTR_1_ITB_MISSES 5 +#define EV6_PCTR_1_UNALIGNED_TRAPS 6 +#define EV6_PCTR_1_REPLY_TRAPS 7 + +/* From the Alpha Architecture Reference Manual, 4th edn., 2002 */ +#define EV67_PCTR_MODE_MASK 0x10UL +#define EV67_PCTR_EVENT_MASK 0x0CUL + +#define EV67_PCTR_MODE_PROFILEME (1UL<<4) +#define EV67_PCTR_MODE_AGGREGATE (0UL<<4) + +#define EV67_PCTR_INSTR_CYCLES (0UL<<2) +#define EV67_PCTR_CYCLES_UNDEF (1UL<<2) +#define EV67_PCTR_INSTR_BCACHEMISS (2UL<<2) +#define EV67_PCTR_CYCLES_MBOX (3UL<<2) + +#endif diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 5a62fb46ef2..1ee9b5b629b 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o obj-$(CONFIG_SRM_ENV) += srm_env.o obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifdef CONFIG_ALPHA_GENERIC diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 7f912ba3d9a..fe912984d9b 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -31,6 +31,7 @@ #include <asm/uaccess.h> volatile unsigned long irq_err_count; +DEFINE_PER_CPU(unsigned long, irq_pmi_count); void ack_bad_irq(unsigned int irq) { @@ -63,9 +64,7 @@ int irq_select_affinity(unsigned int irq) int show_interrupts(struct seq_file *p, void *v) { -#ifdef CONFIG_SMP int j; -#endif int irq = *(loff_t *) v; struct irqaction * action; unsigned long flags; @@ -112,6 +111,10 @@ unlock: seq_printf(p, "%10lu ", cpu_data[j].ipi_count); seq_putc(p, '\n'); #endif + seq_puts(p, "PMI: "); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); + seq_puts(p, " Performance Monitoring\n"); seq_printf(p, "ERR: %10lu\n", irq_err_count); } return 0; diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index cfde865b78e..5f77afb88e8 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -10,6 +10,7 @@ #include <asm/machvec.h> #include <asm/dma.h> +#include <asm/perf_event.h> #include "proto.h" #include "irq_impl.h" @@ -111,6 +112,8 @@ init_IRQ(void) wrent(entInt, 0); alpha_mv.init_irq(); + + init_hw_perf_events(); } /* diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index de9d3971780..fb58150a7e8 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -234,17 +234,17 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st } static int -do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer, +do_osf_statfs(struct path *path, struct osf_statfs __user *buffer, unsigned long bufsiz) { struct kstatfs linux_stat; - int error = vfs_statfs(dentry, &linux_stat); + int error = vfs_statfs(path, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } -SYSCALL_DEFINE3(osf_statfs, char __user *, pathname, +SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct path path; @@ -252,7 +252,7 @@ SYSCALL_DEFINE3(osf_statfs, char __user *, pathname, retval = user_path(pathname, &path); if (!retval) { - 
retval = do_osf_statfs(path.dentry, buffer, bufsiz); + retval = do_osf_statfs(&path buffer, bufsiz); path_put(&path); } return retval; @@ -267,7 +267,7 @@ SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, retval = -EBADF; file = fget(fd); if (file) { - retval = do_osf_statfs(file->f_path.dentry, buffer, bufsiz); + retval = do_osf_statfs(&file->f_path, buffer, bufsiz); fput(file); } return retval; @@ -358,7 +358,7 @@ osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) return do_mount("", dirname, "proc", flags, NULL); } -SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path, +SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; @@ -932,7 +932,7 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, } -SYSCALL_DEFINE2(osf_utimes, char __user *, filename, +SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c new file mode 100644 index 00000000000..51c39fa4169 --- /dev/null +++ b/arch/alpha/kernel/perf_event.c @@ -0,0 +1,842 @@ +/* + * Hardware performance events for the Alpha. + * + * We implement HW counts on the EV67 and subsequent CPUs only. + * + * (C) 2010 Michael J. Cree + * + * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and + * ARM code, which are copyright by their respective authors. + */ + +#include <linux/perf_event.h> +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> + +#include <asm/hwrpb.h> +#include <asm/atomic.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/pal.h> +#include <asm/wrperfmon.h> +#include <asm/hw_irq.h> + + +/* The maximum number of PMCs on any Alpha CPU whatsoever. */ +#define MAX_HWEVENTS 3 +#define PMC_NO_INDEX -1 + +/* For tracking PMCs and the hw events they monitor on each CPU. */ +struct cpu_hw_events { + int enabled; + /* Number of events scheduled; also number entries valid in arrays below. */ + int n_events; + /* Number events added since last hw_perf_disable(). */ + int n_added; + /* Events currently scheduled. */ + struct perf_event *event[MAX_HWEVENTS]; + /* Event type of each scheduled event. */ + unsigned long evtype[MAX_HWEVENTS]; + /* Current index of each scheduled event; if not yet determined + * contains PMC_NO_INDEX. + */ + int current_idx[MAX_HWEVENTS]; + /* The active PMCs' config for easy use with wrperfmon(). */ + unsigned long config; + /* The active counters' indices for easy use with wrperfmon(). */ + unsigned long idx_mask; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + + + +/* + * A structure to hold the description of the PMCs available on a particular + * type of Alpha CPU. + */ +struct alpha_pmu_t { + /* Mapping of the perf system hw event types to indigenous event types */ + const int *event_map; + /* The number of entries in the event_map */ + int max_events; + /* The number of PMCs on this Alpha */ + int num_pmcs; + /* + * All PMC counters reside in the IBOX register PCTR. This is the + * LSB of the counter. + */ + int pmc_count_shift[MAX_HWEVENTS]; + /* + * The mask that isolates the PMC bits when the LSB of the counter + * is shifted to bit 0. + */ + unsigned long pmc_count_mask[MAX_HWEVENTS]; + /* The maximum period the PMC can count. 
*/ + unsigned long pmc_max_period[MAX_HWEVENTS]; + /* + * The maximum value that may be written to the counter due to + * hardware restrictions is pmc_max_period - pmc_left. + */ + long pmc_left[3]; + /* Subroutine for allocation of PMCs. Enforces constraints. */ + int (*check_constraints)(struct perf_event **, unsigned long *, int); +}; + +/* + * The Alpha CPU PMU description currently in operation. This is set during + * the boot process to the specific CPU of the machine. + */ +static const struct alpha_pmu_t *alpha_pmu; + + +#define HW_OP_UNSUPPORTED -1 + +/* + * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs + * follow. Since they are identical we refer to them collectively as the + * EV67 henceforth. + */ + +/* + * EV67 PMC event types + * + * There is no one-to-one mapping of the possible hw event types to the + * actual codes that are used to program the PMCs hence we introduce our + * own hw event type identifiers. + */ +enum ev67_pmc_event_type { + EV67_CYCLES = 1, + EV67_INSTRUCTIONS, + EV67_BCACHEMISS, + EV67_MBOXREPLAY, + EV67_LAST_ET +}; +#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES) + + +/* Mapping of the hw event types to the perf tool interface */ +static const int ev67_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS, +}; + +struct ev67_mapping_t { + int config; + int idx; +}; + +/* + * The mapping used for one event only - these must be in same order as enum + * ev67_pmc_event_type definition. + */ +static const struct ev67_mapping_t ev67_mapping[] = { + {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */ + {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */ + {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */ + {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */ +}; + + +/* + * Check that a group of events can be simultaneously scheduled on to the + * EV67 PMU. Also allocate counter indices and config. + */ +static int ev67_check_constraints(struct perf_event **event, + unsigned long *evtype, int n_ev) +{ + int idx0; + unsigned long config; + + idx0 = ev67_mapping[evtype[0]-1].idx; + config = ev67_mapping[evtype[0]-1].config; + if (n_ev == 1) + goto success; + + BUG_ON(n_ev != 2); + + if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) { + /* MBOX replay traps must be on PMC 1 */ + idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0; + /* Only cycles can accompany MBOX replay traps */ + if (evtype[idx0] == EV67_CYCLES) { + config = EV67_PCTR_CYCLES_MBOX; + goto success; + } + } + + if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) { + /* Bcache misses must be on PMC 1 */ + idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0; + /* Only instructions can accompany Bcache misses */ + if (evtype[idx0] == EV67_INSTRUCTIONS) { + config = EV67_PCTR_INSTR_BCACHEMISS; + goto success; + } + } + + if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) { + /* Instructions must be on PMC 0 */ + idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1; + /* By this point only cycles can accompany instructions */ + if (evtype[idx0^1] == EV67_CYCLES) { + config = EV67_PCTR_INSTR_CYCLES; + goto success; + } + } + + /* Otherwise, darn it, there is a conflict. 
*/ + return -1; + +success: + event[0]->hw.idx = idx0; + event[0]->hw.config_base = config; + if (n_ev == 2) { + event[1]->hw.idx = idx0 ^ 1; + event[1]->hw.config_base = config; + } + return 0; +} + + +static const struct alpha_pmu_t ev67_pmu = { + .event_map = ev67_perfmon_event_map, + .max_events = ARRAY_SIZE(ev67_perfmon_event_map), + .num_pmcs = 2, + .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0}, + .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, + .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, + .pmc_left = {16, 4, 0}, + .check_constraints = ev67_check_constraints +}; + + + +/* + * Helper routines to ensure that we read/write only the correct PMC bits + * when calling the wrperfmon PALcall. + */ +static inline void alpha_write_pmc(int idx, unsigned long val) +{ + val &= alpha_pmu->pmc_count_mask[idx]; + val <<= alpha_pmu->pmc_count_shift[idx]; + val |= (1<<idx); + wrperfmon(PERFMON_CMD_WRITE, val); +} + +static inline unsigned long alpha_read_pmc(int idx) +{ + unsigned long val; + + val = wrperfmon(PERFMON_CMD_READ, 0); + val >>= alpha_pmu->pmc_count_shift[idx]; + val &= alpha_pmu->pmc_count_mask[idx]; + return val; +} + +/* Set a new period to sample over */ +static int alpha_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + long left = atomic64_read(&hwc->period_left); + long period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + /* + * Hardware restrictions require that the counters must not be + * written with values that are too close to the maximum period. + */ + if (unlikely(left < alpha_pmu->pmc_left[idx])) + left = alpha_pmu->pmc_left[idx]; + + if (left > (long)alpha_pmu->pmc_max_period[idx]) + left = alpha_pmu->pmc_max_period[idx]; + + atomic64_set(&hwc->prev_count, (unsigned long)(-left)); + + alpha_write_pmc(idx, (unsigned long)(-left)); + + perf_event_update_userpage(event); + + return ret; +} + + +/* + * Calculates the count (the 'delta') since the last time the PMC was read. + * + * As the PMCs' full period can easily be exceeded within the perf system + * sampling period we cannot use any high order bits as a guard bit in the + * PMCs to detect overflow as is done by other architectures. The code here + * calculates the delta on the basis that there is no overflow when ovf is + * zero. The value passed via ovf by the interrupt handler corrects for + * overflow. + * + * This can be racey on rare occasions -- a call to this routine can occur + * with an overflowed counter just before the PMI service routine is called. + * The check for delta negative hopefully always rectifies this situation. + */ +static unsigned long alpha_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx, long ovf) +{ + long prev_raw_count, new_raw_count; + long delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = alpha_read_pmc(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; + + /* It is possible on very rare occasions that the PMC has overflowed + * but the interrupt is yet to come. Detect and fix this situation. 
+ */ + if (unlikely(delta < 0)) { + delta += alpha_pmu->pmc_max_period[idx] + 1; + } + + atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + + +/* + * Collect all HW events into the array event[]. + */ +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *event[], unsigned long *evtype, + int *current_idx) +{ + struct perf_event *pe; + int n = 0; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + event[n] = group; + evtype[n] = group->hw.event_base; + current_idx[n++] = PMC_NO_INDEX; + } + list_for_each_entry(pe, &group->sibling_list, group_entry) { + if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + event[n] = pe; + evtype[n] = pe->hw.event_base; + current_idx[n++] = PMC_NO_INDEX; + } + } + return n; +} + + + +/* + * Check that a group of events can be simultaneously scheduled on to the PMU. + */ +static int alpha_check_constraints(struct perf_event **events, + unsigned long *evtypes, int n_ev) +{ + + /* No HW events is possible from hw_perf_group_sched_in(). */ + if (n_ev == 0) + return 0; + + if (n_ev > alpha_pmu->num_pmcs) + return -1; + + return alpha_pmu->check_constraints(events, evtypes, n_ev); +} + + +/* + * If new events have been scheduled then update cpuc with the new + * configuration. This may involve shifting cycle counts from one PMC to + * another. + */ +static void maybe_change_configuration(struct cpu_hw_events *cpuc) +{ + int j; + + if (cpuc->n_added == 0) + return; + + /* Find counters that are moving to another PMC and update */ + for (j = 0; j < cpuc->n_events; j++) { + struct perf_event *pe = cpuc->event[j]; + + if (cpuc->current_idx[j] != PMC_NO_INDEX && + cpuc->current_idx[j] != pe->hw.idx) { + alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); + cpuc->current_idx[j] = PMC_NO_INDEX; + } + } + + /* Assign to counters all unassigned events. */ + cpuc->idx_mask = 0; + for (j = 0; j < cpuc->n_events; j++) { + struct perf_event *pe = cpuc->event[j]; + struct hw_perf_event *hwc = &pe->hw; + int idx = hwc->idx; + + if (cpuc->current_idx[j] != PMC_NO_INDEX) { + cpuc->idx_mask |= (1<<cpuc->current_idx[j]); + continue; + } + + alpha_perf_event_set_period(pe, hwc, idx); + cpuc->current_idx[j] = idx; + cpuc->idx_mask |= (1<<cpuc->current_idx[j]); + } + cpuc->config = cpuc->event[0]->hw.config_base; +} + + + +/* Schedule perf HW event on to PMU. + * - this function is called from outside this module via the pmu struct + * returned from perf event initialisation. + */ +static int alpha_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int n0; + int ret; + unsigned long flags; + + /* + * The Sparc code has the IRQ disable first followed by the perf + * disable, however this can lead to an overflowed counter with the + * PMI disabled on rare occasions. The alpha_perf_event_update() + * routine should detect this situation by noting a negative delta, + * nevertheless we disable the PMCs first to enable a potential + * final PMI to occur before we disable interrupts. 
+ */ + perf_disable(); + local_irq_save(flags); + + /* Default to error to be returned */ + ret = -EAGAIN; + + /* Insert event on to PMU and if successful modify ret to valid return */ + n0 = cpuc->n_events; + if (n0 < alpha_pmu->num_pmcs) { + cpuc->event[n0] = event; + cpuc->evtype[n0] = event->hw.event_base; + cpuc->current_idx[n0] = PMC_NO_INDEX; + + if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { + cpuc->n_events++; + cpuc->n_added++; + ret = 0; + } + } + + local_irq_restore(flags); + perf_enable(); + + return ret; +} + + + +/* Disable performance monitoring unit + * - this function is called from outside this module via the pmu struct + * returned from perf event initialisation. + */ +static void alpha_pmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + int j; + + perf_disable(); + local_irq_save(flags); + + for (j = 0; j < cpuc->n_events; j++) { + if (event == cpuc->event[j]) { + int idx = cpuc->current_idx[j]; + + /* Shift remaining entries down into the existing + * slot. + */ + while (++j < cpuc->n_events) { + cpuc->event[j - 1] = cpuc->event[j]; + cpuc->evtype[j - 1] = cpuc->evtype[j]; + cpuc->current_idx[j - 1] = + cpuc->current_idx[j]; + } + + /* Absorb the final count and turn off the event. */ + alpha_perf_event_update(event, hwc, idx, 0); + perf_event_update_userpage(event); + + cpuc->idx_mask &= ~(1UL<<idx); + cpuc->n_events--; + break; + } + } + + local_irq_restore(flags); + perf_enable(); +} + + +static void alpha_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + alpha_perf_event_update(event, hwc, hwc->idx, 0); +} + + +static void alpha_pmu_unthrottle(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + cpuc->idx_mask |= 1UL<<hwc->idx; + wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); +} + + +/* + * Check that CPU performance counters are supported. + * - currently support EV67 and later CPUs. + * - actually some later revisions of the EV6 have the same PMC model as the + * EV67 but we don't do suffiently deep CPU detection to detect them. + * Bad luck to the very few people who might have one, I guess. + */ +static int supported_cpu(void) +{ + struct percpu_struct *cpu; + unsigned long cputype; + + /* Get cpu type from HW */ + cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset); + cputype = cpu->type & 0xffffffff; + /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */ + return (cputype >= EV67_CPU) && (cputype <= EV69_CPU); +} + + + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Nothing to be done! */ + return; +} + + + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + struct perf_event *evts[MAX_HWEVENTS]; + unsigned long evtypes[MAX_HWEVENTS]; + int idx_rubbish_bin[MAX_HWEVENTS]; + int ev; + int n; + + /* We only support a limited range of HARDWARE event types with one + * only programmable via a RAW event type. 
+ */ + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= alpha_pmu->max_events) + return -EINVAL; + ev = alpha_pmu->event_map[attr->config]; + } else if (attr->type == PERF_TYPE_HW_CACHE) { + return -EOPNOTSUPP; + } else if (attr->type == PERF_TYPE_RAW) { + ev = attr->config & 0xff; + } else { + return -EOPNOTSUPP; + } + + if (ev < 0) { + return ev; + } + + /* The EV67 does not support mode exclusion */ + if (attr->exclude_kernel || attr->exclude_user + || attr->exclude_hv || attr->exclude_idle) { + return -EPERM; + } + + /* + * We place the event type in event_base here and leave calculation + * of the codes to programme the PMU for alpha_pmu_enable() because + * it is only then we will know what HW events are actually + * scheduled on to the PMU. At that point the code to programme the + * PMU is put into config_base and the PMC to use is placed into + * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that + * it is yet to be determined. + */ + hwc->event_base = ev; + + /* Collect events in a group together suitable for calling + * alpha_check_constraints() to verify that the group as a whole can + * be scheduled on to the PMU. + */ + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, + alpha_pmu->num_pmcs - 1, + evts, evtypes, idx_rubbish_bin); + if (n < 0) + return -EINVAL; + } + evtypes[n] = hwc->event_base; + evts[n] = event; + + if (alpha_check_constraints(evts, evtypes, n + 1)) + return -EINVAL; + + /* Indicate that PMU config and idx are yet to be determined. */ + hwc->config_base = 0; + hwc->idx = PMC_NO_INDEX; + + event->destroy = hw_perf_event_destroy; + + /* + * Most architectures reserve the PMU for their use at this point. + * As there is no existing mechanism to arbitrate usage and there + * appears to be no other user of the Alpha PMU we just assume + * that we can just use it, hence a NO-OP here. + * + * Maybe an alpha_reserve_pmu() routine should be implemented but is + * anything else ever going to use it? + */ + + if (!hwc->sample_period) { + hwc->sample_period = alpha_pmu->pmc_max_period[0]; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +static const struct pmu pmu = { + .enable = alpha_pmu_enable, + .disable = alpha_pmu_disable, + .read = alpha_pmu_read, + .unthrottle = alpha_pmu_unthrottle, +}; + + +/* + * Main entry point to initialise a HW performance event. + */ +const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + int err; + + if (!alpha_pmu) + return ERR_PTR(-ENODEV); + + /* Do the real initialisation work. */ + err = __hw_perf_event_init(event); + + if (err) + return ERR_PTR(err); + + return &pmu; +} + + + +/* + * Main entry point - enable HW performance counters. + */ +void hw_perf_enable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + if (cpuc->n_events > 0) { + /* Update cpuc with information from any new scheduled events. */ + maybe_change_configuration(cpuc); + + /* Start counting the desired events. */ + wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE); + wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + } +} + + +/* + * Main entry point - disable HW performance counters. 
+ */ + +void hw_perf_disable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + cpuc->n_added = 0; + + wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); +} + + +/* + * Main entry point - don't know when this is called but it + * obviously dumps debug info. + */ +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned long pcr; + int pcr0, pcr1; + int cpu; + + if (!supported_cpu()) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr = wrperfmon(PERFMON_CMD_READ, 0); + pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0]; + pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1]; + + pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1); + + local_irq_restore(flags); +} + + +/* + * Performance Monitoring Interrupt Service Routine called when a PMC + * overflows. The PMC that overflowed is passed in la_ptr. + */ +static void alpha_perf_event_irq_handler(unsigned long la_ptr, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc; + struct perf_sample_data data; + struct perf_event *event; + struct hw_perf_event *hwc; + int idx, j; + + __get_cpu_var(irq_pmi_count)++; + cpuc = &__get_cpu_var(cpu_hw_events); + + /* Completely counting through the PMC's period to trigger a new PMC + * overflow interrupt while in this interrupt routine is utterly + * disastrous! The EV6 and EV67 counters are sufficiently large to + * prevent this but to be really sure disable the PMCs. + */ + wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); + + /* la_ptr is the counter that overflowed. */ + if (unlikely(la_ptr >= perf_max_events)) { + /* This should never occur! */ + irq_err_count++; + pr_warning("PMI: silly index %ld\n", la_ptr); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + return; + } + + idx = la_ptr; + + perf_sample_data_init(&data, 0); + for (j = 0; j < cpuc->n_events; j++) { + if (cpuc->current_idx[j] == idx) + break; + } + + if (unlikely(j == cpuc->n_events)) { + /* This can occur if the event is disabled right on a PMC overflow. */ + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + return; + } + + event = cpuc->event[j]; + + if (unlikely(!event)) { + /* This should never occur! */ + irq_err_count++; + pr_warning("PMI: No event at index %d!\n", idx); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + return; + } + + hwc = &event->hw; + alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); + data.period = event->hw.last_period; + + if (alpha_perf_event_set_period(event, hwc, idx)) { + if (perf_event_overflow(event, 1, &data, regs)) { + /* Interrupts coming too quickly; "throttle" the + * counter, i.e., disable it for a little while. + */ + cpuc->idx_mask &= ~(1UL<<idx); + } + } + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + + return; +} + + + +/* + * Init call to initialise performance events at kernel startup. 
+ */ +void __init init_hw_perf_events(void) +{ + pr_info("Performance events: "); + + if (!supported_cpu()) { + pr_cont("No support for your CPU.\n"); + return; + } + + pr_cont("Supported CPU type!\n"); + + /* Override performance counter IRQ vector */ + + perf_irq = alpha_perf_event_irq_handler; + + /* And set up PMU specification */ + alpha_pmu = &ev67_pmu; + perf_max_events = alpha_pmu->num_pmcs; +} + diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 395a464353b..88e608aebc8 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -387,7 +387,7 @@ EXPORT_SYMBOL(dump_elf_task_fp); * sys_execve() executes a new program. */ asmlinkage int -do_sys_execve(char __user *ufilename, char __user * __user *argv, +do_sys_execve(const char __user *ufilename, char __user * __user *argv, char __user * __user *envp, struct pt_regs *regs) { int error; diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 1efbed82c0f..eacceb26d9c 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -41,6 +41,7 @@ #include <linux/init.h> #include <linux/bcd.h> #include <linux/profile.h> +#include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -82,6 +83,26 @@ static struct { unsigned long est_cycle_freq; +#ifdef CONFIG_PERF_EVENTS + +DEFINE_PER_CPU(u8, perf_event_pending); + +#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 +#define test_perf_event_pending() __get_cpu_var(perf_event_pending) +#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 + +void set_perf_event_pending(void) +{ + set_perf_event_pending_flag(); +} + +#else /* CONFIG_PERF_EVENTS */ + +#define test_perf_event_pending() 0 +#define clear_perf_event_pending() + +#endif /* CONFIG_PERF_EVENTS */ + static inline __u32 rpcc(void) { @@ -175,6 +196,11 @@ irqreturn_t timer_interrupt(int irq, void *dev) update_process_times(user_mode(get_irq_regs())); #endif + if (test_perf_event_pending()) { + clear_perf_event_pending(); + perf_event_do_pending(); + } + return IRQ_HANDLED; } |
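The patch adds only the kernel side of the EV67-class counter support. As a rough illustration of what it services, here is a minimal, hypothetical userspace sketch (not part of this commit) that opens a cycles plus instructions group through the perf_event_open syscall, the exact pairing that ev67_check_constraints() schedules onto PCTR1 and PCTR0. It assumes the syscall is wired up for the target; glibc provides no wrapper, hence the raw syscall(2) shim, and the helper names are invented for the example.

/*
 * Hypothetical usage sketch: count cycles and retired instructions
 * over a busy loop. Error handling is deliberately minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_counter(__u64 config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = config;
	attr.disabled = (group_fd == -1);	/* only the group leader starts disabled */

	return perf_event_open(&attr, 0, -1, group_fd, 0);
}

int main(void)
{
	unsigned long long count;
	volatile unsigned long sink = 0;
	unsigned long i;
	int cycles_fd, insns_fd;

	/* Group the two events so they are scheduled onto the PMU together. */
	cycles_fd = open_counter(PERF_COUNT_HW_CPU_CYCLES, -1);
	insns_fd  = open_counter(PERF_COUNT_HW_INSTRUCTIONS, cycles_fd);
	if (cycles_fd < 0 || insns_fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(cycles_fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 10000000UL; i++)
		sink += i;
	ioctl(cycles_fd, PERF_EVENT_IOC_DISABLE, 0);

	/* Without PERF_FORMAT_GROUP each fd reads back its own u64 count. */
	if (read(cycles_fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles:       %llu\n", count);
	if (read(insns_fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", count);

	return 0;
}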