Diffstat (limited to 'arch/mips/sibyte/sb1250/bcm1250_tbprof.c')
-rw-r--r--  arch/mips/sibyte/sb1250/bcm1250_tbprof.c | 154
1 file changed, 85 insertions(+), 69 deletions(-)
diff --git a/arch/mips/sibyte/sb1250/bcm1250_tbprof.c b/arch/mips/sibyte/sb1250/bcm1250_tbprof.c
index 7f813ae9eaf..992e0d8dbb6 100644
--- a/arch/mips/sibyte/sb1250/bcm1250_tbprof.c
+++ b/arch/mips/sibyte/sb1250/bcm1250_tbprof.c
@@ -28,6 +28,8 @@
 #include <linux/fs.h>
 #include <linux/errno.h>
 #include <linux/reboot.h>
+#include <linux/smp_lock.h>
+#include <linux/wait.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/sibyte/sb1250.h>
@@ -64,24 +66,25 @@ static void arm_tb(void)
 	u_int64_t tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
 	/* Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
 	   trigger start of trace.  XXX vary sampling period */
-	bus_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
-	scdperfcnt = bus_readq(IOADDR(A_SCD_PERF_CNT_CFG));
+	__raw_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
+	scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
 	/* Unfortunately, in Pass 2 we must clear all counters to knock down
 	   a previous interrupt request.  This means that bus profiling
 	   requires ALL of the SCD perf counters. */
-	bus_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |	// keep counters 0,2,3 as is
-		   M_SPC_CFG_ENABLE |			// enable counting
-		   M_SPC_CFG_CLEAR |			// clear all counters
-		   V_SPC_CFG_SRC1(1),			// counter 1 counts cycles
-		   IOADDR(A_SCD_PERF_CNT_CFG));
-	bus_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
+	__raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
+						// keep counters 0,2,3 as is
+		     M_SPC_CFG_ENABLE |		// enable counting
+		     M_SPC_CFG_CLEAR |		// clear all counters
+		     V_SPC_CFG_SRC1(1),		// counter 1 counts cycles
+		     IOADDR(A_SCD_PERF_CNT_CFG));
+	__raw_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
 	/* Reset the trace buffer */
-	bus_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
+	__raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
 #if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
 	/* XXXKW may want to expose control to the data-collector */
 	tb_options |= M_SCD_TRACE_CFG_FORCECNT;
 #endif
-	bus_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
+	__raw_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
 	sbp.tb_armed = 1;
 }
 
@@ -93,23 +96,30 @@ static irqreturn_t sbprof_tb_intr(int irq, void *dev_id, struct pt_regs *regs)
 		/* XXX should use XKPHYS to make writes bypass L2 */
 		u_int64_t *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
 		/* Read out trace */
-		bus_writeq(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG));
+		__raw_writeq(M_SCD_TRACE_CFG_START_READ,
+			     IOADDR(A_SCD_TRACE_CFG));
 		__asm__ __volatile__ ("sync" : : : "memory");
 		/* Loop runs backwards because bundles are read out in reverse order */
 		for (i = 256 * 6; i > 0; i -= 6) {
 			// Subscripts decrease to put bundle in the order
 			//   t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi
-			p[i-1] = bus_readq(IOADDR(A_SCD_TRACE_READ)); // read t2 hi
-			p[i-2] = bus_readq(IOADDR(A_SCD_TRACE_READ)); // read t2 lo
-			p[i-3] = bus_readq(IOADDR(A_SCD_TRACE_READ)); // read t1 hi
-			p[i-4] = bus_readq(IOADDR(A_SCD_TRACE_READ)); // read t1 lo
-			p[i-5] = bus_readq(IOADDR(A_SCD_TRACE_READ)); // read t0 hi
-			p[i-6] = bus_readq(IOADDR(A_SCD_TRACE_READ)); // read t0 lo
+			p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+								// read t2 hi
+			p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+								// read t2 lo
+			p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+								// read t1 hi
+			p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+								// read t1 lo
+			p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+								// read t0 hi
+			p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+								// read t0 lo
 		}
 		if (!sbp.tb_enable) {
 			DBG(printk(DEVNAME ": tb_intr shutdown\n"));
-			bus_writeq(M_SCD_TRACE_CFG_RESET,
-				   IOADDR(A_SCD_TRACE_CFG));
+			__raw_writeq(M_SCD_TRACE_CFG_RESET,
+				     IOADDR(A_SCD_TRACE_CFG));
 			sbp.tb_armed = 0;
 			wake_up(&sbp.tb_sync);
 		} else {
@@ -118,7 +128,7 @@ static irqreturn_t sbprof_tb_intr(int irq, void *dev_id, struct pt_regs *regs)
 	} else {
 		/* No more trace buffer samples */
 		DBG(printk(DEVNAME ": tb_intr full\n"));
-		bus_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
+		__raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
 		sbp.tb_armed = 0;
 		if (!sbp.tb_enable) {
 			wake_up(&sbp.tb_sync);
@@ -152,13 +162,11 @@ int sbprof_zbprof_start(struct file *filp)
 		return -EBUSY;
 	}
 	/* Make sure there isn't a perf-cnt interrupt waiting */
-	scdperfcnt = bus_readq(IOADDR(A_SCD_PERF_CNT_CFG));
+	scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
 	/* Disable and clear counters, override SRC_1 */
-	bus_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
-		   M_SPC_CFG_ENABLE |
-		   M_SPC_CFG_CLEAR |
-		   V_SPC_CFG_SRC1(1),
-		   IOADDR(A_SCD_PERF_CNT_CFG));
+	__raw_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
+		     M_SPC_CFG_ENABLE | M_SPC_CFG_CLEAR | V_SPC_CFG_SRC1(1),
+		     IOADDR(A_SCD_PERF_CNT_CFG));
 
 	/* We grab this interrupt to prevent others from trying to use
 	   it, even though we don't want to service the interrupts
@@ -172,55 +180,55 @@ int sbprof_zbprof_start(struct file *filp)
 	/* I need the core to mask these, but the interrupt mapper to
 	   pass them through.  I am exploiting my knowledge that
 	   cp0_status masks out IP[5]. krw */
-	bus_writeq(K_INT_MAP_I3,
-		   IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
-			  (K_INT_PERF_CNT << 3)));
+	__raw_writeq(K_INT_MAP_I3,
+		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
+			    (K_INT_PERF_CNT << 3)));
 
 	/* Initialize address traps */
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));
 
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));
 
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
-	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
+	__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
 
 	/* Initialize Trace Event 0-7 */
 	// when interrupt
-	bus_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));
+	__raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));
 
 	/* Initialize Trace Sequence 0-7 */
 	// Start on event 0 (interrupt)
-	bus_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
-		   IOADDR(A_SCD_TRACE_SEQUENCE_0));
+	__raw_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
+		     IOADDR(A_SCD_TRACE_SEQUENCE_0));
 	// dsamp when d used | asamp when a used
-	bus_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
-		   K_SCD_TRSEQ_TRIGGER_ALL,
-		   IOADDR(A_SCD_TRACE_SEQUENCE_1));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
-	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));
+	__raw_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
+		     K_SCD_TRSEQ_TRIGGER_ALL,
+		     IOADDR(A_SCD_TRACE_SEQUENCE_1));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
+	__raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));
 
 	/* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
-	bus_writeq((1ULL << K_INT_PERF_CNT),
-		   IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));
+	__raw_writeq(1ULL << K_INT_PERF_CNT,
+		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));
 
 	arm_tb();
 
@@ -231,6 +239,7 @@ int sbprof_zbprof_start(struct file *filp)
 
 int sbprof_zbprof_stop(void)
 {
+	DEFINE_WAIT(wait);
 	DBG(printk(DEVNAME ": stopping\n"));
 
 	if (sbp.tb_enable) {
@@ -240,7 +249,9 @@ int sbprof_zbprof_stop(void)
 		   this sleep happens. */
 		if (sbp.tb_armed) {
 			DBG(printk(DEVNAME ": wait for disarm\n"));
-			interruptible_sleep_on(&sbp.tb_sync);
+			prepare_to_wait(&sbp.tb_sync, &wait, TASK_INTERRUPTIBLE);
+			schedule();
+			finish_wait(&sbp.tb_sync, &wait);
 			DBG(printk(DEVNAME ": disarm complete\n"));
 		}
 		free_irq(K_INT_TRACE_FREEZE, &sbp);
@@ -333,13 +344,13 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
 	return count;
 }
 
-static int sbprof_tb_ioctl(struct inode *inode,
-			   struct file *filp,
-			   unsigned int command,
-			   unsigned long arg)
+static long sbprof_tb_ioctl(struct file *filp,
+			    unsigned int command,
+			    unsigned long arg)
 {
 	int error = 0;
 
+	lock_kernel();
 	switch (command) {
 	case SBPROF_ZBSTART:
 		error = sbprof_zbprof_start(filp);
@@ -348,13 +359,17 @@ static int sbprof_tb_ioctl(struct inode *inode,
 		error = sbprof_zbprof_stop();
 		break;
 	case SBPROF_ZBWAITFULL:
-		interruptible_sleep_on(&sbp.tb_read);
+		DEFINE_WAIT(wait);
+		prepare_to_wait(&sbp.tb_read, &wait, TASK_INTERRUPTIBLE);
+		schedule();
+		finish_wait(&sbp.tb_read, &wait);
 		/* XXXKW check if interrupted? */
 		return put_user(TB_FULL, (int *) arg);
 	default:
 		error = -EINVAL;
 		break;
 	}
 
+	unlock_kernel();
 	return error;
 }
 
@@ -364,7 +379,8 @@ static struct file_operations sbprof_tb_fops = {
 	.open		= sbprof_tb_open,
 	.release	= sbprof_tb_release,
 	.read		= sbprof_tb_read,
-	.ioctl		= sbprof_tb_ioctl,
+	.unlocked_ioctl	= sbprof_tb_ioctl,
+	.compat_ioctl	= sbprof_tb_ioctl,
 	.mmap		= NULL,
 };
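Beyond the mechanical bus_writeq()/bus_readq() to __raw_writeq()/__raw_readq() rename, the behavioural part of this patch is the removal of the deprecated interruptible_sleep_on() in favour of an open-coded wait, and the switch from the old .ioctl hook to .unlocked_ioctl/.compat_ioctl bracketed by lock_kernel()/unlock_kernel(). Below is a minimal sketch of that wait-queue pattern, not code from the patch itself; the example_* names and the condition re-check between prepare_to_wait() and schedule() are illustrative assumptions.

/*
 * Sketch only: open-coded replacement for interruptible_sleep_on().
 * The example_* identifiers are hypothetical; only DEFINE_WAIT,
 * prepare_to_wait, schedule, finish_wait and wake_up mirror the patch.
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_sync);	/* hypothetical wait queue */
static int example_armed;			/* hypothetical condition flag */

static void example_wait_for_disarm(void)
{
	DEFINE_WAIT(wait);

	/* Register on the queue and set TASK_INTERRUPTIBLE *before*
	   testing the condition, so a wake_up() arriving in between
	   makes the later schedule() return immediately rather than
	   being lost. */
	prepare_to_wait(&example_sync, &wait, TASK_INTERRUPTIBLE);
	if (example_armed)
		schedule();
	finish_wait(&example_sync, &wait);
}

static void example_disarm(void)
{
	example_armed = 0;
	wake_up(&example_sync);		/* wakes a sleeper registered above */
}

The patch itself calls prepare_to_wait()/schedule()/finish_wait() unconditionally after its existing sbp.tb_armed check, which preserves the original interruptible_sleep_on() semantics; re-checking the condition after prepare_to_wait(), as sketched here, is the usual way to close the remaining wake-up window.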