Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--	arch/x86/oprofile/op_model_amd.c	220
1 file changed, 110 insertions(+), 110 deletions(-)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 536d0b0b39a..e159254fb7c 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -102,116 +102,6 @@ static u32 get_ibs_caps(void)
 	return ibs_caps;
 }
 
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
-			       struct op_msrs const * const msrs)
-{
-	u64 val;
-	int i;
-
-	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		int virt = op_x86_phys_to_virt(i);
-		if (!reset_value[virt])
-			continue;
-		rdmsrl(msrs->controls[i].addr, val);
-		val &= model->reserved;
-		val |= op_x86_get_ctrl(model, &counter_config[virt]);
-		wrmsrl(msrs->controls[i].addr, val);
-	}
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
-	int i;
-
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (!msrs->counters[i].addr)
-			continue;
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-}
-
-static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
-	int i;
-
-	for (i = 0; i < NUM_COUNTERS; i++) {
-		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-			goto fail;
-		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
-			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-			goto fail;
-		}
-		/* both registers must be reserved */
-		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
-		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
-		continue;
-	fail:
-		if (!counter_config[i].enabled)
-			continue;
-		op_x86_warn_reserved(i);
-		op_amd_shutdown(msrs);
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
-			      struct op_msrs const * const msrs)
-{
-	u64 val;
-	int i;
-
-	/* setup reset_value */
-	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
-		if (counter_config[i].enabled
-		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
-			reset_value[i] = counter_config[i].count;
-		else
-			reset_value[i] = 0;
-	}
-
-	/* clear all counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		if (!msrs->controls[i].addr)
-			continue;
-		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
-			op_x86_warn_in_use(i);
-		val &= model->reserved;
-		wrmsrl(msrs->controls[i].addr, val);
-		/*
-		 * avoid a false detection of ctr overflows in NMI
-		 * handler
-		 */
-		wrmsrl(msrs->counters[i].addr, -1LL);
-	}
-
-	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
-		int virt = op_x86_phys_to_virt(i);
-		if (!reset_value[virt])
-			continue;
-
-		/* setup counter registers */
-		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
-		/* setup control registers */
-		rdmsrl(msrs->controls[i].addr, val);
-		val &= model->reserved;
-		val |= op_x86_get_ctrl(model, &counter_config[virt]);
-		wrmsrl(msrs->controls[i].addr, val);
-	}
-}
-
 /*
  * 16-bit Linear Feedback Shift Register (LFSR)
  *
@@ -376,6 +266,116 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+			       struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	/* enable active counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		int virt = op_x86_phys_to_virt(i);
+		if (!reset_value[virt])
+			continue;
+		rdmsrl(msrs->controls[i].addr, val);
+		val &= model->reserved;
+		val |= op_x86_get_ctrl(model, &counter_config[virt]);
+		wrmsrl(msrs->controls[i].addr, val);
+	}
+}
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+	int i;
+
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if (!msrs->counters[i].addr)
+			continue;
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+}
+
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+	int i;
+
+	for (i = 0; i < NUM_COUNTERS; i++) {
+		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+			goto fail;
+		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
+			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+			goto fail;
+		}
+		/* both registers must be reserved */
+		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+		continue;
+	fail:
+		if (!counter_config[i].enabled)
+			continue;
+		op_x86_warn_reserved(i);
+		op_amd_shutdown(msrs);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+			      struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	/* setup reset_value */
+	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+		if (counter_config[i].enabled
+		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
+			reset_value[i] = counter_config[i].count;
+		else
+			reset_value[i] = 0;
+	}
+
+	/* clear all counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		if (!msrs->controls[i].addr)
+			continue;
+		rdmsrl(msrs->controls[i].addr, val);
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+			op_x86_warn_in_use(i);
+		val &= model->reserved;
+		wrmsrl(msrs->controls[i].addr, val);
+		/*
+		 * avoid a false detection of ctr overflows in NMI
+		 * handler
+		 */
+		wrmsrl(msrs->counters[i].addr, -1LL);
+	}
+
+	/* enable active counters */
+	for (i = 0; i < NUM_COUNTERS; ++i) {
+		int virt = op_x86_phys_to_virt(i);
+		if (!reset_value[virt])
+			continue;
+
+		/* setup counter registers */
+		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+		/* setup control registers */
+		rdmsrl(msrs->controls[i].addr, val);
+		val &= model->reserved;
+		val |= op_x86_get_ctrl(model, &counter_config[virt]);
+		wrmsrl(msrs->controls[i].addr, val);
+	}
+}
+
 static int op_amd_check_ctrs(struct pt_regs * const regs,
 			     struct op_msrs const * const msrs)
 {
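Note: the relocated op_amd_setup_ctrs() arms each active counter by writing -(u64)reset_value[virt] to the counter MSR, so the register wraps past zero (raising the NMI serviced by op_amd_check_ctrs()) after exactly reset_value events. The stand-alone user-space sketch below simulates that two's-complement preload; the 48-bit counter width and the increment loop are illustrative assumptions only, not part of this patch.

/*
 * Illustration only: simulates the -(u64)count counter preload used by
 * op_amd_setup_ctrs(). The 48-bit width is an assumption for the sketch;
 * real counter widths vary by CPU model.
 */
#include <stdio.h>
#include <stdint.h>

#define CTR_BITS 48
#define CTR_MASK ((1ULL << CTR_BITS) - 1)

int main(void)
{
	uint64_t reset_value = 100000;				/* sampling period */
	uint64_t ctr = -(uint64_t)reset_value & CTR_MASK;	/* preload */
	uint64_t events = 0;

	do {							/* one event per tick */
		ctr = (ctr + 1) & CTR_MASK;
		events++;
	} while (ctr != 0);					/* wrap == overflow */

	printf("overflow after %llu events\n",
	       (unsigned long long)events);			/* prints 100000 */
	return 0;
}

This is also why the moved setup code writes -1LL to every cleared counter first: a register left at a small value could wrap almost immediately and be misread as a genuine overflow by the NMI handler.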