Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
 arch/x86/kernel/cpu/perf_counter.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 6bbdc16cc69..fa6541d781b 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -498,7 +498,7 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
  * To be called with the counter disabled in hw:
  */
 static void
-__hw_perf_counter_set_period(struct perf_counter *counter,
+x86_perf_counter_set_period(struct perf_counter *counter,
                              struct hw_perf_counter *hwc, int idx)
 {
         s64 left = atomic64_read(&hwc->period_left);
@@ -642,7 +642,7 @@ try_generic:
          */
         barrier();
 
-        __hw_perf_counter_set_period(counter, hwc, idx);
+        x86_perf_counter_set_period(counter, hwc, idx);
         __x86_pmu_enable(counter, hwc, idx);
 
         return 0;
@@ -731,7 +731,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
         int idx = hwc->idx;
 
         x86_perf_counter_update(counter, hwc, idx);
-        __hw_perf_counter_set_period(counter, hwc, idx);
+        x86_perf_counter_set_period(counter, hwc, idx);
 
         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                 __x86_pmu_enable(counter, hwc, idx);
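
The third hunk sits in perf_save_and_restart(), which follows a save-then-rearm sequence: fold the current hardware count into the counter, program the next sampling period, and re-enable the counter in hardware only if it is still active. Below is a minimal sketch of that sequence with the renamed helper, reconstructed from the context lines above; the local setup of hwc is an assumption, since the top of the function is not shown in this diff.

static void perf_save_and_restart(struct perf_counter *counter)
{
        /* Assumed: hwc is taken from the counter itself; not visible in this hunk. */
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        /* Fold the current hardware count into the counter's running total. */
        x86_perf_counter_update(counter, hwc, idx);

        /* Program the next period (renamed from __hw_perf_counter_set_period). */
        x86_perf_counter_set_period(counter, hwc, idx);

        /* Re-enable in hardware only if the counter is still active. */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __x86_pmu_enable(counter, hwc, idx);
}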