Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--  kernel/events/core.c | 104
1 file changed, 6 insertions(+), 98 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d1a1bee3522..0e8457da6f9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -25,11 +25,11 @@
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
-#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
@@ -3544,7 +3544,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
struct ring_buffer *rb = event->rb;
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
- vma->vm_mm->locked_vm -= event->mmap_locked;
+ vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
mutex_unlock(&event->mmap_mutex);
@@ -3625,7 +3625,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- locked = vma->vm_mm->locked_vm + extra;
+ locked = vma->vm_mm->pinned_vm + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
@@ -3651,7 +3651,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra;
event->mmap_user = get_current_user();
- vma->vm_mm->locked_vm += event->mmap_locked;
+ vma->vm_mm->pinned_vm += event->mmap_locked;
unlock:
if (!ret)
@@ -6853,7 +6853,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
- if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+ if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6942,14 +6942,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
- /*
- * Ignore suspend/resume action, the perf_pm_notifier will
- * take care of that.
- */
- if (action & CPU_TASKS_FROZEN)
- return NOTIFY_OK;
-
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
case CPU_DOWN_FAILED:
@@ -6968,90 +6961,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static void perf_pm_resume_cpu(void *unused)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int idx;
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- ctx = cpuctx->task_ctx;
-
- perf_ctx_lock(cpuctx, ctx);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
- cpu_ctx_sched_out(cpuctx, EVENT_ALL);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, ctx);
- }
- srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int idx;
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- ctx = cpuctx->task_ctx;
-
- perf_ctx_lock(cpuctx, ctx);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
- perf_event_sched_in(cpuctx, ctx, current);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, ctx);
- }
- srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
- get_online_cpus();
- smp_call_function(perf_pm_resume_cpu, NULL, 1);
- put_online_cpus();
-
- return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
- get_online_cpus();
- smp_call_function(perf_pm_suspend_cpu, NULL, 1);
- put_online_cpus();
-
- return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
- switch (action) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- return perf_resume();
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- return perf_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block perf_pm_notifier = {
- .notifier_call = perf_pm,
-};
-
void __init perf_event_init(void)
{
int ret;
@@ -7066,7 +6975,6 @@ void __init perf_event_init(void)
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
register_reboot_notifier(&perf_reboot_notifier);
- register_pm_notifier(&perf_pm_notifier);
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
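
Note on the perf_cpu_notify() hunk above: the early return on CPU_TASKS_FROZEN is replaced by masking the flag off before the switch, so suspend/resume ("frozen") hotplug notifications go through the same paths as ordinary ones, which is what makes the dedicated perf PM notifier below it removable. The following is a rough, standalone userspace sketch of that masking idiom, not part of the patch; the constants are copied from include/linux/cpu.h of that era purely so the example builds on its own.

/*
 * Illustrative only: shows how masking CPU_TASKS_FROZEN lets one switch
 * statement handle both normal and suspend/resume hotplug notifications.
 */
#include <stdio.h>

#define CPU_UP_PREPARE   0x0003	/* values mirror include/linux/cpu.h */
#define CPU_DOWN_PREPARE 0x0005
#define CPU_DOWN_FAILED  0x0006
#define CPU_TASKS_FROZEN 0x0010

static const char *describe(unsigned long action)
{
	switch (action & ~CPU_TASKS_FROZEN) {	/* strip the "frozen" flag */
	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		return "bring per-CPU state up";
	case CPU_DOWN_PREPARE:
		return "tear per-CPU state down";
	default:
		return "ignore";
	}
}

int main(void)
{
	/* A resume-time notification carries CPU_TASKS_FROZEN ORed in ... */
	unsigned long frozen_up = CPU_UP_PREPARE | CPU_TASKS_FROZEN;

	/* ... yet it is handled exactly like a normal hotplug notification. */
	printf("normal: %s\n", describe(CPU_UP_PREPARE));
	printf("frozen: %s\n", describe(frozen_up));
	return 0;
}

Handling frozen actions through the regular hotplug paths means per-CPU perf state is rebuilt the same way on resume as on a plain CPU-up, removing the need for the separate perf_suspend()/perf_resume() callbacks deleted above.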