perf: Fix cgroup scheduling in perf_enable_on_exec()
There is a comment that states that perf_event_context_sched_in() will also switch in the cgroup events, but I cannot find that it does so. Therefore all the resulting logic goes out the window too. Clean that up.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7e41d17753
commit 70a0165752
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -579,13 +579,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	 * we are holding the rcu lock
 	 */
 	cgrp1 = perf_cgroup_from_task(task, NULL);
-
-	/*
-	 * next is NULL when called from perf_event_enable_on_exec()
-	 * that will systematically cause a cgroup_switch()
-	 */
-	if (next)
-		cgrp2 = perf_cgroup_from_task(next, NULL);
+	cgrp2 = perf_cgroup_from_task(next, NULL);
 
 	/*
 	 * only schedule out current cgroup events if we know
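For context, the whole of perf_cgroup_sched_out() after this hunk reads roughly as follows. The declarations, RCU locking and the final perf_cgroup_switch() call sit outside the hunk and are reconstructed from the surrounding kernel code of this era, so treat this as a sketch rather than the authoritative source:

	static inline void perf_cgroup_sched_out(struct task_struct *task,
						 struct task_struct *next)
	{
		struct perf_cgroup *cgrp1;
		struct perf_cgroup *cgrp2 = NULL;

		rcu_read_lock();
		/*
		 * we do not need to pass the ctx here because we know
		 * we are holding the rcu lock
		 */
		cgrp1 = perf_cgroup_from_task(task, NULL);
		cgrp2 = perf_cgroup_from_task(next, NULL);

		/*
		 * only schedule out current cgroup events if we know
		 * that we are switching to a different cgroup. Otherwise
		 * leave the events alone
		 */
		if (cgrp1 != cgrp2)
			perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

		rcu_read_unlock();
	}

With the removed special case, next must now always be valid here, which matches the removal of the "prev can never be NULL" counterpart in the next hunk.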
@@ -611,8 +605,6 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	 * we are holding the rcu lock
 	 */
 	cgrp1 = perf_cgroup_from_task(task, NULL);
-
-	/* prev can never be NULL */
 	cgrp2 = perf_cgroup_from_task(prev, NULL);
 
 	/*
@@ -1450,11 +1442,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	if (is_cgroup_event(event)) {
 		ctx->nr_cgroups--;
+		/*
+		 * Because cgroup events are always per-cpu events, this will
+		 * always be called from the right CPU.
+		 */
 		cpuctx = __get_cpu_context(ctx);
 		/*
-		 * if there are no more cgroup events
-		 * then cler cgrp to avoid stale pointer
-		 * in update_cgrp_time_from_cpuctx()
+		 * If there are no more cgroup events then clear cgrp to avoid
+		 * stale pointer in update_cgrp_time_from_cpuctx().
 		 */
 		if (!ctx->nr_cgroups)
 			cpuctx->cgrp = NULL;
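The new comment is about __get_cpu_context() only being meaningful on the CPU that the context is active on. For reference, the helper in kernels of this vintage is essentially the following; it is quoted from memory, so treat it as an assumption rather than part of this patch:

	static inline struct perf_cpu_context *
	__get_cpu_context(struct perf_event_context *ctx)
	{
		return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
	}

Because it uses this_cpu_ptr(), it returns the cpuctx of whatever CPU it runs on; the added comment records why that is safe here: cgroup events are per-cpu events, so list_del_event() for one always runs on that event's CPU.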
@@ -3118,15 +3113,6 @@ static void perf_event_enable_on_exec(int ctxn)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
-	/*
-	 * We must ctxsw out cgroup events to avoid conflict
-	 * when invoking perf_task_event_sched_in() later on
-	 * in this function. Otherwise we end up trying to
-	 * ctxswin cgroup events which are already scheduled
-	 * in.
-	 */
-	perf_cgroup_sched_out(current, NULL);
-
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
 
@@ -3144,9 +3130,6 @@ static void perf_event_enable_on_exec(int ctxn)
 
 	raw_spin_unlock(&ctx->lock);
 
-	/*
-	 * Also calls ctxswin for cgroup events, if any:
-	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
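With both hunks in perf_event_enable_on_exec() applied, the resulting shape of the function is roughly the following; the elided middle and the lines between the hunks are reconstructed from context and are an assumption, not part of this diff:

	static void perf_event_enable_on_exec(int ctxn)
	{
		struct perf_event_context *ctx;
		unsigned long flags;

		local_irq_save(flags);
		ctx = current->perf_event_ctxp[ctxn];
		if (!ctx || !ctx->nr_events)
			goto out;

		raw_spin_lock(&ctx->lock);
		task_ctx_sched_out(ctx);
		/* ... enable the events marked enable_on_exec ... */
		raw_spin_unlock(&ctx->lock);

		/*
		 * No explicit cgroup switch-out is needed anymore:
		 * perf_event_context_sched_in() never touched cgroup events,
		 * which is exactly the stale assumption this patch removes.
		 */
		perf_event_context_sched_in(ctx, ctx->task);
	out:
		local_irq_restore(flags);
	}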