perf: Fix scaling vs. perf_event_enable()
Similar to the perf_event_enable_on_exec() case, ensure that event timings remain consistent across perf_event_enable().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dvyukov@google.com
Cc: eranian@google.com
Cc: oleg@redhat.com
Cc: panand@redhat.com
Cc: sasha.levin@oracle.com
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20160224174948.218288698@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
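The approach the patch takes in __perf_event_enable() is to take EVENT_TIME out of the context before flipping the event's state and to schedule it back in afterwards, so the enabled/running clocks never observe a half-updated state. The following user-space sketch is an analogue written for this note only, not kernel code; "struct counter" and its helpers are invented names. It shows why the stop-the-clock / change-state / restart-the-clock ordering keeps accumulated timings consistent.

/*
 * Illustrative user-space analogue only; "struct counter" and the
 * helpers below are invented for this sketch and do not exist in the
 * kernel.  The enable path mirrors the ordering the patch enforces:
 * freeze the clock, change the state, let the clock run again.
 */
#include <stdio.h>
#include <time.h>

struct counter {
	int enabled;		/* is the counter currently enabled? */
	double total_enabled;	/* accumulated enabled time, in seconds */
	double stamp;		/* timestamp of the last accounting update */
};

static double now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/*
 * Fold the interval since the last update into total_enabled
 * (roughly what ctx_sched_out(ctx, cpuctx, EVENT_TIME) does).
 */
static void counter_freeze_time(struct counter *c)
{
	double t = now();

	if (c->enabled)
		c->total_enabled += t - c->stamp;
	c->stamp = t;
}

static void counter_enable(struct counter *c)
{
	counter_freeze_time(c);	/* account time under the old state */
	c->enabled = 1;		/* cf. __perf_event_mark_enabled() */
	/*
	 * stamp was refreshed above, so enabled time starts accruing
	 * from here, cf. ctx_sched_in(ctx, cpuctx, EVENT_TIME, current).
	 */
}

int main(void)
{
	struct counter c = { .enabled = 0, .total_enabled = 0.0, .stamp = now() };

	counter_enable(&c);
	/* ... the counter is "running" here ... */
	counter_freeze_time(&c);
	printf("enabled for %.6f seconds\n", c.total_enabled);
	return 0;
}

In the diff below, the same ordering shows up as the EVENT_TIME ctx_sched_out() before __perf_event_mark_enabled() and the matching EVENT_TIME ctx_sched_in() on the early-return paths.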
commit bd2afa49d1
parent 7fce250915
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2069,14 +2069,27 @@ static void add_event_to_ctx(struct perf_event *event,
 	event->tstamp_stopped = tstamp;
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx);
+static void ctx_sched_out(struct perf_event_context *ctx,
+			  struct perf_cpu_context *cpuctx,
+			  enum event_type_t event_type);
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
 	     enum event_type_t event_type,
 	     struct task_struct *task);
 
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			       struct perf_event_context *ctx)
+{
+	if (!cpuctx->task_ctx)
+		return;
+
+	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+		return;
+
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+}
+
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
 				struct perf_event_context *ctx,
 				struct task_struct *task)
@@ -2227,17 +2240,18 @@ static void __perf_event_enable(struct perf_event *event,
 	    event->state <= PERF_EVENT_STATE_ERROR)
 		return;
 
-	update_context_time(ctx);
+	if (ctx->is_active)
+		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
 	__perf_event_mark_enabled(event);
 
 	if (!ctx->is_active)
 		return;
 
 	if (!event_filter_match(event)) {
-		if (is_cgroup_event(event)) {
-			perf_cgroup_set_timestamp(current, ctx); // XXX ?
+		if (is_cgroup_event(event))
 			perf_cgroup_defer_enabled(event);
-		}
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
 	}
 
@@ -2245,8 +2259,10 @@ static void __perf_event_enable(struct perf_event *event,
 	 * If the event is in a group and isn't the group leader,
 	 * then don't put it on unless the group is on.
 	 */
-	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
+	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
+	}
 
 	task_ctx = cpuctx->task_ctx;
 	if (ctx->task)
@@ -2658,18 +2674,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 		perf_cgroup_sched_out(task, next);
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx)
-{
-	if (!cpuctx->task_ctx)
-		return;
-
-	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
-		return;
-
-	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */