trace: Remove unused trace_array_cpu parameter
Impact: cleanup

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 97e5b191ae
commit 7be421510b
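The pattern is the same in every hunk below: each tracing helper loses its `struct trace_array_cpu *data` parameter, because the helpers write entries through the global `tr->buffer` ring buffer and never read `data`; callers keep their per-CPU `data` pointer only for the `disabled` check. A minimal compilable sketch of the shape of the change (hypothetical `demo_*` names and struct layouts, not the kernel API):

```c
#include <stdio.h>

struct trace_array { const char *name; };
struct trace_array_cpu { int disabled; };

/* Before: every helper accepted a per-CPU data pointer it never read. */
static void demo_trace_before(struct trace_array *tr,
			      struct trace_array_cpu *data, unsigned long ip)
{
	(void)data;			/* unused: entries go through tr's buffer */
	printf("%s: ip=0x%lx\n", tr->name, ip);
}

/* After: the dead parameter is simply dropped from the signature. */
static void demo_trace_after(struct trace_array *tr, unsigned long ip)
{
	printf("%s: ip=0x%lx\n", tr->name, ip);
}

int main(void)
{
	struct trace_array tr = { "demo" };
	struct trace_array_cpu cpu_data = { 0 };

	demo_trace_before(&tr, &cpu_data, 0x1234UL);

	/* Callers still consult the per-CPU state themselves... */
	if (!cpu_data.disabled)
		demo_trace_after(&tr, 0x1234UL);  /* ...but no longer pass it down */

	return 0;
}
```

Removing a dead parameter like this is purely mechanical: every call site shrinks by one argument and behavior is unchanged, which is why the commit is tagged "Impact: cleanup".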
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -245,7 +245,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		if (pid != 0 &&
 		    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
 		    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-			__trace_stack(blk_tr, NULL, flags, 5, pc);
+			__trace_stack(blk_tr, flags, 5, pc);
 		trace_wake_up();
 		return;
 	}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -776,7 +776,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 
 void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
@@ -802,7 +802,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void __trace_graph_entry(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -826,7 +825,6 @@ static void __trace_graph_entry(struct trace_array *tr,
 }
 
 static void __trace_graph_return(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
@@ -856,11 +854,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        int pc)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 static void __ftrace_trace_stack(struct trace_array *tr,
-				 struct trace_array_cpu *data,
 				 unsigned long flags,
 				 int skip, int pc)
 {
@@ -891,27 +888,24 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 }
 
 static void ftrace_trace_stack(struct trace_array *tr,
-			       struct trace_array_cpu *data,
 			       unsigned long flags,
 			       int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, data, flags, skip, pc);
+	__ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip, int pc)
 {
-	__ftrace_trace_stack(tr, data, flags, skip, pc);
+	__ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,
-				   struct trace_array_cpu *data,
-				   unsigned long flags, int pc)
+				   unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
@@ -942,20 +936,17 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 #endif
 }
 
-void __trace_userstack(struct trace_array *tr,
-		       struct trace_array_cpu *data,
-		       unsigned long flags)
+void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
-	ftrace_trace_userstack(tr, data, flags, preempt_count());
+	ftrace_trace_userstack(tr, flags, preempt_count());
 }
 
 static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		     int pc)
 {
 	struct ring_buffer_event *event;
-	struct trace_array_cpu *data = __data;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
 	unsigned long irq_flags;
@@ -971,8 +962,8 @@ ftrace_trace_special(void *__tr, void *__data,
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, data, irq_flags, pc);
+	ftrace_trace_stack(tr, irq_flags, 4, pc);
+	ftrace_trace_userstack(tr, irq_flags, pc);
 
 	trace_wake_up();
 }
@@ -981,12 +972,11 @@ void
 __trace_special(void *__tr, void *__data,
 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
 			   struct task_struct *prev,
 			   struct task_struct *next,
 			   unsigned long flags, int pc)
@@ -1010,13 +1000,12 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 5, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
+	ftrace_trace_stack(tr, flags, 5, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 }
 
 void
 tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
 			   unsigned long flags, int pc)
@@ -1040,8 +1029,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 6, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 
 	trace_wake_up();
 }
@@ -1064,7 +1053,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	data = tr->data[cpu];
 
 	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
+		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -1092,7 +1081,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_entry(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, trace, flags, pc);
 	}
 	/* Only do the atomic if it is not already set */
 	if (!test_tsk_trace_graph(current))
@@ -1118,7 +1107,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_return(tr, data, trace, flags, pc);
+		__trace_graph_return(tr, trace, flags, pc);
 	}
 	if (!trace->depth)
 		clear_tsk_trace_graph(current);
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -419,14 +419,12 @@ void ftrace(struct trace_array *tr,
 	    unsigned long parent_ip,
 	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
@@ -436,7 +434,6 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg2,
 		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
@@ -462,7 +459,6 @@ void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
 void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip, int pc);
 
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -78,7 +78,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
 	ftrace_preempt_enable(resched);
@@ -108,7 +108,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 	}
 
 	atomic_dec(&data->disabled);
@@ -139,7 +139,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 		/*
 		 * skip over 5 funcs:
 		 *    __ftrace_trace_stack,
@@ -148,7 +148,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 		 *    ftrace_list_func
 		 *    ftrace_call
 		 */
-		__trace_stack(tr, data, flags, 5, pc);
+		__trace_stack(tr, flags, 5, pc);
 	}
 
 	atomic_dec(&data->disabled);
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+		trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -153,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
 	latency = nsecs_to_usecs(delta);
 
@@ -177,7 +177,7 @@ check_critical_timing(struct trace_array *tr,
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(tr, cpu);
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -210,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -244,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -43,7 +43,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	data = ctx_trace->data[cpu];
 
 	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
+		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
 
 	local_irq_restore(flags);
 }
@@ -66,7 +66,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 	data = ctx_trace->data[cpu];
 
 	if (likely(!atomic_read(&data->disabled)))
-		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
 					   flags, pc);
 
 	local_irq_restore(flags);
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -72,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	if (task_cpu(wakeup_task) != cpu)
 		goto unlock;
 
-	trace_function(tr, data, ip, parent_ip, flags, pc);
+	trace_function(tr, ip, parent_ip, flags, pc);
 
 unlock:
 	__raw_spin_unlock(&wakeup_lock);
@@ -152,8 +152,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
-	tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc);
+	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -254,10 +254,8 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 
 	data = wakeup_trace->data[wakeup_cpu];
 	data->preempt_timestamp = ftrace_now(cpu);
-	tracing_sched_wakeup_trace(wakeup_trace, data, p, current,
-				   flags, pc);
-	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2,
-		       flags, pc);
+	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);