mirror of https://gitee.com/openkylin/linux.git
Merge branch 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  function-graph: always initialize task ret_stack
  function-graph: move initialization of new tasks up in fork
  function-graph: add memory barriers for accessing task's ret_stack
  function-graph: enable the stack after initialization of other variables
  function-graph: only allocate init tasks if it was not already done

Manually fix trivial conflict in kernel/trace/ftrace.c
commit 991ec02cdc
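All of the hunks below serve one pattern: a task's ret_stack must be fully initialized before its pointer becomes visible to other CPUs, and readers must test the pointer before touching anything behind it; hence the smp_wmb()/smp_rmb() pair and the NULL-then-publish ordering in ftrace_graph_init_task(). The copy_process() hunks appear to be from kernel/fork.c, and the tracer hunks from the function-graph code under kernel/trace/ (the merge message itself names kernel/trace/ftrace.c). As a rough illustration only, and not the kernel code itself, here is a minimal userspace sketch of that publish/consume ordering; C11 release/acquire stands in for the kernel barriers, and every demo_* name and the fixed DEMO_DEPTH are invented for the example.

/*
 * Illustrative userspace sketch (not kernel code): publish a per-task
 * stack only after it is fully initialized, and make readers test the
 * pointer before using any other field.  C11 release/acquire plays the
 * role of the kernel's smp_wmb()/smp_rmb() pair.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_DEPTH 4                            /* stands in for FTRACE_RETFUNC_DEPTH */

struct demo_entry { unsigned long ret, func; };

struct demo_task {
        int curr;                               /* plays the role of curr_ret_stack */
        _Atomic(struct demo_entry *) stack;     /* plays the role of ret_stack */
};

/* Writer side: initialize everything, then publish the pointer last. */
static void demo_init(struct demo_task *t)
{
        struct demo_entry *s = calloc(DEMO_DEPTH, sizeof(*s));

        if (!s)
                return;
        t->curr = -1;           /* must be visible before the pointer is published */
        atomic_store_explicit(&t->stack, s, memory_order_release);
}

/* Reader side: test the pointer first, only then touch the other fields. */
static int demo_push(struct demo_task *t, unsigned long ret, unsigned long func)
{
        struct demo_entry *s =
                atomic_load_explicit(&t->stack, memory_order_acquire);

        if (!s)
                return -1;      /* tracing not set up yet, like -EBUSY */
        if (t->curr == DEMO_DEPTH - 1)
                return -1;      /* stack full, like trace_overrun */
        t->curr++;
        s[t->curr].ret = ret;
        s[t->curr].func = func;
        return 0;
}

int main(void)
{
        struct demo_task t = { .curr = 0, .stack = NULL };

        if (demo_push(&t, 0x1, 0x2))
                puts("push rejected before init, as intended");
        demo_init(&t);
        if (demo_push(&t, 0x1, 0x2) == 0)
                printf("pushed at depth %d\n", t.curr);
        free(atomic_load(&t.stack));
        return 0;
}

In the patches themselves the same ordering is expressed with smp_wmb() after curr_ret_stack is set and before t->ret_stack is assigned, and with smp_rmb() in ftrace_push_return_trace() right after the NULL check.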
@@ -981,6 +981,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         if (!p)
                 goto fork_out;
 
+        ftrace_graph_init_task(p);
+
         rt_mutex_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -1130,8 +1132,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                 }
         }
 
-        ftrace_graph_init_task(p);
-
         p->pid = pid_nr(pid);
         p->tgid = p->pid;
         if (clone_flags & CLONE_THREAD)
@@ -1140,7 +1140,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         if (current->nsproxy != p->nsproxy) {
                 retval = ns_cgroup_clone(p, pid);
                 if (retval)
-                        goto bad_fork_free_graph;
+                        goto bad_fork_free_pid;
         }
 
         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1232,7 +1232,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                 spin_unlock(&current->sighand->siglock);
                 write_unlock_irq(&tasklist_lock);
                 retval = -ERESTARTNOINTR;
-                goto bad_fork_free_graph;
+                goto bad_fork_free_pid;
         }
 
         if (clone_flags & CLONE_THREAD) {
@@ -1267,8 +1267,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         cgroup_post_fork(p);
         return p;
 
-bad_fork_free_graph:
-        ftrace_graph_exit_task(p);
 bad_fork_free_pid:
         if (pid != &init_struct_pid)
                 free_pid(pid);
@@ -3218,12 +3218,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                 }
 
                 if (t->ret_stack == NULL) {
-                        t->curr_ret_stack = -1;
-                        /* Make sure IRQs see the -1 first: */
-                        barrier();
-                        t->ret_stack = ret_stack_list[start++];
                         atomic_set(&t->tracing_graph_pause, 0);
                         atomic_set(&t->trace_overrun, 0);
+                        t->curr_ret_stack = -1;
+                        /* Make sure the tasks see the -1 first: */
+                        smp_wmb();
+                        t->ret_stack = ret_stack_list[start++];
                 }
         } while_each_thread(g, t);
 
@@ -3281,8 +3281,10 @@ static int start_graph_tracing(void)
                 return -ENOMEM;
 
         /* The cpu_boot init_task->ret_stack will never be freed */
-        for_each_online_cpu(cpu)
-                ftrace_graph_init_task(idle_task(cpu));
+        for_each_online_cpu(cpu) {
+                if (!idle_task(cpu)->ret_stack)
+                        ftrace_graph_init_task(idle_task(cpu));
+        }
 
         do {
                 ret = alloc_retstack_tasklist(ret_stack_list);
@@ -3374,18 +3376,25 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
+        /* Make sure we do not use the parent ret_stack */
+        t->ret_stack = NULL;
+
         if (ftrace_graph_active) {
-                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+                struct ftrace_ret_stack *ret_stack;
+
+                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                 * sizeof(struct ftrace_ret_stack),
                                 GFP_KERNEL);
-                if (!t->ret_stack)
+                if (!ret_stack)
                         return;
                 t->curr_ret_stack = -1;
                 atomic_set(&t->tracing_graph_pause, 0);
                 atomic_set(&t->trace_overrun, 0);
                 t->ftrace_timestamp = 0;
-        } else
-                t->ret_stack = NULL;
+                /* make curr_ret_stack visable before we add the ret_stack */
+                smp_wmb();
+                t->ret_stack = ret_stack;
+        }
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
         if (!current->ret_stack)
                 return -EBUSY;
 
+        /*
+         * We must make sure the ret_stack is tested before we read
+         * anything else.
+         */
+        smp_rmb();
+
         /* The return trace stack is full */
         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                 atomic_inc(&current->trace_overrun);