mirror of https://gitee.com/openkylin/linux.git
Oleg is working on fixing a very tight race between opening a event file and deleting that event at the same time (both must be done as root). I also found a bug while testing Oleg's patches which has to do with a race with kprobes using the function tracer. There's also a deadlock fix that was introduced with the previous fixes.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQEcBAABAgAGBQJR8nMiAAoJEOdOSU1xswtMXMMH/jNuG1vDcTjo7WXu3kYHJNWc
u1Z3YPXunFozz4DofzXPCuSkSgRqSR4cVeGOAv3oEfwQv07jJdTAor8/FuVTNXW9
yiGMUvth0nLVZQEmsh3VvVztqFy8FxhpnQHhIa6pillPUdROKgE/A7/q4wT37lxT
qniM1QJTK9fIf2t6suMwSsBD7fepiDkdHwVbs5NvN7qj62/QSPN+pHLAOBl90AGp
r5eUSj8cDHxmzlV+GAJxeqI7KH+P2PGts9USI+s5EX8mODci620jz1HkKab6XRpz
ggYdNzJ21z1fO9vfnpGCF0d03sKdnbJoIQjkyD4AJHlojmLe3s6l/nxS6jg3wH8=
=VrR4
-----END PGP SIGNATURE-----

Merge tag 'trace-fixes-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Oleg is working on fixing a very tight race between opening a event
  file and deleting that event at the same time (both must be done as
  root).

  I also found a bug while testing Oleg's patches which has to do with
  a race with kprobes using the function tracer.

  There's also a deadlock fix that was introduced with the previous
  fixes"

* tag 'trace-fixes-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Remove locking trace_types_lock from tracing_reset_all_online_cpus()
  ftrace: Add check for NULL regs if ops has SAVE_REGS set
  tracing: Kill trace_cpu struct/members
  tracing: Change tracing_fops/snapshot_fops to rely on tracing_get_cpu()
  tracing: Change tracing_entries_fops to rely on tracing_get_cpu()
  tracing: Change tracing_stats_fops to rely on tracing_get_cpu()
  tracing: Change tracing_buffers_fops to rely on tracing_get_cpu()
  tracing: Change tracing_pipe_fops() to rely on tracing_get_cpu()
  tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
commit 6803f37e09
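The kprobes/function-tracer race described in the pull message is closed in the ftrace.c hunk below by refusing to run an ops callback that was registered with FTRACE_OPS_FL_SAVE_REGS when no register state has been made available yet. The following is a minimal user-space sketch of that guard only; struct handler, HANDLER_NEEDS_REGS and the function names are hypothetical stand-ins, not the kernel API.

#include <stdio.h>
#include <stddef.h>

#define HANDLER_NEEDS_REGS 0x1  /* hypothetical stand-in for FTRACE_OPS_FL_SAVE_REGS */

struct handler {
        unsigned int flags;
        void (*func)(void *regs);
};

/* Mirror of the fix: never invoke a regs-hungry handler with regs == NULL. */
static int handler_may_run(const struct handler *h, void *regs)
{
        if (regs == NULL && (h->flags & HANDLER_NEEDS_REGS))
                return 0;
        return 1;
}

static void needs_regs(void *regs)
{
        printf("called with regs=%p\n", regs);
}

int main(void)
{
        struct handler h = { .flags = HANDLER_NEEDS_REGS, .func = needs_regs };
        int fake_regs;

        /* During the registration window the trampoline may not pass register
         * state yet; the guard simply skips the handler instead of calling it. */
        if (handler_may_run(&h, NULL))
                h.func(NULL);
        if (handler_may_run(&h, &fake_regs))
                h.func(&fake_regs);
        return 0;
}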
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
         struct ftrace_hash *filter_hash;
         struct ftrace_hash *notrace_hash;
         int ret;

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+        /*
+         * There's a small race when adding ops that the ftrace handler
+         * that wants regs, may be called without them. We can not
+         * allow that handler to be called if regs is NULL.
+         */
+        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+                return 0;
+#endif
+
         filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
         notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

@@ -4218,7 +4228,7 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl() do { } while (0)

 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
         return 1;
 }
@@ -4241,7 +4251,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         do_for_each_ftrace_op(op, ftrace_control_list) {
                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
                     !ftrace_function_local_disabled(op) &&
-                    ftrace_ops_test(op, ip))
+                    ftrace_ops_test(op, ip, regs))
                         op->func(ip, parent_ip, op, regs);
         } while_for_each_ftrace_op(op);
         trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4274,7 +4284,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
          */
         preempt_disable_notrace();
         do_for_each_ftrace_op(op, ftrace_ops_list) {
-                if (ftrace_ops_test(op, ip))
+                if (ftrace_ops_test(op, ip, regs))
                         op->func(ip, parent_ip, op, regs);
         } while_for_each_ftrace_op(op);
         preempt_enable_notrace();
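The deadlock fix mentioned in the pull message is the first trace.c hunk below: tracing_reset_all_online_cpus() no longer takes trace_types_lock itself and instead documents that its caller must already hold it, so a caller that already owns the lock can no longer deadlock against the helper. A small pthread sketch of that "caller holds the lock" convention, with made-up names that are illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;
static int live_events;

/* Must be called with types_lock held -- the convention the fix adopts. */
static void reset_all_buffers_locked(void)
{
        live_events = 0;
}

/* A caller that already owns the lock can now reuse the helper safely. */
static void delete_event_dir(void)
{
        pthread_mutex_lock(&types_lock);
        /* ... remove the event files ... */
        reset_all_buffers_locked();  /* would deadlock if it re-took types_lock */
        pthread_mutex_unlock(&types_lock);
}

int main(void)
{
        live_events = 3;
        delete_event_dir();
        printf("live_events = %d\n", live_events);
        return 0;
}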
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1224,18 +1224,17 @@ void tracing_reset_current(int cpu)
         tracing_reset(&global_trace.trace_buffer, cpu);
 }

+/* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
         struct trace_array *tr;

-        mutex_lock(&trace_types_lock);
         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                 tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
                 tracing_reset_online_cpus(&tr->max_buffer);
 #endif
         }
-        mutex_unlock(&trace_types_lock);
 }

 #define SAVED_CMDLINES 128
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
         return 0;
 }

+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+        if (inode->i_cdev) /* See trace_create_cpu_file() */
+                return (long)inode->i_cdev - 1;
+        return RING_BUFFER_ALL_CPUS;
+}
+
 static const struct seq_operations tracer_seq_ops = {
         .start          = s_start,
         .next           = s_next,
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = {
 };

 static struct trace_iterator *
-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
-               struct inode *inode, struct file *file, bool snapshot)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
+        struct trace_array *tr = inode->i_private;
         struct trace_iterator *iter;
         int cpu;

@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
         iter->trace_buffer = &tr->trace_buffer;
         iter->snapshot = snapshot;
         iter->pos = -1;
+        iter->cpu_file = tracing_get_cpu(inode);
         mutex_init(&iter->mutex);
-        iter->cpu_file = tc->cpu;

         /* Notify the tracer early; before we stop tracing. */
         if (iter->trace && iter->trace->open)
@@ -2971,44 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
         filp->private_data = inode->i_private;

         return 0;
-
-}
-
-static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
-{
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
-
-        if (tracing_disabled)
-                return -ENODEV;
-
-        if (trace_array_get(tr) < 0)
-                return -ENODEV;
-
-        filp->private_data = inode->i_private;
-
-        return 0;
-
 }

 static int tracing_release(struct inode *inode, struct file *file)
 {
+        struct trace_array *tr = inode->i_private;
         struct seq_file *m = file->private_data;
         struct trace_iterator *iter;
-        struct trace_array *tr;
         int cpu;

-        /* Writes do not use seq_file, need to grab tr from inode */
         if (!(file->f_mode & FMODE_READ)) {
-                struct trace_cpu *tc = inode->i_private;
-
-                trace_array_put(tc->tr);
+                trace_array_put(tr);
                 return 0;
         }

+        /* Writes do not use seq_file */
         iter = m->private;
-        tr = iter->tr;
-
         mutex_lock(&trace_types_lock);

         for_each_tracing_cpu(cpu) {
@@ -3044,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
         return 0;
 }

-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
-{
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
-
-        trace_array_put(tr);
-        return 0;
-}
-
 static int tracing_single_release_tr(struct inode *inode, struct file *file)
 {
         struct trace_array *tr = inode->i_private;
@@ -3064,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)

 static int tracing_open(struct inode *inode, struct file *file)
 {
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
+        struct trace_array *tr = inode->i_private;
         struct trace_iterator *iter;
         int ret = 0;

@@ -3073,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file)
                 return -ENODEV;

         /* If this file was open for write, then erase contents */
-        if ((file->f_mode & FMODE_WRITE) &&
-            (file->f_flags & O_TRUNC)) {
-                if (tc->cpu == RING_BUFFER_ALL_CPUS)
+        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+                int cpu = tracing_get_cpu(inode);
+
+                if (cpu == RING_BUFFER_ALL_CPUS)
                         tracing_reset_online_cpus(&tr->trace_buffer);
                 else
-                        tracing_reset(&tr->trace_buffer, tc->cpu);
+                        tracing_reset(&tr->trace_buffer, cpu);
         }

         if (file->f_mode & FMODE_READ) {
-                iter = __tracing_open(tr, tc, inode, file, false);
+                iter = __tracing_open(inode, file, false);
                 if (IS_ERR(iter))
                         ret = PTR_ERR(iter);
                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3948,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,

 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
+        struct trace_array *tr = inode->i_private;
         struct trace_iterator *iter;
         int ret = 0;

@@ -3995,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
         if (trace_clocks[tr->clock_id].in_ns)
                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

-        iter->cpu_file = tc->cpu;
-        iter->tr = tc->tr;
-        iter->trace_buffer = &tc->tr->trace_buffer;
+        iter->tr = tr;
+        iter->trace_buffer = &tr->trace_buffer;
+        iter->cpu_file = tracing_get_cpu(inode);
         mutex_init(&iter->mutex);
         filp->private_data = iter;

@@ -4020,8 +3998,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
         struct trace_iterator *iter = file->private_data;
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
+        struct trace_array *tr = inode->i_private;

         mutex_lock(&trace_types_lock);

@@ -4374,15 +4351,16 @@ static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
-        struct trace_cpu *tc = filp->private_data;
-        struct trace_array *tr = tc->tr;
+        struct inode *inode = file_inode(filp);
+        struct trace_array *tr = inode->i_private;
+        int cpu = tracing_get_cpu(inode);
         char buf[64];
         int r = 0;
         ssize_t ret;

         mutex_lock(&trace_types_lock);

-        if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+        if (cpu == RING_BUFFER_ALL_CPUS) {
                 int cpu, buf_size_same;
                 unsigned long size;

@@ -4409,7 +4387,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                 } else
                         r = sprintf(buf, "X\n");
         } else
-                r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+                r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

         mutex_unlock(&trace_types_lock);

@@ -4421,7 +4399,8 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
-        struct trace_cpu *tc = filp->private_data;
+        struct inode *inode = file_inode(filp);
+        struct trace_array *tr = inode->i_private;
         unsigned long val;
         int ret;

@@ -4435,8 +4414,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,

         /* value is in KB */
         val <<= 10;
-
-        ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+        ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
         if (ret < 0)
                 return ret;

@@ -4697,8 +4675,7 @@ struct ftrace_buffer_info {
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
+        struct trace_array *tr = inode->i_private;
         struct trace_iterator *iter;
         struct seq_file *m;
         int ret = 0;
@@ -4707,7 +4684,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
                 return -ENODEV;

         if (file->f_mode & FMODE_READ) {
-                iter = __tracing_open(tr, tc, inode, file, true);
+                iter = __tracing_open(inode, file, true);
                 if (IS_ERR(iter))
                         ret = PTR_ERR(iter);
         } else {
@@ -4724,8 +4701,8 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
                         ret = 0;

                 iter->tr = tr;
-                iter->trace_buffer = &tc->tr->max_buffer;
-                iter->cpu_file = tc->cpu;
+                iter->trace_buffer = &tr->max_buffer;
+                iter->cpu_file = tracing_get_cpu(inode);
                 m->private = iter;
                 file->private_data = m;
         }
@@ -4884,11 +4861,11 @@ static const struct file_operations tracing_pipe_fops = {
 };

 static const struct file_operations tracing_entries_fops = {
-        .open           = tracing_open_generic_tc,
+        .open           = tracing_open_generic_tr,
         .read           = tracing_entries_read,
         .write          = tracing_entries_write,
         .llseek         = generic_file_llseek,
-        .release        = tracing_release_generic_tc,
+        .release        = tracing_release_generic_tr,
 };

 static const struct file_operations tracing_total_entries_fops = {
@@ -4940,8 +4917,7 @@ static const struct file_operations snapshot_raw_fops = {

 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-        struct trace_cpu *tc = inode->i_private;
-        struct trace_array *tr = tc->tr;
+        struct trace_array *tr = inode->i_private;
         struct ftrace_buffer_info *info;
         int ret;

@@ -4960,7 +4936,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
         mutex_lock(&trace_types_lock);

         info->iter.tr           = tr;
-        info->iter.cpu_file     = tc->cpu;
+        info->iter.cpu_file     = tracing_get_cpu(inode);
         info->iter.trace        = tr->current_trace;
         info->iter.trace_buffer = &tr->trace_buffer;
         info->spare             = NULL;
@@ -5277,14 +5253,14 @@ static ssize_t
 tracing_stats_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
 {
-        struct trace_cpu *tc = filp->private_data;
-        struct trace_array *tr = tc->tr;
+        struct inode *inode = file_inode(filp);
+        struct trace_array *tr = inode->i_private;
         struct trace_buffer *trace_buf = &tr->trace_buffer;
+        int cpu = tracing_get_cpu(inode);
         struct trace_seq *s;
         unsigned long cnt;
         unsigned long long t;
         unsigned long usec_rem;
-        int cpu = tc->cpu;

         s = kmalloc(sizeof(*s), GFP_KERNEL);
         if (!s)
@@ -5337,10 +5313,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 }

 static const struct file_operations tracing_stats_fops = {
-        .open           = tracing_open_generic_tc,
+        .open           = tracing_open_generic_tr,
         .read           = tracing_stats_read,
         .llseek         = generic_file_llseek,
-        .release        = tracing_release_generic_tc,
+        .release        = tracing_release_generic_tr,
 };

 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5529,10 +5505,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
         return tr->percpu_dir;
 }

+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+                      void *data, long cpu, const struct file_operations *fops)
+{
+        struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+        if (ret) /* See tracing_get_cpu() */
+                ret->d_inode->i_cdev = (void *)(cpu + 1);
+        return ret;
+}
+
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-        struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
         struct dentry *d_cpu;
         char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5548,28 +5534,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
         }

         /* per cpu trace_pipe */
-        trace_create_file("trace_pipe", 0444, d_cpu,
-                        (void *)&data->trace_cpu, &tracing_pipe_fops);
+        trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+                                tr, cpu, &tracing_pipe_fops);

         /* per cpu trace */
-        trace_create_file("trace", 0644, d_cpu,
-                        (void *)&data->trace_cpu, &tracing_fops);
+        trace_create_cpu_file("trace", 0644, d_cpu,
+                                tr, cpu, &tracing_fops);

-        trace_create_file("trace_pipe_raw", 0444, d_cpu,
-                        (void *)&data->trace_cpu, &tracing_buffers_fops);
+        trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+                                tr, cpu, &tracing_buffers_fops);

-        trace_create_file("stats", 0444, d_cpu,
-                        (void *)&data->trace_cpu, &tracing_stats_fops);
+        trace_create_cpu_file("stats", 0444, d_cpu,
+                                tr, cpu, &tracing_stats_fops);

-        trace_create_file("buffer_size_kb", 0444, d_cpu,
-                        (void *)&data->trace_cpu, &tracing_entries_fops);
+        trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+                                tr, cpu, &tracing_entries_fops);

 #ifdef CONFIG_TRACER_SNAPSHOT
-        trace_create_file("snapshot", 0644, d_cpu,
-                        (void *)&data->trace_cpu, &snapshot_fops);
+        trace_create_cpu_file("snapshot", 0644, d_cpu,
+                                tr, cpu, &snapshot_fops);

-        trace_create_file("snapshot_raw", 0444, d_cpu,
-                        (void *)&data->trace_cpu, &snapshot_raw_fops);
+        trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+                                tr, cpu, &snapshot_raw_fops);
 #endif
 }

@@ -5878,17 +5864,6 @@ struct dentry *trace_instance_dir;
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

-static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
-{
-        int cpu;
-
-        for_each_tracing_cpu(cpu) {
-                memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
-                per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
-                per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
-        }
-}
-
 static int
 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
@@ -5906,8 +5881,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
                 return -ENOMEM;
         }

-        init_trace_buffers(tr, buf);
-
         /* Allocate the first page for all buffers */
         set_buffer_entries(&tr->trace_buffer,
                            ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5974,10 +5947,6 @@ static int new_instance_create(const char *name)
         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
                 goto out_free_tr;

-        /* Holder for file callbacks */
-        tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-        tr->trace_cpu.tr = tr;
-
         tr->dir = debugfs_create_dir(name, trace_instance_dir);
         if (!tr->dir)
                 goto out_free_tr;
@@ -6132,13 +6101,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
                           tr, &tracing_iter_fops);

         trace_create_file("trace", 0644, d_tracer,
-                        (void *)&tr->trace_cpu, &tracing_fops);
+                          tr, &tracing_fops);

         trace_create_file("trace_pipe", 0444, d_tracer,
-                        (void *)&tr->trace_cpu, &tracing_pipe_fops);
+                          tr, &tracing_pipe_fops);

         trace_create_file("buffer_size_kb", 0644, d_tracer,
-                        (void *)&tr->trace_cpu, &tracing_entries_fops);
+                          tr, &tracing_entries_fops);

         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
                           tr, &tracing_total_entries_fops);
@@ -6157,7 +6126,7 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)

 #ifdef CONFIG_TRACER_SNAPSHOT
         trace_create_file("snapshot", 0644, d_tracer,
-                        (void *)&tr->trace_cpu, &snapshot_fops);
+                          tr, &snapshot_fops);
 #endif

         for_each_tracing_cpu(cpu)
@@ -6451,10 +6420,6 @@ __init static int tracer_alloc_buffers(void)

         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

-        /* Holder for file callbacks */
-        global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-        global_trace.trace_cpu.tr = &global_trace;
-
         INIT_LIST_HEAD(&global_trace.systems);
         INIT_LIST_HEAD(&global_trace.events);
         list_add(&global_trace.list, &ftrace_trace_arrays);
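trace_create_cpu_file() and tracing_get_cpu(), introduced in the trace.c hunks above, stash a per-cpu file's CPU number in the inode's otherwise unused i_cdev pointer, offset by one so that a NULL pointer still means "not bound to a single CPU". A standalone user-space sketch of just that encoding; the helper names here are illustrative, and RING_BUFFER_ALL_CPUS is assumed to be -1, matching the kernel's definition:

#include <stdio.h>

#define RING_BUFFER_ALL_CPUS -1  /* assumed value, matches the kernel's definition */

/* Encode as in trace_create_cpu_file(): store cpu + 1 so cpu 0 is not confused with NULL. */
static void *encode_cpu(long cpu)
{
        return (void *)(cpu + 1);
}

/* Decode as in tracing_get_cpu(): NULL means the file is not per-cpu. */
static long decode_cpu(void *i_cdev)
{
        if (i_cdev)
                return (long)i_cdev - 1;
        return RING_BUFFER_ALL_CPUS;
}

int main(void)
{
        printf("%ld\n", decode_cpu(encode_cpu(0)));  /* 0 */
        printf("%ld\n", decode_cpu(encode_cpu(3)));  /* 3 */
        printf("%ld\n", decode_cpu(NULL));           /* -1, i.e. RING_BUFFER_ALL_CPUS */
        return 0;
}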
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {

 struct trace_array;

-struct trace_cpu {
-        struct trace_array      *tr;
-        struct dentry           *dir;
-        int                     cpu;
-};
-
 /*
  * The CPU trace array - it consists of thousands of trace entries
  * plus some other descriptor data: (for example which task started
  * the trace, etc.)
  */
 struct trace_array_cpu {
-        struct trace_cpu        trace_cpu;
         atomic_t                disabled;
         void                    *buffer_page;   /* ring buffer spare */

@@ -196,7 +189,6 @@ struct trace_array {
         bool                    allocated_snapshot;
 #endif
         int                     buffer_disabled;
-        struct trace_cpu        trace_cpu;      /* place holder */
 #ifdef CONFIG_FTRACE_SYSCALLS
         int                     sys_refcount_enter;
         int                     sys_refcount_exit;