tracing: Fix polling on trace_pipe_raw

The trace_pipe_raw file never implemented polling, which was causing
issues for several utilities. This is now implemented.

Blocking reads are still on the TODO list.
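
For illustration only (not part of this change), a minimal userspace
sketch of how a utility can now sleep in poll() instead of busy-reading;
the debugfs mount point and cpu0 path are assumptions:

    /* Hypothetical example: wait for raw ring-buffer data on cpu0 */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char page[4096];
        struct pollfd pfd;

        pfd.fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
                      O_RDONLY | O_NONBLOCK);
        if (pfd.fd < 0) {
            perror("open");
            return 1;
        }
        pfd.events = POLLIN;

        for (;;) {
            /* Sleep until the cpu0 ring buffer has data to read */
            if (poll(&pfd, 1, -1) < 0) {
                perror("poll");
                break;
            }
            if (pfd.revents & POLLIN) {
                ssize_t r = read(pfd.fd, page, sizeof(page));
                if (r > 0)
                    fprintf(stderr, "read %zd bytes\n", r);
            }
        }
        close(pfd.fd);
        return 0;
    }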

Reported-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Tested-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Author:  Steven Rostedt
Date:    2013-02-28 09:17:16 -05:00
Commit:  cc60cdc952 (parent 189e5784f6)

1 changed file with 51 additions and 27 deletions


@@ -3555,10 +3555,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
}
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
struct trace_iterator *iter = filp->private_data;
if (trace_flags & TRACE_ITER_BLOCK) {
/*
* Always select as readable when in blocking mode
@@ -3567,6 +3565,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
} else {
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
trace_wakeup_needed = true;
poll_wait(filp, &trace_wait, poll_table);
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
@@ -3575,6 +3574,14 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
struct trace_iterator *iter = filp->private_data;
return trace_poll(iter, filp, poll_table);
}
/*
* This is a make-shift waitqueue.
* A tracer might use this callback on some rare cases:
@@ -4362,9 +4369,8 @@ static const struct file_operations snapshot_fops = {
#endif /* CONFIG_TRACER_SNAPSHOT */
struct ftrace_buffer_info {
struct trace_array *tr;
struct trace_iterator iter;
void *spare;
int cpu;
unsigned int read;
};
@@ -4381,22 +4387,32 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
if (!info)
return -ENOMEM;
info->tr = tr;
info->cpu = tc->cpu;
info->spare = NULL;
info->iter.tr = tr;
info->iter.cpu_file = tc->cpu;
info->spare = NULL;
/* Force reading ring buffer for first read */
info->read = (unsigned int)-1;
info->read = (unsigned int)-1;
filp->private_data = info;
return nonseekable_open(inode, filp);
}
static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
struct ftrace_buffer_info *info = filp->private_data;
struct trace_iterator *iter = &info->iter;
return trace_poll(iter, filp, poll_table);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ftrace_buffer_info *info = filp->private_data;
struct trace_iterator *iter = &info->iter;
ssize_t ret;
size_t size;
@@ -4404,7 +4420,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
return 0;
if (!info->spare)
info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
info->spare = ring_buffer_alloc_read_page(iter->tr->buffer, iter->cpu_file);
if (!info->spare)
return -ENOMEM;
@@ -4412,12 +4428,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (info->read < PAGE_SIZE)
goto read;
trace_access_lock(info->cpu);
ret = ring_buffer_read_page(info->tr->buffer,
trace_access_lock(iter->cpu_file);
ret = ring_buffer_read_page(iter->tr->buffer,
&info->spare,
count,
info->cpu, 0);
trace_access_unlock(info->cpu);
iter->cpu_file, 0);
trace_access_unlock(iter->cpu_file);
if (ret < 0)
return 0;
@@ -4442,9 +4458,10 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
struct trace_iterator *iter = &info->iter;
if (info->spare)
ring_buffer_free_read_page(info->tr->buffer, info->spare);
ring_buffer_free_read_page(iter->tr->buffer, info->spare);
kfree(info);
return 0;
@@ -4511,6 +4528,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
unsigned int flags)
{
struct ftrace_buffer_info *info = file->private_data;
struct trace_iterator *iter = &info->iter;
struct partial_page partial_def[PIPE_DEF_BUFFERS];
struct page *pages_def[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
@@ -4541,8 +4559,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
len &= PAGE_MASK;
}
trace_access_lock(info->cpu);
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
again:
trace_access_lock(iter->cpu_file);
entries = ring_buffer_entries_cpu(iter->tr->buffer, iter->cpu_file);
for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
struct page *page;
@@ -4553,15 +4572,15 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
break;
ref->ref = 1;
ref->buffer = info->tr->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
ref->buffer = iter->tr->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (!ref->page) {
kfree(ref);
break;
}
r = ring_buffer_read_page(ref->buffer, &ref->page,
len, info->cpu, 1);
len, iter->cpu_file, 1);
if (r < 0) {
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
@@ -4585,20 +4604,24 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
spd.nr_pages++;
*ppos += PAGE_SIZE;
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
entries = ring_buffer_entries_cpu(iter->tr->buffer, iter->cpu_file);
}
trace_access_unlock(info->cpu);
trace_access_unlock(iter->cpu_file);
spd.nr_pages = i;
/* did we read anything? */
if (!spd.nr_pages) {
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
ret = -EAGAIN;
else
ret = 0;
/* TODO: block */
goto out;
goto out;
}
default_wait_pipe(iter);
if (signal_pending(current)) {
ret = -EINTR;
goto out;
}
goto again;
}
ret = splice_to_pipe(pipe, &spd);
@@ -4610,6 +4633,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
static const struct file_operations tracing_buffers_fops = {
.open = tracing_buffers_open,
.read = tracing_buffers_read,
.poll = tracing_buffers_poll,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
.llseek = no_llseek,