#include "../perf.h"
#include <dirent.h>	/* scandir(), struct dirent */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

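/*
 * Build a thread_map covering every task of a process by scanning
 * /proc/<pid>/task. Returns NULL if the directory cannot be read or
 * if the allocation fails.
 */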
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			threads->map[i] = atoi(namelist[i]->d_name);
		threads->nr = items;
	}

	for (i = 0; i < items; i++)
		free(namelist[i]);
	free(namelist);

	return threads;
}

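/* Build a single-entry thread_map for one thread id. */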
struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));

	if (threads != NULL) {
		threads->map[0] = tid;
		threads->nr = 1;
	}

	return threads;
}

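/*
 * Convenience wrapper: map the whole process when a pid is given,
 * otherwise fall back to the single tid.
 */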
struct thread_map *thread_map__new(pid_t pid, pid_t tid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);
	return thread_map__new_by_tid(tid);
}
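
/*
 * A minimal usage sketch (hypothetical caller, not part of this file),
 * assuming the tool tracks a target pid/tid pair the way perf record
 * and perf stat do:
 *
 *	struct thread_map *threads = thread_map__new(target_pid, target_tid);
 *
 *	if (threads == NULL)
 *		return -1;
 *	...
 *	free(threads);
 *
 * The map is a single allocation, so plain free() is enough here.
 */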
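
/*
 * Allocate a thread. It starts out with a synthetic ":<pid>" comm as a
 * placeholder until a real comm arrives via thread__set_comm().
 */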
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}

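/* Release the map groups and the comm string owned by the thread. */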
void thread__delete(struct thread *self)
{
	map_groups__exit(&self->mg);
	free(self->comm);
	free(self);
}

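/*
 * Replace the thread's comm. The map groups are flushed as well, since
 * a comm change typically comes from an exec(), which invalidates the
 * previously recorded mappings.
 */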
int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}
	return err;
}

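/* Cache and return strlen(comm); 0 if no comm has been set yet. */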
int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

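/* Print one thread and its map groups; returns the number of characters written. */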
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, verbose, fp);
}

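/*
 * Find the thread with the given pid in the session's rbtree, creating
 * and inserting it if it is not there yet. The last match is cached to
 * short-circuit the common case of repeated lookups for the same pid.
 */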
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}

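/*
 * Insert a map into the thread's map groups, first trimming or
 * removing any existing maps that overlap its address range.
 */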
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
	map_groups__insert(&self->mg, map);
}

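/*
 * Inherit state from the parent at fork time: copy its comm if one was
 * set and clone each of its map groups. Returns -ENOMEM on allocation
 * failure.
 */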
int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

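/* Dump every thread in the session, in pid order. */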
size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}