/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"
#include "util/cache.h"

#include "util/annotate.h"
#include "util/color.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"
#include "util/values.h"

#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include <subcmd/parse-options.h>
#include <subcmd/exec-cmd.h>
#include "util/parse-events.h"

#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/data.h"
#include "arch/common.h"

#include "util/auxtrace.h"

#include <dlfcn.h>
#include <linux/bitmap.h>
#include <linux/stringify.h>

struct report {
	struct perf_tool tool;
	struct perf_session *session;
	bool use_tui, use_gtk, use_stdio;
	bool dont_use_callchains;
	bool show_full_info;
	bool show_threads;
	bool inverted_callchain;
	bool mem_mode;
	bool header;
	bool header_only;
	bool nonany_branch_mode;
	int max_stack;
	struct perf_read_values show_threads_values;
	const char *pretty_printing_style;
	const char *cpu_list;
	const char *symbol_filter_str;
	float min_percent;
	u64 nr_entries;
	u64 queue_size;
	int socket_filter;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};

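/*
 * perf_config() callback: pick up the "report.*" settings (group display,
 * percent limit, children accumulation, ordered-events queue size) from the
 * user's perfconfig file.
 */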
static int report__config(const char *var, const char *value, void *cb)
{
	struct report *rep = cb;

	if (!strcmp(var, "report.group")) {
		symbol_conf.event_group = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.percent-limit")) {
		double pcnt = strtof(value, NULL);

		rep->min_percent = pcnt;
		callchain_param.min_percent = pcnt;
		return 0;
	}
	if (!strcmp(var, "report.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.queue-size")) {
		rep->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}

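/*
 * Called back from hist_entry_iter__add() for every hist entry created from
 * a sample.  When annotation is enabled, account the sample to the symbol's
 * (and, in branch/mem mode, the target address's) annotation histograms.
 */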
static int hist_iter__report_callback(struct hist_entry_iter *iter,
				      struct addr_location *al, bool single,
				      void *arg)
{
	int err = 0;
	struct report *rep = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct mem_info *mi;
	struct branch_info *bi;

	if (!ui__has_annotation())
		return 0;

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     rep->nonany_branch_mode);

	if (sort__mode == SORT_MODE__BRANCH) {
		bi = he->branch_info;
		err = addr_map_symbol__inc_samples(&bi->from, evsel->idx);
		if (err)
			goto out;

		err = addr_map_symbol__inc_samples(&bi->to, evsel->idx);

	} else if (rep->mem_mode) {
		mi = he->mem_info;
		err = addr_map_symbol__inc_samples(&mi->daddr, evsel->idx);
		if (err)
			goto out;

		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);

	} else if (symbol_conf.cumulate_callchain) {
		if (single)
			err = hist_entry__inc_addr_samples(he, evsel->idx,
							   al->addr);
	} else {
		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
	}

out:
	return err;
}

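/*
 * Per-sample callback wired into rep->tool.sample: resolve the sample to an
 * address/thread/map, apply the cpu and "unresolved" filters, pick the
 * hist_entry_iter ops matching the current mode and add the sample to the
 * histograms.
 */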
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct addr_location al;
	struct hist_entry_iter iter = {
		.evsel = evsel,
		.sample = sample,
		.hide_unresolved = symbol_conf.hide_unresolved,
		.add_entry_cb = hist_iter__report_callback,
	};
	int ret = 0;

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		goto out_put;

	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
		goto out_put;

	if (sort__mode == SORT_MODE__BRANCH) {
		/*
		 * A non-synthesized event might not have a branch stack if
		 * branch stacks have been synthesized (using itrace options).
		 */
		if (!sample->branch_stack)
			goto out_put;

		iter.ops = &hist_iter_branch;
	} else if (rep->mem_mode) {
		iter.ops = &hist_iter_mem;
	} else if (symbol_conf.cumulate_callchain) {
		iter.ops = &hist_iter_cumulative;
	} else {
		iter.ops = &hist_iter_normal;
	}

	if (al.map != NULL)
		al.map->dso->hit = 1;

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
	if (ret < 0)
		pr_debug("problem adding hist entry, skipping event\n");
out_put:
	addr_location__put(&al);
	return ret;
}

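/*
 * PERF_RECORD_READ handler: optionally record per-thread counter values for
 * the -T/--threads output and dump the raw values when -D is used.
 */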
static int process_read_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct perf_evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (rep->show_threads) {
		const char *name = evsel ? perf_evsel__name(evsel) : "unknown";

		perf_read_values_add_value(&rep->show_threads_values,
					   event->read.pid, event->read.tid,
					   event->read.id,
					   name,
					   event->read.value);
	}

	dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
		    evsel ? perf_evsel__name(evsel) : "FAIL",
		    event->read.value);

	return 0;
}

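/*
 * Decide which sample_type bits and which callchain/branch settings to use,
 * based on what was actually recorded (or synthesized via itrace) in the
 * session.
 */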
/* For pipe mode, sample_type is not currently set */
static int report__setup_sample_type(struct report *rep)
{
	struct perf_session *session = rep->session;
	u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
	bool is_pipe = perf_data_file__is_pipe(session->file);

	if (session->itrace_synth_opts->callchain ||
	    (!is_pipe &&
	     perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
	     !session->itrace_synth_opts->set))
		sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (session->itrace_synth_opts->last_branch)
		sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (sort__has_parent) {
			ui__error("Selected --sort parent, but no "
				  "callchain data. Did you call "
				  "'perf record' without -g?\n");
			return -EINVAL;
		}
		if (symbol_conf.use_callchain) {
			ui__error("Selected -g or --branch-history but no "
				  "callchain data. Did\n"
				  "you call 'perf record' without -g?\n");
			return -1;
		}
	} else if (!rep->dont_use_callchains &&
		   callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	if (symbol_conf.cumulate_callchain) {
		/* Silently ignore if callchain is missing */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			symbol_conf.cumulate_callchain = false;
			perf_hpp__cancel_cumulate();
		}
	}

	if (sort__mode == SORT_MODE__BRANCH) {
		if (!is_pipe &&
		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
			ui__error("Selected -b but no branch data. "
				  "Did you call perf record without -b?\n");
			return -1;
		}
	}

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
		if ((sample_type & PERF_SAMPLE_REGS_USER) &&
		    (sample_type & PERF_SAMPLE_STACK_USER))
			callchain_param.record_mode = CALLCHAIN_DWARF;
		else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
			callchain_param.record_mode = CALLCHAIN_LBR;
		else
			callchain_param.record_mode = CALLCHAIN_FP;
	}

	/* ??? handle more cases than just ANY? */
	if (!(perf_evlist__combined_branch_type(session->evlist) &
	      PERF_SAMPLE_BRANCH_ANY))
		rep->nonany_branch_mode = true;

	return 0;
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

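/*
 * Print the "# Samples: ... of event '...'" header block for one hists,
 * folding in the counts of the group members when event groups are shown.
 */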
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
					      const char *evname, FILE *fp)
{
	size_t ret;
	char unit;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char buf[512];
	size_t size = sizeof(buf);
	int socked_id = hists->socket_filter;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, size);
		evname = buf;

		for_each_group_member(pos, evsel) {
			const struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	nr_samples = convert_unit(nr_samples, &unit);
	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
	if (evname != NULL)
		ret += fprintf(fp, " of event '%s'", evname);

	if (symbol_conf.show_ref_callgraph &&
	    strstr(evname, "call-graph=no")) {
		ret += fprintf(fp, ", show reference callgraph");
	}

	if (rep->mem_mode) {
		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
		ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
	} else
		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);

	if (socked_id > -1)
		ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);

	return ret + fprintf(fp, "\n#\n");
}

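/*
 * stdio front end: print each (leader) event's header and histogram, a help
 * hint when the default sort order is in use, and the per-thread counter
 * values when -T was given.
 */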
static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
					 struct report *rep,
					 const char *help)
{
	struct perf_evsel *pos;

	fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", evlist->stats.total_lost_samples);
	evlist__for_each(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		const char *evname = perf_evsel__name(pos);

		if (symbol_conf.event_group &&
		    !perf_evsel__is_group_leader(pos))
			continue;

		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
		hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout);
		fprintf(stdout, "\n\n");
	}

	if (sort_order == NULL &&
	    parent_pattern == default_parent_pattern)
		fprintf(stdout, "#\n# (%s)\n#\n", help);

	if (rep->show_threads) {
		bool style = !strcmp(rep->pretty_printing_style, "raw");
		perf_read_values_display(stdout, &rep->show_threads_values,
					 style);
		perf_read_values_destroy(&rep->show_threads_values);
	}

	return 0;
}

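/*
 * Warn when kernel samples were taken but the kernel map could not be set up
 * properly, typically because /proc/kallsyms was zeroed by kptr_restrict.
 */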
static void report__warn_kptr_restrict(const struct report *rep)
{
	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

	if (kernel_map == NULL ||
	    (kernel_map->dso->hit &&
	     (kernel_kmap->ref_reloc_sym == NULL ||
	      kernel_kmap->ref_reloc_sym->addr == 0))) {
		const char *desc =
		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
		    "can't be resolved.";

		if (kernel_map) {
			const struct dso *kdso = kernel_map->dso;
			if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) {
				desc = "If some relocation was applied (e.g. "
				       "kexec) symbols may be misresolved.";
			}
		}

		ui__warning(
		    "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
		    "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
		    "Samples in kernel modules can't be resolved as well.\n\n",
		    desc);
	}
}

static int report__gtk_browse_hists(struct report *rep, const char *help)
{
	int (*hist_browser)(struct perf_evlist *evlist, const char *help,
			    struct hist_browser_timer *timer, float min_pcnt);

	hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists");

	if (hist_browser == NULL) {
		ui__error("GTK browser not found!\n");
		return -1;
	}

	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
}

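/*
 * Dispatch to the TUI, GTK or stdio front end according to use_browser,
 * passing along a help string loaded via perf_tip().
 */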
static int report__browse_hists(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct perf_evlist *evlist = session->evlist;
	const char *help = perf_tip(system_path(TIPDIR));

	if (help == NULL) {
		/* fallback for people who don't install perf ;-) */
		help = perf_tip(DOCDIR);
		if (help == NULL)
			help = "Cannot load tips.txt file, please install perf!";
	}

	switch (use_browser) {
	case 1:
		ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
						    rep->min_percent,
						    &session->header.env);
		/*
		 * Usually "ret" is the last pressed key, and we only
		 * care if the key notifies us to switch data file.
		 */
		if (ret != K_SWITCH_INPUT_DATA)
			ret = 0;
		break;
	case 2:
		ret = report__gtk_browse_hists(rep, help);
		break;
	default:
		ret = perf_evlist__tty_browse_hists(evlist, rep, help);
		break;
	}

	return ret;
}

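/*
 * Collapse each evsel's hist entries according to the sort keys and, for
 * event groups, match/link the members' hists into the leader's, showing a
 * progress bar since this can take a while on large data files.
 */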
static int report__collapse_hists(struct report *rep)
{
	struct ui_progress prog;
	struct perf_evsel *pos;
	int ret = 0;

	ui_progress__init(&prog, rep->nr_entries, "Merging related events...");

	evlist__for_each(rep->session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (pos->idx == 0)
			hists->symbol_filter_str = rep->symbol_filter_str;

		hists->socket_filter = rep->socket_filter;

		ret = hists__collapse_resort(hists, &prog);
		if (ret < 0)
			break;

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group &&
		    !perf_evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(pos->leader);

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	ui_progress__finish();
	return ret;
}

static void report__output_resort(struct report *rep)
{
	struct ui_progress prog;
	struct perf_evsel *pos;

	ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");

	evlist__for_each(rep->session->evlist, pos)
		perf_evsel__output_resort(pos, &prog);

	ui_progress__finish();
}

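/*
 * Main body of the report: process every event in the data file, collapse
 * and resort the histograms, then hand the result to the chosen browser.
 */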
static int __cmd_report(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct perf_evsel *pos;
	struct perf_data_file *file = session->file;

	signal(SIGINT, sig_handler);

	if (rep->cpu_list) {
		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
					       rep->cpu_bitmap);
		if (ret) {
			ui__error("failed to set cpu bitmap\n");
			return ret;
		}
	}

	if (rep->show_threads)
		perf_read_values_init(&rep->show_threads_values);

	ret = report__setup_sample_type(rep);
	if (ret) {
		/* report__setup_sample_type() already showed error message */
		return ret;
	}

	ret = perf_session__process_events(session);
	if (ret) {
		ui__error("failed to process sample\n");
		return ret;
	}

	report__warn_kptr_restrict(rep);

	evlist__for_each(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (use_browser == 0) {
		if (verbose > 3)
			perf_session__fprintf(session, stdout);

		if (verbose > 2)
			perf_session__fprintf_dsos(session, stdout);

		if (dump_trace) {
			perf_session__fprintf_nr_events(session, stdout);
			perf_evlist__fprintf_nr_events(session->evlist, stdout);
			return 0;
		}
	}

	ret = report__collapse_hists(rep);
	if (ret) {
		ui__error("failed to process hist entry\n");
		return ret;
	}

	if (session_done())
		return 0;

	/*
	 * recalculate number of entries after collapsing since it
	 * might be changed during the collapse phase.
	 */
	rep->nr_entries = 0;
	evlist__for_each(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (rep->nr_entries == 0) {
		ui__error("The %s file has no samples!\n", file->path);
		return 0;
	}

	report__output_resort(rep);

	return report__browse_hists(rep);
}

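/*
 * -g/--call-graph option parser: an explicit --no-call-graph only sets
 * dont_use_callchains; everything else is forwarded to
 * parse_callchain_report_opt().
 */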
static int
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct report *rep = (struct report *)opt->value;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		rep->dont_use_callchains = true;
		return 0;
	}

	return parse_callchain_report_opt(arg);
}

int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
				const char *arg, int unset __maybe_unused)
{
	if (arg) {
		int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
		if (err) {
			char buf[BUFSIZ];
			regerror(err, &ignore_callees_regex, buf, sizeof(buf));
			pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
			return -1;
		}
		have_ignore_callees = 1;
	}

	return 0;
}

static int
parse_branch_mode(const struct option *opt __maybe_unused,
		  const char *str __maybe_unused, int unset)
{
	int *branch_mode = opt->value;

	*branch_mode = !unset;
	return 0;
}

static int
parse_percent_limit(const struct option *opt, const char *str,
		    int unset __maybe_unused)
{
	struct report *rep = opt->value;
	double pcnt = strtof(str, NULL);

	rep->min_percent = pcnt;
	callchain_param.min_percent = pcnt;
	return 0;
}

#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"

const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
				     CALLCHAIN_REPORT_HELP
				     "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;

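/*
 * Entry point for the 'perf report' subcommand: set up the perf_tool
 * callbacks and default settings, then parse the command-line options below.
 */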
int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct perf_session *session;
	struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
	struct stat st;
	bool has_br_stack = false;
	int branch_mode = -1;
	bool branch_call_mode = false;
	char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
	const char * const report_usage[] = {
		"perf report [<options>]",
		NULL
	};
	struct report report = {
		.tool = {
			.sample = process_sample_event,
			.mmap = perf_event__process_mmap,
			.mmap2 = perf_event__process_mmap2,
			.comm = perf_event__process_comm,
			.exit = perf_event__process_exit,
			.fork = perf_event__process_fork,
			.lost = perf_event__process_lost,
			.read = process_read_event,
			.attr = perf_event__process_attr,
			.tracing_data = perf_event__process_tracing_data,
			.build_id = perf_event__process_build_id,
			.id_index = perf_event__process_id_index,
			.auxtrace_info = perf_event__process_auxtrace_info,
			.auxtrace = perf_event__process_auxtrace,
			.ordered_events = true,
			.ordering_requires_timestamps = true,
		},
.max_stack = PERF_MAX_STACK_DEPTH,
|
2011-11-25 18:19:45 +08:00
|
|
|
.pretty_printing_style = "normal",
|
2015-09-04 22:45:44 +08:00
|
|
|
.socket_filter = -1,
|
2011-11-25 18:19:45 +08:00
|
|
|
};
|
|
|
|
const struct option options[] = {
|
2012-10-30 11:56:02 +08:00
|
|
|
OPT_STRING('i', "input", &input_name, "file",
|
2009-05-26 15:17:18 +08:00
|
|
|
"input file name"),
|
2010-04-13 16:37:33 +08:00
|
|
|
OPT_INCR('v', "verbose", &verbose,
|
2009-05-27 06:46:14 +08:00
|
|
|
"be more verbose (show symbol address, etc)"),
|
2009-05-27 00:48:58 +08:00
|
|
|
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
|
|
|
|
"dump raw trace in ASCII"),
|
2009-11-24 22:05:15 +08:00
|
|
|
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
|
|
|
|
"file", "vmlinux pathname"),
|
2010-12-08 10:39:46 +08:00
|
|
|
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
|
|
|
|
"file", "kallsyms pathname"),
|
2015-11-13 03:50:13 +08:00
|
|
|
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
|
2009-11-24 22:05:15 +08:00
|
|
|
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
|
2009-07-02 14:09:46 +08:00
|
|
|
"load module symbols - WARNING: use only with -k and LIVE kernel"),
|
2009-12-16 06:04:42 +08:00
|
|
|
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
|
2009-07-11 23:18:37 +08:00
|
|
|
"Show a column with the number of samples"),
|
2011-11-17 22:19:04 +08:00
|
|
|
OPT_BOOLEAN('T', "threads", &report.show_threads,
|
2009-08-07 19:55:24 +08:00
|
|
|
"Show per-thread event counters"),
|
2011-11-17 22:19:04 +08:00
|
|
|
OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
|
2009-08-10 21:26:32 +08:00
|
|
|
"pretty printing style key: normal raw"),
|
2011-11-17 22:19:04 +08:00
|
|
|
OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
|
2012-03-20 02:13:29 +08:00
|
|
|
OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
|
2011-11-17 22:19:04 +08:00
|
|
|
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
|
|
|
|
"Use the stdio interface"),
|
2013-12-09 18:02:49 +08:00
|
|
|
OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
|
|
|
|
OPT_BOOLEAN(0, "header-only", &report.header_only,
|
|
|
|
"Show only data header."),
|
2009-05-28 16:52:00 +08:00
|
|
|
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
|
2014-03-04 08:06:42 +08:00
|
|
|
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
|
|
|
|
" Please refer the man page for the complete list."),
|
2014-03-04 09:46:34 +08:00
|
|
|
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
|
|
|
|
"output field(s): overhead, period, sample plus all of sort keys"),
|
2015-10-24 23:49:25 +08:00
|
|
|
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
|
2010-04-19 13:32:50 +08:00
|
|
|
"Show sample percentage for different cpu modes"),
|
2015-10-24 23:49:25 +08:00
|
|
|
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
|
|
|
|
"Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
|
2009-06-18 13:01:03 +08:00
|
|
|
OPT_STRING('p', "parent", &parent_pattern, "regex",
|
|
|
|
"regex filter to identify parent, see: '--sort parent'"),
|
2009-12-16 06:04:42 +08:00
|
|
|
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
|
2009-06-18 20:32:19 +08:00
|
|
|
"Only display entries with parent-match"),
|
2015-10-22 14:28:48 +08:00
|
|
|
OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
|
2015-11-09 13:45:41 +08:00
|
|
|
"print_type,threshold[,print_limit],order,sort_key[,branch],value",
|
2015-10-22 14:28:48 +08:00
|
|
|
report_callchain_help, &report_parse_callchain_opt,
|
|
|
|
callchain_default_opt),
|
2013-10-30 16:05:55 +08:00
|
|
|
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
|
|
|
|
"Accumulate callchains of children and show total overhead as well"),
|
2013-10-18 22:38:48 +08:00
|
|
|
OPT_INTEGER(0, "max-stack", &report.max_stack,
|
|
|
|
"Set the maximum stack depth when parsing the callchain, "
|
|
|
|
"anything beyond the specified depth will be ignored. "
|
|
|
|
"Default: " __stringify(PERF_MAX_STACK_DEPTH)),
|
2011-11-17 22:19:04 +08:00
|
|
|
OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
|
|
|
|
"alias for inverted call graph"),
|
2012-12-07 13:48:05 +08:00
|
|
|
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
|
|
|
|
"ignore callees of these functions in call graphs",
|
|
|
|
report_parse_ignore_callees_opt),
|
2009-12-16 06:04:40 +08:00
|
|
|
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
|
2009-07-01 06:01:20 +08:00
|
|
|
"only consider symbols in these dsos"),
|
2011-11-14 02:30:08 +08:00
|
|
|
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
|
2009-07-01 06:01:21 +08:00
|
|
|
"only consider symbols in these comms"),
|
2015-03-24 23:52:41 +08:00
|
|
|
OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
|
|
|
|
"only consider symbols in these pids"),
|
|
|
|
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
|
|
|
|
"only consider symbols in these tids"),
|
2009-12-16 06:04:40 +08:00
|
|
|
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
|
2009-07-01 06:01:22 +08:00
|
|
|
"only consider these symbols"),
|
2012-03-16 16:50:54 +08:00
|
|
|
OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
|
|
|
|
"only show symbols that (partially) match with this filter"),
|
2009-12-16 06:04:40 +08:00
|
|
|
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
|
2009-07-11 09:47:28 +08:00
|
|
|
"width[,width...]",
|
|
|
|
"don't try to adjust column width, use these fixed values"),
|
2015-03-13 20:51:54 +08:00
|
|
|
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
|
2009-07-11 09:47:28 +08:00
|
|
|
"separator for columns, no spaces will be added between "
|
|
|
|
"columns '.' is reserved."),
|
2015-11-26 15:08:20 +08:00
|
|
|
OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
|
2009-12-29 08:48:34 +08:00
|
|
|
"Only display entries resolved to a symbol"),
|
2010-12-10 04:27:07 +08:00
|
|
|
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
|
|
|
|
"Look for files with symbols relative to this directory"),
|
2011-11-14 02:30:08 +08:00
|
|
|
OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
|
2011-11-17 22:19:04 +08:00
|
|
|
"list of cpus to profile"),
|
|
|
|
OPT_BOOLEAN('I', "show-info", &report.show_full_info,
|
perf tools: Make perf.data more self-descriptive (v8)
The goal of this patch is to include more information about the host
environment into the perf.data file so it is more self-descriptive. Over time,
profiles are captured on various machines and it becomes hard to track
what was recorded, on what machine and when.
This patch provides a way to solve this by extending the perf.data file
with basic information about the host machine. To add those extensions,
we leverage the feature bits capabilities of the perf.data format. The
change is backward compatible with existing perf.data files.
We define the following useful new extensions:
- HEADER_HOSTNAME: the hostname
- HEADER_OSRELEASE: the kernel release number
- HEADER_ARCH: the hw architecture
- HEADER_CPUDESC: generic CPU description
- HEADER_NRCPUS: number of online/avail cpus
- HEADER_CMDLINE: perf command line
- HEADER_VERSION: perf version
- HEADER_TOPOLOGY: cpu topology
- HEADER_EVENT_DESC: full event description (attrs)
- HEADER_CPUID: easy-to-parse low-level CPU identification
The small granularity for the entries is to make it easier to extend
without breaking backward compatibility. Many entries are provided as
ASCII strings.
Perf report/script have been modified to print the basic information as
easy-to-parse ASCII strings. Extended information about CPU and NUMA
topology may be requested with the -I option.
Thanks to David Ahern for reviewing and testing the many versions of
this patch.
$ perf report --stdio
# ========
# captured on : Mon Sep 26 15:22:14 2011
# hostname : quad
# os release : 3.1.0-rc4-tip
# perf version : 3.1.0-rc4
# arch : x86_64
# nrcpus online : 4
# nrcpus avail : 4
# cpudesc : Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
# cpuid : GenuineIntel,6,15,11
# total memory : 8105360 kB
# cmdline : /home/eranian/perfmon/official/tip/build/tools/perf/perf record date
# event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, id = { 29, 30, 31,
# HEADER_CPU_TOPOLOGY info available, use -I to display
# HEADER_NUMA_TOPOLOGY info available, use -I to display
# ========
#
...
$ perf report --stdio -I
# ========
# captured on : Mon Sep 26 15:22:14 2011
# hostname : quad
# os release : 3.1.0-rc4-tip
# perf version : 3.1.0-rc4
# arch : x86_64
# nrcpus online : 4
# nrcpus avail : 4
# cpudesc : Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
# cpuid : GenuineIntel,6,15,11
# total memory : 8105360 kB
# cmdline : /home/eranian/perfmon/official/tip/build/tools/perf/perf record date
# event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, id = { 29, 30, 31,
# sibling cores : 0-3
# sibling threads : 0
# sibling threads : 1
# sibling threads : 2
# sibling threads : 3
# node0 meminfo : total = 8320608 kB, free = 7571024 kB
# node0 cpu list : 0-3
# ========
#
...
Reviewed-by: David Ahern <dsahern@gmail.com>
Tested-by: David Ahern <dsahern@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/20110930134040.GA5575@quad
Signed-off-by: Stephane Eranian <eranian@google.com>
[ committer notes: Use --show-info in the tools as was in the docs, rename
perf_header_fprintf_info to perf_file_section__fprintf_info, fixup
conflict with f69b64f7 "perf: Support setting the disassembler style" ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-09-30 21:40:40 +08:00
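A minimal sketch of the feature-bit mechanism described above (hypothetical names; not perf's real header code): each optional section sets one bit in the file header, so an old reader simply skips bits it does not recognize, while a new reader knows which extra sections to parse. In the real tool the same kind of check surfaces as perf_header__has_feat(), used further down in this file to detect HEADER_BRANCH_STACK.

#include <stdbool.h>
#include <stdio.h>

/* illustrative subset of the new optional sections */
enum feat_id {
	FEAT_HOSTNAME,
	FEAT_OSRELEASE,
	FEAT_ARCH,
	FEAT_CPUDESC,
	FEAT_CMDLINE,
};

struct file_header {
	unsigned long adds_features;	/* one bit per optional section */
};

static void header_set_feat(struct file_header *h, enum feat_id f)
{
	h->adds_features |= 1UL << f;
}

static bool header_has_feat(const struct file_header *h, enum feat_id f)
{
	return h->adds_features & (1UL << f);
}

int main(void)
{
	struct file_header h = { 0 };

	header_set_feat(&h, FEAT_HOSTNAME);
	header_set_feat(&h, FEAT_CMDLINE);

	/* a reader only parses the sections whose bits it understands */
	if (header_has_feat(&h, FEAT_HOSTNAME))
		printf("hostname section present\n");
	if (!header_has_feat(&h, FEAT_ARCH))
		printf("arch section absent, fall back to defaults\n");
	return 0;
}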
|
|
|
"Display extended information about perf.data file"),
|
2011-10-06 23:48:31 +08:00
|
|
|
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
|
|
|
|
"Interleave source code with assembly code (default)"),
|
|
|
|
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
|
|
|
|
"Display raw encoding of assembly instructions (default)"),
|
2011-09-16 05:31:41 +08:00
|
|
|
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
|
|
|
|
"Specify disassembler style (e.g. -M intel for intel syntax)"),
|
2011-10-06 03:10:06 +08:00
|
|
|
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
|
|
|
|
"Show a column with the sum of periods"),
|
2013-01-22 17:09:45 +08:00
|
|
|
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
|
|
|
|
"Show event group information together"),
|
2013-04-01 19:35:20 +08:00
|
|
|
OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
|
2014-11-13 10:05:22 +08:00
|
|
|
"use branch records for per branch histogram filling",
|
|
|
|
parse_branch_mode),
|
|
|
|
OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
|
|
|
|
"add last branch records to call history"),
|
2012-09-04 18:32:30 +08:00
|
|
|
OPT_STRING(0, "objdump", &objdump_path, "path",
|
|
|
|
"objdump binary to use for disassembly and annotations"),
|
2013-03-25 17:18:18 +08:00
|
|
|
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
|
|
|
|
"Disable symbol demangling"),
|
2014-09-13 12:15:05 +08:00
|
|
|
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
|
|
|
|
"Enable kernel symbol demangling"),
|
2013-01-24 23:10:36 +08:00
|
|
|
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
|
2013-05-14 10:09:04 +08:00
|
|
|
OPT_CALLBACK(0, "percent-limit", &report, "percent",
|
|
|
|
"Don't show entries under that percent", parse_percent_limit),
|
2014-01-14 10:52:48 +08:00
|
|
|
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
|
2014-02-07 11:06:07 +08:00
|
|
|
"how to display percentage of filtered entries", parse_filter_percentage),
|
2015-04-25 03:29:45 +08:00
|
|
|
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
|
|
|
|
"Instruction Tracing options",
|
|
|
|
itrace_parse_synth_opts),
|
2015-08-08 06:24:05 +08:00
|
|
|
OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
|
|
|
|
"Show full source file name path for source lines"),
|
2015-08-11 18:30:49 +08:00
|
|
|
OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
|
|
|
|
"Show callgraph from reference event"),
|
2015-09-04 22:45:44 +08:00
|
|
|
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
|
|
|
|
"only show processor socket that match with this filter"),
|
2015-12-23 01:07:05 +08:00
|
|
|
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
|
|
|
|
"Show raw trace event output (do not use print fmt or plugins)"),
|
2016-02-24 23:13:48 +08:00
|
|
|
OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
|
|
|
|
"Show entries in a hierarchy"),
|
2009-05-26 15:17:18 +08:00
|
|
|
OPT_END()
|
2011-11-25 18:19:45 +08:00
|
|
|
};
|
2013-10-15 22:27:32 +08:00
|
|
|
struct perf_data_file file = {
|
|
|
|
.mode = PERF_DATA_MODE_READ,
|
|
|
|
};
|
2014-10-10 03:16:00 +08:00
|
|
|
int ret = hists__init();
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2009-05-26 15:17:18 +08:00
|
|
|
|
2013-12-20 01:53:53 +08:00
|
|
|
perf_config(report__config, &report);
|
2013-01-22 17:09:46 +08:00
|
|
|
|
2009-12-16 06:04:40 +08:00
|
|
|
argc = parse_options(argc, argv, options, report_usage, 0);
|
2015-12-10 11:00:56 +08:00
|
|
|
if (argc) {
|
|
|
|
/*
|
|
|
|
* Special case: if there's an argument left then assume that
|
|
|
|
* it's a symbol filter:
|
|
|
|
*/
|
|
|
|
if (argc > 1)
|
|
|
|
usage_with_options(report_usage, options);
|
|
|
|
|
|
|
|
report.symbol_filter_str = argv[0];
|
|
|
|
}
|
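/*
 * e.g. "perf report putc" behaves like "perf report --symbol-filter=putc":
 * both just end up setting report.symbol_filter_str.
 */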
2009-12-16 06:04:40 +08:00
|
|
|
|
2015-06-19 16:57:33 +08:00
|
|
|
if (symbol_conf.vmlinux_name &&
|
|
|
|
access(symbol_conf.vmlinux_name, R_OK)) {
|
|
|
|
pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (symbol_conf.kallsyms_name &&
|
|
|
|
access(symbol_conf.kallsyms_name, R_OK)) {
|
|
|
|
pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2011-11-17 22:19:04 +08:00
|
|
|
if (report.use_stdio)
|
2010-08-21 21:38:16 +08:00
|
|
|
use_browser = 0;
|
2011-11-17 22:19:04 +08:00
|
|
|
else if (report.use_tui)
|
2010-08-21 21:38:16 +08:00
|
|
|
use_browser = 1;
|
2012-03-20 02:13:29 +08:00
|
|
|
else if (report.use_gtk)
|
|
|
|
use_browser = 2;
|
2010-08-21 21:38:16 +08:00
|
|
|
|
2011-11-17 22:19:04 +08:00
|
|
|
if (report.inverted_callchain)
|
2011-06-07 23:49:46 +08:00
|
|
|
callchain_param.order = ORDER_CALLER;
|
2015-10-22 15:45:46 +08:00
|
|
|
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
|
|
|
|
callchain_param.order = ORDER_CALLER;
|
2011-06-07 23:49:46 +08:00
|
|
|
|
2015-09-25 21:15:46 +08:00
|
|
|
if (itrace_synth_opts.callchain &&
|
|
|
|
(int)itrace_synth_opts.callchain_sz > report.max_stack)
|
|
|
|
report.max_stack = itrace_synth_opts.callchain_sz;
|
|
|
|
|
2012-10-30 11:56:02 +08:00
|
|
|
if (!input_name || !strlen(input_name)) {
|
2011-12-07 17:02:54 +08:00
|
|
|
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
|
2012-10-30 11:56:02 +08:00
|
|
|
input_name = "-";
|
2011-12-07 17:02:54 +08:00
|
|
|
else
|
2012-10-30 11:56:02 +08:00
|
|
|
input_name = "perf.data";
|
2011-12-07 17:02:54 +08:00
|
|
|
}
|
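/*
 * Added note: this covers piped usage such as
 * "perf record -o - <cmd> | perf report", where stdin is a FIFO and
 * the samples are read from the stream instead of a perf.data file.
 */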
2013-02-03 14:38:21 +08:00
|
|
|
|
2013-10-15 22:27:32 +08:00
|
|
|
file.path = input_name;
|
2015-11-13 03:50:13 +08:00
|
|
|
file.force = symbol_conf.force;
|
2013-10-15 22:27:32 +08:00
|
|
|
|
2013-02-03 14:38:21 +08:00
|
|
|
repeat:
|
2013-10-15 22:27:32 +08:00
|
|
|
session = perf_session__new(&file, false, &report.tool);
|
2012-03-09 06:47:47 +08:00
|
|
|
if (session == NULL)
|
2014-09-24 09:33:37 +08:00
|
|
|
return -1;
|
2012-03-09 06:47:47 +08:00
|
|
|
|
2014-06-05 17:00:20 +08:00
|
|
|
if (report.queue_size) {
|
|
|
|
ordered_events__set_alloc_size(&session->ordered_events,
|
|
|
|
report.queue_size);
|
|
|
|
}
|
|
|
|
|
2015-04-25 03:29:45 +08:00
|
|
|
session->itrace_synth_opts = &itrace_synth_opts;
|
|
|
|
|
2012-03-09 06:47:47 +08:00
|
|
|
report.session = session;
|
|
|
|
|
|
|
|
has_br_stack = perf_header__has_feat(&session->header,
|
|
|
|
HEADER_BRANCH_STACK);
|
2011-12-07 17:02:54 +08:00
|
|
|
|
2015-09-25 21:15:41 +08:00
|
|
|
if (itrace_synth_opts.last_branch)
|
|
|
|
has_br_stack = true;
|
|
|
|
|
2014-11-13 10:05:22 +08:00
|
|
|
/*
|
|
|
|
* Branch mode is a tristate:
|
|
|
|
* -1 means default, so decide based on the file having branch data.
|
|
|
|
* 0/1 means the user chose a mode.
|
|
|
|
*/
|
|
|
|
if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
|
2015-02-15 10:33:37 +08:00
|
|
|
!branch_call_mode) {
|
2013-04-01 19:35:20 +08:00
|
|
|
sort__mode = SORT_MODE__BRANCH;
|
2013-10-30 16:05:55 +08:00
|
|
|
symbol_conf.cumulate_callchain = false;
|
|
|
|
}
|
2014-11-13 10:05:22 +08:00
|
|
|
if (branch_call_mode) {
|
2014-11-18 09:58:54 +08:00
|
|
|
callchain_param.key = CCKEY_ADDRESS;
|
2014-11-13 10:05:22 +08:00
|
|
|
callchain_param.branch_callstack = 1;
|
|
|
|
symbol_conf.use_callchain = true;
|
|
|
|
callchain_register_param(&callchain_param);
|
|
|
|
if (sort_order == NULL)
|
|
|
|
sort_order = "srcline,symbol,dso";
|
|
|
|
}
|
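/*
 * Added summary of the branch handling above (no new logic):
 *
 *   branch_call_mode  branch_mode  has_br_stack   result
 *   ----------------  -----------  ------------   ------
 *   true              any          any            branch records feed the call graph
 *   false             1            any            sort__mode = SORT_MODE__BRANCH
 *   false             -1           yes            sort__mode = SORT_MODE__BRANCH
 *   false             -1           no             normal sorting
 *   false             0            any            normal sorting
 */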
2012-03-09 06:47:47 +08:00
|
|
|
|
2013-01-24 23:10:36 +08:00
|
|
|
if (report.mem_mode) {
|
2013-04-01 19:35:20 +08:00
|
|
|
if (sort__mode == SORT_MODE__BRANCH) {
|
2013-12-20 13:11:12 +08:00
|
|
|
pr_err("branch and mem mode incompatible\n");
|
2013-01-24 23:10:36 +08:00
|
|
|
goto error;
|
|
|
|
}
|
2013-04-03 20:26:11 +08:00
|
|
|
sort__mode = SORT_MODE__MEMORY;
|
2013-10-30 16:05:55 +08:00
|
|
|
symbol_conf.cumulate_callchain = false;
|
2013-01-24 23:10:36 +08:00
|
|
|
}
|
2012-03-09 06:47:48 +08:00
|
|
|
|
2016-02-24 23:13:48 +08:00
|
|
|
if (symbol_conf.report_hierarchy) {
|
|
|
|
/* disable incompatible options */
|
|
|
|
symbol_conf.event_group = false;
|
|
|
|
symbol_conf.cumulate_callchain = false;
|
|
|
|
|
|
|
|
if (field_order) {
|
|
|
|
pr_err("Error: --hierarchy and --fields options cannot be used together\n");
|
|
|
|
parse_options_usage(report_usage, options, "F", 1);
|
|
|
|
parse_options_usage(NULL, options, "hierarchy", 0);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
sort__need_collapse = true;
|
|
|
|
}
|
|
|
|
|
2015-05-09 23:19:43 +08:00
|
|
|
/* Force tty output for header output and per-thread stat. */
|
|
|
|
if (report.header || report.header_only || report.show_threads)
|
2013-12-09 18:02:49 +08:00
|
|
|
use_browser = 0;
|
|
|
|
|
2013-11-01 15:33:12 +08:00
|
|
|
if (strcmp(input_name, "-") != 0)
|
|
|
|
setup_browser(true);
|
2014-04-16 10:04:51 +08:00
|
|
|
else
|
2013-11-01 15:33:12 +08:00
|
|
|
use_browser = 0;
|
|
|
|
|
2016-01-18 17:24:06 +08:00
|
|
|
if (setup_sorting(session->evlist) < 0) {
|
|
|
|
if (sort_order)
|
|
|
|
parse_options_usage(report_usage, options, "s", 1);
|
|
|
|
if (field_order)
|
|
|
|
parse_options_usage(sort_order ? NULL : report_usage,
|
|
|
|
options, "F", 1);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2013-12-09 18:02:49 +08:00
|
|
|
if (report.header || report.header_only) {
|
|
|
|
perf_session__fprintf_info(session, stdout,
|
|
|
|
report.show_full_info);
|
2015-06-30 16:15:24 +08:00
|
|
|
if (report.header_only) {
|
|
|
|
ret = 0;
|
|
|
|
goto error;
|
|
|
|
}
|
2013-12-09 18:02:49 +08:00
|
|
|
} else if (use_browser == 0) {
|
|
|
|
fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
|
|
|
|
stdout);
|
|
|
|
}
|
|
|
|
|
2010-05-12 10:18:06 +08:00
|
|
|
/*
|
2013-03-28 22:34:10 +08:00
|
|
|
* Only in the TUI browser do we do integrated annotation,
|
2010-05-12 10:18:06 +08:00
|
|
|
* so don't allocate extra space that won't be used in the stdio
|
|
|
|
* implementation.
|
|
|
|
*/
|
2014-03-18 14:32:26 +08:00
|
|
|
if (ui__has_annotation()) {
|
2011-02-04 19:45:46 +08:00
|
|
|
symbol_conf.priv_size = sizeof(struct annotation);
|
2013-08-08 19:32:22 +08:00
|
|
|
machines__set_symbol_filter(&session->machines,
|
|
|
|
symbol__annotate_init);
|
2010-08-06 06:28:27 +08:00
|
|
|
/*
|
|
|
|
* For searching by name on the "Browse map details"
|
|
|
|
* screen, provide it only in verbose mode so as not
|
|
|
|
* to bloat struct symbol too much.
|
|
|
|
*/
|
|
|
|
if (verbose) {
|
|
|
|
/*
|
|
|
|
* XXX: Need to provide a less kludgy way to ask for
|
|
|
|
* more space per symbol, the u32 is for the index on
|
|
|
|
* the ui browser.
|
|
|
|
* See symbol__browser_index.
|
|
|
|
*/
|
|
|
|
symbol_conf.priv_size += sizeof(u32);
|
|
|
|
symbol_conf.sort_by_name = true;
|
|
|
|
}
|
|
|
|
}
|
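/*
 * Added note: symbol_conf.priv_size is consumed by the symbol loader,
 * which makes one allocation per symbol big enough for the private
 * area plus struct symbol, so the annotation state (and, in verbose
 * mode, the extra u32 browser index) needs no second allocation; the
 * private area is reached via symbol__priv().
 */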
2009-12-16 06:04:40 +08:00
|
|
|
|
2014-08-12 14:40:45 +08:00
|
|
|
if (symbol__init(&session->header.env) < 0)
|
2012-03-09 06:47:47 +08:00
|
|
|
goto error;
|
2009-05-26 15:17:18 +08:00
|
|
|
|
2013-04-03 20:26:19 +08:00
|
|
|
sort__setup_elide(stdout);
|
2009-07-01 06:01:20 +08:00
|
|
|
|
2012-03-09 06:47:47 +08:00
|
|
|
ret = __cmd_report(&report);
|
2013-02-03 14:38:21 +08:00
|
|
|
if (ret == K_SWITCH_INPUT_DATA) {
|
|
|
|
perf_session__delete(session);
|
|
|
|
goto repeat;
|
|
|
|
} else
|
|
|
|
ret = 0;
|
|
|
|
|
2012-03-09 06:47:47 +08:00
|
|
|
error:
|
|
|
|
perf_session__delete(session);
|
|
|
|
return ret;
|
2009-05-26 15:17:18 +08:00
|
|
|
}
|