Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

. perf build-id cache can now show DSOs present in a perf.data file that are
  not in the cache, to integrate with build-id servers being put in place by
  organizations such as Fedora (see the filter-callback sketch further below).

. perf buildid-list -i an-elf-file-instead-of-a-perf.data is back to showing
  its build-id.

. No need to do feature checks when doing a 'make tags'.

. Fix some 'perf test' errors and make them use the tracepoint evsel
  constructor (see the sketch right after this list).

. perf top now shares more of the evsel config/creation routines with 'record',
  paving the way for further integration like 'top' snapshots, etc.

. perf top now supports DWARF callchains.

. perf evlist decodes sample_type and read_format, helping diagnose problems.

. Fix mmap limitations on 32-bit, from David Miller.

. perf diff fixes from Jiri Olsa.

. Ignore ABS symbols when loading data maps, fix from Namhyung Kim.

. Hists improvements from Namhyung Kim.

. Don't check configuration on 'make clean', from Namhyung Kim.

. Fix dso__fprintf() print statement, from Stephane Eranian.
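
As an aside, the "tracepoint evsel constructor" mentioned above is easiest to
see in the tests changed below: the open-coded perf_event_attr setup plus the
trace_event__id() lookup is replaced by a single perf_evsel__newtp() call.
The following is only a sketch of that shape, assuming the perf tool source
tree and its internal headers; the helper name open_syscall_evsel() is made
up for illustration.

/*
 * Sketch: create a syscall tracepoint evsel with the new constructor
 * instead of filling a struct perf_event_attr by hand.
 */
#include "util/evsel.h"
#include "util/debug.h"

static struct perf_evsel *open_syscall_evsel(void)
{
	struct perf_evsel *evsel;

	/*
	 * Old way, now removed from the tests:
	 *
	 *	attr.type   = PERF_TYPE_TRACEPOINT;
	 *	attr.config = trace_event__id("sys_enter_open");
	 *	evsel = perf_evsel__new(&attr, 0);
	 */
	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL)
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");

	return evsel;
}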

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2013-01-24 16:30:45 +01:00
commit 203e04c163
57 changed files with 1484 additions and 1520 deletions
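
Before the per-file changes, one note on the build-id work referenced in the
first bullet: it hinges on perf_session__fprintf_dsos_buildid() now taking a
DSO filter callback, so 'perf buildid-list' and the new 'perf buildid-cache
--missing' can each pick which DSOs get printed. The sketch below shows that
shape using names from the diff; list_build_ids() is a hypothetical wrapper
and the struct perf_session setup is assumed to exist elsewhere.

#include <stdbool.h>
#include <stdio.h>
#include "util/session.h"
#include "util/symbol.h"

/* buildid-list predicate: skip DSOs without hits when --with-hits is used */
static bool dso__skip_buildid(struct dso *dso, int with_hits)
{
	return with_hits && !dso->hit;
}

/* Hypothetical wrapper: each caller passes the predicate it wants applied */
static void list_build_ids(struct perf_session *session, bool with_hits)
{
	perf_session__fprintf_dsos_buildid(session, stdout,
					   dso__skip_buildid, with_hits);
}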

View File

@ -222,10 +222,14 @@ install-pdf: pdf
#install-html: html
# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
ifneq ($(MAKECMDGOALS),clean)
ifneq ($(MAKECMDGOALS),tags)
$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
$(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE
-include $(OUTPUT)PERF-VERSION-FILE
endif
endif
#
# Determine "include::" file references in asciidoc files.

View File

@ -24,6 +24,9 @@ OPTIONS
-r::
--remove=::
Remove specified file from the cache.
-M::
--missing=::
List missing build ids in the cache for the specified file.
-v::
--verbose::
Be more verbose.

View File

@ -22,10 +22,6 @@ specified perf.data files.
OPTIONS
-------
-M::
--displacement::
Show position displacement relative to baseline.
-D::
--dump-raw-trace::
Dump raw trace in ASCII.

View File

@ -60,7 +60,7 @@ Default is to monitor all CPUS.
-i::
--inherit::
Child tasks inherit counters, only makes sens with -p option.
Child tasks do not inherit counters.
-k <path>::
--vmlinux=<path>::

View File

@ -153,6 +153,8 @@ INSTALL = install
# explicitly what architecture to check for. Fix this up for yours..
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
ifneq ($(MAKECMDGOALS),clean)
ifneq ($(MAKECMDGOALS),tags)
-include config/feature-tests.mak
ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
@ -206,6 +208,8 @@ ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y)
EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
BASIC_CFLAGS += -I.
endif
endif # MAKECMDGOALS != tags
endif # MAKECMDGOALS != clean
# Guard against environment variables
BUILTIN_OBJS =
@ -230,11 +234,19 @@ endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
TE_LIB := -L$(TE_PATH) -ltraceevent
export LIBTRACEEVENT
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py
export LIBTRACEEVENT
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
$(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \
--quiet build_ext; \
@ -378,8 +390,11 @@ LIB_H += util/rblist.h
LIB_H += util/intlist.h
LIB_H += util/perf_regs.h
LIB_H += util/unwind.h
LIB_H += ui/helpline.h
LIB_H += util/vdso.h
LIB_H += ui/helpline.h
LIB_H += ui/progress.h
LIB_H += ui/util.h
LIB_H += ui/ui.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
@ -453,6 +468,7 @@ LIB_OBJS += $(OUTPUT)util/stat.o
LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)ui/helpline.o
LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)ui/util.o
LIB_OBJS += $(OUTPUT)ui/hist.o
LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
@ -471,7 +487,6 @@ LIB_OBJS += $(OUTPUT)tests/rdpmc.o
LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
LIB_OBJS += $(OUTPUT)tests/pmu.o
LIB_OBJS += $(OUTPUT)tests/util.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@ -510,6 +525,8 @@ PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT)
#
# Platform specific tweaks
#
ifneq ($(MAKECMDGOALS),clean)
ifneq ($(MAKECMDGOALS),tags)
# We choose to avoid "if .. else if .. else .. endif endif"
# because maintaining the nesting to match is a pain. If
@ -646,7 +663,6 @@ ifndef NO_NEWT
LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)ui/browsers/map.o
LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
LIB_OBJS += $(OUTPUT)ui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/setup.o
LIB_OBJS += $(OUTPUT)ui/tui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
@ -655,9 +671,6 @@ ifndef NO_NEWT
LIB_H += ui/browsers/map.h
LIB_H += ui/keysyms.h
LIB_H += ui/libslang.h
LIB_H += ui/progress.h
LIB_H += ui/util.h
LIB_H += ui/ui.h
endif
endif
@ -677,10 +690,6 @@ ifndef NO_GTK2
LIB_OBJS += $(OUTPUT)ui/gtk/util.o
LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
LIB_OBJS += $(OUTPUT)ui/gtk/progress.o
# Make sure that it'd be included only once.
ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),)
LIB_OBJS += $(OUTPUT)ui/util.o
endif
endif
endif
@ -707,7 +716,7 @@ disable-python = $(eval $(disable-python_code))
define disable-python_code
BASIC_CFLAGS += -DNO_LIBPYTHON
$(if $(1),$(warning No $(1) was found))
$(warning Python support won't be built)
$(warning Python support will not be built)
endef
override PYTHON := \
@ -715,19 +724,10 @@ override PYTHON := \
ifndef PYTHON
$(call disable-python,python interpreter)
python-clean :=
else
PYTHON_WORD := $(call shell-wordify,$(PYTHON))
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
ifdef NO_LIBPYTHON
$(call disable-python)
else
@ -843,6 +843,9 @@ ifdef ASCIIDOC8
export ASCIIDOC8
endif
endif # MAKECMDGOALS != tags
endif # MAKECMDGOALS != clean
# Shell quote (do not use $(call) to accommodate ancient setups);
ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
@ -1099,7 +1102,7 @@ perfexec_instdir = $(prefix)/$(perfexecdir)
endif
perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
install: all try-install-man
install-bin: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
@ -1120,6 +1123,8 @@ install: all try-install-man
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
install: install-bin try-install-man
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'

View File

@ -14,6 +14,7 @@
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/build-id.h"
#include "util/session.h"
#include "util/symbol.h"
static int build_id_cache__add_file(const char *filename, const char *debugdir)
@ -58,19 +59,59 @@ static int build_id_cache__remove_file(const char *filename,
return err;
}
static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
{
char filename[PATH_MAX];
u8 build_id[BUILD_ID_SIZE];
if (dso__build_id_filename(dso, filename, sizeof(filename)) &&
filename__read_build_id(filename, build_id,
sizeof(build_id)) != sizeof(build_id)) {
if (errno == ENOENT)
return false;
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
} else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
}
return true;
}
static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp)
{
struct perf_session *session = perf_session__new(filename, O_RDONLY,
force, false, NULL);
if (session == NULL)
return -1;
perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0);
perf_session__delete(session);
return 0;
}
int cmd_buildid_cache(int argc, const char **argv,
const char *prefix __maybe_unused)
{
struct strlist *list;
struct str_node *pos;
int ret = 0;
bool force = false;
char debugdir[PATH_MAX];
char const *add_name_list_str = NULL,
*remove_name_list_str = NULL;
*remove_name_list_str = NULL,
*missing_filename = NULL;
const struct option buildid_cache_options[] = {
OPT_STRING('a', "add", &add_name_list_str,
"file list", "file(s) to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
OPT_STRING('M', "missing", &missing_filename, "file",
"to find missing build ids in the cache"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};
@ -125,5 +166,8 @@ int cmd_buildid_cache(int argc, const char **argv,
}
}
return 0;
if (missing_filename)
ret = build_id_cache__fprintf_missing(missing_filename, force, stdout);
return ret;
}

View File

@ -44,23 +44,26 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
return fprintf(fp, "%s\n", sbuild_id);
}
static bool dso__skip_buildid(struct dso *dso, int with_hits)
{
return with_hits && !dso->hit;
}
static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
symbol__elf_init();
/*
* See if this is an ELF file first:
*/
if (filename__fprintf_build_id(input_name, stdout))
goto out;
session = perf_session__new(input_name, O_RDONLY, force, false,
&build_id__mark_dso_hit_ops);
if (session == NULL)
return -1;
/*
* See if this is an ELF file first:
*/
if (filename__fprintf_build_id(session->filename, stdout))
goto out;
/*
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
@ -68,9 +71,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (with_hits || session->fd_pipe)
perf_session__process_events(session, &build_id__mark_dso_hit_ops);
perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
out:
perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
perf_session__delete(session);
out:
return 0;
}

View File

@ -23,7 +23,6 @@ static char const *input_old = "perf.data.old",
*input_new = "perf.data";
static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
static bool show_period;
static bool show_formula;
static bool show_baseline_only;
@ -146,58 +145,47 @@ static int setup_compute(const struct option *opt, const char *str,
return -EINVAL;
}
static double get_period_percent(struct hist_entry *he, u64 period)
double perf_diff__period_percent(struct hist_entry *he, u64 period)
{
u64 total = he->hists->stats.total_period;
return (period * 100.0) / total;
}
double perf_diff__compute_delta(struct hist_entry *he)
double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair)
{
struct hist_entry *pair = hist_entry__next_pair(he);
double new_percent = get_period_percent(he, he->stat.period);
double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0;
double new_percent = perf_diff__period_percent(he, he->stat.period);
double old_percent = perf_diff__period_percent(pair, pair->stat.period);
he->diff.period_ratio_delta = new_percent - old_percent;
he->diff.computed = true;
return he->diff.period_ratio_delta;
}
double perf_diff__compute_ratio(struct hist_entry *he)
double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair)
{
struct hist_entry *pair = hist_entry__next_pair(he);
double new_period = he->stat.period;
double old_period = pair ? pair->stat.period : 0;
double old_period = pair->stat.period;
he->diff.computed = true;
he->diff.period_ratio = pair ? (new_period / old_period) : 0;
he->diff.period_ratio = new_period / old_period;
return he->diff.period_ratio;
}
s64 perf_diff__compute_wdiff(struct hist_entry *he)
s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair)
{
struct hist_entry *pair = hist_entry__next_pair(he);
u64 new_period = he->stat.period;
u64 old_period = pair ? pair->stat.period : 0;
u64 old_period = pair->stat.period;
he->diff.computed = true;
if (!pair)
he->diff.wdiff = 0;
else
he->diff.wdiff = new_period * compute_wdiff_w2 -
old_period * compute_wdiff_w1;
he->diff.wdiff = new_period * compute_wdiff_w2 -
old_period * compute_wdiff_w1;
return he->diff.wdiff;
}
static int formula_delta(struct hist_entry *he, char *buf, size_t size)
static int formula_delta(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
struct hist_entry *pair = hist_entry__next_pair(he);
if (!pair)
return -1;
return scnprintf(buf, size,
"(%" PRIu64 " * 100 / %" PRIu64 ") - "
"(%" PRIu64 " * 100 / %" PRIu64 ")",
@ -205,41 +193,36 @@ static int formula_delta(struct hist_entry *he, char *buf, size_t size)
pair->stat.period, pair->hists->stats.total_period);
}
static int formula_ratio(struct hist_entry *he, char *buf, size_t size)
static int formula_ratio(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
struct hist_entry *pair = hist_entry__next_pair(he);
double new_period = he->stat.period;
double old_period = pair ? pair->stat.period : 0;
if (!pair)
return -1;
double old_period = pair->stat.period;
return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period);
}
static int formula_wdiff(struct hist_entry *he, char *buf, size_t size)
static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
struct hist_entry *pair = hist_entry__next_pair(he);
u64 new_period = he->stat.period;
u64 old_period = pair ? pair->stat.period : 0;
if (!pair)
return -1;
u64 old_period = pair->stat.period;
return scnprintf(buf, size,
"(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")",
new_period, compute_wdiff_w2, old_period, compute_wdiff_w1);
}
int perf_diff__formula(char *buf, size_t size, struct hist_entry *he)
int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
switch (compute) {
case COMPUTE_DELTA:
return formula_delta(he, buf, size);
return formula_delta(he, pair, buf, size);
case COMPUTE_RATIO:
return formula_ratio(he, buf, size);
return formula_ratio(he, pair, buf, size);
case COMPUTE_WEIGHTED_DIFF:
return formula_wdiff(he, buf, size);
return formula_wdiff(he, pair, buf, size);
default:
BUG_ON(1);
}
@ -312,9 +295,8 @@ static void insert_hist_entry_by_name(struct rb_root *root,
rb_insert_color(&he->rb_node, root);
}
static void hists__name_resort(struct hists *self, bool sort)
static void hists__name_resort(struct hists *self)
{
unsigned long position = 1;
struct rb_root tmp = RB_ROOT;
struct rb_node *next = rb_first(&self->entries);
@ -322,16 +304,12 @@ static void hists__name_resort(struct hists *self, bool sort)
struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
n->position = position++;
if (sort) {
rb_erase(&n->rb_node, &self->entries);
insert_hist_entry_by_name(&tmp, n);
}
rb_erase(&n->rb_node, &self->entries);
insert_hist_entry_by_name(&tmp, n);
}
if (sort)
self->entries = tmp;
self->entries = tmp;
}
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
@ -355,12 +333,8 @@ static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name)
hists__output_resort(hists);
/*
* The hists__name_resort only sets possition
* if name is false.
*/
if (name || ((!name) && show_displacement))
hists__name_resort(hists, name);
if (name)
hists__name_resort(hists);
}
}
@ -385,18 +359,21 @@ static void hists__precompute(struct hists *hists)
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
struct hist_entry *pair = hist_entry__next_pair(he);
next = rb_next(&he->rb_node);
if (!pair)
continue;
switch (compute) {
case COMPUTE_DELTA:
perf_diff__compute_delta(he);
perf_diff__compute_delta(he, pair);
break;
case COMPUTE_RATIO:
perf_diff__compute_ratio(he);
perf_diff__compute_ratio(he, pair);
break;
case COMPUTE_WEIGHTED_DIFF:
perf_diff__compute_wdiff(he);
perf_diff__compute_wdiff(he, pair);
break;
default:
BUG_ON(1);
@ -562,8 +539,6 @@ static const char * const diff_usage[] = {
static const struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('M', "displacement", &show_displacement,
"Show position displacement relative to baseline"),
OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
"Show only items with match in baseline"),
OPT_CALLBACK('c', "compute", &compute,
@ -597,40 +572,32 @@ static const struct option options[] = {
static void ui_init(void)
{
perf_hpp__init();
/* No overhead column. */
perf_hpp__column_enable(PERF_HPP__OVERHEAD, false);
/*
* Display baseline/delta/ratio/displacement/
* Display baseline/delta/ratio
* formula/periods columns.
*/
perf_hpp__column_enable(PERF_HPP__BASELINE, true);
perf_hpp__column_enable(PERF_HPP__BASELINE);
switch (compute) {
case COMPUTE_DELTA:
perf_hpp__column_enable(PERF_HPP__DELTA, true);
perf_hpp__column_enable(PERF_HPP__DELTA);
break;
case COMPUTE_RATIO:
perf_hpp__column_enable(PERF_HPP__RATIO, true);
perf_hpp__column_enable(PERF_HPP__RATIO);
break;
case COMPUTE_WEIGHTED_DIFF:
perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF, true);
perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF);
break;
default:
BUG_ON(1);
};
if (show_displacement)
perf_hpp__column_enable(PERF_HPP__DISPL, true);
if (show_formula)
perf_hpp__column_enable(PERF_HPP__FORMULA, true);
perf_hpp__column_enable(PERF_HPP__FORMULA);
if (show_period) {
perf_hpp__column_enable(PERF_HPP__PERIOD, true);
perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE, true);
perf_hpp__column_enable(PERF_HPP__PERIOD);
perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE);
}
}

View File

@ -15,39 +15,6 @@
#include "util/parse-options.h"
#include "util/session.h"
struct perf_attr_details {
bool freq;
bool verbose;
};
static int comma_printf(bool *first, const char *fmt, ...)
{
va_list args;
int ret = 0;
if (!*first) {
ret += printf(",");
} else {
ret += printf(":");
*first = false;
}
va_start(args, fmt);
ret += vprintf(fmt, args);
va_end(args);
return ret;
}
static int __if_print(bool *first, const char *field, u64 value)
{
if (value == 0)
return 0;
return comma_printf(first, " %s: %" PRIu64, field, value);
}
#define if_print(field) __if_print(&first, #field, pos->attr.field)
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
struct perf_session *session;
@ -57,52 +24,8 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
if (session == NULL)
return -ENOMEM;
list_for_each_entry(pos, &session->evlist->entries, node) {
bool first = true;
printf("%s", perf_evsel__name(pos));
if (details->verbose || details->freq) {
comma_printf(&first, " sample_freq=%" PRIu64,
(u64)pos->attr.sample_freq);
}
if (details->verbose) {
if_print(type);
if_print(config);
if_print(config1);
if_print(config2);
if_print(size);
if_print(sample_type);
if_print(read_format);
if_print(disabled);
if_print(inherit);
if_print(pinned);
if_print(exclusive);
if_print(exclude_user);
if_print(exclude_kernel);
if_print(exclude_hv);
if_print(exclude_idle);
if_print(mmap);
if_print(comm);
if_print(freq);
if_print(inherit_stat);
if_print(enable_on_exec);
if_print(task);
if_print(watermark);
if_print(precise_ip);
if_print(mmap_data);
if_print(sample_id_all);
if_print(exclude_host);
if_print(exclude_guest);
if_print(__reserved_1);
if_print(wakeup_events);
if_print(bp_type);
if_print(branch_sample_type);
}
putchar('\n');
}
list_for_each_entry(pos, &session->evlist->entries, node)
perf_evsel__fprintf(pos, details, stdout);
perf_session__delete(session);
return 0;

View File

@ -230,14 +230,7 @@ static int perf_record__open(struct perf_record *rec)
struct perf_record_opts *opts = &rec->opts;
int rc = 0;
/*
* Set the evsel leader links before we configure attributes,
* since some might depend on this info.
*/
if (opts->group)
perf_evlist__set_leader(evlist);
perf_evlist__config_attrs(evlist, opts);
perf_evlist__config(evlist, opts);
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
@ -286,7 +279,7 @@ static int perf_record__open(struct perf_record *rec)
*/
opts->sample_id_all_missing = true;
if (!opts->sample_time && !opts->raw_samples && !time_needed)
attr->sample_type &= ~PERF_SAMPLE_TIME;
perf_evsel__reset_sample_bit(pos, TIME);
goto retry_sample_id;
}
@ -875,11 +868,10 @@ static int get_stack_size(char *str, unsigned long *_size)
}
#endif /* LIBUNWIND_SUPPORT */
static int
parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
int unset)
int record_parse_callchain_opt(const struct option *opt,
const char *arg, int unset)
{
struct perf_record *rec = (struct perf_record *)opt->value;
struct perf_record_opts *opts = opt->value;
char *tok, *name, *saveptr = NULL;
char *buf;
int ret = -1;
@ -905,7 +897,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
/* Framepointer style */
if (!strncmp(name, "fp", sizeof("fp"))) {
if (!strtok_r(NULL, ",", &saveptr)) {
rec->opts.call_graph = CALLCHAIN_FP;
opts->call_graph = CALLCHAIN_FP;
ret = 0;
} else
pr_err("callchain: No more arguments "
@ -918,20 +910,20 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
const unsigned long default_stack_dump_size = 8192;
ret = 0;
rec->opts.call_graph = CALLCHAIN_DWARF;
rec->opts.stack_dump_size = default_stack_dump_size;
opts->call_graph = CALLCHAIN_DWARF;
opts->stack_dump_size = default_stack_dump_size;
tok = strtok_r(NULL, ",", &saveptr);
if (tok) {
unsigned long size = 0;
ret = get_stack_size(tok, &size);
rec->opts.stack_dump_size = size;
opts->stack_dump_size = size;
}
if (!ret)
pr_debug("callchain: stack dump size %d\n",
rec->opts.stack_dump_size);
opts->stack_dump_size);
#endif /* LIBUNWIND_SUPPORT */
} else {
pr_err("callchain: Unknown -g option "
@ -944,7 +936,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
free(buf);
if (!ret)
pr_debug("callchain: type %d\n", rec->opts.call_graph);
pr_debug("callchain: type %d\n", opts->call_graph);
return ret;
}
@ -982,9 +974,9 @@ static struct perf_record record = {
#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
#ifdef LIBUNWIND_SUPPORT
static const char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
#else
static const char callchain_help[] = CALLCHAIN_HELP "[fp]";
const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
#endif
/*
@ -1028,9 +1020,9 @@ const struct option record_options[] = {
"number of mmap data pages"),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]",
callchain_help, &parse_callchain_opt,
"fp"),
OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
"mode[,dump_size]", record_callchain_help,
&record_parse_callchain_opt, "fp"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),

View File

@ -692,6 +692,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
setup_browser(true);
else {
use_browser = 0;
perf_hpp__column_enable(PERF_HPP__OVERHEAD);
perf_hpp__init();
}

View File

@ -153,7 +153,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
}
if (!perf_target__has_task(&target) &&
!perf_evsel__is_group_member(evsel)) {
perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}

View File

@ -596,7 +596,7 @@ static void *display_thread_tui(void *arg)
* via --uid.
*/
list_for_each_entry(pos, &top->evlist->entries, node)
pos->hists.uid_filter_str = top->target.uid_str;
pos->hists.uid_filter_str = top->record_opts.target.uid_str;
perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
&top->session->header.env);
@ -727,7 +727,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
}
if (!machine) {
pr_err("%u unprocessable samples recorded.",
pr_err("%u unprocessable samples recorded.\n",
top->session->hists.stats.nr_unprocessable_samples++);
return;
}
@ -894,35 +894,13 @@ static void perf_top__start_counters(struct perf_top *top)
{
struct perf_evsel *counter;
struct perf_evlist *evlist = top->evlist;
struct perf_record_opts *opts = &top->record_opts;
if (top->group)
perf_evlist__set_leader(evlist);
perf_evlist__config(evlist, opts);
list_for_each_entry(counter, &evlist->entries, node) {
struct perf_event_attr *attr = &counter->attr;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
if (top->freq) {
attr->sample_type |= PERF_SAMPLE_PERIOD;
attr->freq = 1;
attr->sample_freq = top->freq;
}
if (evlist->nr_entries > 1) {
attr->sample_type |= PERF_SAMPLE_ID;
attr->read_format |= PERF_FORMAT_ID;
}
if (perf_target__has_cpu(&top->target))
attr->sample_type |= PERF_SAMPLE_CPU;
if (symbol_conf.use_callchain)
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
attr->mmap = 1;
attr->comm = 1;
attr->inherit = top->inherit;
fallback_missing_features:
if (top->exclude_guest_missing)
attr->exclude_guest = attr->exclude_host = 0;
@ -996,7 +974,7 @@ static void perf_top__start_counters(struct perf_top *top)
}
}
if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, strerror(errno));
goto out_err;
@ -1016,7 +994,7 @@ static int perf_top__setup_sample_type(struct perf_top *top)
ui__error("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
} else if (callchain_param.mode != CHAIN_NONE) {
if (callchain_register_param(&callchain_param) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
@ -1028,6 +1006,7 @@ static int perf_top__setup_sample_type(struct perf_top *top)
static int __cmd_top(struct perf_top *top)
{
struct perf_record_opts *opts = &top->record_opts;
pthread_t thread;
int ret;
/*
@ -1042,7 +1021,7 @@ static int __cmd_top(struct perf_top *top)
if (ret)
goto out_delete;
if (perf_target__has_task(&top->target))
if (perf_target__has_task(&opts->target))
perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
perf_event__process,
&top->session->host_machine);
@ -1053,6 +1032,17 @@ static int __cmd_top(struct perf_top *top)
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
/*
* When perf is starting the traced process, all the events (apart from
* group members) have enable_on_exec=1 set, so don't spoil it by
* prematurely enabling them.
*
* XXX 'top' still doesn't start workloads like record, trace, but should,
* so leave the check here.
*/
if (!perf_target__none(&opts->target))
perf_evlist__enable(top->evlist);
/* Wait for a minimal set of events before starting the snapshot */
poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
@ -1093,116 +1083,56 @@ static int __cmd_top(struct perf_top *top)
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
struct perf_top *top = (struct perf_top *)opt->value;
char *tok, *tok2;
char *endptr;
/*
* --no-call-graph
*/
if (unset) {
top->dont_use_callchains = true;
if (unset)
return 0;
}
symbol_conf.use_callchain = true;
if (!arg)
return 0;
tok = strtok((char *)arg, ",");
if (!tok)
return -1;
/* get the output mode */
if (!strncmp(tok, "graph", strlen(arg)))
callchain_param.mode = CHAIN_GRAPH_ABS;
else if (!strncmp(tok, "flat", strlen(arg)))
callchain_param.mode = CHAIN_FLAT;
else if (!strncmp(tok, "fractal", strlen(arg)))
callchain_param.mode = CHAIN_GRAPH_REL;
else if (!strncmp(tok, "none", strlen(arg))) {
callchain_param.mode = CHAIN_NONE;
symbol_conf.use_callchain = false;
return 0;
} else
return -1;
/* get the min percentage */
tok = strtok(NULL, ",");
if (!tok)
goto setup;
callchain_param.min_percent = strtod(tok, &endptr);
if (tok == endptr)
return -1;
/* get the print limit */
tok2 = strtok(NULL, ",");
if (!tok2)
goto setup;
if (tok2[0] != 'c') {
callchain_param.print_limit = strtod(tok2, &endptr);
tok2 = strtok(NULL, ",");
if (!tok2)
goto setup;
}
/* get the call chain order */
if (!strcmp(tok2, "caller"))
callchain_param.order = ORDER_CALLER;
else if (!strcmp(tok2, "callee"))
callchain_param.order = ORDER_CALLEE;
else
return -1;
setup:
if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain params\n");
return -1;
}
return 0;
return record_parse_callchain_opt(opt, arg, unset);
}
int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_evsel *pos;
int status;
char errbuf[BUFSIZ];
struct perf_top top = {
.count_filter = 5,
.delay_secs = 2,
.freq = 4000, /* 4 KHz */
.mmap_pages = 128,
.sym_pcnt_filter = 5,
.target = {
.uses_mmap = true,
.record_opts = {
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
.freq = 4000, /* 4 KHz */
.target = {
.uses_mmap = true,
},
},
.sym_pcnt_filter = 5,
};
char callchain_default_opt[] = "fractal,0.5,callee";
struct perf_record_opts *opts = &top.record_opts;
struct perf_target *target = &opts->target;
const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_INTEGER('c', "count", &top.default_interval,
"event period to sample"),
OPT_STRING('p', "pid", &top.target.pid, "pid",
OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
OPT_STRING('p', "pid", &target->pid, "pid",
"profile events on existing process id"),
OPT_STRING('t', "tid", &top.target.tid, "tid",
OPT_STRING('t', "tid", &target->tid, "tid",
"profile events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide,
OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu",
OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
"list of cpus to monitor"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages,
"number of mmap data pages"),
OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
@ -1211,16 +1141,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
OPT_BOOLEAN('g', "group", &top.group,
OPT_BOOLEAN('g', "group", &opts->group,
"put the counters into a counter group"),
OPT_BOOLEAN('i', "inherit", &top.inherit,
"child tasks inherit counters"),
OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
"child tasks do not inherit counters"),
OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
"symbol to annotate"),
OPT_BOOLEAN('z', "zero", &top.zero,
"zero history across updates"),
OPT_INTEGER('F', "freq", &top.freq,
"profile at this frequency"),
OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
OPT_INTEGER('E', "entries", &top.print_entries,
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
@ -1233,10 +1161,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
"Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
"Default: fractal,0.5,callee", &parse_callchain_opt,
callchain_default_opt),
OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
"mode[,dump_size]", record_callchain_help,
&parse_callchain_opt, "fp"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
@ -1251,7 +1178,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"),
OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
OPT_END()
};
const char * const top_usage[] = {
@ -1281,27 +1208,27 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
setup_browser(false);
status = perf_target__validate(&top.target);
status = perf_target__validate(target);
if (status) {
perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
perf_target__strerror(target, status, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
}
status = perf_target__parse_uid(&top.target);
status = perf_target__parse_uid(target);
if (status) {
int saved_errno = errno;
perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
perf_target__strerror(target, status, errbuf, BUFSIZ);
ui__error("%s", errbuf);
status = -saved_errno;
goto out_delete_evlist;
}
if (perf_target__none(&top.target))
top.target.system_wide = true;
if (perf_target__none(target))
target->system_wide = true;
if (perf_evlist__create_maps(top.evlist, &top.target) < 0)
if (perf_evlist__create_maps(top.evlist, target) < 0)
usage_with_options(top_usage, options);
if (!top.evlist->nr_entries &&
@ -1315,24 +1242,22 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (top.delay_secs < 1)
top.delay_secs = 1;
if (opts->user_interval != ULLONG_MAX)
opts->default_interval = opts->user_interval;
if (opts->user_freq != UINT_MAX)
opts->freq = opts->user_freq;
/*
* User specified count overrides default frequency.
*/
if (top.default_interval)
top.freq = 0;
else if (top.freq) {
top.default_interval = top.freq;
if (opts->default_interval)
opts->freq = 0;
else if (opts->freq) {
opts->default_interval = opts->freq;
} else {
ui__error("frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
list_for_each_entry(pos, &top.evlist->entries, node) {
/*
* Fill in the ones not specifically initialized via -c:
*/
if (!pos->attr.sample_period)
pos->attr.sample_period = top.default_interval;
status = -EINVAL;
goto out_delete_evlist;
}
top.sym_evsel = perf_evlist__first(top.evlist);

View File

@ -455,7 +455,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_delete_evlist;
}
perf_evlist__config_attrs(evlist, &trace->opts);
perf_evlist__config(evlist, &trace->opts);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);

View File

@ -7,7 +7,7 @@ size=96
config=0
sample_period=4000
sample_type=263
read_format=7
read_format=0
disabled=1
inherit=1
pinned=0

View File

@ -6,12 +6,14 @@ args = --group -e cycles,instructions kill >/dev/null 2>&1
fd=1
group_fd=-1
sample_type=327
read_format=4
[event-2:base-record]
fd=2
group_fd=1
config=1
sample_type=327
read_format=4
mmap=0
comm=0
enable_on_exec=0

View File

@ -6,6 +6,7 @@ args = -e '{cycles,instructions}' kill >/tmp/krava 2>&1
fd=1
group_fd=-1
sample_type=327
read_format=4
[event-2:base-record]
fd=2
@ -13,6 +14,7 @@ group_fd=1
type=0
config=1
sample_type=327
read_format=4
mmap=0
comm=0
enable_on_exec=0

View File

@ -22,36 +22,16 @@ int test__basic_mmap(void)
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evlist *evlist;
struct perf_event_attr attr = {
.type = PERF_TYPE_TRACEPOINT,
.read_format = PERF_FORMAT_ID,
.sample_type = PERF_SAMPLE_ID,
.watermark = 0,
};
cpu_set_t cpu_set;
const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
"getpgid", };
pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
(void*)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
int ids[nsyscalls];
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
for (i = 0; i < nsyscalls; ++i) {
char name[64];
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
ids[i] = trace_event__id(name);
if (ids[i] < 0) {
pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
return -1;
}
nr_events[i] = 0;
expected_nr_events[i] = random() % 257;
}
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
pr_debug("thread_map__new\n");
@ -79,18 +59,19 @@ int test__basic_mmap(void)
goto out_free_cpus;
}
/* anonymous union fields, can't be initialized above */
attr.wakeup_events = 1;
attr.sample_period = 1;
for (i = 0; i < nsyscalls; ++i) {
attr.config = ids[i];
evsels[i] = perf_evsel__new(&attr, i);
char name[64];
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
evsels[i] = perf_evsel__newtp("syscalls", name, i);
if (evsels[i] == NULL) {
pr_debug("perf_evsel__new\n");
goto out_free_evlist;
}
evsels[i]->attr.wakeup_events = 1;
perf_evsel__set_sample_id(evsels[i]);
perf_evlist__add(evlist, evsels[i]);
if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
@ -99,6 +80,9 @@ int test__basic_mmap(void)
strerror(errno));
goto out_close_fd;
}
nr_events[i] = 0;
expected_nr_events[i] = 1 + rand() % 127;
}
if (perf_evlist__mmap(evlist, 128, true) < 0) {
@ -128,6 +112,7 @@ int test__basic_mmap(void)
goto out_munmap;
}
err = -1;
evsel = perf_evlist__id2evsel(evlist, sample.id);
if (evsel == NULL) {
pr_debug("event with id %" PRIu64
@ -137,16 +122,17 @@ int test__basic_mmap(void)
nr_events[evsel->idx]++;
}
err = 0;
list_for_each_entry(evsel, &evlist->entries, node) {
if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
pr_debug("expected %d %s events, got %d\n",
expected_nr_events[evsel->idx],
perf_evsel__name(evsel), nr_events[evsel->idx]);
err = -1;
goto out_munmap;
}
}
err = 0;
out_munmap:
perf_evlist__munmap(evlist);
out_close_fd:

View File

@ -7,20 +7,12 @@
int test__open_syscall_event_on_all_cpus(void)
{
int err = -1, fd, cpu;
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evsel *evsel;
struct perf_event_attr attr;
unsigned int nr_open_calls = 111, i;
cpu_set_t cpu_set;
int id = trace_event__id("sys_enter_open");
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
if (id < 0) {
pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
return -1;
}
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
@ -32,15 +24,11 @@ int test__open_syscall_event_on_all_cpus(void)
goto out_thread_map_delete;
}
CPU_ZERO(&cpu_set);
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_TRACEPOINT;
attr.config = id;
evsel = perf_evsel__new(&attr, 0);
evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
goto out_thread_map_delete;
}

View File

@ -6,29 +6,18 @@
int test__open_syscall_event(void)
{
int err = -1, fd;
struct thread_map *threads;
struct perf_evsel *evsel;
struct perf_event_attr attr;
unsigned int nr_open_calls = 111, i;
int id = trace_event__id("sys_enter_open");
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
if (id < 0) {
pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
return -1;
}
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
}
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_TRACEPOINT;
attr.config = id;
evsel = perf_evsel__new(&attr, 0);
evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
goto out_thread_map_delete;
}

View File

@ -521,7 +521,7 @@ static int test__group1(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
/* cycles:upp */
evsel = perf_evsel__next(evsel);
@ -557,7 +557,7 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
/* cache-references + :u modifier */
evsel = perf_evsel__next(evsel);
@ -583,7 +583,7 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
return 0;
}
@ -606,7 +606,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong group name",
!strcmp(leader->group_name, "group1"));
@ -636,7 +636,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong group name",
!strcmp(leader->group_name, "group2"));
@ -663,7 +663,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
return 0;
}
@ -687,7 +687,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
/* instructions:kp + p */
evsel = perf_evsel__next(evsel);
@ -724,7 +724,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
/* instructions + G */
evsel = perf_evsel__next(evsel);
@ -751,7 +751,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
/* instructions:G */
evsel = perf_evsel__next(evsel);
@ -777,7 +777,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
return 0;
}

View File

@ -103,10 +103,10 @@ int test__PERF_RECORD(void)
* Config the evsels, setting attr->comm on the first one, etc.
*/
evsel = perf_evlist__first(evlist);
evsel->attr.sample_type |= PERF_SAMPLE_CPU;
evsel->attr.sample_type |= PERF_SAMPLE_TID;
evsel->attr.sample_type |= PERF_SAMPLE_TIME;
perf_evlist__config_attrs(evlist, &opts);
perf_evsel__set_sample_bit(evsel, CPU);
perf_evsel__set_sample_bit(evsel, TID);
perf_evsel__set_sample_bit(evsel, TIME);
perf_evlist__config(evlist, &opts);
err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
if (err < 0) {

View File

@ -16,7 +16,4 @@ int test__attr(void);
int test__dso_data(void);
int test__parse_events(void);
/* Util */
int trace_event__id(const char *evname);
#endif /* TESTS_H */

View File

@ -1,30 +0,0 @@
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "tests.h"
#include "debugfs.h"
int trace_event__id(const char *evname)
{
char *filename;
int err = -1, fd;
if (asprintf(&filename,
"%s/syscalls/%s/id",
tracing_events_path, evname) < 0)
return -1;
fd = open(filename, O_RDONLY);
if (fd >= 0) {
char id[16];
if (read(fd, id, sizeof(id)) > 0)
err = atoi(id);
close(fd);
}
free(filename);
return err;
}

View File

@ -587,6 +587,8 @@ HPP__COLOR_FN(overhead_guest_us, period_guest_us)
void hist_browser__init_hpp(void)
{
perf_hpp__column_enable(PERF_HPP__OVERHEAD);
perf_hpp__init();
perf_hpp__format[PERF_HPP__OVERHEAD].color =
@ -607,12 +609,13 @@ static int hist_browser__show_entry(struct hist_browser *browser,
{
char s[256];
double percent;
int i, printed = 0;
int printed = 0;
int width = browser->b.width;
char folded_sign = ' ';
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
off_t row_offset = entry->row_offset;
bool first = true;
struct perf_hpp_fmt *fmt;
if (current_entry) {
browser->he_selection = entry;
@ -629,12 +632,11 @@ static int hist_browser__show_entry(struct hist_browser *browser,
.buf = s,
.size = sizeof(s),
};
int i = 0;
ui_browser__gotorc(&browser->b, row, 0);
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
if (!perf_hpp__format[i].cond)
continue;
perf_hpp__for_each_format(fmt) {
if (!first) {
slsmg_printf(" ");
@ -642,14 +644,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
first = false;
if (perf_hpp__format[i].color) {
if (fmt->color) {
hpp.ptr = &percent;
/* It will set percent for us. See HPP__COLOR_FN above. */
width -= perf_hpp__format[i].color(&hpp, entry);
width -= fmt->color(&hpp, entry);
ui_browser__set_percent_color(&browser->b, percent, current_entry);
if (i == PERF_HPP__OVERHEAD && symbol_conf.use_callchain) {
if (!i && symbol_conf.use_callchain) {
slsmg_printf("%c ", folded_sign);
width -= 2;
}
@ -659,9 +661,11 @@ static int hist_browser__show_entry(struct hist_browser *browser,
if (!current_entry || !browser->b.navkeypressed)
ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
} else {
width -= perf_hpp__format[i].entry(&hpp, entry);
width -= fmt->entry(&hpp, entry);
slsmg_printf("%s", s);
}
i++;
}
/* The scroll bar isn't being used */

View File

@ -74,6 +74,8 @@ HPP__COLOR_FN(overhead_guest_us, period_guest_us)
void perf_gtk__init_hpp(void)
{
perf_hpp__column_enable(PERF_HPP__OVERHEAD);
perf_hpp__init();
perf_hpp__format[PERF_HPP__OVERHEAD].color =
@ -90,13 +92,14 @@ void perf_gtk__init_hpp(void)
static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
{
struct perf_hpp_fmt *fmt;
GType col_types[MAX_COLUMNS];
GtkCellRenderer *renderer;
struct sort_entry *se;
GtkListStore *store;
struct rb_node *nd;
GtkWidget *view;
int i, col_idx;
int col_idx;
int nr_cols;
char s[512];
@ -107,12 +110,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
nr_cols = 0;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
if (!perf_hpp__format[i].cond)
continue;
perf_hpp__for_each_format(fmt)
col_types[nr_cols++] = G_TYPE_STRING;
}
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@ -129,12 +128,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
col_idx = 0;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
if (!perf_hpp__format[i].cond)
continue;
perf_hpp__format[i].header(&hpp);
perf_hpp__for_each_format(fmt) {
fmt->header(&hpp);
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, s,
renderer, "markup",
@ -166,14 +161,11 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
col_idx = 0;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
if (!perf_hpp__format[i].cond)
continue;
if (perf_hpp__format[i].color)
perf_hpp__format[i].color(&hpp, h);
perf_hpp__for_each_format(fmt) {
if (fmt->color)
fmt->color(&hpp, h);
else
perf_hpp__format[i].entry(&hpp, h);
fmt->entry(&hpp, h);
gtk_list_store_set(store, &iter, col_idx++, s, -1);
}

View File

@ -24,17 +24,7 @@ static void gtk_helpline_push(const char *msg)
pgctx->statbar_ctx_id, msg);
}
static struct ui_helpline gtk_helpline_fns = {
.pop = gtk_helpline_pop,
.push = gtk_helpline_push,
};
void perf_gtk__init_helpline(void)
{
helpline_fns = &gtk_helpline_fns;
}
int perf_gtk__show_helpline(const char *fmt, va_list ap)
static int gtk_helpline_show(const char *fmt, va_list ap)
{
int ret;
char *ptr;
@ -54,3 +44,14 @@ int perf_gtk__show_helpline(const char *fmt, va_list ap)
return ret;
}
static struct ui_helpline gtk_helpline_fns = {
.pop = gtk_helpline_pop,
.push = gtk_helpline_push,
.show = gtk_helpline_show,
};
void perf_gtk__init_helpline(void)
{
helpline_fns = &gtk_helpline_fns;
}

View File

@ -16,9 +16,16 @@ static void nop_helpline__push(const char *msg __maybe_unused)
{
}
static int nop_helpline__show(const char *fmt __maybe_unused,
va_list ap __maybe_unused)
{
return 0;
}
static struct ui_helpline default_helpline_fns = {
.pop = nop_helpline__pop,
.push = nop_helpline__push,
.show = nop_helpline__show,
};
struct ui_helpline *helpline_fns = &default_helpline_fns;
@ -59,3 +66,8 @@ void ui_helpline__puts(const char *msg)
ui_helpline__pop();
ui_helpline__push(msg);
}
int ui_helpline__vshow(const char *fmt, va_list ap)
{
return helpline_fns->show(fmt, ap);
}

View File

@ -9,6 +9,7 @@
struct ui_helpline {
void (*pop)(void);
void (*push)(const char *msg);
int (*show)(const char *fmt, va_list ap);
};
extern struct ui_helpline *helpline_fns;
@ -20,28 +21,9 @@ void ui_helpline__push(const char *msg);
void ui_helpline__vpush(const char *fmt, va_list ap);
void ui_helpline__fpush(const char *fmt, ...);
void ui_helpline__puts(const char *msg);
int ui_helpline__vshow(const char *fmt, va_list ap);
extern char ui_helpline__current[512];
#ifdef NEWT_SUPPORT
extern char ui_helpline__last_msg[];
int ui_helpline__show_help(const char *format, va_list ap);
#else
static inline int ui_helpline__show_help(const char *format __maybe_unused,
va_list ap __maybe_unused)
{
return 0;
}
#endif /* NEWT_SUPPORT */
#ifdef GTK2_SUPPORT
int perf_gtk__show_helpline(const char *format, va_list ap);
#else
static inline int perf_gtk__show_helpline(const char *format __maybe_unused,
va_list ap __maybe_unused)
{
return 0;
}
#endif /* GTK2_SUPPORT */
#endif /* _PERF_UI_HELPLINE_H_ */

View File

@ -268,14 +268,18 @@ static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused)
static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s";
char buf[32] = " ";
double diff;
double diff = 0.0;
if (he->diff.computed)
diff = he->diff.period_ratio_delta;
else
diff = perf_diff__compute_delta(he);
if (pair) {
if (he->diff.computed)
diff = he->diff.period_ratio_delta;
else
diff = perf_diff__compute_delta(he, pair);
} else
diff = perf_diff__period_percent(he, he->stat.period);
if (fabs(diff) >= 0.01)
scnprintf(buf, sizeof(buf), "%+4.2F%%", diff);
@ -297,14 +301,17 @@ static int hpp__width_ratio(struct perf_hpp *hpp __maybe_unused)
static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
char buf[32] = " ";
double ratio;
double ratio = 0.0;
if (he->diff.computed)
ratio = he->diff.period_ratio;
else
ratio = perf_diff__compute_ratio(he);
if (pair) {
if (he->diff.computed)
ratio = he->diff.period_ratio;
else
ratio = perf_diff__compute_ratio(he, pair);
}
if (ratio > 0.0)
scnprintf(buf, sizeof(buf), "%+14.6F", ratio);
@ -326,14 +333,17 @@ static int hpp__width_wdiff(struct perf_hpp *hpp __maybe_unused)
static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
char buf[32] = " ";
s64 wdiff;
s64 wdiff = 0;
if (he->diff.computed)
wdiff = he->diff.wdiff;
else
wdiff = perf_diff__compute_wdiff(he);
if (pair) {
if (he->diff.computed)
wdiff = he->diff.wdiff;
else
wdiff = perf_diff__compute_wdiff(he, pair);
}
if (wdiff != 0)
scnprintf(buf, sizeof(buf), "%14ld", wdiff);
@ -341,30 +351,6 @@ static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
return scnprintf(hpp->buf, hpp->size, fmt, buf);
}
static int hpp__header_displ(struct perf_hpp *hpp)
{
return scnprintf(hpp->buf, hpp->size, "Displ.");
}
static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused)
{
return 6;
}
static int hpp__entry_displ(struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
long displacement = pair ? pair->position - he->position : 0;
const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s";
char buf[32] = " ";
if (displacement)
scnprintf(buf, sizeof(buf), "%+4ld", displacement);
return scnprintf(hpp->buf, hpp->size, fmt, buf);
}
static int hpp__header_formula(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%70s";
@ -379,67 +365,80 @@ static int hpp__width_formula(struct perf_hpp *hpp __maybe_unused)
static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
const char *fmt = symbol_conf.field_sep ? "%s" : "%-70s";
char buf[96] = " ";
perf_diff__formula(buf, sizeof(buf), he);
if (pair)
perf_diff__formula(he, pair, buf, sizeof(buf));
return scnprintf(hpp->buf, hpp->size, fmt, buf);
}
#define HPP__COLOR_PRINT_FNS(_name) \
.header = hpp__header_ ## _name, \
.width = hpp__width_ ## _name, \
.color = hpp__color_ ## _name, \
.entry = hpp__entry_ ## _name
#define HPP__COLOR_PRINT_FNS(_name) \
{ \
.header = hpp__header_ ## _name, \
.width = hpp__width_ ## _name, \
.color = hpp__color_ ## _name, \
.entry = hpp__entry_ ## _name \
}
#define HPP__PRINT_FNS(_name) \
.header = hpp__header_ ## _name, \
.width = hpp__width_ ## _name, \
.entry = hpp__entry_ ## _name
#define HPP__PRINT_FNS(_name) \
{ \
.header = hpp__header_ ## _name, \
.width = hpp__width_ ## _name, \
.entry = hpp__entry_ ## _name \
}
struct perf_hpp_fmt perf_hpp__format[] = {
{ .cond = false, HPP__COLOR_PRINT_FNS(baseline) },
{ .cond = true, HPP__COLOR_PRINT_FNS(overhead) },
{ .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) },
{ .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) },
{ .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) },
{ .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) },
{ .cond = false, HPP__PRINT_FNS(samples) },
{ .cond = false, HPP__PRINT_FNS(period) },
{ .cond = false, HPP__PRINT_FNS(period_baseline) },
{ .cond = false, HPP__PRINT_FNS(delta) },
{ .cond = false, HPP__PRINT_FNS(ratio) },
{ .cond = false, HPP__PRINT_FNS(wdiff) },
{ .cond = false, HPP__PRINT_FNS(displ) },
{ .cond = false, HPP__PRINT_FNS(formula) }
HPP__COLOR_PRINT_FNS(baseline),
HPP__COLOR_PRINT_FNS(overhead),
HPP__COLOR_PRINT_FNS(overhead_sys),
HPP__COLOR_PRINT_FNS(overhead_us),
HPP__COLOR_PRINT_FNS(overhead_guest_sys),
HPP__COLOR_PRINT_FNS(overhead_guest_us),
HPP__PRINT_FNS(samples),
HPP__PRINT_FNS(period),
HPP__PRINT_FNS(period_baseline),
HPP__PRINT_FNS(delta),
HPP__PRINT_FNS(ratio),
HPP__PRINT_FNS(wdiff),
HPP__PRINT_FNS(formula)
};
LIST_HEAD(perf_hpp__list);
#undef HPP__COLOR_PRINT_FNS
#undef HPP__PRINT_FNS
void perf_hpp__init(void)
{
if (symbol_conf.show_cpu_utilization) {
perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true;
perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true;
perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);
if (perf_guest) {
perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true;
perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true;
perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
}
}
if (symbol_conf.show_nr_samples)
perf_hpp__format[PERF_HPP__SAMPLES].cond = true;
perf_hpp__column_enable(PERF_HPP__SAMPLES);
if (symbol_conf.show_total_period)
perf_hpp__format[PERF_HPP__PERIOD].cond = true;
perf_hpp__column_enable(PERF_HPP__PERIOD);
}
void perf_hpp__column_enable(unsigned col, bool enable)
void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
list_add_tail(&format->list, &perf_hpp__list);
}
void perf_hpp__column_enable(unsigned col)
{
BUG_ON(col >= PERF_HPP__MAX_INDEX);
perf_hpp__format[col].cond = enable;
perf_hpp__column_register(&perf_hpp__format[col]);
}
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
@ -452,27 +451,29 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
bool color)
{
const char *sep = symbol_conf.field_sep;
struct perf_hpp_fmt *fmt;
char *start = hpp->buf;
int i, ret;
int ret;
bool first = true;
if (symbol_conf.exclude_other && !he->parent)
return 0;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
if (!perf_hpp__format[i].cond)
continue;
perf_hpp__for_each_format(fmt) {
/*
* If there's no field_sep, we still need
* to display initial ' '.
*/
if (!sep || !first) {
ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
advance_hpp(hpp, ret);
} else
first = false;
}
if (color && perf_hpp__format[i].color)
ret = perf_hpp__format[i].color(hpp, he);
if (color && fmt->color)
ret = fmt->color(hpp, he);
else
ret = perf_hpp__format[i].entry(hpp, he);
ret = fmt->entry(hpp, he);
advance_hpp(hpp, ret);
}
@ -504,16 +505,15 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
*/
unsigned int hists__sort_list_width(struct hists *hists)
{
struct perf_hpp_fmt *fmt;
struct sort_entry *se;
int i, ret = 0;
int i = 0, ret = 0;
for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
if (!perf_hpp__format[i].cond)
continue;
perf_hpp__for_each_format(fmt) {
if (i)
ret += 2;
ret += perf_hpp__format[i].width(NULL);
ret += fmt->width(NULL);
}
list_for_each_entry(se, &hist_entry__sort_list, list)
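
The hunks above drop the per-column 'cond' flag from perf_hpp__format[] and instead register enabled columns on perf_hpp__list, which callers walk with perf_hpp__for_each_format(). A rough standalone sketch of that registration pattern; struct column, column_register() and the sample values are simplified stand-ins, not perf's types:

#include <stdio.h>

/* Simplified stand-ins for struct perf_hpp_fmt and perf_hpp__list. */
struct column {
	const char *header;
	int (*entry)(void);		/* formats one cell */
	struct column *next;		/* perf chains these with struct list_head */
};

static struct column *columns_head, **columns_tail = &columns_head;

/* Counterpart of perf_hpp__column_register(): append to the active list. */
static void column_register(struct column *col)
{
	col->next = NULL;
	*columns_tail = col;
	columns_tail = &col->next;
}

static int overhead_entry(void) { return printf("%12s", "12.34%"); }
static int period_entry(void)   { return printf("%12s", "1024"); }

static struct column overhead = { "Overhead", overhead_entry, NULL };
static struct column period   = { "Period",   period_entry,   NULL };

int main(void)
{
	struct column *col;

	/* Counterpart of perf_hpp__column_enable(PERF_HPP__OVERHEAD) etc. */
	column_register(&overhead);
	column_register(&period);

	/* Counterpart of perf_hpp__for_each_format(fmt): no cond tests needed. */
	for (col = columns_head; col; col = col->next)
		printf("%12s", col->header);
	putchar('\n');
	for (col = columns_head; col; col = col->next)
		col->entry();
	putchar('\n');
	return 0;
}

Only registered columns are visited, so the printing loops need no per-column conditionals -- the same effect perf_hpp__init() and setup_browser() get by calling perf_hpp__column_enable() for the columns they want shown.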

View File

@ -30,6 +30,7 @@ void setup_browser(bool fallback_to_pager)
if (fallback_to_pager)
setup_pager();
perf_hpp__column_enable(PERF_HPP__OVERHEAD);
perf_hpp__init();
break;
}

View File

@ -335,13 +335,14 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, FILE *fp)
{
struct perf_hpp_fmt *fmt;
struct sort_entry *se;
struct rb_node *nd;
size_t ret = 0;
unsigned int width;
const char *sep = symbol_conf.field_sep;
const char *col_width = symbol_conf.col_width_list_str;
int idx, nr_rows = 0;
int nr_rows = 0;
char bf[96];
struct perf_hpp dummy_hpp = {
.buf = bf,
@ -355,16 +356,14 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
goto print_entries;
fprintf(fp, "# ");
for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
if (!perf_hpp__format[idx].cond)
continue;
perf_hpp__for_each_format(fmt) {
if (!first)
fprintf(fp, "%s", sep ?: " ");
else
first = false;
perf_hpp__format[idx].header(&dummy_hpp);
fmt->header(&dummy_hpp);
fprintf(fp, "%s", bf);
}
@ -400,18 +399,16 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
first = true;
fprintf(fp, "# ");
for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
unsigned int i;
if (!perf_hpp__format[idx].cond)
continue;
perf_hpp__for_each_format(fmt) {
unsigned int i;
if (!first)
fprintf(fp, "%s", sep ?: " ");
else
first = false;
width = perf_hpp__format[idx].width(&dummy_hpp);
width = fmt->width(&dummy_hpp);
for (i = 0; i < width; i++)
fprintf(fp, ".");
}

View File

@ -8,6 +8,8 @@
#include "../ui.h"
#include "../libslang.h"
char ui_helpline__last_msg[1024];
static void tui_helpline__pop(void)
{
}
@ -23,20 +25,7 @@ static void tui_helpline__push(const char *msg)
strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
}
struct ui_helpline tui_helpline_fns = {
.pop = tui_helpline__pop,
.push = tui_helpline__push,
};
void ui_helpline__init(void)
{
helpline_fns = &tui_helpline_fns;
ui_helpline__puts(" ");
}
char ui_helpline__last_msg[1024];
int ui_helpline__show_help(const char *format, va_list ap)
static int tui_helpline__show(const char *format, va_list ap)
{
int ret;
static int backlog;
@ -55,3 +44,15 @@ int ui_helpline__show_help(const char *format, va_list ap)
return ret;
}
struct ui_helpline tui_helpline_fns = {
.pop = tui_helpline__pop,
.push = tui_helpline__push,
.show = tui_helpline__show,
};
void ui_helpline__init(void)
{
helpline_fns = &tui_helpline_fns;
ui_helpline__puts(" ");
}

View File

@ -52,6 +52,16 @@ int ui__warning(const char *format, ...)
return ret;
}
int ui__error_paranoid(void)
{
return ui__error("Permission error - are you root?\n"
"Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
" -1 - Not paranoid at all\n"
" 0 - Disallow raw tracepoint access for unpriv\n"
" 1 - Disallow cpu events for unpriv\n"
" 2 - Disallow kernel profiling for unpriv\n");
}
/**
* perf_error__register - Register error logging functions

View File

@ -143,4 +143,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
cursor->curr = cursor->curr->next;
cursor->pos++;
}
struct option;
int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
extern const char record_callchain_help[];
#endif /* __PERF_CALLCHAIN_H */

View File

@ -23,10 +23,8 @@ int eprintf(int level, const char *fmt, ...)
if (verbose >= level) {
va_start(args, fmt);
if (use_browser == 1)
ret = ui_helpline__show_help(fmt, args);
else if (use_browser == 2)
ret = perf_gtk__show_helpline(fmt, args);
if (use_browser >= 1)
ui_helpline__vshow(fmt, args);
else
ret = vfprintf(stderr, fmt, args);
va_end(args);
@ -49,28 +47,6 @@ int dump_printf(const char *fmt, ...)
return ret;
}
#if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT)
int ui__warning(const char *format, ...)
{
va_list args;
va_start(args, format);
vfprintf(stderr, format, args);
va_end(args);
return 0;
}
#endif
int ui__error_paranoid(void)
{
return ui__error("Permission error - are you root?\n"
"Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
" -1 - Not paranoid at all\n"
" 0 - Disallow raw tracepoint access for unpriv\n"
" 1 - Disallow cpu events for unpriv\n"
" 2 - Disallow kernel profiling for unpriv\n");
}
void trace_event(union perf_event *event)
{
unsigned char *raw_event = (void *)event;

View File

@ -5,6 +5,8 @@
#include <stdbool.h>
#include "event.h"
#include "../ui/helpline.h"
#include "../ui/progress.h"
#include "../ui/util.h"
extern int verbose;
extern bool quiet, dump_trace;
@ -12,38 +14,7 @@ extern bool quiet, dump_trace;
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
void trace_event(union perf_event *event);
struct ui_progress;
struct perf_error_ops;
#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT)
#include "../ui/progress.h"
int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
#include "../ui/util.h"
#else
static inline void ui_progress__update(u64 curr __maybe_unused,
u64 total __maybe_unused,
const char *title __maybe_unused) {}
static inline void ui_progress__finish(void) {}
#define ui__error(format, arg...) ui__warning(format, ##arg)
static inline int
perf_error__register(struct perf_error_ops *eops __maybe_unused)
{
return 0;
}
static inline int
perf_error__unregister(struct perf_error_ops *eops __maybe_unused)
{
return 0;
}
#endif /* NEWT_SUPPORT || GTK2_SUPPORT */
int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
int ui__error_paranoid(void);

View File

@ -539,13 +539,13 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name)
}
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
bool with_hits)
bool (skip)(struct dso *dso, int parm), int parm)
{
struct dso *pos;
size_t ret = 0;
list_for_each_entry(pos, head, node) {
if (with_hits && !pos->hit)
if (skip && skip(pos, parm))
continue;
ret += dso__fprintf_buildid(pos, fp);
ret += fprintf(fp, " %s\n", pos->long_name);
@ -583,7 +583,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
if (dso->short_name != dso->long_name)
ret += fprintf(fp, "%s, ", dso->long_name);
ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
dso->loaded ? "" : "NOT ");
dso__loaded(dso, type) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {

View File

@ -138,7 +138,7 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name);
bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
bool with_hits);
bool (skip)(struct dso *dso, int parm), int parm);
size_t __dsos__fprintf(struct list_head *head, FILE *fp);
size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
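
The with_hits boolean in __dsos__fprintf_buildid() becomes a (skip, parm) callback pair, so each caller supplies its own filtering policy. A hedged standalone sketch of a callback with that shape; struct dso here is cut down to the 'hit' flag the old with_hits test read, and skip_unhit() plus the sample data are illustrative, not what any particular perf command installs:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for struct dso: only the field the old with_hits test used. */
struct dso {
	const char *long_name;
	bool hit;
};

/*
 * Same shape as the new 'bool (skip)(struct dso *dso, int parm)' parameter:
 * return true to leave the DSO out of the listing.  Here parm selects whether
 * unhit DSOs are skipped, mimicking the old with_hits behaviour.
 */
static bool skip_unhit(struct dso *dso, int parm)
{
	return parm && !dso->hit;
}

static void fprintf_buildid(struct dso *dsos, int n, FILE *fp,
			    bool (skip)(struct dso *dso, int parm), int parm)
{
	for (int i = 0; i < n; i++) {
		if (skip && skip(&dsos[i], parm))
			continue;
		fprintf(fp, "%s\n", dsos[i].long_name);
	}
}

int main(void)
{
	struct dso dsos[] = {
		{ "/usr/lib/libc.so.6", true  },
		{ "/usr/bin/sleep",     false },
	};

	fprintf_buildid(dsos, 2, stdout, skip_unhit, 1);	/* only hit DSOs */
	fprintf_buildid(dsos, 2, stdout, NULL, 0);		/* everything */
	return 0;
}

Passing NULL for skip keeps the old print-everything behaviour, since the walk only consults the callback when one is supplied.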

View File

@ -49,10 +49,16 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
return evlist;
}
void perf_evlist__config_attrs(struct perf_evlist *evlist,
struct perf_record_opts *opts)
void perf_evlist__config(struct perf_evlist *evlist,
struct perf_record_opts *opts)
{
struct perf_evsel *evsel;
/*
* Set the evsel leader links before we configure attributes,
* since some might depend on this info.
*/
if (opts->group)
perf_evlist__set_leader(evlist);
if (evlist->cpus->map[0] < 0)
opts->no_inherit = true;
@ -61,7 +67,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
perf_evsel__config(evsel, opts);
if (evlist->nr_entries > 1)
evsel->attr.sample_type |= PERF_SAMPLE_ID;
perf_evsel__set_sample_id(evsel);
}
}
@ -111,7 +117,6 @@ void __perf_evlist__set_leader(struct list_head *list)
struct perf_evsel *evsel, *leader;
leader = list_entry(list->next, struct perf_evsel, node);
leader->leader = NULL;
list_for_each_entry(evsel, list, node) {
if (evsel != leader)
@ -222,7 +227,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
if (perf_evsel__is_group_member(pos))
if (!perf_evsel__is_group_leader(pos))
continue;
for (thread = 0; thread < evlist->threads->nr; thread++)
ioctl(FD(pos, cpu, thread),
@ -238,7 +243,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
if (perf_evsel__is_group_member(pos))
if (!perf_evsel__is_group_leader(pos))
continue;
for (thread = 0; thread < evlist->threads->nr; thread++)
ioctl(FD(pos, cpu, thread),

View File

@ -76,8 +76,8 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__config_attrs(struct perf_evlist *evlist,
struct perf_record_opts *opts);
void perf_evlist__config(struct perf_evlist *evlist,
struct perf_record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
struct perf_record_opts *opts,

View File

@ -50,11 +50,36 @@ void hists__init(struct hists *hists)
pthread_mutex_init(&hists->lock, NULL);
}
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
enum perf_event_sample_format bit)
{
if (!(evsel->attr.sample_type & bit)) {
evsel->attr.sample_type |= bit;
evsel->sample_size += sizeof(u64);
}
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
enum perf_event_sample_format bit)
{
if (evsel->attr.sample_type & bit) {
evsel->attr.sample_type &= ~bit;
evsel->sample_size -= sizeof(u64);
}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
perf_evsel__set_sample_bit(evsel, ID);
evsel->attr.read_format |= PERF_FORMAT_ID;
}
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
evsel->idx = idx;
evsel->attr = *attr;
evsel->leader = evsel;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
@ -440,11 +465,9 @@ void perf_evsel__config(struct perf_evsel *evsel,
attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
attr->inherit = !opts->no_inherit;
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING |
PERF_FORMAT_ID;
attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
perf_evsel__set_sample_bit(evsel, IP);
perf_evsel__set_sample_bit(evsel, TID);
/*
* We default some events to a 1 default interval. But keep
@ -453,7 +476,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
opts->user_interval != ULLONG_MAX)) {
if (opts->freq) {
attr->sample_type |= PERF_SAMPLE_PERIOD;
perf_evsel__set_sample_bit(evsel, PERIOD);
attr->freq = 1;
attr->sample_freq = opts->freq;
} else {
@ -468,16 +491,16 @@ void perf_evsel__config(struct perf_evsel *evsel,
attr->inherit_stat = 1;
if (opts->sample_address) {
attr->sample_type |= PERF_SAMPLE_ADDR;
perf_evsel__set_sample_bit(evsel, ADDR);
attr->mmap_data = track;
}
if (opts->call_graph) {
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
if (opts->call_graph == CALLCHAIN_DWARF) {
attr->sample_type |= PERF_SAMPLE_REGS_USER |
PERF_SAMPLE_STACK_USER;
perf_evsel__set_sample_bit(evsel, REGS_USER);
perf_evsel__set_sample_bit(evsel, STACK_USER);
attr->sample_regs_user = PERF_REGS_MASK;
attr->sample_stack_user = opts->stack_dump_size;
attr->exclude_callchain_user = 1;
@ -485,20 +508,20 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
if (perf_target__has_cpu(&opts->target))
attr->sample_type |= PERF_SAMPLE_CPU;
perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period)
attr->sample_type |= PERF_SAMPLE_PERIOD;
perf_evsel__set_sample_bit(evsel, PERIOD);
if (!opts->sample_id_all_missing &&
(opts->sample_time || !opts->no_inherit ||
perf_target__has_cpu(&opts->target)))
attr->sample_type |= PERF_SAMPLE_TIME;
perf_evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples) {
attr->sample_type |= PERF_SAMPLE_TIME;
attr->sample_type |= PERF_SAMPLE_RAW;
attr->sample_type |= PERF_SAMPLE_CPU;
perf_evsel__set_sample_bit(evsel, TIME);
perf_evsel__set_sample_bit(evsel, RAW);
perf_evsel__set_sample_bit(evsel, CPU);
}
if (opts->no_delay) {
@ -506,7 +529,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
attr->wakeup_events = 1;
}
if (opts->branch_stack) {
attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
attr->branch_sample_type = opts->branch_stack;
}
@ -519,14 +542,14 @@ void perf_evsel__config(struct perf_evsel *evsel,
* Disabling only independent events or group leaders,
* keeping group members enabled.
*/
if (!perf_evsel__is_group_member(evsel))
if (perf_evsel__is_group_leader(evsel))
attr->disabled = 1;
/*
* Setting enable_on_exec for independent events and
* group leaders of workloads executed (and traced) by perf.
*/
if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel))
if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
attr->enable_on_exec = 1;
}
@ -707,7 +730,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
struct perf_evsel *leader = evsel->leader;
int fd;
if (!perf_evsel__is_group_member(evsel))
if (perf_evsel__is_group_leader(evsel))
return -1;
/*
@ -1205,3 +1228,128 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
return 0;
}
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
va_list args;
int ret = 0;
if (!*first) {
ret += fprintf(fp, ",");
} else {
ret += fprintf(fp, ":");
*first = false;
}
va_start(args, fmt);
ret += vfprintf(fp, fmt, args);
va_end(args);
return ret;
}
static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
if (value == 0)
return 0;
return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}
#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
struct bit_names {
int bit;
const char *name;
};
static int bits__fprintf(FILE *fp, const char *field, u64 value,
struct bit_names *bits, bool *first)
{
int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
bool first_bit = true;
do {
if (value & bits[i].bit) {
printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
first_bit = false;
}
} while (bits[++i].name != NULL);
return printed;
}
static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
struct bit_names bits[] = {
bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
{ .name = NULL, }
};
#undef bit_name
return bits__fprintf(fp, "sample_type", value, bits, first);
}
static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
struct bit_names bits[] = {
bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
bit_name(ID), bit_name(GROUP),
{ .name = NULL, }
};
#undef bit_name
return bits__fprintf(fp, "read_format", value, bits, first);
}
int perf_evsel__fprintf(struct perf_evsel *evsel,
struct perf_attr_details *details, FILE *fp)
{
bool first = true;
int printed = fprintf(fp, "%s", perf_evsel__name(evsel));
if (details->verbose || details->freq) {
printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
(u64)evsel->attr.sample_freq);
}
if (details->verbose) {
if_print(type);
if_print(config);
if_print(config1);
if_print(config2);
if_print(size);
printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
if (evsel->attr.read_format)
printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
if_print(disabled);
if_print(inherit);
if_print(pinned);
if_print(exclusive);
if_print(exclude_user);
if_print(exclude_kernel);
if_print(exclude_hv);
if_print(exclude_idle);
if_print(mmap);
if_print(comm);
if_print(freq);
if_print(inherit_stat);
if_print(enable_on_exec);
if_print(task);
if_print(watermark);
if_print(precise_ip);
if_print(mmap_data);
if_print(sample_id_all);
if_print(exclude_host);
if_print(exclude_guest);
if_print(__reserved_1);
if_print(wakeup_events);
if_print(bp_type);
if_print(branch_sample_type);
}
fputc('\n', fp);
return ++printed;
}
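
perf_evsel__fprintf() above decodes the sample_type and read_format masks into readable names by walking a table terminated by a NULL name. A self-contained sketch of that decoding loop; PERF_SAMPLE_* comes from the kernel ABI header, while bits_print() and the example mask are simplified stand-ins:

#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>

struct bit_names {
	int bit;
	const char *name;
};

/*
 * Same loop shape as bits__fprintf() above: walk a table terminated by a
 * NULL name and print '|'-separated names for every bit set in value.
 */
static void bits_print(const char *field, unsigned long long value,
		       const struct bit_names *bits)
{
	bool first = true;
	int i = 0;

	printf("%s: ", field);
	do {
		if (value & bits[i].bit) {
			printf("%s%s", first ? "" : "|", bits[i].name);
			first = false;
		}
	} while (bits[++i].name != NULL);
	putchar('\n');
}

#define bit_name(n) { PERF_SAMPLE_##n, #n }

int main(void)
{
	const struct bit_names sample_bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(CPU),
		bit_name(PERIOD), { .name = NULL, },
	};

	/* e.g. the mask an event sampling IP, TID and PERIOD would carry */
	bits_print("sample_type",
		   PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD,
		   sample_bits);
	return 0;
}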

View File

@ -118,6 +118,19 @@ void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
enum perf_event_sample_format bit);
#define perf_evsel__set_sample_bit(evsel, bit) \
__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
#define perf_evsel__reset_sample_bit(evsel, bit) \
__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
void perf_evsel__set_sample_id(struct perf_evsel *evsel);
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
const char *filter);
@ -226,8 +239,16 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
return list_entry(evsel->node.next, struct perf_evsel, node);
}
static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel)
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
return evsel->leader != NULL;
return evsel->leader == evsel;
}
struct perf_attr_details {
bool freq;
bool verbose;
};
int perf_evsel__fprintf(struct perf_evsel *evsel,
struct perf_attr_details *details, FILE *fp);
#endif /* __PERF_EVSEL_H */

View File

@ -785,7 +785,7 @@ void hists__match(struct hists *leader, struct hists *other)
pair = hists__find_entry(other, pos);
if (pair)
hist__entry_add_pair(pos, pair);
hist_entry__add_pair(pair, pos);
}
}
@ -806,7 +806,7 @@ int hists__link(struct hists *leader, struct hists *other)
pair = hists__add_dummy_entry(leader, pos);
if (pair == NULL)
return -1;
hist__entry_add_pair(pair, pos);
hist_entry__add_pair(pos, pair);
}
}

View File

@ -126,13 +126,19 @@ struct perf_hpp {
};
struct perf_hpp_fmt {
bool cond;
int (*header)(struct perf_hpp *hpp);
int (*width)(struct perf_hpp *hpp);
int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
struct list_head list;
};
extern struct list_head perf_hpp__list;
#define perf_hpp__for_each_format(format) \
list_for_each_entry(format, &perf_hpp__list, list)
extern struct perf_hpp_fmt perf_hpp__format[];
enum {
@ -148,14 +154,14 @@ enum {
PERF_HPP__DELTA,
PERF_HPP__RATIO,
PERF_HPP__WEIGHTED_DIFF,
PERF_HPP__DISPL,
PERF_HPP__FORMULA,
PERF_HPP__MAX_INDEX
};
void perf_hpp__init(void);
void perf_hpp__column_enable(unsigned col, bool enable);
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);
int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
bool color);
@ -219,8 +225,10 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
unsigned int hists__sort_list_width(struct hists *self);
double perf_diff__compute_delta(struct hist_entry *he);
double perf_diff__compute_ratio(struct hist_entry *he);
s64 perf_diff__compute_wdiff(struct hist_entry *he);
int perf_diff__formula(char *buf, size_t size, struct hist_entry *he);
double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair);
double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair);
s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair);
int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size);
double perf_diff__period_percent(struct hist_entry *he, u64 period);
#endif /* __PERF_HIST_H */

View File

@ -1,10 +1,15 @@
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
#include "unwind.h"
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
@ -48,6 +53,29 @@ static void dsos__delete(struct list_head *dsos)
}
}
void machine__delete_dead_threads(struct machine *machine)
{
struct thread *n, *t;
list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
list_del(&t->node);
thread__delete(t);
}
}
void machine__delete_threads(struct machine *machine)
{
struct rb_node *nd = rb_first(&machine->threads);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
rb_erase(&t->rb_node, &machine->threads);
nd = rb_next(nd);
thread__delete(t);
}
}
void machine__exit(struct machine *machine)
{
map_groups__exit(&machine->kmaps);
@ -264,6 +292,534 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
return 0;
}
struct map *machine__new_module(struct machine *machine, u64 start,
const char *filename)
{
struct map *map;
struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
if (dso == NULL)
return NULL;
map = map__new2(start, dso, MAP__FUNCTION);
if (map == NULL)
return NULL;
if (machine__is_host(machine))
dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
else
dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
map_groups__insert(&machine->kmaps, map);
return map;
}
size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
{
struct rb_node *nd;
size_t ret = 0;
for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->kernel_dsos, fp);
ret += __dsos__fprintf(&pos->user_dsos, fp);
}
return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
__dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
struct rb_node *nd;
size_t ret = 0;
for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
int i;
size_t printed = 0;
struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
if (kdso->has_build_id) {
char filename[PATH_MAX];
if (dso__build_id_filename(kdso, filename, sizeof(filename)))
printed += fprintf(fp, "[0] %s\n", filename);
}
for (i = 0; i < vmlinux_path__nr_entries; ++i)
printed += fprintf(fp, "[%d] %s\n",
i + kdso->has_build_id, vmlinux_path[i]);
return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
}
return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
const char *vmlinux_name = NULL;
struct dso *kernel;
if (machine__is_host(machine)) {
vmlinux_name = symbol_conf.vmlinux_name;
if (!vmlinux_name)
vmlinux_name = "[kernel.kallsyms]";
kernel = dso__kernel_findnew(machine, vmlinux_name,
"[kernel]",
DSO_TYPE_KERNEL);
} else {
char bf[PATH_MAX];
if (machine__is_default_guest(machine))
vmlinux_name = symbol_conf.default_guest_vmlinux_name;
if (!vmlinux_name)
vmlinux_name = machine__mmap_name(machine, bf,
sizeof(bf));
kernel = dso__kernel_findnew(machine, vmlinux_name,
"[guest.kernel]",
DSO_TYPE_GUEST_KERNEL);
}
if (kernel != NULL && (!kernel->has_build_id))
dso__read_running_kernel_build_id(kernel, machine);
return kernel;
}
struct process_args {
u64 start;
};
static int symbol__in_kernel(void *arg, const char *name,
char type __maybe_unused, u64 start)
{
struct process_args *args = arg;
if (strchr(name, '['))
return 0;
args->start = start;
return 1;
}
/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
const char *filename;
char path[PATH_MAX];
struct process_args args;
if (machine__is_host(machine)) {
filename = "/proc/kallsyms";
} else {
if (machine__is_default_guest(machine))
filename = (char *)symbol_conf.default_guest_kallsyms;
else {
sprintf(path, "%s/proc/kallsyms", machine->root_dir);
filename = path;
}
}
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
return 0;
return args.start;
}
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
u64 start = machine__get_kernel_start_addr(machine);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
machine->vmlinux_maps[type] = map__new2(start, kernel, type);
if (machine->vmlinux_maps[type] == NULL)
return -1;
machine->vmlinux_maps[type]->map_ip =
machine->vmlinux_maps[type]->unmap_ip =
identity__map_ip;
kmap = map__kmap(machine->vmlinux_maps[type]);
kmap->kmaps = &machine->kmaps;
map_groups__insert(&machine->kmaps,
machine->vmlinux_maps[type]);
}
return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
enum map_type type;
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
if (machine->vmlinux_maps[type] == NULL)
continue;
kmap = map__kmap(machine->vmlinux_maps[type]);
map_groups__remove(&machine->kmaps,
machine->vmlinux_maps[type]);
if (kmap->ref_reloc_sym) {
/*
* ref_reloc_sym is shared among all maps, so free just
* on one of them.
*/
if (type == MAP__FUNCTION) {
free((char *)kmap->ref_reloc_sym->name);
kmap->ref_reloc_sym->name = NULL;
free(kmap->ref_reloc_sym);
}
kmap->ref_reloc_sym = NULL;
}
map__delete(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL;
}
}
int machines__create_guest_kernel_maps(struct rb_root *machines)
{
int ret = 0;
struct dirent **namelist = NULL;
int i, items = 0;
char path[PATH_MAX];
pid_t pid;
char *endp;
if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_modules ||
symbol_conf.default_guest_kallsyms) {
machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
}
if (symbol_conf.guestmount) {
items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
if (items <= 0)
return -ENOENT;
for (i = 0; i < items; i++) {
if (!isdigit(namelist[i]->d_name[0])) {
/* Filter out . and .. */
continue;
}
pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
if ((*endp != '\0') ||
(endp == namelist[i]->d_name) ||
(errno == ERANGE)) {
pr_debug("invalid directory (%s). Skipping.\n",
namelist[i]->d_name);
continue;
}
sprintf(path, "%s/%s/proc/kallsyms",
symbol_conf.guestmount,
namelist[i]->d_name);
ret = access(path, R_OK);
if (ret) {
pr_debug("Can't access file %s\n", path);
goto failure;
}
machines__create_kernel_maps(machines, pid);
}
failure:
free(namelist);
}
return ret;
}
void machines__destroy_guest_kernel_maps(struct rb_root *machines)
{
struct rb_node *next = rb_first(machines);
while (next) {
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, machines);
machine__delete(pos);
}
}
int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
{
struct machine *machine = machines__findnew(machines, pid);
if (machine == NULL)
return -1;
return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename,
enum map_type type, symbol_filter_t filter)
{
struct map *map = machine->vmlinux_maps[type];
int ret = dso__load_kallsyms(map->dso, filename, map, filter);
if (ret > 0) {
dso__set_loaded(map->dso, type);
/*
* Since /proc/kallsyms will have multiple sections for the
* kernel, with modules between them, fix up the end of all
* sections.
*/
__map_groups__fixup_end(&machine->kmaps, type);
}
return ret;
}
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
symbol_filter_t filter)
{
struct map *map = machine->vmlinux_maps[type];
int ret = dso__load_vmlinux_path(map->dso, map, filter);
if (ret > 0) {
dso__set_loaded(map->dso, type);
map__reloc_vmlinux(map);
}
return ret;
}
static void map_groups__fixup_end(struct map_groups *mg)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i)
__map_groups__fixup_end(mg, i);
}
static char *get_kernel_version(const char *root_dir)
{
char version[PATH_MAX];
FILE *file;
char *name, *tmp;
const char *prefix = "Linux version ";
sprintf(version, "%s/proc/version", root_dir);
file = fopen(version, "r");
if (!file)
return NULL;
version[0] = '\0';
tmp = fgets(version, sizeof(version), file);
fclose(file);
name = strstr(version, prefix);
if (!name)
return NULL;
name += strlen(prefix);
tmp = strchr(name, ' ');
if (tmp)
*tmp = '\0';
return strdup(name);
}
static int map_groups__set_modules_path_dir(struct map_groups *mg,
const char *dir_name)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
int ret = 0;
if (!dir) {
pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
return -1;
}
while ((dent = readdir(dir)) != NULL) {
char path[PATH_MAX];
struct stat st;
/*sshfs might return bad dent->d_type, so we have to stat*/
snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
if (stat(path, &st))
continue;
if (S_ISDIR(st.st_mode)) {
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
ret = map_groups__set_modules_path_dir(mg, path);
if (ret < 0)
goto out;
} else {
char *dot = strrchr(dent->d_name, '.'),
dso_name[PATH_MAX];
struct map *map;
char *long_name;
if (dot == NULL || strcmp(dot, ".ko"))
continue;
snprintf(dso_name, sizeof(dso_name), "[%.*s]",
(int)(dot - dent->d_name), dent->d_name);
strxfrchar(dso_name, '-', '_');
map = map_groups__find_by_name(mg, MAP__FUNCTION,
dso_name);
if (map == NULL)
continue;
long_name = strdup(path);
if (long_name == NULL) {
ret = -1;
goto out;
}
dso__set_long_name(map->dso, long_name);
map->dso->lname_alloc = 1;
dso__kernel_module_get_build_id(map->dso, "");
}
}
out:
closedir(dir);
return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
char *version;
char modules_path[PATH_MAX];
version = get_kernel_version(machine->root_dir);
if (!version)
return -1;
snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
machine->root_dir, version);
free(version);
return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
}
static int machine__create_modules(struct machine *machine)
{
char *line = NULL;
size_t n;
FILE *file;
struct map *map;
const char *modules;
char path[PATH_MAX];
if (machine__is_default_guest(machine))
modules = symbol_conf.default_guest_modules;
else {
sprintf(path, "%s/proc/modules", machine->root_dir);
modules = path;
}
if (symbol__restricted_filename(path, "/proc/modules"))
return -1;
file = fopen(modules, "r");
if (file == NULL)
return -1;
while (!feof(file)) {
char name[PATH_MAX];
u64 start;
char *sep;
int line_len;
line_len = getline(&line, &n, file);
if (line_len < 0)
break;
if (!line)
goto out_failure;
line[--line_len] = '\0'; /* \n */
sep = strrchr(line, 'x');
if (sep == NULL)
continue;
hex2u64(sep + 1, &start);
sep = strchr(line, ' ');
if (sep == NULL)
continue;
*sep = '\0';
snprintf(name, sizeof(name), "[%s]", line);
map = machine__new_module(machine, start, name);
if (map == NULL)
goto out_delete_line;
dso__kernel_module_get_build_id(map->dso, machine->root_dir);
}
free(line);
fclose(file);
return machine__set_modules_path(machine);
out_delete_line:
free(line);
out_failure:
return -1;
}
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
if (kernel == NULL ||
__machine__create_kernel_maps(machine, kernel) < 0)
return -1;
if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
if (machine__is_host(machine))
pr_debug("Problems creating module maps, "
"continuing anyway...\n");
else
pr_debug("Problems creating module maps for guest %d, "
"continuing anyway...\n", machine->pid);
}
/*
* Now that we have all the maps created, just set the ->end of them:
*/
map_groups__fixup_end(&machine->kmaps);
return 0;
}
static void machine__set_kernel_mmap_len(struct machine *machine,
union perf_event *event)
{
@ -462,3 +1018,189 @@ int machine__process_event(struct machine *machine, union perf_event *event)
return ret;
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
machine->last_match = NULL;
rb_erase(&th->rb_node, &machine->threads);
/*
* We may have references to this thread, for instance in some hist_entry
* instances, so just move them to a separate list.
*/
list_add_tail(&th->node, &machine->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
return 1;
return 0;
}
static const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
size_t i;
u8 m;
memset(&al, 0, sizeof(al));
for (i = 0; i < NCPUMODES; i++) {
m = cpumodes[i];
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
* Branches may straddle the kernel/user/hypervisor boundaries.
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
ip, &al, NULL);
if (al.sym)
goto found;
}
found:
ams->addr = ip;
ams->al_addr = al.addr;
ams->sym = al.sym;
ams->map = al.map;
}
struct branch_info *machine__resolve_bstack(struct machine *machine,
struct thread *thr,
struct branch_stack *bs)
{
struct branch_info *bi;
unsigned int i;
bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
bi[i].flags = bs->entries[i].flags;
}
return bi;
}
static int machine__resolve_callchain_sample(struct machine *machine,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
int err;
callchain_cursor_reset(&callchain_cursor);
if (chain->nr > PERF_MAX_STACK_DEPTH) {
pr_warning("corrupted callchain. skipping...\n");
return 0;
}
for (i = 0; i < chain->nr; i++) {
u64 ip;
struct addr_location al;
if (callchain_param.order == ORDER_CALLEE)
ip = chain->ips[i];
else
ip = chain->ips[chain->nr - i - 1];
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
cpumode = PERF_RECORD_MISC_HYPERVISOR;
break;
case PERF_CONTEXT_KERNEL:
cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
cpumode = PERF_RECORD_MISC_USER;
break;
default:
pr_debug("invalid callchain context: "
"%"PRId64"\n", (s64) ip);
/*
* It seems the callchain is corrupted.
* Discard all.
*/
callchain_cursor_reset(&callchain_cursor);
return 0;
}
continue;
}
al.filtered = false;
thread__find_addr_location(thread, machine, cpumode,
MAP__FUNCTION, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_parent_regex(al.sym))
*parent = al.sym;
if (!symbol_conf.use_callchain)
break;
}
err = callchain_cursor_append(&callchain_cursor,
ip, al.map, al.sym);
if (err)
return err;
}
return 0;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
return callchain_cursor_append(cursor, entry->ip,
entry->map, entry->sym);
}
int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
struct perf_sample *sample,
struct symbol **parent)
{
int ret;
callchain_cursor_reset(&callchain_cursor);
ret = machine__resolve_callchain_sample(machine, thread,
sample->callchain, parent);
if (ret)
return ret;
/* Can we do dwarf post unwind? */
if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
(evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
return 0;
/* Bail out if nothing was captured. */
if ((!sample->user_regs.regs) ||
(!sample->user_stack.size))
return 0;
return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
thread, evsel->attr.sample_regs_user,
sample);
}

View File

@ -61,9 +61,10 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
void machine__exit(struct machine *machine);
void machine__delete_dead_threads(struct machine *machine);
void machine__delete_threads(struct machine *machine);
void machine__delete(struct machine *machine);
struct branch_info *machine__resolve_bstack(struct machine *machine,
struct thread *thread,
struct branch_stack *bs);
@ -129,11 +130,11 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
symbol_filter_t filter);
size_t machine__fprintf_dsos_buildid(struct machine *machine,
FILE *fp, bool with_hits);
size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm);
size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp);
size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
FILE *fp, bool with_hits);
size_t machines__fprintf_dsos_buildid(struct rb_root *machines, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm);
void machine__destroy_kernel_maps(struct machine *machine);
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);

View File

@ -16,7 +16,6 @@
#include "cpumap.h"
#include "event-parse.h"
#include "perf_regs.h"
#include "unwind.h"
#include "vdso.h"
static int perf_session__open(struct perf_session *self, bool force)
@ -128,15 +127,6 @@ struct perf_session *perf_session__new(const char *filename, int mode,
goto out;
memcpy(self->filename, filename, len);
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
*/
#if BITS_PER_LONG == 64
self->mmap_window = ULLONG_MAX;
#else
self->mmap_window = 32 * 1024 * 1024ULL;
#endif
self->machines = RB_ROOT;
self->repipe = repipe;
INIT_LIST_HEAD(&self->ordered_samples.samples);
@ -171,236 +161,44 @@ struct perf_session *perf_session__new(const char *filename, int mode,
return NULL;
}
static void machine__delete_dead_threads(struct machine *machine)
{
struct thread *n, *t;
list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
list_del(&t->node);
thread__delete(t);
}
}
static void perf_session__delete_dead_threads(struct perf_session *session)
{
machine__delete_dead_threads(&session->host_machine);
}
static void machine__delete_threads(struct machine *self)
{
struct rb_node *nd = rb_first(&self->threads);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
rb_erase(&t->rb_node, &self->threads);
nd = rb_next(nd);
thread__delete(t);
}
}
static void perf_session__delete_threads(struct perf_session *session)
{
machine__delete_threads(&session->host_machine);
}
static void perf_session_env__delete(struct perf_session_env *env)
{
free(env->hostname);
free(env->os_release);
free(env->version);
free(env->arch);
free(env->cpu_desc);
free(env->cpuid);
free(env->cmdline);
free(env->sibling_cores);
free(env->sibling_threads);
free(env->numa_nodes);
free(env->pmu_mappings);
}
void perf_session__delete(struct perf_session *self)
{
perf_session__destroy_kernel_maps(self);
perf_session__delete_dead_threads(self);
perf_session__delete_threads(self);
perf_session_env__delete(&self->header.env);
machine__exit(&self->host_machine);
close(self->fd);
free(self);
vdso__exit();
}
void machine__remove_thread(struct machine *self, struct thread *th)
{
self->last_match = NULL;
rb_erase(&th->rb_node, &self->threads);
/*
* We may have references to this thread, for instance in some hist_entry
* instances, so just move them to a separate list.
*/
list_add_tail(&th->node, &self->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
return 1;
return 0;
}
static const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *self, struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
size_t i;
u8 m;
memset(&al, 0, sizeof(al));
for (i = 0; i < NCPUMODES; i++) {
m = cpumodes[i];
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
* Branches may straddle the kernel/user/hypervisor boundaries.
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
thread__find_addr_location(thread, self, m, MAP__FUNCTION,
ip, &al, NULL);
if (al.sym)
goto found;
}
found:
ams->addr = ip;
ams->al_addr = al.addr;
ams->sym = al.sym;
ams->map = al.map;
}
struct branch_info *machine__resolve_bstack(struct machine *self,
struct thread *thr,
struct branch_stack *bs)
{
struct branch_info *bi;
unsigned int i;
bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
bi[i].flags = bs->entries[i].flags;
}
return bi;
}
static int machine__resolve_callchain_sample(struct machine *machine,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
int err;
callchain_cursor_reset(&callchain_cursor);
if (chain->nr > PERF_MAX_STACK_DEPTH) {
pr_warning("corrupted callchain. skipping...\n");
return 0;
}
for (i = 0; i < chain->nr; i++) {
u64 ip;
struct addr_location al;
if (callchain_param.order == ORDER_CALLEE)
ip = chain->ips[i];
else
ip = chain->ips[chain->nr - i - 1];
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
cpumode = PERF_RECORD_MISC_HYPERVISOR;
break;
case PERF_CONTEXT_KERNEL:
cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
cpumode = PERF_RECORD_MISC_USER;
break;
default:
pr_debug("invalid callchain context: "
"%"PRId64"\n", (s64) ip);
/*
* It seems the callchain is corrupted.
* Discard all.
*/
callchain_cursor_reset(&callchain_cursor);
return 0;
}
continue;
}
al.filtered = false;
thread__find_addr_location(thread, machine, cpumode,
MAP__FUNCTION, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_parent_regex(al.sym))
*parent = al.sym;
if (!symbol_conf.use_callchain)
break;
}
err = callchain_cursor_append(&callchain_cursor,
ip, al.map, al.sym);
if (err)
return err;
}
return 0;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
return callchain_cursor_append(cursor, entry->ip,
entry->map, entry->sym);
}
int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
struct perf_sample *sample,
struct symbol **parent)
{
int ret;
callchain_cursor_reset(&callchain_cursor);
ret = machine__resolve_callchain_sample(machine, thread,
sample->callchain, parent);
if (ret)
return ret;
/* Can we do dwarf post unwind? */
if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
(evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
return 0;
/* Bail out if nothing was captured. */
if ((!sample->user_regs.regs) ||
(!sample->user_stack.size))
return 0;
return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
thread, evsel->attr.sample_regs_user,
sample);
}
static int process_event_synth_tracing_data_stub(union perf_event *event
__maybe_unused,
struct perf_session *session
@ -1369,6 +1167,18 @@ fetch_mmaped_event(struct perf_session *session,
return event;
}
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
*/
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
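
With the per-session mmap_window field gone, the event reader always maps MMAP_SIZE windows and cycles through NUM_MMAPS slots so that still-referenced older windows can stay mapped. A rough standalone sketch of how the window offsets advance across a file on 32-bit; the file size, the assumption that each window is consumed in full, and the slot mask are simplifications of the real remap loop:

#include <stdio.h>

#define MMAP_SIZE (32 * 1024 * 1024ULL)	/* the 32-bit values from above */
#define NUM_MMAPS 128

int main(void)
{
	unsigned long long file_size = 100 * 1024 * 1024ULL;	/* pretend perf.data payload */
	unsigned long long file_offset = 0;
	int map_idx = 0;

	/*
	 * Sketch of the remap loop in __perf_session__process_events():
	 * each window covers MMAP_SIZE bytes of the file, and map_idx
	 * cycles through the NUM_MMAPS slots holding older windows that
	 * may still have events referenced from them.
	 */
	while (file_offset < file_size) {
		unsigned long long end = file_offset + MMAP_SIZE;

		if (end > file_size)
			end = file_size;
		printf("slot %3d: file range [%llu, %llu)\n",
		       map_idx, file_offset, end);

		file_offset = end;			/* pretend fully consumed */
		map_idx = (map_idx + 1) & (NUM_MMAPS - 1);
	}
	return 0;
}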
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
u64 file_size, struct perf_tool *tool)
@ -1376,7 +1186,7 @@ int __perf_session__process_events(struct perf_session *session,
u64 head, page_offset, file_offset, file_pos, progress_next;
int err, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[8];
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
uint32_t size;
@ -1391,7 +1201,7 @@ int __perf_session__process_events(struct perf_session *session,
progress_next = file_size / 16;
mmap_size = session->mmap_window;
mmap_size = MMAP_SIZE;
if (mmap_size > file_size)
mmap_size = file_size;
@ -1532,10 +1342,10 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
bool with_hits)
bool (skip)(struct dso *dso, int parm), int parm)
{
size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm);
return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)

View File

@ -30,7 +30,6 @@ struct ordered_samples {
struct perf_session {
struct perf_header header;
unsigned long size;
unsigned long mmap_window;
struct machine host_machine;
struct rb_root machines;
struct perf_evlist *evlist;
@ -116,8 +115,8 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
FILE *fp, bool with_hits);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
bool (fn)(struct dso *dso, int parm), int parm);
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);

View File

@ -55,9 +55,6 @@ struct he_stat {
struct hist_entry_diff {
bool computed;
/* PERF_HPP__DISPL */
int displacement;
/* PERF_HPP__DELTA */
double period_ratio_delta;
@ -118,7 +115,7 @@ static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
return NULL;
}
static inline void hist__entry_add_pair(struct hist_entry *he,
static inline void hist_entry__add_pair(struct hist_entry *he,
struct hist_entry *pair)
{
list_add_tail(&he->pairs.head, &pair->pairs.node);

View File

@ -718,6 +718,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
sym.st_value);
used_opd = true;
}
/*
* When loading symbols in a data mapping, ABS symbols (which
* have a value of SHN_ABS in their st_shndx) fail at
* elf_getscn(), and that marks the loading as a failure, so
* already loaded symbols cannot be fixed up.
*
* I'm not sure what should be done. Just ignore them for now.
* - Namhyung Kim
*/
if (sym.st_shndx == SHN_ABS)
continue;
sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
if (!sec)

View File

@ -28,8 +28,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter);
static int vmlinux_path__nr_entries;
static char **vmlinux_path;
int vmlinux_path__nr_entries;
char **vmlinux_path;
struct symbol_conf symbol_conf = {
.exclude_other = true,
@ -202,13 +202,6 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
curr->end = ~0ULL;
}
static void map_groups__fixup_end(struct map_groups *mg)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i)
__map_groups__fixup_end(mg, i);
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
size_t namelen = strlen(name) + 1;
@ -652,8 +645,8 @@ discard_symbol: rb_erase(&pos->rb_node, root);
return count + moved;
}
static bool symbol__restricted_filename(const char *filename,
const char *restricted_filename)
bool symbol__restricted_filename(const char *filename,
const char *restricted_filename)
{
bool restricted = false;
@ -887,200 +880,6 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
return NULL;
}
static int map_groups__set_modules_path_dir(struct map_groups *mg,
const char *dir_name)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
int ret = 0;
if (!dir) {
pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
return -1;
}
while ((dent = readdir(dir)) != NULL) {
char path[PATH_MAX];
struct stat st;
/*sshfs might return bad dent->d_type, so we have to stat*/
snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
if (stat(path, &st))
continue;
if (S_ISDIR(st.st_mode)) {
if (!strcmp(dent->d_name, ".") ||
!strcmp(dent->d_name, ".."))
continue;
ret = map_groups__set_modules_path_dir(mg, path);
if (ret < 0)
goto out;
} else {
char *dot = strrchr(dent->d_name, '.'),
dso_name[PATH_MAX];
struct map *map;
char *long_name;
if (dot == NULL || strcmp(dot, ".ko"))
continue;
snprintf(dso_name, sizeof(dso_name), "[%.*s]",
(int)(dot - dent->d_name), dent->d_name);
strxfrchar(dso_name, '-', '_');
map = map_groups__find_by_name(mg, MAP__FUNCTION,
dso_name);
if (map == NULL)
continue;
long_name = strdup(path);
if (long_name == NULL) {
ret = -1;
goto out;
}
dso__set_long_name(map->dso, long_name);
map->dso->lname_alloc = 1;
dso__kernel_module_get_build_id(map->dso, "");
}
}
out:
closedir(dir);
return ret;
}
static char *get_kernel_version(const char *root_dir)
{
char version[PATH_MAX];
FILE *file;
char *name, *tmp;
const char *prefix = "Linux version ";
sprintf(version, "%s/proc/version", root_dir);
file = fopen(version, "r");
if (!file)
return NULL;
version[0] = '\0';
tmp = fgets(version, sizeof(version), file);
fclose(file);
name = strstr(version, prefix);
if (!name)
return NULL;
name += strlen(prefix);
tmp = strchr(name, ' ');
if (tmp)
*tmp = '\0';
return strdup(name);
}
static int machine__set_modules_path(struct machine *machine)
{
char *version;
char modules_path[PATH_MAX];
version = get_kernel_version(machine->root_dir);
if (!version)
return -1;
snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
machine->root_dir, version);
free(version);
return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
}
struct map *machine__new_module(struct machine *machine, u64 start,
const char *filename)
{
struct map *map;
struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
if (dso == NULL)
return NULL;
map = map__new2(start, dso, MAP__FUNCTION);
if (map == NULL)
return NULL;
if (machine__is_host(machine))
dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
else
dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
map_groups__insert(&machine->kmaps, map);
return map;
}
static int machine__create_modules(struct machine *machine)
{
char *line = NULL;
size_t n;
FILE *file;
struct map *map;
const char *modules;
char path[PATH_MAX];
if (machine__is_default_guest(machine))
modules = symbol_conf.default_guest_modules;
else {
sprintf(path, "%s/proc/modules", machine->root_dir);
modules = path;
}
if (symbol__restricted_filename(path, "/proc/modules"))
return -1;
file = fopen(modules, "r");
if (file == NULL)
return -1;
while (!feof(file)) {
char name[PATH_MAX];
u64 start;
char *sep;
int line_len;
line_len = getline(&line, &n, file);
if (line_len < 0)
break;
if (!line)
goto out_failure;
line[--line_len] = '\0'; /* \n */
sep = strrchr(line, 'x');
if (sep == NULL)
continue;
hex2u64(sep + 1, &start);
sep = strchr(line, ' ');
if (sep == NULL)
continue;
*sep = '\0';
snprintf(name, sizeof(name), "[%s]", line);
map = machine__new_module(machine, start, name);
if (map == NULL)
goto out_delete_line;
dso__kernel_module_get_build_id(map->dso, machine->root_dir);
}
free(line);
fclose(file);
return machine__set_modules_path(machine);
out_delete_line:
free(line);
out_failure:
return -1;
}
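The loop in machine__create_modules() leans on the /proc/modules layout: the module name is the first field and the load address is the hexadecimal value after the final 'x' on the line. A standalone sketch over one invented line; hex2u64() is perf-internal, so plain strtoull() stands in for it:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[] = "usb_storage 62209 1 uas, Live 0xffffffffa0087000";
	unsigned long long start;
	char *sep;

	sep = strrchr(line, 'x');		/* last 'x' starts the "0x<addr>" field */
	if (!sep)
		return EXIT_FAILURE;
	start = strtoull(sep + 1, NULL, 16);

	sep = strchr(line, ' ');		/* the name is the first field */
	if (!sep)
		return EXIT_FAILURE;
	*sep = '\0';

	printf("[%s] @ %#llx\n", line, start);	/* "[usb_storage] @ 0xffffffffa0087000" */
	return 0;
}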
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, symbol_filter_t filter)
{
@@ -1300,195 +1099,6 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
return err;
}
size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
{
struct rb_node *nd;
size_t ret = 0;
for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->kernel_dsos, fp);
ret += __dsos__fprintf(&pos->user_dsos, fp);
}
return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
bool with_hits)
{
return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) +
__dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits);
}
size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
FILE *fp, bool with_hits)
{
struct rb_node *nd;
size_t ret = 0;
for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
}
return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
const char *vmlinux_name = NULL;
struct dso *kernel;
if (machine__is_host(machine)) {
vmlinux_name = symbol_conf.vmlinux_name;
if (!vmlinux_name)
vmlinux_name = "[kernel.kallsyms]";
kernel = dso__kernel_findnew(machine, vmlinux_name,
"[kernel]",
DSO_TYPE_KERNEL);
} else {
char bf[PATH_MAX];
if (machine__is_default_guest(machine))
vmlinux_name = symbol_conf.default_guest_vmlinux_name;
if (!vmlinux_name)
vmlinux_name = machine__mmap_name(machine, bf,
sizeof(bf));
kernel = dso__kernel_findnew(machine, vmlinux_name,
"[guest.kernel]",
DSO_TYPE_GUEST_KERNEL);
}
if (kernel != NULL && (!kernel->has_build_id))
dso__read_running_kernel_build_id(kernel, machine);
return kernel;
}
struct process_args {
u64 start;
};
static int symbol__in_kernel(void *arg, const char *name,
char type __maybe_unused, u64 start)
{
struct process_args *args = arg;
if (strchr(name, '['))
return 0;
args->start = start;
return 1;
}
/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
const char *filename;
char path[PATH_MAX];
struct process_args args;
if (machine__is_host(machine)) {
filename = "/proc/kallsyms";
} else {
if (machine__is_default_guest(machine))
filename = (char *)symbol_conf.default_guest_kallsyms;
else {
sprintf(path, "%s/proc/kallsyms", machine->root_dir);
filename = path;
}
}
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
return 0;
return args.start;
}
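machine__get_kernel_start_addr() takes the address of the first kallsyms entry whose name carries no '[': module symbols are tagged with a trailing "[modname]", so the first untagged symbol is treated as the start of the kernel map. A self-contained sketch of that selection over two invented lines:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* /proc/kallsyms lines look like "<addr> <type> <name>[\t[module]]" */
	const char *lines[] = {
		"ffffffffa0087000 t uas_probe\t[uas]",
		"ffffffff81000000 T _text",
	};
	unsigned long long start = 0;
	size_t i;

	for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
		unsigned long long addr;
		char type, name[128];

		if (sscanf(lines[i], "%llx %c %127[^\n]", &addr, &type, name) != 3)
			continue;
		if (strchr(name, '['))	/* module symbol: keep looking */
			continue;
		start = addr;		/* first non-module symbol wins */
		break;
	}
	printf("kernel start: %#llx\n", start);	/* 0xffffffff81000000 */
	return 0;
}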
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
u64 start = machine__get_kernel_start_addr(machine);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
machine->vmlinux_maps[type] = map__new2(start, kernel, type);
if (machine->vmlinux_maps[type] == NULL)
return -1;
machine->vmlinux_maps[type]->map_ip =
machine->vmlinux_maps[type]->unmap_ip =
identity__map_ip;
kmap = map__kmap(machine->vmlinux_maps[type]);
kmap->kmaps = &machine->kmaps;
map_groups__insert(&machine->kmaps,
machine->vmlinux_maps[type]);
}
return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
enum map_type type;
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
if (machine->vmlinux_maps[type] == NULL)
continue;
kmap = map__kmap(machine->vmlinux_maps[type]);
map_groups__remove(&machine->kmaps,
machine->vmlinux_maps[type]);
if (kmap->ref_reloc_sym) {
/*
* ref_reloc_sym is shared among all maps, so free just
* on one of them.
*/
if (type == MAP__FUNCTION) {
free((char *)kmap->ref_reloc_sym->name);
kmap->ref_reloc_sym->name = NULL;
free(kmap->ref_reloc_sym);
}
kmap->ref_reloc_sym = NULL;
}
map__delete(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL;
}
}
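Worth spelling out: ref_reloc_sym is a single heap object that every map type points at, so the teardown above frees it only when the MAP__FUNCTION map goes away and then clears the pointer in every map. A tiny illustration of that ownership rule, with invented types and names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_kmap {
	char *ref_sym;			/* shared by all entries, not owned by each */
};

int main(void)
{
	char *shared = strdup("_stext");
	struct fake_kmap maps[3] = { { shared }, { shared }, { shared } };
	int i;

	if (!shared)
		return EXIT_FAILURE;

	for (i = 0; i < 3; i++) {
		if (i == 0)			/* free through exactly one slot... */
			free(maps[i].ref_sym);
		maps[i].ref_sym = NULL;		/* ...but clear the pointer everywhere */
	}
	printf("all slots cleared, freed once\n");
	return 0;
}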
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
if (kernel == NULL ||
__machine__create_kernel_maps(machine, kernel) < 0)
return -1;
if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
if (machine__is_host(machine))
pr_debug("Problems creating module maps, "
"continuing anyway...\n");
else
pr_debug("Problems creating module maps for guest %d, "
"continuing anyway...\n", machine->pid);
}
/*
* Now that we have all the maps created, just set the ->end of them:
*/
map_groups__fixup_end(&machine->kmaps);
return 0;
}
static void vmlinux_path__exit(void)
{
while (--vmlinux_path__nr_entries >= 0) {
@@ -1549,25 +1159,6 @@ static int vmlinux_path__init(void)
return -1;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
int i;
size_t printed = 0;
struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
if (kdso->has_build_id) {
char filename[PATH_MAX];
if (dso__build_id_filename(kdso, filename, sizeof(filename)))
printed += fprintf(fp, "[0] %s\n", filename);
}
for (i = 0; i < vmlinux_path__nr_entries; ++i)
printed += fprintf(fp, "[%d] %s\n",
i + kdso->has_build_id, vmlinux_path[i]);
return printed;
}
static int setup_list(struct strlist **list, const char *list_str,
const char *list_name)
{
@@ -1671,108 +1262,3 @@ void symbol__exit(void)
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.initialized = false;
}
int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
{
struct machine *machine = machines__findnew(machines, pid);
if (machine == NULL)
return -1;
return machine__create_kernel_maps(machine);
}
int machines__create_guest_kernel_maps(struct rb_root *machines)
{
int ret = 0;
struct dirent **namelist = NULL;
int i, items = 0;
char path[PATH_MAX];
pid_t pid;
char *endp;
if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_modules ||
symbol_conf.default_guest_kallsyms) {
machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
}
if (symbol_conf.guestmount) {
items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
if (items <= 0)
return -ENOENT;
for (i = 0; i < items; i++) {
if (!isdigit(namelist[i]->d_name[0])) {
/* Filter out . and .. */
continue;
}
pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
if ((*endp != '\0') ||
(endp == namelist[i]->d_name) ||
(errno == ERANGE)) {
pr_debug("invalid directory (%s). Skipping.\n",
namelist[i]->d_name);
continue;
}
sprintf(path, "%s/%s/proc/kallsyms",
symbol_conf.guestmount,
namelist[i]->d_name);
ret = access(path, R_OK);
if (ret) {
pr_debug("Can't access file %s\n", path);
goto failure;
}
machines__create_kernel_maps(machines, pid);
}
failure:
free(namelist);
}
return ret;
}
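machines__create_guest_kernel_maps() expects each guest to be mounted at $guestmount/<pid>/, so a directory name is only accepted if it parses cleanly as a decimal pid before <dir>/proc/kallsyms is probed. A standalone sketch of that filter; the directory names and mount path are invented:

#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Accept only names that are a complete, in-range decimal number. */
static int name_to_pid(const char *name, long *pid)
{
	char *endp;

	if (!isdigit((unsigned char)name[0]))
		return -1;
	errno = 0;
	*pid = strtol(name, &endp, 10);
	if (*endp != '\0' || endp == name || errno == ERANGE)
		return -1;
	return 0;
}

int main(void)
{
	const char *names[] = { ".", "..", "lost+found", "1234" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		long pid;

		if (name_to_pid(names[i], &pid))
			printf("skip %s\n", names[i]);
		else
			printf("guest pid %ld -> /tmp/guestmount/%ld/proc/kallsyms\n",
			       pid, pid);
	}
	return 0;
}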
void machines__destroy_guest_kernel_maps(struct rb_root *machines)
{
struct rb_node *next = rb_first(machines);
while (next) {
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, machines);
machine__delete(pos);
}
}
int machine__load_kallsyms(struct machine *machine, const char *filename,
enum map_type type, symbol_filter_t filter)
{
struct map *map = machine->vmlinux_maps[type];
int ret = dso__load_kallsyms(map->dso, filename, map, filter);
if (ret > 0) {
dso__set_loaded(map->dso, type);
/*
* Since /proc/kallsyms will have multiple sections for the
* kernel, with modules between them, fixup the end of all
* sections.
*/
__map_groups__fixup_end(&machine->kmaps, type);
}
return ret;
}
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
symbol_filter_t filter)
{
struct map *map = machine->vmlinux_maps[type];
int ret = dso__load_vmlinux_path(map->dso, map, filter);
if (ret > 0) {
dso__set_loaded(map->dso, type);
map__reloc_vmlinux(map);
}
return ret;
}


@@ -120,6 +120,8 @@ struct symbol_conf {
};
extern struct symbol_conf symbol_conf;
extern int vmlinux_path__nr_entries;
extern char **vmlinux_path;
static inline void *symbol__priv(struct symbol *sym)
{
@@ -223,6 +225,8 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
size_t symbol__fprintf(struct symbol *sym, FILE *fp);
bool symbol_type__is_a(char symbol_type, enum map_type map_type);
bool symbol__restricted_filename(const char *filename,
const char *restricted_filename);
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, symbol_filter_t filter,


@@ -54,10 +54,10 @@ int thread__comm_len(struct thread *self)
return self->comm_len;
}
static size_t thread__fprintf(struct thread *self, FILE *fp)
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
map_groups__fprintf(&self->mg, verbose, fp);
return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) +
map_groups__fprintf(&thread->mg, verbose, fp);
}
void thread__insert_map(struct thread *self, struct map *map)
@@ -84,17 +84,3 @@ int thread__fork(struct thread *self, struct thread *parent)
return -ENOMEM;
return 0;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
}
return ret;
}


@@ -30,6 +30,7 @@ int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
size_t thread__fprintf(struct thread *thread, FILE *fp);
static inline struct map *thread__find_map(struct thread *self,
enum map_type type, u64 addr)


@@ -26,6 +26,8 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
float samples_per_sec = top->samples / top->delay_secs;
float ksamples_per_sec = top->kernel_samples / top->delay_secs;
float esamples_percent = (100.0 * top->exact_samples) / top->samples;
struct perf_record_opts *opts = &top->record_opts;
struct perf_target *target = &opts->target;
size_t ret = 0;
if (!perf_guest) {
@@ -61,31 +63,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
struct perf_evsel *first = perf_evlist__first(top->evlist);
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
(uint64_t)first->attr.sample_period,
top->freq ? "Hz" : "");
opts->freq ? "Hz" : "");
}
ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
ret += SNPRINTF(bf + ret, size - ret, "], ");
if (top->target.pid)
if (target->pid)
ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
top->target.pid);
else if (top->target.tid)
target->pid);
else if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
top->target.tid);
else if (top->target.uid_str != NULL)
target->tid);
else if (target->uid_str != NULL)
ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
top->target.uid_str);
target->uid_str);
else
ret += SNPRINTF(bf + ret, size - ret, " (all");
if (top->target.cpu_list)
if (target->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
top->evlist->cpus->nr > 1 ? "s" : "",
top->target.cpu_list);
target->cpu_list);
else {
if (top->target.tid)
if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",


@@ -14,7 +14,7 @@ struct perf_session;
struct perf_top {
struct perf_tool tool;
struct perf_evlist *evlist;
struct perf_target target;
struct perf_record_opts record_opts;
/*
* Symbols will be added here in perf_event__process_sample and will
* get out after decayed.
@@ -24,15 +24,11 @@ struct perf_top {
u64 exact_samples;
u64 guest_us_samples, guest_kernel_samples;
int print_entries, count_filter, delay_secs;
int freq;
bool hide_kernel_symbols, hide_user_symbols, zero;
bool use_tui, use_stdio;
bool sort_has_symbols;
bool dont_use_callchains;
bool kptr_restrict_warned;
bool vmlinux_warned;
bool inherit;
bool group;
bool sample_id_all_missing;
bool exclude_guest_missing;
bool dump_symtab;
@@ -40,8 +36,6 @@ struct perf_top {
struct perf_evsel *sym_evsel;
struct perf_session *session;
struct winsize winsize;
unsigned int mmap_pages;
int default_interval;
int realtime_prio;
int sym_pcnt_filter;
const char *sym_filter;