2014-05-12 13:43:18 +08:00
|
|
|
#include "perf.h"
|
|
|
|
#include "util/debug.h"
|
|
|
|
#include "util/symbol.h"
|
|
|
|
#include "util/sort.h"
|
|
|
|
#include "util/evsel.h"
|
|
|
|
#include "util/evlist.h"
|
|
|
|
#include "util/machine.h"
|
|
|
|
#include "util/thread.h"
|
|
|
|
#include "util/parse-events.h"
|
|
|
|
#include "tests/tests.h"
|
|
|
|
#include "tests/hists_common.h"
|
|
|
|
|
|
|
|
/*
 * A fake input sample plus the addr_location that add_hist_entries()
 * resolves for it, so later tests can compare hist entries against the
 * real thread/map/symbol objects of the fake machine.
 */
struct sample {
	u32 cpu;
	u32 pid;
	u64 ip;
	struct thread *thread;	/* filled in by add_hist_entries() */
	struct map *map;	/* filled in by add_hist_entries() */
	struct symbol *sym;	/* filled in by add_hist_entries() */
};
|
|
|
|
|
|
|
|
/*
 * Ten fake samples, each worth period 100, spread over two perf
 * processes (pid 100 and 200) and one bash process (pid 300).
 * For the numbers, see hists_common.c
 */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .cpu = 3, .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .cpu = 0, .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .cpu = 1, .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};
|
|
|
|
|
|
|
|
static int add_hist_entries(struct hists *hists, struct machine *machine)
|
|
|
|
{
|
|
|
|
struct addr_location al;
|
2013-10-30 08:40:34 +08:00
|
|
|
struct perf_evsel *evsel = hists_to_evsel(hists);
|
2014-05-12 13:43:18 +08:00
|
|
|
struct perf_sample sample = { .period = 100, };
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
|
2013-10-30 08:40:34 +08:00
|
|
|
struct hist_entry_iter iter = {
|
2015-05-19 16:04:10 +08:00
|
|
|
.evsel = evsel,
|
|
|
|
.sample = &sample,
|
2013-10-30 08:40:34 +08:00
|
|
|
.ops = &hist_iter_normal,
|
|
|
|
.hide_unresolved = false,
|
|
|
|
};
|
2014-05-12 13:43:18 +08:00
|
|
|
|
2016-03-23 05:23:43 +08:00
|
|
|
sample.cpumode = PERF_RECORD_MISC_USER;
|
2014-05-12 13:43:18 +08:00
|
|
|
sample.cpu = fake_samples[i].cpu;
|
|
|
|
sample.pid = fake_samples[i].pid;
|
|
|
|
sample.tid = fake_samples[i].pid;
|
|
|
|
sample.ip = fake_samples[i].ip;
|
|
|
|
|
2016-03-23 05:39:09 +08:00
|
|
|
if (machine__resolve(machine, &al, &sample) < 0)
|
2014-05-12 13:43:18 +08:00
|
|
|
goto out;
|
|
|
|
|
2016-04-27 21:16:24 +08:00
|
|
|
if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
|
2015-05-19 16:04:10 +08:00
|
|
|
NULL) < 0) {
|
perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for the struct thread lifetime
management, we need to protect access to machine->threads from
concurrent access.
That happens in 'perf top', where a thread processes events, inserting
and deleting entries from that rb_tree while another thread decays
hist_entries, that end up dropping references and ultimately deleting
threads from the rb_tree and releasing its resources when no further
hist_entry (or other data structures, like in 'perf sched') references
it.
So the rule is the same for refcounts + protected trees in the kernel,
get the tree lock, find object, bump the refcount, drop the tree lock,
return, use object, drop the refcount if no more use of it is needed,
keep it if storing it in some other data structure, drop when releasing
that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() one is because as we return references to
several data structures, we may end up adding more reference counting
for the other data structures and then we'll drop it at
addr_location__put() time.
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-07 07:43:22 +08:00
|
|
|
addr_location__put(&al);
|
2014-05-12 13:43:18 +08:00
|
|
|
goto out;
|
perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for the struct thread lifetime
management, we need to protect access to machine->threads from
concurrent access.
That happens in 'perf top', where a thread processes events, inserting
and deleting entries from that rb_tree while another thread decays
hist_entries, that end up dropping references and ultimately deleting
threads from the rb_tree and releasing its resources when no further
hist_entry (or other data structures, like in 'perf sched') references
it.
So the rule is the same for refcounts + protected trees in the kernel,
get the tree lock, find object, bump the refcount, drop the tree lock,
return, use object, drop the refcount if no more use of it is needed,
keep it if storing it in some other data structure, drop when releasing
that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() one is because as we return references to
several data structures, we may end up adding more reference counting
for the other data structures and then we'll drop it at
addr_location__put() time.
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-07 07:43:22 +08:00
|
|
|
}
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
fake_samples[i].thread = al.thread;
|
|
|
|
fake_samples[i].map = al.map;
|
|
|
|
fake_samples[i].sym = al.sym;
|
|
|
|
}
|
|
|
|
|
|
|
|
return TEST_OK;
|
|
|
|
|
|
|
|
out:
|
|
|
|
pr_debug("Not enough memory for adding a hist entry\n");
|
|
|
|
return TEST_FAIL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void del_hist_entries(struct hists *hists)
|
|
|
|
{
|
|
|
|
struct hist_entry *he;
|
|
|
|
struct rb_root *root_in;
|
|
|
|
struct rb_root *root_out;
|
|
|
|
struct rb_node *node;
|
|
|
|
|
|
|
|
if (sort__need_collapse)
|
|
|
|
root_in = &hists->entries_collapsed;
|
|
|
|
else
|
|
|
|
root_in = hists->entries_in;
|
|
|
|
|
|
|
|
root_out = &hists->entries;
|
|
|
|
|
|
|
|
while (!RB_EMPTY_ROOT(root_out)) {
|
|
|
|
node = rb_first(root_out);
|
|
|
|
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
rb_erase(node, root_out);
|
|
|
|
rb_erase(&he->rb_node_in, root_in);
|
2014-12-19 23:31:40 +08:00
|
|
|
hist_entry__delete(he);
|
2014-05-12 13:43:18 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* signature shared by all sub-tests below */
typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

/* shorthand accessors for the hist_entry fields checked by the tests */
#define COMM(he) (thread__comm_str(he->thread))
#define DSO(he) (he->ms.map->dso->short_name)
#define SYM(he) (he->ms.sym->name)
#define CPU(he) (he->cpu)
#define PID(he) (he->thread->tid)
|
|
|
|
|
|
|
|
/* default sort keys (no field) */
|
|
|
|
static int test1(struct perf_evsel *evsel, struct machine *machine)
|
|
|
|
{
|
|
|
|
int err;
|
2014-10-10 00:13:41 +08:00
|
|
|
struct hists *hists = evsel__hists(evsel);
|
2014-05-12 13:43:18 +08:00
|
|
|
struct hist_entry *he;
|
|
|
|
struct rb_root *root;
|
|
|
|
struct rb_node *node;
|
|
|
|
|
|
|
|
field_order = NULL;
|
|
|
|
sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */
|
|
|
|
|
2015-12-23 01:07:01 +08:00
|
|
|
setup_sorting(NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* expected output:
|
|
|
|
*
|
|
|
|
* Overhead Command Shared Object Symbol
|
|
|
|
* ======== ======= ============= ==============
|
|
|
|
* 20.00% perf perf [.] main
|
|
|
|
* 10.00% bash [kernel] [k] page_fault
|
|
|
|
* 10.00% bash bash [.] main
|
|
|
|
* 10.00% bash bash [.] xmalloc
|
|
|
|
* 10.00% perf [kernel] [k] page_fault
|
|
|
|
* 10.00% perf [kernel] [k] schedule
|
|
|
|
* 10.00% perf libc [.] free
|
|
|
|
* 10.00% perf libc [.] malloc
|
|
|
|
* 10.00% perf perf [.] cmd_record
|
|
|
|
*/
|
|
|
|
err = add_hist_entries(hists, machine);
|
|
|
|
if (err < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
hists__collapse_resort(hists, NULL);
|
2016-01-18 17:24:00 +08:00
|
|
|
perf_evsel__output_resort(evsel, NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
if (verbose > 2) {
|
|
|
|
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
|
|
|
|
print_hists_out(hists);
|
|
|
|
}
|
|
|
|
|
2014-10-10 00:13:41 +08:00
|
|
|
root = &hists->entries;
|
2014-05-12 13:43:18 +08:00
|
|
|
node = rb_first(root);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
|
|
|
|
!strcmp(SYM(he), "main") && he->stat.period == 200);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
!strcmp(SYM(he), "page_fault") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
|
|
|
|
!strcmp(SYM(he), "main") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
|
|
|
|
!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
!strcmp(SYM(he), "page_fault") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
!strcmp(SYM(he), "schedule") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
|
|
|
|
!strcmp(SYM(he), "free") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
|
|
|
|
!strcmp(SYM(he), "malloc") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
|
|
|
|
!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);
|
|
|
|
|
|
|
|
out:
|
|
|
|
del_hist_entries(hists);
|
|
|
|
reset_output_field();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* mixed fields and sort keys */
|
|
|
|
static int test2(struct perf_evsel *evsel, struct machine *machine)
|
|
|
|
{
|
|
|
|
int err;
|
2014-10-10 00:13:41 +08:00
|
|
|
struct hists *hists = evsel__hists(evsel);
|
2014-05-12 13:43:18 +08:00
|
|
|
struct hist_entry *he;
|
|
|
|
struct rb_root *root;
|
|
|
|
struct rb_node *node;
|
|
|
|
|
|
|
|
field_order = "overhead,cpu";
|
|
|
|
sort_order = "pid";
|
|
|
|
|
2015-12-23 01:07:01 +08:00
|
|
|
setup_sorting(NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* expected output:
|
|
|
|
*
|
|
|
|
* Overhead CPU Command: Pid
|
|
|
|
* ======== === =============
|
|
|
|
* 30.00% 1 perf : 100
|
|
|
|
* 10.00% 0 perf : 100
|
|
|
|
* 10.00% 2 perf : 100
|
|
|
|
* 20.00% 2 perf : 200
|
|
|
|
* 10.00% 0 bash : 300
|
|
|
|
* 10.00% 1 bash : 300
|
|
|
|
* 10.00% 3 bash : 300
|
|
|
|
*/
|
|
|
|
err = add_hist_entries(hists, machine);
|
|
|
|
if (err < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
hists__collapse_resort(hists, NULL);
|
2016-01-18 17:24:00 +08:00
|
|
|
perf_evsel__output_resort(evsel, NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
if (verbose > 2) {
|
|
|
|
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
|
|
|
|
print_hists_out(hists);
|
|
|
|
}
|
|
|
|
|
2014-10-10 00:13:41 +08:00
|
|
|
root = &hists->entries;
|
2014-05-12 13:43:18 +08:00
|
|
|
node = rb_first(root);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100);
|
|
|
|
|
|
|
|
out:
|
|
|
|
del_hist_entries(hists);
|
|
|
|
reset_output_field();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fields only (no sort key) */
|
|
|
|
static int test3(struct perf_evsel *evsel, struct machine *machine)
|
|
|
|
{
|
|
|
|
int err;
|
2014-10-10 00:13:41 +08:00
|
|
|
struct hists *hists = evsel__hists(evsel);
|
2014-05-12 13:43:18 +08:00
|
|
|
struct hist_entry *he;
|
|
|
|
struct rb_root *root;
|
|
|
|
struct rb_node *node;
|
|
|
|
|
|
|
|
field_order = "comm,overhead,dso";
|
|
|
|
sort_order = NULL;
|
|
|
|
|
2015-12-23 01:07:01 +08:00
|
|
|
setup_sorting(NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* expected output:
|
|
|
|
*
|
|
|
|
* Command Overhead Shared Object
|
|
|
|
* ======= ======== =============
|
|
|
|
* bash 20.00% bash
|
|
|
|
* bash 10.00% [kernel]
|
|
|
|
* perf 30.00% perf
|
|
|
|
* perf 20.00% [kernel]
|
|
|
|
* perf 20.00% libc
|
|
|
|
*/
|
|
|
|
err = add_hist_entries(hists, machine);
|
|
|
|
if (err < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
hists__collapse_resort(hists, NULL);
|
2016-01-18 17:24:00 +08:00
|
|
|
perf_evsel__output_resort(evsel, NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
if (verbose > 2) {
|
|
|
|
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
|
|
|
|
print_hists_out(hists);
|
|
|
|
}
|
|
|
|
|
2014-10-10 00:13:41 +08:00
|
|
|
root = &hists->entries;
|
2014-05-12 13:43:18 +08:00
|
|
|
node = rb_first(root);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
|
|
|
|
he->stat.period == 200);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
|
|
|
|
he->stat.period == 300);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
he->stat.period == 200);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
|
|
|
|
he->stat.period == 200);
|
|
|
|
|
|
|
|
out:
|
|
|
|
del_hist_entries(hists);
|
|
|
|
reset_output_field();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* handle duplicate 'dso' field */
|
|
|
|
static int test4(struct perf_evsel *evsel, struct machine *machine)
|
|
|
|
{
|
|
|
|
int err;
|
2014-10-10 00:13:41 +08:00
|
|
|
struct hists *hists = evsel__hists(evsel);
|
2014-05-12 13:43:18 +08:00
|
|
|
struct hist_entry *he;
|
|
|
|
struct rb_root *root;
|
|
|
|
struct rb_node *node;
|
|
|
|
|
|
|
|
field_order = "dso,sym,comm,overhead,dso";
|
|
|
|
sort_order = "sym";
|
|
|
|
|
2015-12-23 01:07:01 +08:00
|
|
|
setup_sorting(NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* expected output:
|
|
|
|
*
|
|
|
|
* Shared Object Symbol Command Overhead
|
|
|
|
* ============= ============== ======= ========
|
|
|
|
* perf [.] cmd_record perf 10.00%
|
|
|
|
* libc [.] free perf 10.00%
|
|
|
|
* bash [.] main bash 10.00%
|
|
|
|
* perf [.] main perf 20.00%
|
|
|
|
* libc [.] malloc perf 10.00%
|
|
|
|
* [kernel] [k] page_fault bash 10.00%
|
|
|
|
* [kernel] [k] page_fault perf 10.00%
|
|
|
|
* [kernel] [k] schedule perf 10.00%
|
|
|
|
* bash [.] xmalloc bash 10.00%
|
|
|
|
*/
|
|
|
|
err = add_hist_entries(hists, machine);
|
|
|
|
if (err < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
hists__collapse_resort(hists, NULL);
|
2016-01-18 17:24:00 +08:00
|
|
|
perf_evsel__output_resort(evsel, NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
if (verbose > 2) {
|
|
|
|
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
|
|
|
|
print_hists_out(hists);
|
|
|
|
}
|
|
|
|
|
2014-10-10 00:13:41 +08:00
|
|
|
root = &hists->entries;
|
2014-05-12 13:43:18 +08:00
|
|
|
node = rb_first(root);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
|
|
|
|
!strcmp(COMM(he), "perf") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") &&
|
|
|
|
!strcmp(COMM(he), "perf") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") &&
|
|
|
|
!strcmp(COMM(he), "bash") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") &&
|
|
|
|
!strcmp(COMM(he), "perf") && he->stat.period == 200);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") &&
|
|
|
|
!strcmp(COMM(he), "perf") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
|
|
|
|
!strcmp(COMM(he), "bash") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
|
|
|
|
!strcmp(COMM(he), "perf") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") &&
|
|
|
|
!strcmp(COMM(he), "perf") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") &&
|
|
|
|
!strcmp(COMM(he), "bash") && he->stat.period == 100);
|
|
|
|
|
|
|
|
out:
|
|
|
|
del_hist_entries(hists);
|
|
|
|
reset_output_field();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* full sort keys w/o overhead field */
|
|
|
|
static int test5(struct perf_evsel *evsel, struct machine *machine)
|
|
|
|
{
|
|
|
|
int err;
|
2014-10-10 00:13:41 +08:00
|
|
|
struct hists *hists = evsel__hists(evsel);
|
2014-05-12 13:43:18 +08:00
|
|
|
struct hist_entry *he;
|
|
|
|
struct rb_root *root;
|
|
|
|
struct rb_node *node;
|
|
|
|
|
|
|
|
field_order = "cpu,pid,comm,dso,sym";
|
|
|
|
sort_order = "dso,pid";
|
|
|
|
|
2015-12-23 01:07:01 +08:00
|
|
|
setup_sorting(NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* expected output:
|
|
|
|
*
|
|
|
|
* CPU Command: Pid Command Shared Object Symbol
|
|
|
|
* === ============= ======= ============= ==============
|
|
|
|
* 0 perf: 100 perf [kernel] [k] schedule
|
|
|
|
* 2 perf: 200 perf [kernel] [k] page_fault
|
|
|
|
* 1 bash: 300 bash [kernel] [k] page_fault
|
|
|
|
* 0 bash: 300 bash bash [.] xmalloc
|
|
|
|
* 3 bash: 300 bash bash [.] main
|
|
|
|
* 1 perf: 100 perf libc [.] malloc
|
|
|
|
* 2 perf: 100 perf libc [.] free
|
|
|
|
* 1 perf: 100 perf perf [.] cmd_record
|
|
|
|
* 1 perf: 100 perf perf [.] main
|
|
|
|
* 2 perf: 200 perf perf [.] main
|
|
|
|
*/
|
|
|
|
err = add_hist_entries(hists, machine);
|
|
|
|
if (err < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
hists__collapse_resort(hists, NULL);
|
2016-01-18 17:24:00 +08:00
|
|
|
perf_evsel__output_resort(evsel, NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
if (verbose > 2) {
|
|
|
|
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
|
|
|
|
print_hists_out(hists);
|
|
|
|
}
|
|
|
|
|
2014-10-10 00:13:41 +08:00
|
|
|
root = &hists->entries;
|
2014-05-12 13:43:18 +08:00
|
|
|
node = rb_first(root);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 0 && PID(he) == 100 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
!strcmp(SYM(he), "schedule") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 2 && PID(he) == 200 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
!strcmp(SYM(he), "page_fault") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 1 && PID(he) == 300 &&
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
|
|
|
|
!strcmp(SYM(he), "page_fault") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 0 && PID(he) == 300 &&
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
|
|
|
|
!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 3 && PID(he) == 300 &&
|
|
|
|
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
|
|
|
|
!strcmp(SYM(he), "main") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 1 && PID(he) == 100 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
|
|
|
|
!strcmp(SYM(he), "malloc") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 2 && PID(he) == 100 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
|
|
|
|
!strcmp(SYM(he), "free") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 1 && PID(he) == 100 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
|
|
|
|
!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 1 && PID(he) == 100 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
|
|
|
|
!strcmp(SYM(he), "main") && he->stat.period == 100);
|
|
|
|
|
|
|
|
node = rb_next(node);
|
|
|
|
he = rb_entry(node, struct hist_entry, rb_node);
|
|
|
|
TEST_ASSERT_VAL("Invalid hist entry",
|
|
|
|
CPU(he) == 2 && PID(he) == 200 &&
|
|
|
|
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
|
|
|
|
!strcmp(SYM(he), "main") && he->stat.period == 100);
|
|
|
|
|
|
|
|
out:
|
|
|
|
del_hist_entries(hists);
|
|
|
|
reset_output_field();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
perf tests: Pass the subtest index to each test routine
Some tests have sub-tests we want to run, so allow passing this.
Wang tried to avoid having to touch all tests, but then, having the
test.func in an anonymous union makes the build fail on older compilers,
like the one in RHEL6, where:
test a = {
.func = foo,
};
fails.
To fix it leave the func pointer in the main structure and pass the subtest
index to all tests, end result function is the same, but we have just one
function pointer, not two, with and without the subtest index as an argument.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-5genj0ficwdmelpoqlds0u4y@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-11-19 23:01:48 +08:00
|
|
|
int test__hists_output(int subtest __maybe_unused)
|
2014-05-12 13:43:18 +08:00
|
|
|
{
|
|
|
|
int err = TEST_FAIL;
|
|
|
|
struct machines machines;
|
|
|
|
struct machine *machine;
|
|
|
|
struct perf_evsel *evsel;
|
|
|
|
struct perf_evlist *evlist = perf_evlist__new();
|
|
|
|
size_t i;
|
|
|
|
test_fn_t testcases[] = {
|
|
|
|
test1,
|
|
|
|
test2,
|
|
|
|
test3,
|
|
|
|
test4,
|
|
|
|
test5,
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST_ASSERT_VAL("No memory", evlist);
|
|
|
|
|
perf tools: Add parse_events_error interface
Adding support to return error information from parse_events function.
Following struct will be populated by parse_events function on return:
struct parse_events_error {
int idx;
char *str;
char *help;
};
where 'idx' is the position in the string where the parsing failed,
'str' contains dynamically allocated error string describing the error
and 'help' is optional help string.
The change contains reporting function, which currently does not display
anything. The code changes to supply error data for specific event types
are coming in next patches. However this is what the expected output is:
$ sudo perf record -e 'sched:krava' ls
event syntax error: 'sched:krava'
\___ unknown tracepoint
...
$ perf record -e 'cpu/even=0x1/' ls
event syntax error: 'cpu/even=0x1/'
\___ unknown term
valid terms: pc,any,inv,edge,cmask,event,in_tx,ldlat,umask,in_tx_cp,offcore_rsp,config,config1,config2,name,period,branch_type
...
$ perf record -e cycles,cache-mises ls
event syntax error: '..es,cache-mises'
\___ parser error
...
The output functions cut the beginning of the event string so the error
starts up to 10th character and cut the end of the string of it crosses
the terminal width.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1429729824-13932-2-git-send-email-jolsa@kernel.org
[ Renamed 'error' variables to 'err', not to clash with util.h error() ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-23 03:10:16 +08:00
|
|
|
err = parse_events(evlist, "cpu-clock", NULL);
|
2014-05-12 13:43:18 +08:00
|
|
|
if (err)
|
|
|
|
goto out;
|
2016-01-11 21:48:03 +08:00
|
|
|
err = TEST_FAIL;
|
2014-05-12 13:43:18 +08:00
|
|
|
|
|
|
|
machines__init(&machines);
|
|
|
|
|
|
|
|
/* setup threads/dso/map/symbols also */
|
|
|
|
machine = setup_fake_machine(&machines);
|
|
|
|
if (!machine)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (verbose > 1)
|
|
|
|
machine__fprintf(machine, stderr);
|
|
|
|
|
|
|
|
evsel = perf_evlist__first(evlist);
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(testcases); i++) {
|
|
|
|
err = testcases[i](evsel, machine);
|
|
|
|
if (err < 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
/* tear down everything */
|
|
|
|
perf_evlist__delete(evlist);
|
|
|
|
machines__exit(&machines);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|