// SPDX-License-Identifier: GPL-2.0-only
/*
 * db-export.c: Support for exporting data suitable for import to a database
 * Copyright (c) 2014, Intel Corporation.
 */

#include <errno.h>
#include <stdlib.h>	/* free() */

#include "evsel.h"
#include "machine.h"
#include "thread.h"
#include "comm.h"
#include "symbol.h"
#include "map.h"
#include "event.h"
#include "util.h"
#include "thread-stack.h"
#include "callchain.h"
#include "call-path.h"
#include "db-export.h"

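/*
 * A comm cannot be exported until the main thread's comm is known to be
 * set (see db_export__comm()), so such exports are queued on
 * dbe->deferred and emitted later by db_export__flush().
 */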
struct deferred_export {
	struct list_head node;
	struct comm *comm;
};

static int db_export__deferred(struct db_export *dbe)
{
	struct deferred_export *de;
	int err;

	while (!list_empty(&dbe->deferred)) {
		de = list_entry(dbe->deferred.next, struct deferred_export,
				node);
		err = dbe->export_comm(dbe, de->comm);
		list_del(&de->node);
		free(de);
		if (err)
			return err;
	}

	return 0;
}

static void db_export__free_deferred(struct db_export *dbe)
{
	struct deferred_export *de;

	while (!list_empty(&dbe->deferred)) {
		de = list_entry(dbe->deferred.next, struct deferred_export,
				node);
		list_del(&de->node);
		free(de);
	}
}

static int db_export__defer_comm(struct db_export *dbe, struct comm *comm)
{
	struct deferred_export *de;

	de = zalloc(sizeof(struct deferred_export));
	if (!de)
		return -ENOMEM;

	de->comm = comm;
	list_add_tail(&de->node, &dbe->deferred);

	return 0;
}

int db_export__init(struct db_export *dbe)
{
	memset(dbe, 0, sizeof(struct db_export));
	INIT_LIST_HEAD(&dbe->deferred);
	return 0;
}

int db_export__flush(struct db_export *dbe)
{
	return db_export__deferred(dbe);
}

void db_export__exit(struct db_export *dbe)
{
	db_export__free_deferred(dbe);
	call_return_processor__free(dbe->crp);
	dbe->crp = NULL;
}

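/*
 * Illustrative lifecycle sketch (the real consumers are the scripting
 * engines, e.g. util/scripting-engines/trace-event-python.c; callbacks
 * named my_* below are hypothetical):
 *
 *	struct db_export dbe;
 *
 *	db_export__init(&dbe);
 *	dbe.export_comm = my_export_comm;
 *	... feed events through db_export__sample() ...
 *	db_export__flush(&dbe);		// emits any deferred comms
 *	db_export__exit(&dbe);
 *
 * The exporters below share one pattern: assign a db_id the first time
 * an object is seen, then invoke the corresponding export callback, if
 * the tool registered one.
 */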
int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel)
{
	if (evsel->db_id)
		return 0;

	evsel->db_id = ++dbe->evsel_last_db_id;

	if (dbe->export_evsel)
		return dbe->export_evsel(dbe, evsel);

	return 0;
}

int db_export__machine(struct db_export *dbe, struct machine *machine)
{
	if (machine->db_id)
		return 0;

	machine->db_id = ++dbe->machine_last_db_id;

	if (dbe->export_machine)
		return dbe->export_machine(dbe, machine);

	return 0;
}

int db_export__thread(struct db_export *dbe, struct thread *thread,
		      struct machine *machine, struct comm *comm)
{
	struct thread *main_thread;
	u64 main_thread_db_id = 0;
	int err;

	if (thread->db_id)
		return 0;

	thread->db_id = ++dbe->thread_last_db_id;

	if (thread->pid_ != -1) {
		if (thread->pid_ == thread->tid) {
			main_thread = thread;
		} else {
			main_thread = machine__findnew_thread(machine,
							      thread->pid_,
							      thread->pid_);
			if (!main_thread)
				return -ENOMEM;
			err = db_export__thread(dbe, main_thread, machine,
						comm);
			if (err)
				goto out_put;
			if (comm) {
				err = db_export__comm_thread(dbe, comm, thread);
				if (err)
					goto out_put;
			}
		}
		main_thread_db_id = main_thread->db_id;
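		/*
		 * machine__findnew_thread() returned main_thread with a
		 * reference taken, so pair the lookup with a thread__put()
		 * now that main_thread_db_id has been recorded.
		 */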
		if (main_thread != thread)
			thread__put(main_thread);
	}

	if (dbe->export_thread)
		return dbe->export_thread(dbe, thread, main_thread_db_id,
					  machine);

	return 0;

out_put:
	thread__put(main_thread);
	return err;
}

int db_export__comm(struct db_export *dbe, struct comm *comm,
		    struct thread *main_thread)
{
	int err;

	if (comm->db_id)
		return 0;

	comm->db_id = ++dbe->comm_last_db_id;

	if (dbe->export_comm) {
		if (main_thread->comm_set)
			err = dbe->export_comm(dbe, comm);
		else
			err = db_export__defer_comm(dbe, comm);
		if (err)
			return err;
	}

	return db_export__comm_thread(dbe, comm, main_thread);
}

int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
			   struct thread *thread)
{
	u64 db_id;

	db_id = ++dbe->comm_thread_last_db_id;

	if (dbe->export_comm_thread)
		return dbe->export_comm_thread(dbe, db_id, comm, thread);

	return 0;
}

int db_export__dso(struct db_export *dbe, struct dso *dso,
		   struct machine *machine)
{
	if (dso->db_id)
		return 0;

	dso->db_id = ++dbe->dso_last_db_id;

	if (dbe->export_dso)
		return dbe->export_dso(dbe, dso, machine);

	return 0;
}

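/*
 * A symbol has no db_id field of its own: the id lives in the per-symbol
 * private data returned by symbol__priv(), which assumes the tool has
 * reserved room for a u64 via symbol_conf.priv_size.
 */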
int db_export__symbol(struct db_export *dbe, struct symbol *sym,
		      struct dso *dso)
{
	u64 *sym_db_id = symbol__priv(sym);

	if (*sym_db_id)
		return 0;

	*sym_db_id = ++dbe->symbol_last_db_id;

	if (dbe->export_symbol)
		return dbe->export_symbol(dbe, sym, dso);

	return 0;
}

static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
			  u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
{
	int err;

	if (al->map) {
		struct dso *dso = al->map->dso;

		err = db_export__dso(dbe, dso, al->machine);
		if (err)
			return err;
		*dso_db_id = dso->db_id;

		if (!al->sym) {
			al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
			if (al->sym)
				dso__insert_symbol(dso, al->sym);
		}

		if (al->sym) {
			u64 *db_id = symbol__priv(al->sym);

			err = db_export__symbol(dbe, al->sym, dso);
			if (err)
				return err;
			*sym_db_id = *db_id;
			*offset = al->addr - al->sym->start;
		}
	}

	return 0;
}

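/*
 * Resolve the sample's callchain root-first and fold each node into the
 * call path tree, returning the leaf call path, or NULL if there is no
 * usable callchain.
 */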
static struct call_path *call_path_from_sample(struct db_export *dbe,
					       struct machine *machine,
					       struct thread *thread,
					       struct perf_sample *sample,
					       struct perf_evsel *evsel)
{
	u64 kernel_start = machine__kernel_start(machine);
	struct call_path *current = &dbe->cpr->call_path;
	enum chain_order saved_order = callchain_param.order;
	int err;

	if (!symbol_conf.use_callchain || !sample->callchain)
		return NULL;

	/*
	 * Since the call path tree must be built starting with the root, we
	 * must use ORDER_CALLER for call chain resolution, in order to
	 * process the callchain starting with the root node and ending with
	 * the leaf.
	 */
	callchain_param.order = ORDER_CALLER;
	err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
					sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
	if (err) {
		callchain_param.order = saved_order;
		return NULL;
	}
	callchain_cursor_commit(&callchain_cursor);

	while (1) {
		struct callchain_cursor_node *node;
		struct addr_location al;
		u64 dso_db_id = 0, sym_db_id = 0, offset = 0;

		memset(&al, 0, sizeof(al));

		node = callchain_cursor_current(&callchain_cursor);
		if (!node)
			break;
		/*
		 * Handle export of symbol and dso for this node by
		 * constructing an addr_location struct and then passing it to
		 * db_ids_from_al() to perform the export.
		 */
		al.sym = node->sym;
		al.map = node->map;
		al.machine = machine;
		al.addr = node->ip;

		if (al.map && !al.sym)
			al.sym = dso__find_symbol(al.map->dso, al.addr);

		db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);

		/* add node to the call path tree if it doesn't exist */
		current = call_path__findnew(dbe->cpr, current,
					     al.sym, node->ip,
					     kernel_start);

		callchain_cursor_advance(&callchain_cursor);
	}

	/* Reset the callchain order to its prior value. */
	callchain_param.order = saved_order;

	if (current == &dbe->cpr->call_path) {
		/* Bail because the callchain was empty. */
		return NULL;
	}

	return current;
}

int db_export__branch_type(struct db_export *dbe, u32 branch_type,
			   const char *name)
{
	if (dbe->export_branch_type)
		return dbe->export_branch_type(dbe, branch_type, name);

	return 0;
}

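/*
 * Export everything a sample references (evsel, machine, thread, comm,
 * dso, symbol, call path), then hand the assembled export_sample to the
 * tool's export_sample() callback.
 */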
int db_export__sample(struct db_export *dbe, union perf_event *event,
		      struct perf_sample *sample, struct perf_evsel *evsel,
		      struct addr_location *al)
{
	struct thread *thread = al->thread;
	struct export_sample es = {
		.event = event,
		.sample = sample,
		.evsel = evsel,
		.al = al,
	};
	struct thread *main_thread;
	struct comm *comm = NULL;
	int err;

	err = db_export__evsel(dbe, evsel);
	if (err)
		return err;

	err = db_export__machine(dbe, al->machine);
	if (err)
		return err;

	main_thread = thread__main_thread(al->machine, thread);
	if (main_thread)
		comm = machine__thread_exec_comm(al->machine, main_thread);

	err = db_export__thread(dbe, thread, al->machine, comm);
	if (err)
		goto out_put;

	if (comm) {
		err = db_export__comm(dbe, comm, main_thread);
		if (err)
			goto out_put;
		es.comm_db_id = comm->db_id;
	}

	es.db_id = ++dbe->sample_last_db_id;

	err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset);
	if (err)
		goto out_put;

	if (dbe->cpr) {
		struct call_path *cp = call_path_from_sample(dbe, al->machine,
							     thread, sample,
							     evsel);
		if (cp) {
			db_export__call_path(dbe, cp);
			es.call_path_id = cp->db_id;
		}
	}

	if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
	    sample_addr_correlates_sym(&evsel->attr)) {
		struct addr_location addr_al;

		thread__resolve(thread, &addr_al, sample);
		err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id,
				     &es.addr_sym_db_id, &es.addr_offset);
		if (err)
			goto out_put;
		if (dbe->crp) {
			err = thread_stack__process(thread, comm, sample, al,
						    &addr_al, es.db_id,
						    dbe->crp);
			if (err)
				goto out_put;
		}
	}

	if (dbe->export_sample)
		err = dbe->export_sample(dbe, &es);

out_put:
	thread__put(main_thread);
	return err;
}

static struct {
	u32 branch_type;
	const char *name;
} branch_types[] = {
	{0, "no branch"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"},
	{PERF_IP_FLAG_BRANCH, "unconditional jump"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT,
	 "software interrupt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT,
	 "return from interrupt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET,
	 "system call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET,
	 "return from system call"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
	 PERF_IP_FLAG_INTERRUPT, "hardware interrupt"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
	{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
	{0, NULL}
};

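/*
 * Export every entry in branch_types[], then synthesize and export the
 * "trace begin / X" and "X / trace end" variants for the combinable
 * types.
 */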
int db_export__branch_types(struct db_export *dbe)
{
	int i, err = 0;

	for (i = 0; branch_types[i].name ; i++) {
		err = db_export__branch_type(dbe, branch_types[i].branch_type,
					     branch_types[i].name);
		if (err)
			break;
	}

	/* Add trace begin / end variants */
	for (i = 0; branch_types[i].name ; i++) {
		const char *name = branch_types[i].name;
		u32 type = branch_types[i].branch_type;
		char buf[64];

		if (type == PERF_IP_FLAG_BRANCH ||
		    (type & (PERF_IP_FLAG_TRACE_BEGIN | PERF_IP_FLAG_TRACE_END)))
			continue;

		snprintf(buf, sizeof(buf), "trace begin / %s", name);
		err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_BEGIN, buf);
		if (err)
			break;

		snprintf(buf, sizeof(buf), "%s / trace end", name);
		err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_END, buf);
		if (err)
			break;
	}

	return err;
}

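/*
 * Call paths are exported parent-first (by recursing on cp->parent), so
 * a child row's parent id always refers to an already-exported row.
 */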
int db_export__call_path(struct db_export *dbe, struct call_path *cp)
{
	int err;

	if (cp->db_id)
		return 0;

	if (cp->parent) {
		err = db_export__call_path(dbe, cp->parent);
		if (err)
			return err;
	}

	cp->db_id = ++dbe->call_path_last_db_id;

	if (dbe->export_call_path)
		return dbe->export_call_path(dbe, cp);

	return 0;
}

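/*
 * A child's row must record its parent's id before the parent call
 * returns and is itself exported, so the parent's id is reserved here
 * through *parent_db_id; the !cr->db_id check honours an id that was
 * reserved that way earlier.
 */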
int db_export__call_return(struct db_export *dbe, struct call_return *cr,
			   u64 *parent_db_id)
{
	int err;

	err = db_export__call_path(dbe, cr->cp);
	if (err)
		return err;

	if (!cr->db_id)
		cr->db_id = ++dbe->call_return_last_db_id;

	if (parent_db_id) {
		if (!*parent_db_id)
			*parent_db_id = ++dbe->call_return_last_db_id;
		cr->parent_db_id = *parent_db_id;
	}

	if (dbe->export_call_return)
		return dbe->export_call_return(dbe, cr);

	return 0;
}