2012-10-07 02:43:20 +08:00
|
|
|
#ifndef __PERF_MACHINE_H
|
|
|
|
#define __PERF_MACHINE_H
|
|
|
|
|
|
|
|
#include <sys/types.h>
|
2012-11-09 22:32:52 +08:00
|
|
|
#include <linux/rbtree.h>
|
|
|
|
#include "map.h"
|
2014-09-30 04:07:28 +08:00
|
|
|
#include "dso.h"
|
2013-11-11 22:28:02 +08:00
|
|
|
#include "event.h"
|
2012-10-07 02:43:20 +08:00
|
|
|
|
2012-12-07 13:48:05 +08:00
|
|
|
struct addr_location;
|
2012-11-09 22:32:52 +08:00
|
|
|
struct branch_stack;
|
|
|
|
struct perf_evsel;
|
|
|
|
struct perf_sample;
|
|
|
|
struct symbol;
|
2012-10-07 02:43:20 +08:00
|
|
|
struct thread;
|
2012-10-07 03:26:02 +08:00
|
|
|
union perf_event;
|
2012-10-07 02:43:20 +08:00
|
|
|
|
2012-11-09 22:32:52 +08:00
|
|
|
/* Native host kernel uses -1 as pid index in machine */
|
|
|
|
#define HOST_KERNEL_ID (-1)
|
|
|
|
#define DEFAULT_GUEST_KERNEL_ID (0)
|
|
|
|
|
2014-01-29 22:14:39 +08:00
|
|
|
extern const char *ref_reloc_sym_names[];
|
|
|
|
|
2014-07-23 19:23:00 +08:00
|
|
|
struct vdso_info;
|
|
|
|
|
2012-11-09 22:32:52 +08:00
|
|
|
struct machine {
|
|
|
|
struct rb_node rb_node;
|
|
|
|
pid_t pid;
|
|
|
|
u16 id_hdr_size;
|
2014-07-31 14:00:45 +08:00
|
|
|
bool comm_exec;
|
2016-05-17 22:56:24 +08:00
|
|
|
bool kptr_restrict_warned;
|
2012-11-09 22:32:52 +08:00
|
|
|
char *root_dir;
|
|
|
|
struct rb_root threads;
|
perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for the struct thread lifetime
management, we need to protect access to machine->threads from
concurrent access.
That happens in 'perf top', where a thread processes events, inserting
and deleting entries from that rb_tree while another thread decays
hist_entries, that end up dropping references and ultimately deleting
threads from the rb_tree and releasing its resources when no further
hist_entry (or other data structures, like in 'perf sched') references
it.
So the rule is the same for refcounts + protected trees in the kernel,
get the tree lock, find object, bump the refcount, drop the tree lock,
return, use object, drop the refcount if no more use of it is needed,
keep it if storing it in some other data structure, drop when releasing
that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() one is because as we return references to
several data structures, we may end up adding more reference counting
for the other data structures and then we'll drop it at
addr_location__put() time.
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-07 07:43:22 +08:00
|
|
|
pthread_rwlock_t threads_lock;
|
2016-05-04 21:09:33 +08:00
|
|
|
unsigned int nr_threads;
|
2012-11-09 22:32:52 +08:00
|
|
|
struct list_head dead_threads;
|
|
|
|
struct thread *last_match;
|
2014-07-23 19:23:00 +08:00
|
|
|
struct vdso_info *vdso_info;
|
2015-09-09 23:25:00 +08:00
|
|
|
struct perf_env *env;
|
2015-05-29 00:06:42 +08:00
|
|
|
struct dsos dsos;
|
2012-11-09 22:32:52 +08:00
|
|
|
struct map_groups kmaps;
|
|
|
|
struct map *vmlinux_maps[MAP__NR_TYPES];
|
2014-08-16 03:08:39 +08:00
|
|
|
u64 kernel_start;
|
2013-08-08 19:32:20 +08:00
|
|
|
symbol_filter_t symbol_filter;
|
2014-07-22 21:17:25 +08:00
|
|
|
pid_t *current_tid;
|
2014-10-23 18:45:13 +08:00
|
|
|
union { /* Tool specific area */
|
|
|
|
void *priv;
|
|
|
|
u64 db_id;
|
|
|
|
};
|
2012-11-09 22:32:52 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static inline
|
2015-09-30 22:54:04 +08:00
|
|
|
struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
|
2012-11-09 22:32:52 +08:00
|
|
|
{
|
|
|
|
return machine->vmlinux_maps[type];
|
|
|
|
}
|
|
|
|
|
2015-09-30 22:54:04 +08:00
|
|
|
static inline
|
|
|
|
struct map *machine__kernel_map(struct machine *machine)
|
|
|
|
{
|
|
|
|
return __machine__kernel_map(machine, MAP__FUNCTION);
|
|
|
|
}
|
|
|
|
|
2014-08-16 03:08:39 +08:00
|
|
|
int machine__get_kernel_start(struct machine *machine);
|
|
|
|
|
|
|
|
static inline u64 machine__kernel_start(struct machine *machine)
|
|
|
|
{
|
|
|
|
if (!machine->kernel_start)
|
|
|
|
machine__get_kernel_start(machine);
|
|
|
|
return machine->kernel_start;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Heuristic: an instruction pointer at or above the kernel start address
 * is considered a kernel address for this machine.
 */
static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
{
	u64 kernel_start = machine__kernel_start(machine);

	return ip >= kernel_start;
}
|
|
|
|
|
2014-03-14 22:00:03 +08:00
|
|
|
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
|
|
|
|
pid_t tid);
|
2014-07-31 14:00:45 +08:00
|
|
|
struct comm *machine__thread_exec_comm(struct machine *machine,
|
|
|
|
struct thread *thread);
|
2012-10-07 02:43:20 +08:00
|
|
|
|
2013-09-11 22:18:24 +08:00
|
|
|
int machine__process_comm_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
|
|
|
int machine__process_exit_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
|
|
|
int machine__process_fork_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
|
|
|
int machine__process_lost_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
2015-05-11 03:13:15 +08:00
|
|
|
int machine__process_lost_samples_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
2015-04-30 22:37:29 +08:00
|
|
|
int machine__process_aux_event(struct machine *machine,
|
|
|
|
union perf_event *event);
|
2015-04-30 22:37:30 +08:00
|
|
|
int machine__process_itrace_start_event(struct machine *machine,
|
|
|
|
union perf_event *event);
|
2016-03-23 00:09:37 +08:00
|
|
|
int machine__process_switch_event(struct machine *machine,
|
2015-07-21 17:44:03 +08:00
|
|
|
union perf_event *event);
|
2013-09-11 22:18:24 +08:00
|
|
|
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
|
|
|
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
|
|
|
int machine__process_event(struct machine *machine, union perf_event *event,
|
|
|
|
struct perf_sample *sample);
|
2012-10-07 03:26:02 +08:00
|
|
|
|
2012-11-09 22:32:52 +08:00
|
|
|
typedef void (*machine__process_t)(struct machine *machine, void *data);
|
|
|
|
|
2012-12-19 06:15:48 +08:00
|
|
|
struct machines {
|
|
|
|
struct machine host;
|
|
|
|
struct rb_root guests;
|
2013-08-08 19:32:20 +08:00
|
|
|
symbol_filter_t symbol_filter;
|
2012-12-19 06:15:48 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
void machines__init(struct machines *machines);
|
|
|
|
void machines__exit(struct machines *machines);
|
|
|
|
|
|
|
|
void machines__process_guests(struct machines *machines,
|
|
|
|
machine__process_t process, void *data);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
2012-12-19 06:15:48 +08:00
|
|
|
struct machine *machines__add(struct machines *machines, pid_t pid,
|
2012-11-09 22:32:52 +08:00
|
|
|
const char *root_dir);
|
2012-12-19 06:15:48 +08:00
|
|
|
struct machine *machines__find_host(struct machines *machines);
|
|
|
|
struct machine *machines__find(struct machines *machines, pid_t pid);
|
|
|
|
struct machine *machines__findnew(struct machines *machines, pid_t pid);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
2012-12-19 06:15:48 +08:00
|
|
|
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
|
2012-11-09 22:32:52 +08:00
|
|
|
char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
|
|
|
|
|
2013-08-08 19:32:20 +08:00
|
|
|
void machines__set_symbol_filter(struct machines *machines,
|
|
|
|
symbol_filter_t symbol_filter);
|
2014-07-31 14:00:45 +08:00
|
|
|
void machines__set_comm_exec(struct machines *machines, bool comm_exec);
|
2013-08-08 19:32:20 +08:00
|
|
|
|
2013-09-29 03:13:00 +08:00
|
|
|
struct machine *machine__new_host(void);
|
2012-11-09 22:32:52 +08:00
|
|
|
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
|
|
|
|
void machine__exit(struct machine *machine);
|
2012-12-08 04:39:39 +08:00
|
|
|
void machine__delete_threads(struct machine *machine);
|
2012-11-09 22:32:52 +08:00
|
|
|
void machine__delete(struct machine *machine);
|
2015-04-10 17:35:00 +08:00
|
|
|
void machine__remove_thread(struct machine *machine, struct thread *th);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
2014-01-23 00:15:36 +08:00
|
|
|
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
|
|
|
|
struct addr_location *al);
|
2014-01-23 00:05:06 +08:00
|
|
|
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
|
|
|
|
struct addr_location *al);
|
2016-04-15 01:48:07 +08:00
|
|
|
|
|
|
|
struct callchain_cursor;
|
|
|
|
|
2014-10-24 02:26:17 +08:00
|
|
|
int thread__resolve_callchain(struct thread *thread,
|
2016-04-15 01:48:07 +08:00
|
|
|
struct callchain_cursor *cursor,
|
2014-10-24 02:26:17 +08:00
|
|
|
struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample,
|
|
|
|
struct symbol **parent,
|
|
|
|
struct addr_location *root_al,
|
|
|
|
int max_stack);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Default guest kernel is defined by parameter --guestkallsyms
|
|
|
|
* and --guestmodules
|
|
|
|
*/
|
|
|
|
static inline bool machine__is_default_guest(struct machine *machine)
|
|
|
|
{
|
|
|
|
return machine ? machine->pid == DEFAULT_GUEST_KERNEL_ID : false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool machine__is_host(struct machine *machine)
|
|
|
|
{
|
|
|
|
return machine ? machine->pid == HOST_KERNEL_ID : false;
|
|
|
|
}
|
|
|
|
|
/*
 * machine->threads is an rb_tree protected by machine->threads_lock and
 * thread refcounts: take the lock, find the thread, grab a reference,
 * drop the lock. Pair machine__find(new)_thread() with thread__put(),
 * and perf_event__preprocess_sample(&al) with addr_location__put(&al).
 */
|
|
|
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
|
|
|
|
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
2015-05-29 22:31:12 +08:00
|
|
|
struct dso *machine__findnew_dso(struct machine *machine, const char *filename);
|
|
|
|
|
2012-11-09 22:32:52 +08:00
|
|
|
size_t machine__fprintf(struct machine *machine, FILE *fp);
|
|
|
|
|
|
|
|
static inline
|
|
|
|
struct symbol *machine__find_kernel_symbol(struct machine *machine,
|
|
|
|
enum map_type type, u64 addr,
|
|
|
|
struct map **mapp,
|
|
|
|
symbol_filter_t filter)
|
|
|
|
{
|
|
|
|
return map_groups__find_symbol(&machine->kmaps, type, addr,
|
|
|
|
mapp, filter);
|
|
|
|
}
|
|
|
|
|
2016-01-26 05:01:57 +08:00
|
|
|
static inline
|
|
|
|
struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
|
|
|
|
enum map_type type, const char *name,
|
|
|
|
struct map **mapp,
|
|
|
|
symbol_filter_t filter)
|
|
|
|
{
|
|
|
|
return map_groups__find_symbol_by_name(&machine->kmaps, type, name,
|
|
|
|
mapp, filter);
|
|
|
|
}
|
|
|
|
|
2012-11-09 22:32:52 +08:00
|
|
|
static inline
|
|
|
|
struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
|
|
|
|
struct map **mapp,
|
|
|
|
symbol_filter_t filter)
|
|
|
|
{
|
|
|
|
return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr,
|
|
|
|
mapp, filter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline
|
|
|
|
struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
|
|
|
|
const char *name,
|
|
|
|
struct map **mapp,
|
|
|
|
symbol_filter_t filter)
|
|
|
|
{
|
|
|
|
return map_groups__find_function_by_name(&machine->kmaps, name, mapp,
|
|
|
|
filter);
|
|
|
|
}
|
|
|
|
|
/*
 * "findnew" semantics: returns the existing module map with the given
 * name if present, otherwise creates it (instantiating its DSO via
 * machine__findnew_module_dso()).
 */
|
|
|
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
|
|
|
|
const char *filename);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
2016-04-19 23:12:49 +08:00
|
|
|
int __machine__load_kallsyms(struct machine *machine, const char *filename,
|
|
|
|
enum map_type type, bool no_kcore, symbol_filter_t filter);
|
2012-11-09 22:32:52 +08:00
|
|
|
int machine__load_kallsyms(struct machine *machine, const char *filename,
|
|
|
|
enum map_type type, symbol_filter_t filter);
|
|
|
|
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
|
|
|
|
symbol_filter_t filter);
|
|
|
|
|
2012-12-07 20:53:58 +08:00
|
|
|
size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
|
|
|
|
bool (skip)(struct dso *dso, int parm), int parm);
|
2012-12-19 06:15:48 +08:00
|
|
|
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp);
|
|
|
|
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
|
2012-12-07 20:53:58 +08:00
|
|
|
bool (skip)(struct dso *dso, int parm), int parm);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
|
|
|
void machine__destroy_kernel_maps(struct machine *machine);
|
|
|
|
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
|
|
|
|
int machine__create_kernel_maps(struct machine *machine);
|
|
|
|
|
2012-12-19 06:15:48 +08:00
|
|
|
int machines__create_kernel_maps(struct machines *machines, pid_t pid);
|
|
|
|
int machines__create_guest_kernel_maps(struct machines *machines);
|
|
|
|
void machines__destroy_kernel_maps(struct machines *machines);
|
2012-11-09 22:32:52 +08:00
|
|
|
|
|
|
|
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
|
|
|
|
|
2013-09-29 03:12:58 +08:00
|
|
|
int machine__for_each_thread(struct machine *machine,
|
|
|
|
int (*fn)(struct thread *thread, void *p),
|
|
|
|
void *priv);
|
2015-05-29 21:33:30 +08:00
|
|
|
int machines__for_each_thread(struct machines *machines,
|
|
|
|
int (*fn)(struct thread *thread, void *p),
|
|
|
|
void *priv);
|
2013-09-29 03:12:58 +08:00
|
|
|
|
2013-11-11 22:36:12 +08:00
|
|
|
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
|
2013-11-13 03:46:16 +08:00
|
|
|
struct target *target, struct thread_map *threads,
|
2015-06-17 21:51:11 +08:00
|
|
|
perf_event__handler_t process, bool data_mmap,
|
|
|
|
unsigned int proc_map_timeout);
|
2013-11-11 22:36:12 +08:00
|
|
|
static inline
|
2013-11-13 03:46:16 +08:00
|
|
|
int machine__synthesize_threads(struct machine *machine, struct target *target,
|
2015-06-17 21:51:11 +08:00
|
|
|
struct thread_map *threads, bool data_mmap,
|
|
|
|
unsigned int proc_map_timeout)
|
2013-11-11 22:36:12 +08:00
|
|
|
{
|
|
|
|
return __machine__synthesize_threads(machine, NULL, target, threads,
|
2015-06-17 21:51:11 +08:00
|
|
|
perf_event__process, data_mmap,
|
|
|
|
proc_map_timeout);
|
2013-11-11 22:36:12 +08:00
|
|
|
}
|
|
|
|
|
2014-07-22 21:17:25 +08:00
|
|
|
pid_t machine__get_current_tid(struct machine *machine, int cpu);
|
|
|
|
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
|
|
|
|
pid_t tid);
|
2015-07-23 03:14:29 +08:00
|
|
|
/*
|
|
|
|
* For use with libtraceevent's pevent_set_function_resolver()
|
|
|
|
*/
|
|
|
|
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
|
2014-07-22 21:17:25 +08:00
|
|
|
|
2012-10-07 02:43:20 +08:00
|
|
|
#endif /* __PERF_MACHINE_H */
|