#include "util.h"
#include "../perf.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
#include "cache.h"

int nr_counters;

struct perf_counter_attr attrs[MAX_COUNTERS];

struct event_symbol {
	u8		type;
	u64		config;
	const char	*symbol;
	const char	*alias;
};

enum event_result {
	EVT_FAILED,
	EVT_HANDLED,
	EVT_HANDLED_ALL
};

char debugfs_path[MAXPATHLEN];

#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x

static struct event_symbol event_symbols[] = {
	{ CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
	{ CHW(INSTRUCTIONS), "instructions", "" },
	{ CHW(CACHE_REFERENCES), "cache-references", "" },
	{ CHW(CACHE_MISSES), "cache-misses", "" },
	{ CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
	{ CHW(BRANCH_MISSES), "branch-misses", "" },
	{ CHW(BUS_CYCLES), "bus-cycles", "" },

	{ CSW(CPU_CLOCK), "cpu-clock", "" },
	{ CSW(TASK_CLOCK), "task-clock", "" },
	{ CSW(PAGE_FAULTS), "page-faults", "faults" },
	{ CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
	{ CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
	{ CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
	{ CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};

#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)

static const char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};

static const char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};

#define MAX_ALIASES 8
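
/*
 * Alias tables for the generalized hardware cache events
 * (cache unit x operation x result), e.g. "L1-dcache-load-misses".
 * parse_generic_hw_event() accepts any alias within a row;
 * event_cache_name() prints the canonical (first) spelling.
 */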
static const char *hw_cache[][MAX_ALIASES] = {
	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
	{ "LLC", "L2" },
	{ "dTLB", "d-tlb", "Data-TLB", },
	{ "iTLB", "i-tlb", "Instruction-TLB", },
	{ "branch", "branches", "bpu", "btb", "bpc", },
};

static const char *hw_cache_op[][MAX_ALIASES] = {
	{ "load", "loads", "read", },
	{ "store", "stores", "write", },
	{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

static const char *hw_cache_result[][MAX_ALIASES] = {
	{ "refs", "Reference", "ops", "access", },
	{ "misses", "miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * Valid cache operations per cache type:
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
};
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)		\
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	\
	if (sys_dirent.d_type == DT_DIR &&				\
	    (strcmp(sys_dirent.d_name, ".")) &&				\
	    (strcmp(sys_dirent.d_name, "..")))

static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
	char evt_path[MAXPATHLEN];
	int fd;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_dir->d_name, evt_dir->d_name);
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return -EINVAL;
	close(fd);

	return 0;
}

#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	\
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	\
	if (evt_dirent.d_type == DT_DIR &&				\
	    (strcmp(evt_dirent.d_name, ".")) &&				\
	    (strcmp(evt_dirent.d_name, "..")) &&			\
	    (!tp_event_has_id(&sys_dirent, &evt_dirent)))

#define MAX_EVENT_LENGTH 512
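
/* Return 0 if @debugfs is a mounted debugfs filesystem, -ENOENT otherwise. */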
int valid_debugfs_mount(const char *debugfs)
{
	struct statfs st_fs;

	if (statfs(debugfs, &st_fs) < 0)
		return -ENOENT;
	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
		return -ENOENT;
	return 0;
}
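
/*
 * Walk every <sys>/<event>/id file below debugfs_path looking for @config.
 * On a match, returns a freshly allocated tracepoint_path (system + name)
 * that the caller must free; returns NULL when debugfs is not mounted or
 * no tracepoint has that id.
 */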
struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
	struct tracepoint_path *path = NULL;
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char id_buf[4];
	int sys_dir_fd, fd;
	u64 id;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return NULL;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		return NULL;	/* nothing opened yet, avoid closedir(NULL) */
	sys_dir_fd = dirfd(sys_dir);

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
		if (dfd == -1)
			continue;
		evt_dir = fdopendir(dfd);
		if (!evt_dir) {
			close(dfd);
			continue;
		}
		evt_dir_fd = dirfd(evt_dir);
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s/id",
				 evt_dirent.d_name);
			fd = openat(evt_dir_fd, evt_path, O_RDONLY);
			if (fd < 0)
				continue;
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
				close(fd);
				continue;
			}
			close(fd);
			id = atoll(id_buf);
			if (id == config) {
				closedir(evt_dir);
				closedir(sys_dir);
				/* allocate the struct, not just a pointer's worth */
				path = calloc(1, sizeof(*path));
				if (!path)
					return NULL;
				path->system = malloc(MAX_EVENT_LENGTH);
				if (!path->system) {
					free(path);
					return NULL;
				}
				path->name = malloc(MAX_EVENT_LENGTH);
				if (!path->name) {
					free(path->system);
					free(path);
					return NULL;
				}
				strncpy(path->system, sys_dirent.d_name,
					MAX_EVENT_LENGTH);
				strncpy(path->name, evt_dirent.d_name,
					MAX_EVENT_LENGTH);
				return path;
			}
		}
		closedir(evt_dir);
	}

	closedir(sys_dir);
	return NULL;
}

#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
static const char *tracepoint_id_to_name(u64 config)
{
	static char buf[TP_PATH_LEN];
	struct tracepoint_path *path;

	path = tracepoint_id_to_path(config);
	if (path) {
		snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
		free(path->name);
		free(path->system);
		free(path);
	} else
		snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");

	return buf;
}

static int is_cache_op_valid(u8 cache_type, u8 cache_op)
{
	if (hw_cache_stat[cache_type] & COP(cache_op))
		return 1;	/* valid */
	else
		return 0;	/* invalid */
}

static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
{
	static char name[50];

	if (cache_result) {
		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][0],
			hw_cache_result[cache_result][0]);
	} else {
		sprintf(name, "%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][1]);
	}

	return name;
}

const char *event_name(int counter)
{
	u64 config = attrs[counter].config;
	int type = attrs[counter].type;

	return __event_name(type, config);
}

const char *__event_name(int type, u64 config)
{
	static char buf[32];

	if (type == PERF_TYPE_RAW) {
		sprintf(buf, "raw 0x%llx", config);
		return buf;
	}

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config < PERF_COUNT_HW_MAX)
			return hw_event_names[config];
		return "unknown-hardware";

	case PERF_TYPE_HW_CACHE: {
		u8 cache_type, cache_op, cache_result;

		cache_type = (config >> 0) & 0xff;
		if (cache_type > PERF_COUNT_HW_CACHE_MAX)
			return "unknown-ext-hardware-cache-type";

		cache_op = (config >> 8) & 0xff;
		if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
			return "unknown-ext-hardware-cache-op";

		cache_result = (config >> 16) & 0xff;
		if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
			return "unknown-ext-hardware-cache-result";

		if (!is_cache_op_valid(cache_type, cache_op))
			return "invalid-cache";

		return event_cache_name(cache_type, cache_op, cache_result);
	}

	case PERF_TYPE_SOFTWARE:
		if (config < PERF_COUNT_SW_MAX)
			return sw_event_names[config];
		return "unknown-software";

	case PERF_TYPE_TRACEPOINT:
		return tracepoint_id_to_name(config);

	default:
		break;
	}

	return "unknown";
}
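
/*
 * Scan names[][] for an alias matching the start of *str (case-insensitive).
 * Advances *str past the longest matching alias of the first matching row
 * and returns that row's index, or -1 if nothing matches.
 */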
static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(*str, names[i][j], n))
				longest = n;
		}
		if (longest > 0) {
			*str += longest;
			return i;
		}
	}

	return -1;
}
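
/*
 * Parse a generalized cache event: <unit>[-<op>][-<result>], for example
 * "L1-dcache-load-misses" or just "LLC". A missing operation falls back to
 * reads and a missing result falls back to accesses.
 */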
static enum event_result
parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
{
	const char *s = *str;
	int cache_type = -1, cache_op = -1, cache_result = -1;

	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	if (cache_type == -1)
		return EVT_FAILED;

	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
		++s;

		if (cache_op == -1) {
			cache_op = parse_aliases(&s, hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!is_cache_op_valid(cache_type, cache_op))
					return EVT_FAILED;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(&s, hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}

		/*
		 * Can't parse this as a cache op or result, so back up
		 * to the '-'.
		 */
		--s;
		break;
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr->type = PERF_TYPE_HW_CACHE;

	*str = s;
	return EVT_HANDLED;
}
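
/*
 * Resolve one tracepoint ("<sys>:<event>") to its id file below debugfs_path
 * and fill @attr as a PERF_TYPE_TRACEPOINT counter. The ":record" flag also
 * requests raw, time and CPU sample data.
 */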
static enum event_result
parse_single_tracepoint_event(char *sys_name,
			      const char *evt_name,
			      unsigned int evt_length,
			      char *flags,
			      struct perf_counter_attr *attr,
			      const char **strp)
{
	char evt_path[MAXPATHLEN];
	char id_buf[4];
	u64 id;
	int fd;

	if (flags) {
		if (!strncmp(flags, "record", strlen(flags))) {
			attr->sample_type |= PERF_SAMPLE_RAW;
			attr->sample_type |= PERF_SAMPLE_TIME;
			attr->sample_type |= PERF_SAMPLE_CPU;
		}
	}

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_name, evt_name);

	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return EVT_FAILED;

	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
		close(fd);
		return EVT_FAILED;
	}

	close(fd);
	id = atoll(id_buf);
	attr->config = id;
	attr->type = PERF_TYPE_TRACEPOINT;
	*strp = evt_name + evt_length;

	return EVT_HANDLED;
}

/* sys + ':' + event + ':' + flags */
#define MAX_EVOPT_LEN	(MAX_EVENT_LENGTH * 2 + 2 + 128)

static enum event_result
parse_subsystem_tracepoint_event(char *sys_name, char *flags)
{
	char evt_path[MAXPATHLEN];
	struct dirent *evt_ent;
	DIR *evt_dir;

	snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
	evt_dir = opendir(evt_path);

	if (!evt_dir) {
		perror("Can't open event dir");
		return EVT_FAILED;
	}

	while ((evt_ent = readdir(evt_dir))) {
		char event_opt[MAX_EVOPT_LEN + 1];
		int len;
		unsigned int rem = MAX_EVOPT_LEN;

		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
			       evt_ent->d_name);
		if (len < 0)
			return EVT_FAILED;

		rem -= len;
		if (flags) {
			if (rem < strlen(flags) + 1)
				return EVT_FAILED;

			strcat(event_opt, ":");
			strcat(event_opt, flags);
		}

		if (parse_events(NULL, event_opt, 0))
			return EVT_FAILED;
	}

	return EVT_HANDLED_ALL;
}
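
/*
 * Parse "<sys>:<event>[:<flags>]". "<sys>:*" expands to every event in the
 * subsystem via parse_subsystem_tracepoint_event().
 */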
static enum event_result parse_tracepoint_event(const char **strp,
				    struct perf_counter_attr *attr)
{
	const char *evt_name;
	char *flags;
	char sys_name[MAX_EVENT_LENGTH];
	unsigned int sys_length, evt_length;

	if (valid_debugfs_mount(debugfs_path))
		return EVT_FAILED;

	evt_name = strchr(*strp, ':');
	if (!evt_name)
		return EVT_FAILED;

	sys_length = evt_name - *strp;
	if (sys_length >= MAX_EVENT_LENGTH)
		return EVT_FAILED;

	strncpy(sys_name, *strp, sys_length);
	sys_name[sys_length] = '\0';
	evt_name = evt_name + 1;

	flags = strchr(evt_name, ':');
	if (flags) {
		/* split it out: */
		evt_name = strndup(evt_name, flags - evt_name);
		flags++;
	}

	evt_length = strlen(evt_name);
	if (evt_length >= MAX_EVENT_LENGTH)
		return EVT_FAILED;

	if (!strcmp(evt_name, "*")) {
		*strp = evt_name + evt_length;
		return parse_subsystem_tracepoint_event(sys_name, flags);
	} else
		return parse_single_tracepoint_event(sys_name, evt_name,
						     evt_length, flags,
						     attr, strp);
}

static int check_events(const char *str, unsigned int i)
{
	int n;

	n = strlen(event_symbols[i].symbol);
	if (!strncmp(str, event_symbols[i].symbol, n))
		return n;

	n = strlen(event_symbols[i].alias);
	if (n)
		if (!strncmp(str, event_symbols[i].alias, n))
			return n;

	return 0;
}

static enum event_result
parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	unsigned int i;
	int n;

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		n = check_events(str, i);
		if (n > 0) {
			attr->type = event_symbols[i].type;
			attr->config = event_symbols[i].config;
			*strp = str + n;
			return EVT_HANDLED;
		}
	}
	return EVT_FAILED;
}

static enum event_result
parse_raw_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	u64 config;
	int n;

	if (*str != 'r')
		return EVT_FAILED;
	n = hex2u64(str + 1, &config);
	if (n > 0) {
		*strp = str + n + 1;
		attr->type = PERF_TYPE_RAW;
		attr->config = config;
		return EVT_HANDLED;
	}
	return EVT_FAILED;
}

static enum event_result
parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	char *endp;
	unsigned long type;
	u64 config;

	type = strtoul(str, &endp, 0);
	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
		str = endp + 1;
		config = strtoul(str, &endp, 0);
		if (endp > str) {
			attr->type = type;
			attr->config = config;
			*strp = endp;
			return EVT_HANDLED;
		}
	}
	return EVT_FAILED;
}
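
/*
 * Parse an optional ":u"/"k"/"h" modifier suffix: each listed letter keeps
 * that privilege level, every unlisted one gets its exclude_* bit set.
 */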
static enum event_result
parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	int eu = 1, ek = 1, eh = 1;

	if (*str++ != ':')
		return 0;
	while (*str) {
		if (*str == 'u')
			eu = 0;
		else if (*str == 'k')
			ek = 0;
		else if (*str == 'h')
			eh = 0;
		else
			break;
		++str;
	}
	if (str >= *strp + 2) {
		*strp = str;
		attr->exclude_user = eu;
		attr->exclude_kernel = ek;
		attr->exclude_hv = eh;
		return 1;
	}
	return 0;
}

/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static enum event_result
parse_event_symbols(const char **str, struct perf_counter_attr *attr)
{
	enum event_result ret;

	ret = parse_tracepoint_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_raw_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_numeric_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_symbolic_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_generic_hw_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	return EVT_FAILED;

modifier:
	parse_event_modifier(str, attr);

	return ret;
}
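
/*
 * parse_options() callback for -e: parses a comma-separated list of event
 * specifications into the global attrs[]/nr_counters, returning -1 on the
 * first malformed entry or once MAX_COUNTERS is reached.
 */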
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
	struct perf_counter_attr attr;
	enum event_result ret;

	for (;;) {
		if (nr_counters == MAX_COUNTERS)
			return -1;

		memset(&attr, 0, sizeof(attr));
		ret = parse_event_symbols(&str, &attr);
		if (ret == EVT_FAILED)
			return -1;

		if (!(*str == 0 || *str == ',' || isspace(*str)))
			return -1;

		if (ret != EVT_HANDLED_ALL) {
			attrs[nr_counters] = attr;
			nr_counters++;
		}

		if (*str == 0)
			break;
		if (*str == ',')
			++str;
		while (isspace(*str))
			++str;
	}

	return 0;
}

static const char * const event_type_descriptors[] = {
	"",
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */

static void print_tracepoint_events(void)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	int sys_dir_fd;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		return;		/* nothing opened yet, avoid closedir(NULL) */
	sys_dir_fd = dirfd(sys_dir);

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
		if (dfd == -1)
			continue;
		evt_dir = fdopendir(dfd);
		if (!evt_dir) {
			close(dfd);
			continue;
		}
		evt_dir_fd = dirfd(evt_dir);
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			fprintf(stderr, "  %-42s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
		}
		closedir(evt_dir);
	}

	closedir(sys_dir);
}

/*
 * Print the help text for the event symbols:
 */
void print_events(void)
{
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	char name[40];

	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		type = syms->type + 1;
		if (type >= ARRAY_SIZE(event_type_descriptors))
			type = 0;

		if (type != prev_type)
			fprintf(stderr, "\n");

		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
		else
			strcpy(name, syms->symbol);
		fprintf(stderr, "  %-42s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
	}

	fprintf(stderr, "\n");
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, "  %-42s [%s]\n",
					event_cache_name(type, op, i),
					event_type_descriptors[4]);
			}
		}
	}

	fprintf(stderr, "\n");
	fprintf(stderr, "  %-42s [raw hardware event descriptor]\n",
		"rNNN");
	fprintf(stderr, "\n");

	print_tracepoint_events();

	exit(129);
}