mirror of https://gitee.com/openkylin/linux.git
Merge branch 'perf-counters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-perf
* 'perf-counters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-perf: (31 commits)
  perf_counter tools: Give perf top inherit option
  perf_counter tools: Fix vmlinux symbol generation breakage
  perf_counter: Detect debugfs location
  perf_counter: Add tracepoint support to perf list, perf stat
  perf symbol: C++ demangling
  perf: avoid structure size confusion by using a fixed size
  perf_counter: Fix throttle/unthrottle event logging
  perf_counter: Improve perf stat and perf record option parsing
  perf_counter: PERF_SAMPLE_ID and inherited counters
  perf_counter: Plug more stack leaks
  perf: Fix stack data leak
  perf_counter: Remove unused variables
  perf_counter: Make call graph option consistent
  perf_counter: Add perf record option to log addresses
  perf_counter: Log vfork as a fork event
  perf_counter: Synthesize VDSO mmap event
  perf_counter: Make sure we dont leak kernel memory to userspace
  perf_counter tools: Fix index boundary check
  perf_counter: Fix the tracepoint channel to perfcounters
  perf_counter, x86: Extend perf_counter Pentium M support
  ...
commit 3c3301083e
@@ -65,6 +65,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 	.enabled = 1,
 };
 
+/*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
 /*
  * Intel PerfMon v3. Used on Core2 and later.
  */
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
 
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
+
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
+
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+		if (boot_cpu_data.x86 == 6) {
+			return p6_pmu_init();
+		} else {
 		return -ENODEV;
+		}
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports
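A side note on the raw-event masking added above (not part of the patch): p6_pmu_raw_event() keeps only the event-select bits (0-7), unit-mask bits (8-15), edge, invert and counter-mask bits of a user-supplied raw config. A minimal sketch of composing such a value, using event codes from the p6_perfmon_event_map above (0x0079 cycles, 0x00c5 branch misses); the helper name is hypothetical:

    #include <stdint.h>

    /* Illustration only: pack an event-select byte (bits 0-7) and a unit
     * mask (bits 8-15) the way P6_EVNTSEL_EVENT_MASK / P6_EVNTSEL_UNIT_MASK
     * expect them. */
    static inline uint64_t p6_raw_config(uint8_t event_select, uint8_t unit_mask)
    {
            return (uint64_t)event_select | ((uint64_t)unit_mask << 8);
    }

    /* p6_raw_config(0x79, 0) == 0x0079 (cycles),
     * p6_raw_config(0xc5, 0) == 0x00c5 (branch misses) */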
@@ -120,8 +120,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_ID				= 1U << 6,
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
+	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 
-	PERF_SAMPLE_MAX = 1U << 9,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
 };
 
 /*
@@ -312,16 +313,7 @@
 	 *	struct perf_event_header	header;
 	 *	u64				time;
 	 *	u64				id;
-	 *	u64				sample_period;
-	 * };
-	 */
-	PERF_EVENT_PERIOD		= 4,
-
-	/*
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				time;
-	 *	u64				id;
+	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_EVENT_THROTTLE		= 5,
@@ -356,6 +348,7 @@
 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
 	 *	{ u64			id;       } && PERF_SAMPLE_ID
+	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
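For orientation (not part of the patch): the comment block above documents the order of the optional per-sample fields, and the new PERF_SAMPLE_STREAM_ID field sits between PERF_SAMPLE_ID and PERF_SAMPLE_CPU. A minimal sketch of a consumer stepping over that tail of a sample record, assuming the PERF_SAMPLE_* constants from the patched header are available:

    #include <stdint.h>

    /* Sketch only: advance past the trailing optional u64 fields of a
     * sample, in the documented order, given the counter's sample_type. */
    static const uint64_t *skip_sample_tail(const uint64_t *p, uint64_t sample_type)
    {
            if (sample_type & PERF_SAMPLE_ID)
                    p++;    /* inherited counters report the parent id here */
            if (sample_type & PERF_SAMPLE_STREAM_ID)
                    p++;    /* always this counter's own id */
            if (sample_type & PERF_SAMPLE_CPU)
                    p++;    /* u32 cpu, res packed into one u64 */
            if (sample_type & PERF_SAMPLE_PERIOD)
                    p++;
            return p;
    }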
@@ -962,7 +962,7 @@ config PERF_COUNTERS
 
 config EVENT_PROFILE
 	bool "Tracepoint profile sources"
-	depends on PERF_COUNTERS && EVENT_TRACER
+	depends on PERF_COUNTERS && EVENT_TRACING
 	default y
 
 endmenu
@@ -1407,14 +1407,11 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
-		} else if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * vfork will do an exec which will call
-			 * set_task_comm()
-			 */
-			perf_counter_fork(p);
 		}
 
+		if (!(clone_flags & CLONE_THREAD))
+			perf_counter_fork(p);
+
 		audit_finish_fork(p);
 		tracehook_report_clone(regs, clone_flags, nr, p);
 
@@ -146,6 +146,28 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+	u64 id = counter->id;
+
+	if (counter->parent)
+		id = counter->parent->id;
+
+	return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
@@ -1288,7 +1310,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1328,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
 	if (!sample_period)
 		sample_period = 1;
 
-	perf_log_period(counter, sample_period);
-
 	hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1482,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1543,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1602,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL;		/* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1704,7 +1716,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
 	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = counter->id;
+		values[n++] = primary_counter_id(counter);
 	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
@@ -1811,8 +1823,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 		counter->attr.sample_freq = value;
 	} else {
-		perf_log_period(counter, value);
-
 		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;
 	}
@@ -2661,6 +2671,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
 
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		header.size += sizeof(u64);
+
 	if (sample_type & PERF_SAMPLE_CPU) {
 		header.size += sizeof(cpu_entry);
 
@@ -2704,7 +2717,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, data->addr);
 
-	if (sample_type & PERF_SAMPLE_ID)
+	if (sample_type & PERF_SAMPLE_ID) {
+		u64 id = primary_counter_id(counter);
+
+		perf_output_put(&handle, id);
+	}
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
 		perf_output_put(&handle, counter->id);
 
 	if (sample_type & PERF_SAMPLE_CPU)
@@ -2727,7 +2746,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		if (sub != counter)
 			sub->pmu->read(sub);
 
-		group_entry.id = sub->id;
+		group_entry.id = primary_counter_id(sub);
 		group_entry.counter = atomic64_read(&sub->count);
 
 		perf_output_put(&handle, group_entry);
@@ -2787,15 +2806,8 @@ perf_counter_read_event(struct perf_counter *counter,
 	}
 
 	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		u64 id;
-
 		event.header.size += sizeof(u64);
-		if (counter->parent)
-			id = counter->parent->id;
-		else
-			id = counter->id;
-
-		event.format[i++] = id;
+		event.format[i++] = primary_counter_id(counter);
 	}
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2896,8 +2908,11 @@ void perf_counter_fork(struct task_struct *task)
 		.event  = {
 			.header = {
 				.type = PERF_EVENT_FORK,
+				.misc = 0,
 				.size = sizeof(fork_event.event),
 			},
+			/* .pid  */
+			/* .ppid */
 		},
 	};
 
@@ -2969,8 +2984,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3005,8 +3022,16 @@ void perf_counter_comm(struct task_struct *task)
 
 	comm_event = (struct perf_comm_event){
 		.task	= task,
+		/* .comm      */
+		/* .comm_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_COMM, },
+			.header = {
+				.type = PERF_EVENT_COMM,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 		},
 	};
 
@@ -3089,8 +3114,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3101,9 +3133,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3148,8 +3182,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
 	mmap_event = (struct perf_mmap_event){
 		.vma	= vma,
+		/* .file_name */
+		/* .file_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_MMAP, },
+			.header = {
+				.type = PERF_EVENT_MMAP,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 			.start  = vma->vm_start,
 			.len    = vma->vm_end - vma->vm_start,
 			.pgoff  = vma->vm_pgoff,
@@ -3159,49 +3201,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 	perf_counter_mmap_event(&mmap_event);
 }
 
-/*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-	struct perf_event_header	header;
-	u64				time;
-	u64				id;
-	u64				period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-	struct perf_output_handle handle;
-	struct freq_event event;
-	int ret;
-
-	if (counter->hw.sample_period == period)
-		return;
-
-	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-		return;
-
-	event = (struct freq_event) {
-		.header = {
-			.type = PERF_EVENT_PERIOD,
-			.misc = 0,
-			.size = sizeof(event),
-		},
-		.time = sched_clock(),
-		.id = counter->id,
-		.period = period,
-	};
-
-	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-	if (ret)
-		return;
-
-	perf_output_put(&handle, event);
-	perf_output_end(&handle);
-}
-
 /*
  * IRQ throttle logging
  */
@@ -3215,16 +3214,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 		struct perf_event_header	header;
 		u64				time;
 		u64				id;
+		u64				stream_id;
 	} throttle_event = {
 		.header = {
-			.type = PERF_EVENT_THROTTLE + 1,
+			.type = PERF_EVENT_THROTTLE,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
 		.time		= sched_clock(),
-		.id		= counter->id,
+		.id		= primary_counter_id(counter),
+		.stream_id	= counter->id,
 	};
 
+	if (enable)
+		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
 	if (ret)
 		return;
@@ -3672,7 +3676,7 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id)
 {
 	struct perf_sample_data data = {
-		.regs = get_irq_regs();
+		.regs = get_irq_regs(),
 		.addr = 0,
 	};
 
@@ -3688,16 +3692,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->attr));
+	ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->attr);
-	int ret;
-
-	ret = ftrace_profile_enable(event_id);
-	if (ret)
+	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
@@ -4256,15 +4256,12 @@ void perf_counter_exit_task(struct task_struct *child)
 	 */
 	spin_lock(&child_ctx->lock);
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
 	spin_unlock(&child_ctx->lock);
 	local_irq_restore(flags);
 
@@ -24,6 +24,9 @@ OPTIONS
 --dsos=::
 	Only consider symbols in these dsos. CSV that understands
 	file://filename entries.
+-n
+--show-nr-samples
+	Show the number of samples for each symbol
 -C::
 --comms=::
 	Only consider symbols in these comms. CSV that understands
@@ -33,6 +36,18 @@ OPTIONS
 	Only consider these symbols. CSV that understands
 	file://filename entries.
 
+-w::
+--field-width=::
+	Force each column width to the provided list, for large terminal
+	readability.
+
+-t::
+--field-separator=::
+
+	Use a special separator character and don't pad with spaces, replacing
+	all occurances of this separator in symbol names (and other output)
+	with a '.' character, that thus it's the only non valid separator.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1]
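A usage sketch for the two new report options documented above (values are illustrative, not from the patch):

    # one ';'-separated record per line, no padding; separators in names become '.'
    perf report -t ';' --sort comm,dso,symbol

    # force the column width of each sort key, e.g. for a narrow terminal
    perf report -w 16,30 --show-nr-samples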
@@ -345,7 +345,7 @@ BUILTIN_OBJS += builtin-stat.o
 BUILTIN_OBJS += builtin-top.o
 
 PERFLIBS = $(LIB_FILE)
-EXTLIBS =
+EXTLIBS = -lbfd
 
 #
 # Platform specific tweaks
@@ -74,20 +74,12 @@ struct fork_event {
 	u32 pid, ppid;
 };
 
-struct period_event {
-	struct perf_event_header header;
-	u64 time;
-	u64 id;
-	u64 sample_period;
-};
-
 typedef union event_union {
 	struct perf_event_header	header;
 	struct ip_event			ip;
 	struct mmap_event		mmap;
 	struct comm_event		comm;
 	struct fork_event		fork;
-	struct period_event		period;
 } event_t;
 
 
@@ -997,19 +989,6 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
 	return 0;
 }
 
-static int
-process_period_event(event_t *event, unsigned long offset, unsigned long head)
-{
-	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
-		(void *)(offset + head),
-		(void *)(long)(event->header.size),
-		event->period.time,
-		event->period.id,
-		event->period.sample_period);
-
-	return 0;
-}
-
 static int
 process_event(event_t *event, unsigned long offset, unsigned long head)
 {
@@ -1025,9 +1004,6 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
 
 	case PERF_EVENT_FORK:
 		return process_fork_event(event, offset, head);
-
-	case PERF_EVENT_PERIOD:
-		return process_period_event(event, offset, head);
 	/*
 	 * We dont process them right now but they are fine:
 	 */
@@ -43,6 +43,7 @@ static int call_graph = 0;
 static int			verbose				= 0;
 static int			inherit_stat			= 0;
 static int			no_samples			= 0;
+static int			sample_address			= 0;
 
 static long			samples;
 static struct timeval		last_read;
@@ -313,6 +314,10 @@ static void pid_synthesize_mmap_samples(pid_t pid)
 		if (*pbf == 'x') { /* vm_exec */
 			char *execname = strchr(bf, '/');
 
+			/* Catch VDSO */
+			if (execname == NULL)
+				execname = strstr(bf, "[vdso]");
+
 			if (execname == NULL)
 				continue;
 
@@ -401,6 +406,9 @@ static void create_counter(int counter, int cpu, pid_t pid)
 	if (inherit_stat)
 		attr->inherit_stat = 1;
 
+	if (sample_address)
+		attr->sample_type |= PERF_SAMPLE_ADDR;
+
 	if (call_graph)
 		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
 
@@ -645,6 +653,8 @@ static const struct option options[] = {
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_BOOLEAN('s', "stat", &inherit_stat,
 		    "per thread counts"),
+	OPT_BOOLEAN('d', "data", &sample_address,
+		    "Sample addresses"),
 	OPT_BOOLEAN('n', "no-samples", &no_samples,
 		    "don't sample"),
 	OPT_END()
@@ -654,7 +664,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 {
 	int counter;
 
-	argc = parse_options(argc, argv, options, record_usage, 0);
+	argc = parse_options(argc, argv, options, record_usage,
+		PARSE_OPT_STOP_AT_NON_OPTION);
 	if (!argc && target_pid == -1 && !system_wide)
 		usage_with_options(record_usage, options);
 
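A usage sketch tying the record-side changes above together (the workload name and its flag are illustrative, not from the patch):

    # -d samples data addresses (PERF_SAMPLE_ADDR), -s keeps per-thread counts;
    # with PARSE_OPT_STOP_AT_NON_OPTION, '--workload-flag' is passed on to the
    # workload instead of being parsed by perf record itself.
    perf record -d -s ./myworkload --workload-flag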
@ -33,8 +33,10 @@ static char *vmlinux = NULL;
|
||||||
|
|
||||||
static char default_sort_order[] = "comm,dso";
|
static char default_sort_order[] = "comm,dso";
|
||||||
static char *sort_order = default_sort_order;
|
static char *sort_order = default_sort_order;
|
||||||
static char *dso_list_str, *comm_list_str, *sym_list_str;
|
static char *dso_list_str, *comm_list_str, *sym_list_str,
|
||||||
|
*col_width_list_str;
|
||||||
static struct strlist *dso_list, *comm_list, *sym_list;
|
static struct strlist *dso_list, *comm_list, *sym_list;
|
||||||
|
static char *field_sep;
|
||||||
|
|
||||||
static int input;
|
static int input;
|
||||||
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
|
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
|
||||||
|
@ -49,6 +51,7 @@ static int verbose;
|
||||||
static int modules;
|
static int modules;
|
||||||
|
|
||||||
static int full_paths;
|
static int full_paths;
|
||||||
|
static int show_nr_samples;
|
||||||
|
|
||||||
static unsigned long page_size;
|
static unsigned long page_size;
|
||||||
static unsigned long mmap_window = 32;
|
static unsigned long mmap_window = 32;
|
||||||
|
@ -98,13 +101,6 @@ struct fork_event {
|
||||||
u32 pid, ppid;
|
u32 pid, ppid;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct period_event {
|
|
||||||
struct perf_event_header header;
|
|
||||||
u64 time;
|
|
||||||
u64 id;
|
|
||||||
u64 sample_period;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct lost_event {
|
struct lost_event {
|
||||||
struct perf_event_header header;
|
struct perf_event_header header;
|
||||||
u64 id;
|
u64 id;
|
||||||
|
@ -124,11 +120,37 @@ typedef union event_union {
|
||||||
struct mmap_event mmap;
|
struct mmap_event mmap;
|
||||||
struct comm_event comm;
|
struct comm_event comm;
|
||||||
struct fork_event fork;
|
struct fork_event fork;
|
||||||
struct period_event period;
|
|
||||||
struct lost_event lost;
|
struct lost_event lost;
|
||||||
struct read_event read;
|
struct read_event read;
|
||||||
} event_t;
|
} event_t;
|
||||||
|
|
||||||
|
static int repsep_fprintf(FILE *fp, const char *fmt, ...)
|
||||||
|
{
|
||||||
|
int n;
|
||||||
|
va_list ap;
|
||||||
|
|
||||||
|
va_start(ap, fmt);
|
||||||
|
if (!field_sep)
|
||||||
|
n = vfprintf(fp, fmt, ap);
|
||||||
|
else {
|
||||||
|
char *bf = NULL;
|
||||||
|
n = vasprintf(&bf, fmt, ap);
|
||||||
|
if (n > 0) {
|
||||||
|
char *sep = bf;
|
||||||
|
while (1) {
|
||||||
|
sep = strchr(sep, *field_sep);
|
||||||
|
if (sep == NULL)
|
||||||
|
break;
|
||||||
|
*sep = '.';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fputs(bf, fp);
|
||||||
|
free(bf);
|
||||||
|
}
|
||||||
|
va_end(ap);
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
static LIST_HEAD(dsos);
|
static LIST_HEAD(dsos);
|
||||||
static struct dso *kernel_dso;
|
static struct dso *kernel_dso;
|
||||||
static struct dso *vdso;
|
static struct dso *vdso;
|
||||||
|
@ -360,12 +382,28 @@ static struct thread *thread__new(pid_t pid)
|
||||||
return self;
|
return self;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static unsigned int dsos__col_width,
|
||||||
|
comms__col_width,
|
||||||
|
threads__col_width;
|
||||||
|
|
||||||
static int thread__set_comm(struct thread *self, const char *comm)
|
static int thread__set_comm(struct thread *self, const char *comm)
|
||||||
{
|
{
|
||||||
if (self->comm)
|
if (self->comm)
|
||||||
free(self->comm);
|
free(self->comm);
|
||||||
self->comm = strdup(comm);
|
self->comm = strdup(comm);
|
||||||
return self->comm ? 0 : -ENOMEM;
|
if (!self->comm)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
if (!col_width_list_str && !field_sep &&
|
||||||
|
(!comm_list || strlist__has_entry(comm_list, comm))) {
|
||||||
|
unsigned int slen = strlen(comm);
|
||||||
|
if (slen > comms__col_width) {
|
||||||
|
comms__col_width = slen;
|
||||||
|
threads__col_width = slen + 6;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t thread__fprintf(struct thread *self, FILE *fp)
|
static size_t thread__fprintf(struct thread *self, FILE *fp)
|
||||||
|
@ -536,7 +574,9 @@ struct sort_entry {
|
||||||
|
|
||||||
int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
|
int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
|
||||||
int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
|
int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
|
||||||
size_t (*print)(FILE *fp, struct hist_entry *);
|
size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
|
||||||
|
unsigned int *width;
|
||||||
|
bool elide;
|
||||||
};
|
};
|
||||||
|
|
||||||
static int64_t cmp_null(void *l, void *r)
|
static int64_t cmp_null(void *l, void *r)
|
||||||
|
@ -558,15 +598,17 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
sort__thread_print(FILE *fp, struct hist_entry *self)
|
sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||||
{
|
{
|
||||||
return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
|
return repsep_fprintf(fp, "%*s:%5d", width - 6,
|
||||||
|
self->thread->comm ?: "", self->thread->pid);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sort_entry sort_thread = {
|
static struct sort_entry sort_thread = {
|
||||||
.header = " Command: Pid",
|
.header = "Command: Pid",
|
||||||
.cmp = sort__thread_cmp,
|
.cmp = sort__thread_cmp,
|
||||||
.print = sort__thread_print,
|
.print = sort__thread_print,
|
||||||
|
.width = &threads__col_width,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* --sort comm */
|
/* --sort comm */
|
||||||
|
@ -590,16 +632,17 @@ sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
sort__comm_print(FILE *fp, struct hist_entry *self)
|
sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||||
{
|
{
|
||||||
return fprintf(fp, "%16s", self->thread->comm);
|
return repsep_fprintf(fp, "%*s", width, self->thread->comm);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sort_entry sort_comm = {
|
static struct sort_entry sort_comm = {
|
||||||
.header = " Command",
|
.header = "Command",
|
||||||
.cmp = sort__comm_cmp,
|
.cmp = sort__comm_cmp,
|
||||||
.collapse = sort__comm_collapse,
|
.collapse = sort__comm_collapse,
|
||||||
.print = sort__comm_print,
|
.print = sort__comm_print,
|
||||||
|
.width = &comms__col_width,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* --sort dso */
|
/* --sort dso */
|
||||||
|
@ -617,18 +660,19 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
sort__dso_print(FILE *fp, struct hist_entry *self)
|
sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||||
{
|
{
|
||||||
if (self->dso)
|
if (self->dso)
|
||||||
return fprintf(fp, "%-25s", self->dso->name);
|
return repsep_fprintf(fp, "%-*s", width, self->dso->name);
|
||||||
|
|
||||||
return fprintf(fp, "%016llx ", (u64)self->ip);
|
return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sort_entry sort_dso = {
|
static struct sort_entry sort_dso = {
|
||||||
.header = "Shared Object ",
|
.header = "Shared Object",
|
||||||
.cmp = sort__dso_cmp,
|
.cmp = sort__dso_cmp,
|
||||||
.print = sort__dso_print,
|
.print = sort__dso_print,
|
||||||
|
.width = &dsos__col_width,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* --sort symbol */
|
/* --sort symbol */
|
||||||
|
@ -648,22 +692,22 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
sort__sym_print(FILE *fp, struct hist_entry *self)
|
sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
|
||||||
{
|
{
|
||||||
size_t ret = 0;
|
size_t ret = 0;
|
||||||
|
|
||||||
if (verbose)
|
if (verbose)
|
||||||
ret += fprintf(fp, "%#018llx ", (u64)self->ip);
|
ret += repsep_fprintf(fp, "%#018llx ", (u64)self->ip);
|
||||||
|
|
||||||
|
ret += repsep_fprintf(fp, "[%c] ", self->level);
|
||||||
if (self->sym) {
|
if (self->sym) {
|
||||||
ret += fprintf(fp, "[%c] %s",
|
ret += repsep_fprintf(fp, "%s", self->sym->name);
|
||||||
self->dso == kernel_dso ? 'k' :
|
|
||||||
self->dso == hypervisor_dso ? 'h' : '.', self->sym->name);
|
|
||||||
|
|
||||||
if (self->sym->module)
|
if (self->sym->module)
|
||||||
ret += fprintf(fp, "\t[%s]", self->sym->module->name);
|
ret += repsep_fprintf(fp, "\t[%s]",
|
||||||
|
self->sym->module->name);
|
||||||
} else {
|
} else {
|
||||||
ret += fprintf(fp, "%#016llx", (u64)self->ip);
|
ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -690,19 +734,19 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
sort__parent_print(FILE *fp, struct hist_entry *self)
|
sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
|
||||||
{
|
{
|
||||||
size_t ret = 0;
|
return repsep_fprintf(fp, "%-*s", width,
|
||||||
|
self->parent ? self->parent->name : "[other]");
|
||||||
ret += fprintf(fp, "%-20s", self->parent ? self->parent->name : "[other]");
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static unsigned int parent_symbol__col_width;
|
||||||
|
|
||||||
static struct sort_entry sort_parent = {
|
static struct sort_entry sort_parent = {
|
||||||
.header = "Parent symbol ",
|
.header = "Parent symbol",
|
||||||
.cmp = sort__parent_cmp,
|
.cmp = sort__parent_cmp,
|
||||||
.print = sort__parent_print,
|
.print = sort__parent_print,
|
||||||
|
.width = &parent_symbol__col_width,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int sort__need_collapse = 0;
|
static int sort__need_collapse = 0;
|
||||||
|
@ -967,17 +1011,25 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (total_samples)
|
if (total_samples)
|
||||||
ret = percent_color_fprintf(fp, " %6.2f%%",
|
ret = percent_color_fprintf(fp,
|
||||||
(self->count * 100.0) / total_samples);
|
field_sep ? "%.2f" : " %6.2f%%",
|
||||||
|
(self->count * 100.0) / total_samples);
|
||||||
else
|
else
|
||||||
ret = fprintf(fp, "%12Ld ", self->count);
|
ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
|
||||||
|
|
||||||
|
if (show_nr_samples) {
|
||||||
|
if (field_sep)
|
||||||
|
fprintf(fp, "%c%lld", *field_sep, self->count);
|
||||||
|
else
|
||||||
|
fprintf(fp, "%11lld", self->count);
|
||||||
|
}
|
||||||
|
|
||||||
list_for_each_entry(se, &hist_entry__sort_list, list) {
|
list_for_each_entry(se, &hist_entry__sort_list, list) {
|
||||||
if (exclude_other && (se == &sort_parent))
|
if (se->elide)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
fprintf(fp, " ");
|
fprintf(fp, "%s", field_sep ?: " ");
|
||||||
ret += se->print(fp, self);
|
ret += se->print(fp, self, se->width ? *se->width : 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
ret += fprintf(fp, "\n");
|
ret += fprintf(fp, "\n");
|
||||||
|
@ -992,6 +1044,18 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
static void dso__calc_col_width(struct dso *self)
|
||||||
|
{
|
||||||
|
if (!col_width_list_str && !field_sep &&
|
||||||
|
(!dso_list || strlist__has_entry(dso_list, self->name))) {
|
||||||
|
unsigned int slen = strlen(self->name);
|
||||||
|
if (slen > dsos__col_width)
|
||||||
|
dsos__col_width = slen;
|
||||||
|
}
|
||||||
|
|
||||||
|
self->slen_calculated = 1;
|
||||||
|
}
|
||||||
|
|
||||||
static struct symbol *
|
static struct symbol *
|
||||||
resolve_symbol(struct thread *thread, struct map **mapp,
|
resolve_symbol(struct thread *thread, struct map **mapp,
|
||||||
struct dso **dsop, u64 *ipp)
|
struct dso **dsop, u64 *ipp)
|
||||||
|
@ -1011,6 +1075,14 @@ resolve_symbol(struct thread *thread, struct map **mapp,
|
||||||
|
|
||||||
map = thread__find_map(thread, ip);
|
map = thread__find_map(thread, ip);
|
||||||
if (map != NULL) {
|
if (map != NULL) {
|
||||||
|
/*
|
||||||
|
* We have to do this here as we may have a dso
|
||||||
|
+     * with no symbol hit that has a name longer than
+     * the ones with symbols sampled.
+     */
+    if (!sort_dso.elide && !map->dso->slen_calculated)
+        dso__calc_col_width(map->dso);

     if (mapp)
         *mapp = map;
 got_map:

@@ -1282,35 +1354,67 @@ static size_t output__fprintf(FILE *fp, u64 total_samples)
     struct sort_entry *se;
     struct rb_node *nd;
     size_t ret = 0;
+    unsigned int width;
+    char *col_width = col_width_list_str;

-    fprintf(fp, "\n");
-    fprintf(fp, "#\n");
-    fprintf(fp, "# (%Ld samples)\n", (u64)total_samples);
+    fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
     fprintf(fp, "#\n");

     fprintf(fp, "# Overhead");
+    if (show_nr_samples) {
+        if (field_sep)
+            fprintf(fp, "%cSamples", *field_sep);
+        else
+            fputs(" Samples ", fp);
+    }
     list_for_each_entry(se, &hist_entry__sort_list, list) {
-        if (exclude_other && (se == &sort_parent))
+        if (se->elide)
             continue;
-        fprintf(fp, " %s", se->header);
+        if (field_sep) {
+            fprintf(fp, "%c%s", *field_sep, se->header);
+            continue;
+        }
+        width = strlen(se->header);
+        if (se->width) {
+            if (col_width_list_str) {
+                if (col_width) {
+                    *se->width = atoi(col_width);
+                    col_width = strchr(col_width, ',');
+                    if (col_width)
+                        ++col_width;
+                }
+            }
+            width = *se->width = max(*se->width, width);
+        }
+        fprintf(fp, " %*s", width, se->header);
     }
     fprintf(fp, "\n");

+    if (field_sep)
+        goto print_entries;
+
     fprintf(fp, "# ........");
+    if (show_nr_samples)
+        fprintf(fp, " ..........");
     list_for_each_entry(se, &hist_entry__sort_list, list) {
         unsigned int i;

-        if (exclude_other && (se == &sort_parent))
+        if (se->elide)
             continue;

         fprintf(fp, " ");
-        for (i = 0; i < strlen(se->header); i++)
+        if (se->width)
+            width = *se->width;
+        else
+            width = strlen(se->header);
+        for (i = 0; i < width; i++)
             fprintf(fp, ".");
     }
     fprintf(fp, "\n");

     fprintf(fp, "#\n");

+print_entries:
     for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
         pos = rb_entry(nd, struct hist_entry, rb_node);
         ret += hist_entry__fprintf(fp, pos, total_samples);

@@ -1523,19 +1627,6 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
     return 0;
 }

-static int
-process_period_event(event_t *event, unsigned long offset, unsigned long head)
-{
-    dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
-        (void *)(offset + head),
-        (void *)(long)(event->header.size),
-        event->period.time,
-        event->period.id,
-        event->period.sample_period);
-
-    return 0;
-}
-
 static int
 process_lost_event(event_t *event, unsigned long offset, unsigned long head)
 {

@@ -1617,9 +1708,6 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
     case PERF_EVENT_FORK:
         return process_fork_event(event, offset, head);

-    case PERF_EVENT_PERIOD:
-        return process_period_event(event, offset, head);
-
     case PERF_EVENT_LOST:
         return process_lost_event(event, offset, head);


@@ -1883,6 +1971,8 @@ static const struct option options[] = {
     OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
     OPT_BOOLEAN('m', "modules", &modules,
             "load module symbols - WARNING: use only with -k and LIVE kernel"),
+    OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
+            "Show a column with the number of samples"),
     OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
            "sort by key(s): pid, comm, dso, symbol, parent"),
     OPT_BOOLEAN('P', "full-paths", &full_paths,

@@ -1891,15 +1981,21 @@ static const struct option options[] = {
            "regex filter to identify parent, see: '--sort parent'"),
     OPT_BOOLEAN('x', "exclude-other", &exclude_other,
             "Only display entries with parent-match"),
-    OPT_CALLBACK_DEFAULT('c', "callchain", NULL, "output_type,min_percent",
+    OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
              "Display callchains using output_type and min percent threshold. "
-             "Default: flat,0", &parse_callchain_opt, callchain_default_opt),
+             "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
     OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
            "only consider symbols in these dsos"),
     OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
            "only consider symbols in these comms"),
     OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
            "only consider these symbols"),
+    OPT_STRING('w', "column-widths", &col_width_list_str,
+           "width[,width...]",
+           "don't try to adjust column width, use these fixed values"),
+    OPT_STRING('t', "field-separator", &field_sep, "separator",
+           "separator for columns, no spaces will be added between "
+           "columns '.' is reserved."),
     OPT_END()
 };


@@ -1919,7 +2015,8 @@ static void setup_sorting(void)
 }

 static void setup_list(struct strlist **list, const char *list_str,
-               const char *list_name)
+               struct sort_entry *se, const char *list_name,
+               FILE *fp)
 {
     if (list_str) {
         *list = strlist__new(true, list_str);

@@ -1928,6 +2025,11 @@ static void setup_list(struct strlist **list, const char *list_str,
                 list_name);
             exit(129);
         }
+        if (strlist__nr_entries(*list) == 1) {
+            fprintf(fp, "# %s: %s\n", list_name,
+                strlist__entry(*list, 0)->s);
+            se->elide = true;
+        }
     }
 }


@@ -1941,9 +2043,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)

     setup_sorting();

-    if (parent_pattern != default_parent_pattern)
+    if (parent_pattern != default_parent_pattern) {
         sort_dimension__add("parent");
-    else
+        sort_parent.elide = 1;
+    } else
         exclude_other = 0;

     /*

@@ -1952,11 +2055,17 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
     if (argc)
         usage_with_options(report_usage, options);

-    setup_list(&dso_list, dso_list_str, "dso");
-    setup_list(&comm_list, comm_list_str, "comm");
-    setup_list(&sym_list, sym_list_str, "symbol");
-
     setup_pager();

+    setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
+    setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
+    setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
+
+    if (field_sep && *field_sep == '.') {
+        fputs("'.' is the only non valid --field-separator argument\n",
+              stderr);
+        exit(129);
+    }
+
     return __cmd_report();
 }
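The new -w/--column-widths handling in output__fprintf() above walks a comma-separated width list with atoi()/strchr(). A self-contained sketch of that walk (parse_widths() is a made-up helper, not part of perf); note that an empty field yields 0, which in the hunk above then loses to max(*se->width, strlen(se->header)), so it effectively means "keep the automatic width":

/* Standalone sketch of the comma-separated width-list walk used above;
 * parse_widths() is a hypothetical helper, not part of perf. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_widths(const char *list)
{
    const char *col_width = list;

    while (col_width && *col_width) {
        int w = atoi(col_width);    /* empty field yields 0: keep computed width */

        printf("column width: %d\n", w);
        col_width = strchr(col_width, ',');
        if (col_width)
            ++col_width;            /* skip the ',' */
    }
}

int main(void)
{
    parse_widths("20,,8");          /* e.g. perf report -w 20,,8 */
    return 0;
}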
@@ -511,7 +511,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 {
     int status;

-    argc = parse_options(argc, argv, options, stat_usage, 0);
+    argc = parse_options(argc, argv, options, stat_usage,
+        PARSE_OPT_STOP_AT_NON_OPTION);
     if (!argc)
         usage_with_options(stat_usage, options);
     if (run_count <= 0 || run_count > MAX_RUN)
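PARSE_OPT_STOP_AT_NON_OPTION makes cmd_stat() stop option parsing at the first bare word, so everything from the workload command onwards is passed through untouched instead of being interpreted as perf stat options. A toy illustration of the idea (not perf's actual parser):

/* Toy illustration of "stop at first non-option": argv from the first
 * bare word onwards is left for the measured workload. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    int i = 1;

    while (i < argc && argv[i][0] == '-' && strcmp(argv[i], "--"))
        i++;                /* consume tool options only */

    printf("workload starts at argv[%d]: %s\n", i, i < argc ? argv[i] : "(none)");
    return 0;
}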
@@ -58,6 +58,7 @@ static u64 count_filter = 5;
 static int print_entries = 15;

 static int target_pid = -1;
+static int inherit = 0;
 static int profile_cpu = -1;
 static int nr_cpus = 0;
 static unsigned int realtime_prio = 0;

@@ -549,7 +550,7 @@ int group_fd;
 static void start_counter(int i, int counter)
 {
     struct perf_counter_attr *attr;
-    unsigned int cpu;
+    int cpu;

     cpu = profile_cpu;
     if (target_pid == -1 && profile_cpu == -1)

@@ -559,6 +560,7 @@ static void start_counter(int i, int counter)

     attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
     attr->freq = freq;
+    attr->inherit = (cpu < 0) && inherit;

 try_again:
     fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);

@@ -685,6 +687,8 @@ static const struct option options[] = {
             "only display functions with more events than this"),
     OPT_BOOLEAN('g', "group", &group,
             "put the counters into a counter group"),
+    OPT_BOOLEAN('i', "inherit", &inherit,
+            "child tasks inherit counters"),
     OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
             "only display symbols matchig this pattern"),
     OPT_BOOLEAN('z', "zero", &zero,
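The new -i/--inherit flag only takes effect when cpu < 0, i.e. when counting a task rather than a whole CPU, matching attr->inherit = (cpu < 0) && inherit above. A reduced sketch of that guard (struct fake_attr is a stand-in for the real perf_counter_attr):

/* Reduced sketch of the inherit guard above; struct fake_attr stands in
 * for the real perf_counter_attr. */
#include <stdbool.h>
#include <stdio.h>

struct fake_attr {
    bool inherit;
};

static void setup_attr(struct fake_attr *attr, int cpu, bool inherit_opt)
{
    /* only inherit to children when profiling a task, not a CPU */
    attr->inherit = (cpu < 0) && inherit_opt;
}

int main(void)
{
    struct fake_attr attr;

    setup_attr(&attr, -1, true);   /* task mode: inherit honoured */
    printf("task mode inherit=%d\n", attr.inherit);
    setup_attr(&attr, 2, true);    /* per-CPU mode: inherit dropped */
    printf("cpu mode inherit=%d\n", attr.inherit);
    return 0;
}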
@@ -12,6 +12,8 @@
 #include "util/cache.h"
 #include "util/quote.h"
 #include "util/run-command.h"
+#include "util/parse-events.h"
+#include "util/string.h"

 const char perf_usage_string[] =
     "perf [--version] [--help] COMMAND [ARGS]";

@@ -25,6 +27,8 @@ struct pager_config {
     int val;
 };

+static char debugfs_mntpt[MAXPATHLEN];
+
 static int pager_command_config(const char *var, const char *value, void *data)
 {
     struct pager_config *c = data;

@@ -56,6 +60,15 @@ static void commit_pager_choice(void) {
     }
 }

+static void set_debugfs_path(void)
+{
+    char *path;
+
+    path = getenv(PERF_DEBUGFS_ENVIRONMENT);
+    snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt,
+         "tracing/events");
+}
+
 static int handle_options(const char*** argv, int* argc, int* envchanged)
 {
     int handled = 0;

@@ -122,6 +135,22 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
             setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1);
             if (envchanged)
                 *envchanged = 1;
+        } else if (!strcmp(cmd, "--debugfs-dir")) {
+            if (*argc < 2) {
+                fprintf(stderr, "No directory given for --debugfs-dir.\n");
+                usage(perf_usage_string);
+            }
+            strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN);
+            debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+            if (envchanged)
+                *envchanged = 1;
+            (*argv)++;
+            (*argc)--;
+        } else if (!prefixcmp(cmd, "--debugfs-dir=")) {
+            strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN);
+            debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+            if (envchanged)
+                *envchanged = 1;
         } else {
             fprintf(stderr, "Unknown option: %s\n", cmd);
             usage(perf_usage_string);

@@ -228,6 +257,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
     if (use_pager == -1 && p->option & USE_PAGER)
         use_pager = 1;
     commit_pager_choice();
+    set_debugfs_path();

     status = p->fn(argc, argv, prefix);
     if (status)

@@ -346,6 +376,49 @@ static int run_argv(int *argcp, const char ***argv)
     return done_alias;
 }

+/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
+static void get_debugfs_mntpt(void)
+{
+    FILE *file;
+    char fs_type[100];
+    char debugfs[MAXPATHLEN];
+
+    /*
+     * try the standard location
+     */
+    if (valid_debugfs_mount("/sys/kernel/debug/") == 0) {
+        strcpy(debugfs_mntpt, "/sys/kernel/debug/");
+        return;
+    }
+
+    /*
+     * try the sane location
+     */
+    if (valid_debugfs_mount("/debug/") == 0) {
+        strcpy(debugfs_mntpt, "/debug/");
+        return;
+    }
+
+    /*
+     * give up and parse /proc/mounts
+     */
+    file = fopen("/proc/mounts", "r");
+    if (file == NULL)
+        return;
+
+    while (fscanf(file, "%*s %"
+              STR(MAXPATHLEN)
+              "s %99s %*s %*d %*d\n",
+              debugfs, fs_type) == 2) {
+        if (strcmp(fs_type, "debugfs") == 0)
+            break;
+    }
+    fclose(file);
+    if (strcmp(fs_type, "debugfs") == 0) {
+        strncpy(debugfs_mntpt, debugfs, MAXPATHLEN);
+        debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+    }
+}

 int main(int argc, const char **argv)
 {

@@ -354,7 +427,8 @@ int main(int argc, const char **argv)
     cmd = perf_extract_argv0_path(argv[0]);
     if (!cmd)
         cmd = "perf-help";
+    /* get debugfs mount point from /proc/mounts */
+    get_debugfs_mntpt();
     /*
      * "perf-xxxx" is the same as "perf xxxx", but we obviously:
      *

@@ -377,6 +451,7 @@ int main(int argc, const char **argv)
     argc--;
     handle_options(&argv, &argc, NULL);
     commit_pager_choice();
+    set_debugfs_path();
     if (argc > 0) {
         if (!prefixcmp(argv[0], "--"))
             argv[0] += 2;
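get_debugfs_mntpt() and valid_debugfs_mount() above first probe /sys/kernel/debug/ and /debug/ with statfs() and only then fall back to scanning /proc/mounts. A self-contained sketch of the same strategy; DEBUGFS_MAGIC is the value defined in include/linux/magic.h:

/* Self-contained sketch of the debugfs probing strategy used above. */
#include <stdio.h>
#include <string.h>
#include <sys/vfs.h>

#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720    /* from include/linux/magic.h */
#endif

static int is_debugfs(const char *path)
{
    struct statfs st;

    return statfs(path, &st) == 0 && st.f_type == (long)DEBUGFS_MAGIC;
}

int main(void)
{
    char mnt[4096] = "";
    char fs_type[100] = "";
    FILE *mounts;

    if (is_debugfs("/sys/kernel/debug/"))
        strcpy(mnt, "/sys/kernel/debug/");
    else if (is_debugfs("/debug/"))
        strcpy(mnt, "/debug/");
    else if ((mounts = fopen("/proc/mounts", "r")) != NULL) {
        /* fields: device mountpoint fstype options dump pass */
        while (fscanf(mounts, "%*s %4095s %99s %*s %*d %*d\n",
                  mnt, fs_type) == 2)
            if (strcmp(fs_type, "debugfs") == 0)
                break;
        fclose(mounts);
        if (strcmp(fs_type, "debugfs") != 0)
            mnt[0] = '\0';
    }

    printf("debugfs mount point: %s\n", mnt[0] ? mnt : "(not found)");
    return 0;
}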
@@ -1,7 +1,13 @@
 #ifndef _PERF_PERF_H
 #define _PERF_PERF_H

-#if defined(__x86_64__) || defined(__i386__)
+#if defined(__i386__)
+#include "../../arch/x86/include/asm/unistd.h"
+#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#endif
+
+#if defined(__x86_64__)
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb() asm volatile("lfence" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
@@ -18,6 +18,7 @@
 #define PERFATTRIBUTES_FILE ".perfattributes"
 #define INFOATTRIBUTES_FILE "info/attributes"
 #define ATTRIBUTE_MACRO_PREFIX "[attr]"
+#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"

 typedef int (*config_fn_t)(const char *, const char *, void *);
 extern int perf_default_config(const char *, const char *, void *);
@@ -16,7 +16,7 @@ struct perf_header {
     int frozen;
     int attrs, size;
     struct perf_header_attr **attr;
-    off_t attr_offset;
+    s64 attr_offset;
     u64 data_offset;
     u64 data_size;
 };
@@ -18,4 +18,12 @@
     (type *)((char *)__mptr - offsetof(type, member)); })
 #endif

+#ifndef max
+#define max(x, y) ({ \
+    typeof(x) _max1 = (x); \
+    typeof(y) _max2 = (y); \
+    (void) (&_max1 == &_max2); \
+    _max1 > _max2 ? _max1 : _max2; })
+#endif
+
 #endif
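The max() added above is the kernel's statement-expression form: each argument is evaluated exactly once, and the (void)(&_max1 == &_max2) pointer comparison makes the compiler warn when the two arguments have different types. A small usage example (GCC/Clang only, because of typeof):

/* Usage sketch for the statement-expression max() added above. */
#include <stdio.h>

#define max(x, y) ({ \
    typeof(x) _max1 = (x); \
    typeof(y) _max2 = (y); \
    (void) (&_max1 == &_max2); \
    _max1 > _max2 ? _max1 : _max2; })

int main(void)
{
    unsigned int w = 7;

    w = max(w, 11u);          /* arguments evaluated once each */
    printf("%u\n", w);        /* prints 11 */
    /* max(w, 1.5) would warn: comparison of distinct pointer types */
    return 0;
}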
@@ -5,6 +5,7 @@
 #include "parse-events.h"
 #include "exec_cmd.h"
 #include "string.h"
+#include "cache.h"

 extern char *strcasestr(const char *haystack, const char *needle);


@@ -19,6 +20,8 @@ struct event_symbol {
     char *alias;
 };

+char debugfs_path[MAXPATHLEN];
+
 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x


@@ -71,8 +74,8 @@ static char *sw_event_names[] = {
 #define MAX_ALIASES 8

 static char *hw_cache[][MAX_ALIASES] = {
-    { "L1-d$", "l1-d", "l1d", "L1-data", },
-    { "L1-i$", "l1-i", "l1i", "L1-instruction", },
+    { "L1-dcache", "l1-d", "l1d", "L1-data", },
+    { "L1-icache", "l1-i", "l1i", "L1-instruction", },
     { "LLC", "L2" },
     { "dTLB", "d-tlb", "Data-TLB", },
     { "iTLB", "i-tlb", "Instruction-TLB", },

@@ -110,6 +113,88 @@ static unsigned long hw_cache_stat[C(MAX)] = {
     [C(BPU)] = (CACHE_READ),
 };

+#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st) \
+    while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
+    if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path, \
+            sys_dirent.d_name) && \
+       (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \
+       (strcmp(sys_dirent.d_name, ".")) && \
+       (strcmp(sys_dirent.d_name, "..")))
+
+#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \
+    while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
+    if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path, \
+            sys_dirent.d_name, evt_dirent.d_name) && \
+       (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \
+       (strcmp(evt_dirent.d_name, ".")) && \
+       (strcmp(evt_dirent.d_name, "..")))
+
+#define MAX_EVENT_LENGTH 30
+
+int valid_debugfs_mount(const char *debugfs)
+{
+    struct statfs st_fs;
+
+    if (statfs(debugfs, &st_fs) < 0)
+        return -ENOENT;
+    else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+        return -ENOENT;
+    return 0;
+}
+
+static char *tracepoint_id_to_name(u64 config)
+{
+    static char tracepoint_name[2 * MAX_EVENT_LENGTH];
+    DIR *sys_dir, *evt_dir;
+    struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+    struct stat st;
+    char id_buf[4];
+    int fd;
+    u64 id;
+    char evt_path[MAXPATHLEN];
+
+    if (valid_debugfs_mount(debugfs_path))
+        return "unkown";
+
+    sys_dir = opendir(debugfs_path);
+    if (!sys_dir)
+        goto cleanup;
+
+    for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
+        evt_dir = opendir(evt_path);
+        if (!evt_dir)
+            goto cleanup;
+        for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
+                                evt_path, st) {
+            snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
+                 debugfs_path, sys_dirent.d_name,
+                 evt_dirent.d_name);
+            fd = open(evt_path, O_RDONLY);
+            if (fd < 0)
+                continue;
+            if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+                close(fd);
+                continue;
+            }
+            close(fd);
+            id = atoll(id_buf);
+            if (id == config) {
+                closedir(evt_dir);
+                closedir(sys_dir);
+                snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH,
+                     "%s:%s", sys_dirent.d_name,
+                     evt_dirent.d_name);
+                return tracepoint_name;
+            }
+        }
+        closedir(evt_dir);
+    }
+
+cleanup:
+    closedir(sys_dir);
+    return "unkown";
+}
+
 static int is_cache_op_valid(u8 cache_type, u8 cache_op)
 {
     if (hw_cache_stat[cache_type] & COP(cache_op))

@@ -177,6 +262,9 @@ char *event_name(int counter)
             return sw_event_names[config];
         return "unknown-software";

+    case PERF_TYPE_TRACEPOINT:
+        return tracepoint_id_to_name(config);
+
     default:
         break;
     }

@@ -265,6 +353,53 @@ parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
     return 1;
 }

+static int parse_tracepoint_event(const char **strp,
+                    struct perf_counter_attr *attr)
+{
+    const char *evt_name;
+    char sys_name[MAX_EVENT_LENGTH];
+    char id_buf[4];
+    int fd;
+    unsigned int sys_length, evt_length;
+    u64 id;
+    char evt_path[MAXPATHLEN];
+
+    if (valid_debugfs_mount(debugfs_path))
+        return 0;
+
+    evt_name = strchr(*strp, ':');
+    if (!evt_name)
+        return 0;
+
+    sys_length = evt_name - *strp;
+    if (sys_length >= MAX_EVENT_LENGTH)
+        return 0;
+
+    strncpy(sys_name, *strp, sys_length);
+    sys_name[sys_length] = '\0';
+    evt_name = evt_name + 1;
+    evt_length = strlen(evt_name);
+    if (evt_length >= MAX_EVENT_LENGTH)
+        return 0;
+
+    snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+         sys_name, evt_name);
+    fd = open(evt_path, O_RDONLY);
+    if (fd < 0)
+        return 0;
+
+    if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+        close(fd);
+        return 0;
+    }
+    close(fd);
+    id = atoll(id_buf);
+    attr->config = id;
+    attr->type = PERF_TYPE_TRACEPOINT;
+    *strp = evt_name + evt_length;
+    return 1;
+}
+
 static int check_events(const char *str, unsigned int i)
 {
     int n;

@@ -374,7 +509,8 @@ parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
  */
 static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
 {
-    if (!(parse_raw_event(str, attr) ||
+    if (!(parse_tracepoint_event(str, attr) ||
+          parse_raw_event(str, attr) ||
           parse_numeric_event(str, attr) ||
           parse_symbolic_event(str, attr) ||
           parse_generic_hw_event(str, attr)))

@@ -422,6 +558,42 @@ static const char * const event_type_descriptors[] = {
     "Hardware cache event",
 };

+/*
+ * Print the events from <debugfs_mount_point>/tracing/events
+ */
+
+static void print_tracepoint_events(void)
+{
+    DIR *sys_dir, *evt_dir;
+    struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+    struct stat st;
+    char evt_path[MAXPATHLEN];
+
+    if (valid_debugfs_mount(debugfs_path))
+        return;
+
+    sys_dir = opendir(debugfs_path);
+    if (!sys_dir)
+        goto cleanup;
+
+    for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
+        evt_dir = opendir(evt_path);
+        if (!evt_dir)
+            goto cleanup;
+        for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
+                                evt_path, st) {
+            snprintf(evt_path, MAXPATHLEN, "%s:%s",
+                 sys_dirent.d_name, evt_dirent.d_name);
+            fprintf(stderr, "  %-40s [%s]\n", evt_path,
+                event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
+        }
+        closedir(evt_dir);
+    }
+
+cleanup:
+    closedir(sys_dir);
+}
+
 /*
  * Print the help text for the event symbols:
  */

@@ -436,7 +608,7 @@ void print_events(void)

     for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
         type = syms->type + 1;
-        if (type > ARRAY_SIZE(event_type_descriptors))
+        if (type >= ARRAY_SIZE(event_type_descriptors))
             type = 0;

         if (type != prev_type)

@@ -472,5 +644,7 @@ void print_events(void)
         "rNNN");
     fprintf(stderr, "\n");

+    print_tracepoint_events();
+
     exit(129);
 }
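parse_tracepoint_event() above resolves a "subsystem:event" name by reading the numeric id file under <debugfs>/tracing/events/<subsystem>/<event>/id and storing the result as attr->config with type PERF_TYPE_TRACEPOINT. A standalone sketch of that lookup, assuming debugfs is mounted at /sys/kernel/debug:

/* Standalone sketch of the "sys:event" -> tracepoint id lookup used above.
 * Assumes debugfs is mounted at /sys/kernel/debug. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static long long tracepoint_id(const char *sys, const char *event)
{
    char path[4096], buf[32];
    ssize_t n;
    int fd;

    snprintf(path, sizeof(path),
         "/sys/kernel/debug/tracing/events/%s/%s/id", sys, event);
    fd = open(path, O_RDONLY);
    if (fd < 0)
        return -1;
    n = read(fd, buf, sizeof(buf) - 1);
    close(fd);
    if (n <= 0)
        return -1;
    buf[n] = '\0';
    return atoll(buf);    /* the id file holds a decimal number */
}

int main(void)
{
    printf("sched:sched_switch id = %lld\n",
           tracepoint_id("sched", "sched_switch"));
    return 0;
}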
@@ -3,6 +3,8 @@
  * Parse symbolic events/counts passed in as options:
  */

+struct option;
+
 extern int nr_counters;

 extern struct perf_counter_attr attrs[MAX_COUNTERS];

@@ -15,3 +17,6 @@ extern int parse_events(const struct option *opt, const char *str, int unset);

 extern void print_events(void);

+extern char debugfs_path[];
+extern int valid_debugfs_mount(const char *debugfs);
+
@@ -5,4 +5,7 @@

 int hex2u64(const char *ptr, u64 *val);

+#define _STR(x) #x
+#define STR(x) _STR(x)
+
 #endif
@@ -64,6 +64,7 @@ int strlist__add(struct strlist *self, const char *new_entry)

     rb_link_node(&sn->rb_node, parent, p);
     rb_insert_color(&sn->rb_node, &self->entries);
+    ++self->nr_entries;

     return 0;
 }

@@ -155,8 +156,9 @@ struct strlist *strlist__new(bool dupstr, const char *slist)
     struct strlist *self = malloc(sizeof(*self));

     if (self != NULL) {
         self->entries = RB_ROOT;
         self->dupstr = dupstr;
+        self->nr_entries = 0;
         if (slist && strlist__parse_list(self, slist) != 0)
             goto out_error;
     }

@@ -182,3 +184,17 @@ void strlist__delete(struct strlist *self)
         free(self);
     }
 }
+
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+{
+    struct rb_node *nd;
+
+    for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+        struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+
+        if (!idx--)
+            return pos;
+    }
+
+    return NULL;
+}
@@ -11,7 +11,8 @@ struct str_node {

 struct strlist {
     struct rb_root entries;
-    bool dupstr;
+    unsigned int nr_entries;
+    bool dupstr;
 };

 struct strlist *strlist__new(bool dupstr, const char *slist);

@@ -21,11 +22,17 @@ void strlist__remove(struct strlist *self, struct str_node *sn);
 int strlist__load(struct strlist *self, const char *filename);
 int strlist__add(struct strlist *self, const char *str);

+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
 bool strlist__has_entry(struct strlist *self, const char *entry);

 static inline bool strlist__empty(const struct strlist *self)
 {
-    return rb_first(&self->entries) == NULL;
+    return self->nr_entries == 0;
+}
+
+static inline unsigned int strlist__nr_entries(const struct strlist *self)
+{
+    return self->nr_entries;
 }

 int strlist__parse_list(struct strlist *self, const char *s);
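The new nr_entries counter lets callers ask cheaply how many strings a list holds; perf report's setup_list() uses strlist__nr_entries() == 1 to print the lone value once in the header and elide the now-redundant column. A self-contained analog of that check (toy_list is a stand-in, not perf's strlist):

/* Self-contained analog of the single-entry "elide" check built on
 * strlist__nr_entries(); the types here are stand-ins, not perf's. */
#include <stdio.h>

struct toy_list {
    const char **entries;
    unsigned int nr_entries;
};

int main(void)
{
    const char *dsos[] = { "/lib/libc-2.9.so" };
    struct toy_list list = { dsos, 1 };

    if (list.nr_entries == 1) {
        /* only one value: name it once in the header ... */
        printf("# dso: %s\n", list.entries[0]);
        /* ... and elide the per-row dso column entirely */
    }
    return 0;
}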
@@ -6,9 +6,15 @@
 #include <libelf.h>
 #include <gelf.h>
 #include <elf.h>
+#include <bfd.h>

 const char *sym_hist_filter;

+#ifndef DMGL_PARAMS
+#define DMGL_PARAMS (1 << 0) /* Include function args */
+#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
+#endif
+
 static struct symbol *symbol__new(u64 start, u64 len,
                   const char *name, unsigned int priv_size,
                   u64 obj_start, int verbose)

@@ -65,6 +71,7 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size)
         self->syms = RB_ROOT;
         self->sym_priv_size = sym_priv_size;
         self->find_symbol = dso__find_symbol;
+        self->slen_calculated = 0;
     }

     return self;

@@ -373,36 +380,61 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
          idx < nr_entries; \
          ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

-static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf,
-                       GElf_Ehdr *ehdr, Elf_Scn *scn_dynsym,
-                       GElf_Shdr *shdr_dynsym,
-                       size_t dynsym_idx, int verbose)
+/*
+ * We need to check if we have a .dynsym, so that we can handle the
+ * .plt, synthesizing its symbols, that aren't on the symtabs (be it
+ * .dynsym or .symtab).
+ * And always look at the original dso, not at debuginfo packages, that
+ * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
+ */
+static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
 {
     uint32_t nr_rel_entries, idx;
     GElf_Sym sym;
     u64 plt_offset;
     GElf_Shdr shdr_plt;
     struct symbol *f;
-    GElf_Shdr shdr_rel_plt;
+    GElf_Shdr shdr_rel_plt, shdr_dynsym;
     Elf_Data *reldata, *syms, *symstrs;
-    Elf_Scn *scn_plt_rel, *scn_symstrs;
+    Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
+    size_t dynsym_idx;
+    GElf_Ehdr ehdr;
     char sympltname[1024];
-    int nr = 0, symidx;
+    Elf *elf;
+    int nr = 0, symidx, fd, err = 0;

-    scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt,
+    fd = open(self->name, O_RDONLY);
+    if (fd < 0)
+        goto out;
+
+    elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+    if (elf == NULL)
+        goto out_close;
+
+    if (gelf_getehdr(elf, &ehdr) == NULL)
+        goto out_elf_end;
+
+    scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
+                     ".dynsym", &dynsym_idx);
+    if (scn_dynsym == NULL)
+        goto out_elf_end;
+
+    scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
                       ".rela.plt", NULL);
     if (scn_plt_rel == NULL) {
-        scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt,
+        scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
                           ".rel.plt", NULL);
         if (scn_plt_rel == NULL)
-            return 0;
+            goto out_elf_end;
     }

-    if (shdr_rel_plt.sh_link != dynsym_idx)
-        return 0;
+    err = -1;

-    if (elf_section_by_name(elf, ehdr, &shdr_plt, ".plt", NULL) == NULL)
-        return 0;
+    if (shdr_rel_plt.sh_link != dynsym_idx)
+        goto out_elf_end;
+
+    if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+        goto out_elf_end;

     /*
      * Fetch the relocation section to find the indexes to the GOT

@@ -410,19 +442,19 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
      */
     reldata = elf_getdata(scn_plt_rel, NULL);
     if (reldata == NULL)
-        return -1;
+        goto out_elf_end;

     syms = elf_getdata(scn_dynsym, NULL);
     if (syms == NULL)
-        return -1;
+        goto out_elf_end;

-    scn_symstrs = elf_getscn(elf, shdr_dynsym->sh_link);
+    scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
     if (scn_symstrs == NULL)
-        return -1;
+        goto out_elf_end;

     symstrs = elf_getdata(scn_symstrs, NULL);
     if (symstrs == NULL)
-        return -1;
+        goto out_elf_end;

     nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
     plt_offset = shdr_plt.sh_offset;

@@ -441,7 +473,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
             f = symbol__new(plt_offset, shdr_plt.sh_entsize,
                     sympltname, self->sym_priv_size, 0, verbose);
             if (!f)
-                return -1;
+                goto out_elf_end;

             dso__insert_symbol(self, f);
             ++nr;

@@ -459,19 +491,25 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
             f = symbol__new(plt_offset, shdr_plt.sh_entsize,
                     sympltname, self->sym_priv_size, 0, verbose);
             if (!f)
-                return -1;
+                goto out_elf_end;

             dso__insert_symbol(self, f);
             ++nr;
         }
-    } else {
-        /*
-         * TODO: There are still one more shdr_rel_plt.sh_type
-         * I have to investigate, but probably should be ignored.
-         */
     }

-    return nr;
+    err = 0;
+out_elf_end:
+    elf_end(elf);
+out_close:
+    close(fd);
+
+    if (err == 0)
+        return nr;
+out:
+    fprintf(stderr, "%s: problems reading %s PLT info.\n",
+        __func__, self->name);
+    return 0;
 }

 static int dso__load_sym(struct dso *self, int fd, const char *name,

@@ -485,10 +523,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
     GElf_Shdr shdr;
     Elf_Data *syms;
     GElf_Sym sym;
-    Elf_Scn *sec, *sec_dynsym, *sec_strndx;
+    Elf_Scn *sec, *sec_strndx;
     Elf *elf;
-    size_t dynsym_idx;
-    int nr = 0;
+    int nr = 0, kernel = !strcmp("[kernel]", self->name);

     elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
     if (elf == NULL) {

@@ -504,32 +541,11 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
         goto out_elf_end;
     }

-    /*
-     * We need to check if we have a .dynsym, so that we can handle the
-     * .plt, synthesizing its symbols, that aren't on the symtabs (be it
-     * .dynsym or .symtab)
-     */
-    sec_dynsym = elf_section_by_name(elf, &ehdr, &shdr,
-                     ".dynsym", &dynsym_idx);
-    if (sec_dynsym != NULL) {
-        nr = dso__synthesize_plt_symbols(self, elf, &ehdr,
-                         sec_dynsym, &shdr,
-                         dynsym_idx, verbose);
-        if (nr < 0)
-            goto out_elf_end;
-    }
-
-    /*
-     * But if we have a full .symtab (that is a superset of .dynsym) we
-     * should add the symbols not in the .dynsyn
-     */
     sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
     if (sec == NULL) {
-        if (sec_dynsym == NULL)
+        sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
+        if (sec == NULL)
             goto out_elf_end;
-
-        sec = sec_dynsym;
-        gelf_getshdr(sec, &shdr);
     }

     syms = elf_getdata(sec, NULL);

@@ -555,12 +571,17 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
     nr_syms = shdr.sh_size / shdr.sh_entsize;

     memset(&sym, 0, sizeof(sym));
-    self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+    if (!kernel) {
+        self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
                 elf_section_by_name(elf, &ehdr, &shdr,
                              ".gnu.prelink_undo",
                              NULL) != NULL);
+    } else self->adjust_symbols = 0;
+
     elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
         struct symbol *f;
+        const char *name;
+        char *demangled;
         u64 obj_start;
         struct section *section = NULL;
         int is_label = elf_sym__is_label(&sym);

@@ -599,10 +620,19 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
                 goto out_elf_end;
             }
         }
+        /*
+         * We need to figure out if the object was created from C++ sources
+         * DWARF DW_compile_unit has this, but we don't always have access
+         * to it...
+         */
+        name = elf_sym__name(&sym, symstrs);
+        demangled = bfd_demangle(NULL, name, DMGL_PARAMS | DMGL_ANSI);
+        if (demangled != NULL)
+            name = demangled;

-        f = symbol__new(sym.st_value, sym.st_size,
-                elf_sym__name(&sym, symstrs),
+        f = symbol__new(sym.st_value, sym.st_size, name,
                 self->sym_priv_size, obj_start, verbose);
+        free(demangled);
         if (!f)
             goto out_elf_end;


@@ -668,6 +698,11 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
     if (!ret)
         goto more;

+    if (ret > 0) {
+        int nr_plt = dso__synthesize_plt_symbols(self, verbose);
+        if (nr_plt > 0)
+            ret += nr_plt;
+    }
 out:
     free(name);
     return ret;
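dso__load_sym() now pushes every ELF symbol name through bfd_demangle() (hence the new <bfd.h> include), so C++ symbols come out readable in reports. A minimal standalone sketch; it links against libbfd, and on some distributions also needs -liberty, while newer binutils headers additionally require PACKAGE to be defined before including bfd.h:

/* Minimal bfd_demangle() sketch; build roughly with:
 *   gcc demangle.c -lbfd    (some distros also need -liberty)           */
#define PACKAGE "demangle-sketch"          /* required by some bfd.h versions */
#define PACKAGE_VERSION "1.0"
#include <bfd.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef DMGL_PARAMS
#define DMGL_PARAMS (1 << 0)   /* include function arguments */
#define DMGL_ANSI   (1 << 1)   /* include const, volatile, ... */
#endif

int main(void)
{
    const char *mangled = "_ZNSt6vectorIiSaIiEE9push_backERKi";
    char *demangled = bfd_demangle(NULL, mangled, DMGL_PARAMS | DMGL_ANSI);

    printf("%s\n", demangled ? demangled : mangled);
    free(demangled);    /* bfd_demangle() returns malloc()ed memory, or NULL */
    return 0;
}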
@@ -25,6 +25,7 @@ struct dso {
     struct symbol *(*find_symbol)(struct dso *, u64 ip);
     unsigned int sym_priv_size;
     unsigned char adjust_symbols;
+    unsigned char slen_calculated;
     char name[0];
 };

@@ -50,6 +50,7 @@
 #include <unistd.h>
 #include <stdio.h>
 #include <sys/stat.h>
+#include <sys/statfs.h>
 #include <fcntl.h>
 #include <stddef.h>
 #include <stdlib.h>

@@ -80,6 +81,7 @@
 #include <netdb.h>
 #include <pwd.h>
 #include <inttypes.h>
+#include "../../../include/linux/magic.h"

 #ifndef NO_ICONV
 #include <iconv.h>