2011-02-04 19:45:46 +08:00
|
|
|
#ifndef __PERF_ANNOTATE_H
|
|
|
|
#define __PERF_ANNOTATE_H
|
|
|
|
|
|
|
|
#include <stdbool.h>
|
2012-04-26 01:16:03 +08:00
|
|
|
#include <stdint.h>
|
2014-04-26 03:31:02 +08:00
|
|
|
#include <linux/types.h>
|
2011-02-04 19:45:46 +08:00
|
|
|
#include "symbol.h"
|
2012-11-02 13:50:05 +08:00
|
|
|
#include "hist.h"
|
2013-02-07 17:02:08 +08:00
|
|
|
#include "sort.h"
|
2011-02-04 19:45:46 +08:00
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/rbtree.h>
|
2012-09-08 08:43:19 +08:00
|
|
|
#include <pthread.h>
|
2011-02-04 19:45:46 +08:00
|
|
|
|
2016-11-24 22:16:06 +08:00
|
|
|
struct ins_ops;
|
|
|
|
|
|
|
|
/*
 * A disassembled instruction mnemonic, paired with the parse/print
 * operations resolved for it (NULL @ops for instructions we don't
 * special-case).
 */
struct ins {
	const char     *name;	/* mnemonic, e.g. "jmp", "call" */
	struct ins_ops *ops;	/* per-instruction-class callbacks */
};
|
2012-04-19 21:16:27 +08:00
|
|
|
|
2012-04-21 01:38:46 +08:00
|
|
|
/*
 * Parsed operands of one disassembled instruction.
 *
 * @raw is the operand string exactly as the disassembler emitted it.
 * @target describes a branch/call destination when the instruction has
 * one; the anonymous union carries extra views used by specific
 * ins_ops: @source for mov-like instructions, @locked for a lock-prefixed
 * instruction wrapping another one.
 */
struct ins_operands {
	char	*raw;
	struct {
		char	*raw;
		char	*name;
		u64	addr;
		/*
		 * Signed on purpose: a jump target outside the function's
		 * address range can lie *before* the function start, giving
		 * a negative offset (an unsigned type would wrap to a huge
		 * 2's-complement value and break target rendering).
		 */
		s64	offset;
		bool	offset_avail;	/* true when @offset was computed */
	} target;
	union {
		struct {
			char	*raw;
			char	*name;
			u64	addr;
		} source;
		struct {
			struct ins	    ins;	/* the instruction under the lock prefix */
			struct ins_operands *ops;	/* its own parsed operands */
		} locked;
	};
};
|
|
|
|
|
2016-11-17 02:39:50 +08:00
|
|
|
struct arch;
|
|
|
|
|
2012-04-19 00:58:34 +08:00
|
|
|
/*
 * Callbacks implementing instruction-class specific behavior:
 * parsing the operand string, pretty-printing, and releasing
 * whatever parse() allocated.
 */
struct ins_ops {
	void (*free)(struct ins_operands *ops);
	int (*parse)(struct arch *arch, struct ins_operands *ops, struct map *map);
	int (*scnprintf)(struct ins *ins, char *bf, size_t size,
			 struct ins_operands *ops);
};
|
|
|
|
|
|
|
|
/* Instruction classification predicates (keyed off ins->ops). */
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
bool ins__is_lock(const struct ins *ins);
/* Format mnemonic + operands into @bf; returns characters printed. */
int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
|
/*
 * Arch-specific check whether @ins1 followed by @ins2 can be macro-fused
 * into a single micro-op (e.g. x86 cmp/test + jcc); non-x86 arches have
 * no implementation and this returns false there.
 */
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
|
2012-04-19 00:58:34 +08:00
|
|
|
|
2013-03-05 13:53:30 +08:00
|
|
|
struct annotation;
|
|
|
|
|
2012-04-16 02:24:39 +08:00
|
|
|
/*
 * One line of disassembler output, linked into the per-symbol list
 * (see struct annotated_source::source).
 */
struct disasm_line {
	struct list_head    node;	/* list linkage within annotated_source */
	s64		    offset;	/* offset within the symbol; see disasm_line__has_offset() */
	char		    *line;	/* raw text of the objdump output line */
	struct ins	    ins;	/* decoded instruction, if recognized */
	int		    line_nr;	/* source line number (from objdump -l), when available */
	float		    ipc;	/* instructions-per-cycle stat for this line */
	u64		    cycles;	/* cycle count attributed to this line */
	struct ins_operands ops;	/* parsed operands for @ins */
};
|
|
|
|
|
2012-04-26 01:16:03 +08:00
|
|
|
static inline bool disasm_line__has_offset(const struct disasm_line *dl)
|
|
|
|
{
|
perf annotate: Fix jump target outside of function address range
If jump target is outside of function range, perf is not handling it
correctly. Especially when target address is lesser than function start
address, target offset will be negative. But, target address declared to
be unsigned, converts negative number into 2's complement. See below
example. Here target of 'jumpq' instruction at 34cf8 is 34ac0 which is
lesser than function start address(34cf0).
34ac0 - 34cf0 = -0x230 = 0xfffffffffffffdd0
Objdump output:
0000000000034cf0 <__sigaction>:
__GI___sigaction():
34cf0: lea -0x20(%rdi),%eax
34cf3: cmp -bashx1,%eax
34cf6: jbe 34d00 <__sigaction+0x10>
34cf8: jmpq 34ac0 <__GI___libc_sigaction>
34cfd: nopl (%rax)
34d00: mov 0x386161(%rip),%rax # 3bae68 <_DYNAMIC+0x2e8>
34d07: movl -bashx16,%fs:(%rax)
34d0e: mov -bashxffffffff,%eax
34d13: retq
perf annotate before applying patch:
__GI___sigaction /usr/lib64/libc-2.22.so
lea -0x20(%rdi),%eax
cmp -bashx1,%eax
v jbe 10
v jmpq fffffffffffffdd0
nop
10: mov _DYNAMIC+0x2e8,%rax
movl -bashx16,%fs:(%rax)
mov -bashxffffffff,%eax
retq
perf annotate after applying patch:
__GI___sigaction /usr/lib64/libc-2.22.so
lea -0x20(%rdi),%eax
cmp -bashx1,%eax
v jbe 10
^ jmpq 34ac0 <__GI___libc_sigaction>
nop
10: mov _DYNAMIC+0x2e8,%rax
movl -bashx16,%fs:(%rax)
mov -bashxffffffff,%eax
retq
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chris Riyder <chris.ryder@arm.com>
Cc: Kim Phillips <kim.phillips@arm.com>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1480953407-7605-3-git-send-email-ravi.bangoria@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 23:56:47 +08:00
|
|
|
return dl->ops.target.offset_avail;
|
2012-04-26 01:16:03 +08:00
|
|
|
}
|
|
|
|
|
2017-07-20 05:36:45 +08:00
|
|
|
/* Per-address sample accounting: hit count and summed sampling period. */
struct sym_hist_entry {
	u64		nr_samples;
	u64		period;
};
|
|
|
|
|
2012-04-16 02:24:39 +08:00
|
|
|
/* Free a disasm_line and the operand strings it owns. */
void disasm_line__free(struct disasm_line *dl);
/* Next line after @pos that has samples attributed (i.e. a real offset). */
struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
/* Format @dl into @bf; @raw selects unprocessed objdump text. */
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
/*
 * Percentage of event @evidx's samples falling in [@offset, @end) of the
 * annotated symbol; also reports the source @path and raw counts via @sample.
 */
double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
			    s64 end, const char **path, struct sym_hist_entry *sample);
|
2011-02-04 19:45:46 +08:00
|
|
|
|
|
|
|
/*
 * Histogram of samples over a symbol's addresses for one event.
 * Trailing zero-length array: entries are allocated right after the
 * struct, one per byte/instruction slot (see annotated_source).
 */
struct sym_hist {
	u64		      nr_samples;	/* total samples across all addresses */
	struct sym_hist_entry addr[0];		/* per-address entries, allocated inline */
};
|
|
|
|
|
2015-07-18 23:24:48 +08:00
|
|
|
/* Per-basic-block cycle statistics, accumulated from branch-stack samples. */
struct cyc_hist {
	u64	start;		/* start address of the block */
	u64	cycles;		/* cycles for the most recent accounting round */
	u64	cycles_aggr;	/* aggregated cycles across rounds */
	u32	num;
	u32	num_aggr;
	u8	have_start;	/* whether @start is known for this block */
	/* 1 byte padding */
	u16	reset;		/* times the running stats were reset */
};
|
|
|
|
|
2015-06-20 03:36:12 +08:00
|
|
|
/* Per-event sample share for one source line (used with 'perf annotate -l'). */
struct source_line_samples {
	double		percent;	/* this line's share of the event's samples */
	double		percent_sum;	/* cumulative share, for sorting/aggregation */
	u64		nr;		/* raw number of samples on this line */
};
|
|
|
|
|
|
|
|
/*
 * One source-code line in the percentage-sorted rbtree built for
 * --print-lines output. @samples is a flexible-style trailing array
 * sized by @nr_pcnt (one slot per event).
 */
struct source_line {
	struct rb_node	node;
	char		*path;		/* "file:line" location string */
	int		nr_pcnt;	/* number of entries in @samples */
	struct source_line_samples samples[1];
};
|
|
|
|
|
2011-02-08 23:27:39 +08:00
|
|
|
/** struct annotated_source - symbols with hits have this attached, in struct annotation
 *
 * @histograms: Array of addr hit histograms per event being monitored
 * @lines: If 'print_lines' is specified, per source code line percentages
 * @source: source parsed from a disassembler like objdump -dS
 * @cyc_hist: Average cycles per basic block
 *
 * lines is allocated, percentages calculated and all sorted by percentage
 * when the annotation is about to be presented, so the percentages are for
 * one of the entries in the histogram array, i.e. for the event/counter being
 * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
 * returns.
 */
struct annotated_source {
	struct list_head   source;		/* list of struct disasm_line */
	struct source_line *lines;
	int    		   nr_histograms;	/* entries in @histograms */
	size_t		   sizeof_sym_hist;	/* stride of one histogram, incl. addr[] */
	struct cyc_hist	   *cycles_hist;
	struct sym_hist	   histograms[0];	/* variable-size, indexed via annotation__histogram() */
};
|
|
|
|
|
|
|
|
/*
 * Per-symbol annotation state, living in the symbol's private area
 * (retrieved with symbol__annotation()).
 */
struct annotation {
	pthread_mutex_t lock;		/* guards concurrent updates, e.g. from perf top */
	/*
	 * Coverage count of the symbol's hottest basic block, computed
	 * from branch-stack (LBR) samples; other blocks' coverage is
	 * shown relative to this so every symbol has a 100% spot.
	 */
	u64		max_coverage;
	struct annotated_source *src;	/* lazily allocated sample/disassembly data */
};
|
|
|
|
|
2011-02-04 23:43:24 +08:00
|
|
|
static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
|
|
|
|
{
|
2011-02-08 23:27:39 +08:00
|
|
|
return (((void *)¬es->src->histograms) +
|
|
|
|
(notes->src->sizeof_sym_hist * idx));
|
2011-02-04 23:43:24 +08:00
|
|
|
}
|
|
|
|
|
2011-02-04 19:45:46 +08:00
|
|
|
static inline struct annotation *symbol__annotation(struct symbol *sym)
|
|
|
|
{
|
2015-01-14 19:18:05 +08:00
|
|
|
return (void *)sym - symbol_conf.priv_size;
|
2011-02-04 19:45:46 +08:00
|
|
|
}
|
|
|
|
|
2017-07-21 03:28:53 +08:00
|
|
|
/* Account @sample to event @evidx's histogram for the symbol in @ams. */
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
				 int evidx);

/* Account @cycles to the basic block bounded by @start..@ams (branch stacks). */
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles);

/* As addr_map_symbol__inc_samples(), but for a hist_entry at address @addr. */
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
				 int evidx, u64 addr);

/* Allocate the sym_hist arrays backing the symbol's annotation. */
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);

/*
 * Run the disassembler over @sym and populate its annotation source.
 * @parch/@cpuid let the caller receive/supply arch info, used e.g. for
 * the x86 fused-instruction (macro-fusion) checks.
 */
int symbol__disassemble(struct symbol *sym, struct map *map,
			const char *arch_name, size_t privsize,
			struct arch **parch, char *cpuid);
|
2013-12-19 04:10:15 +08:00
|
|
|
|
2016-07-30 03:27:18 +08:00
|
|
|
/*
 * Error codes returned by symbol__disassemble(); turned into strings by
 * symbol__strerror_disassemble().
 */
enum symbol_disassemble_errno {
	SYMBOL_ANNOTATE_ERRNO__SUCCESS		= 0,

	/*
	 * Choose an arbitrary negative big number not to clash with standard
	 * errno since SUS requires the errno has distinct positive values.
	 * See 'Issue 6' in the link below.
	 *
	 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
	 */
	__SYMBOL_ANNOTATE_ERRNO__START		= -10000,

	SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX	= __SYMBOL_ANNOTATE_ERRNO__START,

	__SYMBOL_ANNOTATE_ERRNO__END,
};
|
|
|
|
|
|
|
|
/* Render @errnum (symbol_disassemble_errno or -errno) as text into @buf. */
int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
				 int errnum, char *buf, size_t buflen);

/* stdio (--stdio) annotation output for one symbol. */
int symbol__annotate_printf(struct symbol *sym, struct map *map,
			    struct perf_evsel *evsel, bool full_paths,
			    int min_pcnt, int max_lines, int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
/* Halve histogram counts so live 'perf top' annotation ages out old hits. */
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
/* Free every disasm_line on @head. */
void disasm__purge(struct list_head *head);

bool ui__has_annotation(void);

int symbol__tty_annotate(struct symbol *sym, struct map *map,
			 struct perf_evsel *evsel, bool print_lines,
			 bool full_paths, int min_pcnt, int max_lines);
|
2011-02-04 19:45:46 +08:00
|
|
|
|
2013-09-30 18:07:11 +08:00
|
|
|
#ifdef HAVE_SLANG_SUPPORT
/* Interactive (TUI) annotation browser, only available with slang. */
int symbol__tui_annotate(struct symbol *sym, struct map *map,
			 struct perf_evsel *evsel,
			 struct hist_browser_timer *hbt);
#else
/* No slang: stub keeps callers free of #ifdefs; 0 means "nothing to do". */
static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
				       struct map *map __maybe_unused,
				       struct perf_evsel *evsel  __maybe_unused,
				       struct hist_browser_timer *hbt
				       __maybe_unused)
{
	return 0;
}
#endif
|
|
|
|
|
2011-09-16 05:31:41 +08:00
|
|
|
/* NOTE(review): presumably the -M/--disassembler-style string handed to objdump; confirm at the definition site */
extern const char *disassembler_style;
|
|
|
|
|
2011-02-04 19:45:46 +08:00
|
|
|
#endif /* __PERF_ANNOTATE_H */
|