perf trace arm64: Use generated syscall table
This should speed up accessing new system calls introduced with the
kernel rather than waiting for libaudit updates to include them.

It also enables users to specify wildcards, for example,
perf trace -e 'open*', just as was already possible on x86, s390, and
powerpc, which means arm64 can now pass the "Check open filename arg
using perf trace + vfs_getname" test.

Signed-off-by: Kim Phillips <kim.phillips@arm.com>
Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Thomas Richter <tmricht@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20180706163454.f714b9ab49ecc8566a0b3565@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
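To illustrate the wildcard support described above: once a per-arch table
mapping syscall ids to names exists, a pattern such as 'open*' can be
resolved by scanning that table. The sketch below is illustrative only;
the table and its ids are made up, and perf's real arm64 table is
generated at build time and matched with perf's own glob helper rather
than fnmatch():

    #include <fnmatch.h>
    #include <stdio.h>

    /* Hypothetical excerpt of a name table; the ids here are made up. */
    static const char *names[] = {
            [12] = "openat",
            [13] = "close",
            [42] = "open_by_handle_at",
    };

    int main(void)
    {
            const char *pattern = "open*";
            size_t i;

            /* Collect every syscall whose name matches the glob. */
            for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                    if (names[i] && !fnmatch(pattern, names[i], 0))
                            printf("%zu: %s\n", i, names[i]);
            }
            return 0;
    }

With the toy table above, the pattern selects openat and
open_by_handle_at; conceptually, expanding 'perf trace -e open*' is the
same kind of scan over the generated arm64 table.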
commit a7f660d657
parent 2b58824356
@@ -54,6 +54,8 @@ endif
 
 ifeq ($(SRCARCH),arm64)
   NO_PERF_REGS := 0
+  NO_SYSCALL_TABLE := 0
+  CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
   LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
 endif
 
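The new include path points at the directory where the build drops the
generated arm64 table. As a rough sketch of its shape (the names, ids and
MAX_ID value below are assumptions for illustration, not the actual
generated output), the table amounts to a designated-initializer array of
names plus a max-id define:

    /* Hypothetical shape of the generated asm/syscalls.c. */
    #define SYSCALLTBL_ARM64_MAX_ID 292     /* assumed value */

    static const char *syscalltbl_arm64[] = {
            [0] = "io_setup",
            [1] = "io_destroy",
            [2] = "io_submit",
            /* ... one entry per syscall number ... */
    };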
@@ -38,6 +38,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_64;
 #include <asm/syscalls_32.c>
 const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
 static const char **syscalltbl_native = syscalltbl_powerpc_32;
+#elif defined(__aarch64__)
+#include <asm/syscalls.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_arm64;
 #endif
 
 struct syscall {
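For reference, a minimal standalone sketch (assuming a table shaped like
the one above; these are not perf's actual helpers in syscalltbl.c) of
how syscalltbl_native and syscalltbl_native_max_id support id-to-name and
name-to-id lookups:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical native table, standing in for the generated one. */
    #define SYSCALLTBL_DEMO_MAX_ID 2

    static const char *syscalltbl_demo[] = {
            [0] = "io_setup",
            [1] = "io_destroy",
            [2] = "io_submit",
    };

    static const char **syscalltbl_native = syscalltbl_demo;
    static const int syscalltbl_native_max_id = SYSCALLTBL_DEMO_MAX_ID;

    /* id -> name: direct index; holes in the table are NULL. */
    static const char *id_to_name(int id)
    {
            if (id < 0 || id > syscalltbl_native_max_id)
                    return NULL;
            return syscalltbl_native[id];
    }

    /*
     * name -> id: linear scan here for simplicity; the in-tree code
     * keeps a sorted copy so it can use bsearch() instead.
     */
    static int name_to_id(const char *name)
    {
            int id;

            for (id = 0; id <= syscalltbl_native_max_id; id++)
                    if (syscalltbl_native[id] && !strcmp(syscalltbl_native[id], name))
                            return id;
            return -1;
    }

    int main(void)
    {
            printf("1 -> %s\n", id_to_name(1));
            printf("io_submit -> %d\n", name_to_id("io_submit"));
            return 0;
    }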