mirror of https://gitee.com/openkylin/linux.git
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "This includes perf namespace support kernel side fixes, plus an
  accumulated set of perf tooling fixes - including UAPI header
  synchronization that should make the perf build less noisy"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (31 commits)
  tooling/headers: Synchronize updated s390 and x86 UAPI headers
  tools headers: Syncronize mman.h ABI header
  tools headers: Synchronize prctl.h ABI header
  tools headers: Synchronize KVM arch ABI headers
  tools headers: Synchronize drm/i915_drm.h
  tools headers uapi: Synchronize drm/drm.h
  tools headers: Synchronize perf_event.h header
  tools headers: Synchronize kernel ABI headers wrt SPDX tags
  tools/headers: Synchronize kernel x86 UAPI headers
  perf intel-pt: Bring instruction decoder files into line with the kernel
  perf test: Fix test 21 for s390x
  perf bench numa: Fixup discontiguous/sparse numa nodes
  perf top: Use signal interface for SIGWINCH handler
  perf top: Fix window dimensions change handling
  perf: Fix header.size for namespace events
  perf top: Ignore kptr_restrict when not sampling the kernel
  perf record: Ignore kptr_restrict when not sampling the kernel
  perf report: Ignore kptr_restrict when not sampling the kernel
  perf evlist: Add helper to check if attr.exclude_kernel is set in all evsels
  perf test shell: Fix test case probe libc's inet_pton on s390x
  ...
commit 1c7647253c
@@ -6639,6 +6639,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
	struct perf_namespaces_event *namespaces_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	u16 header_size = namespaces_event->event_id.header.size;
	int ret;

	if (!perf_event_namespaces_match(event))
@@ -6649,7 +6650,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
	ret = perf_output_begin(&handle, event,
				namespaces_event->event_id.header.size);
	if (ret)
		return;
		goto out;

	namespaces_event->event_id.pid = perf_event_pid(event,
							namespaces_event->task);
@@ -6661,6 +6662,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	namespaces_event->event_id.header.size = header_size;
}

static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
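The hunks above are the kernel side of "perf: Fix header.size for namespace events": perf_event_header__init_id() grows event_id.header.size by the sample_id payload of the current event, but the same namespaces_event is handed to every matching event during output, so the original size has to be restored before the next iteration or the header inflates cumulatively. A minimal sketch of that save/restore pattern, using illustrative names rather than the real kernel structures:

/* Illustrative sketch only: a record header shared across consumers,
 * whose size is grown per consumer and must be restored afterwards. */
struct shared_record {
	unsigned short header_size;
};

static void emit_for_one_consumer(struct shared_record *rec, unsigned short id_bytes)
{
	unsigned short saved = rec->header_size;	/* remember the base size */

	rec->header_size += id_bytes;			/* per-consumer growth */
	/* ... write the record using rec->header_size ... */

	rec->header_size = saved;			/* reset for the next consumer */
}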
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
			(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)

/* PL1 Physical Timer Registers */
#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)

/* Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4

/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {

#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)

/* Physical Timer EL0 Registers */
#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)

/* EL0 Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4

/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
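The timer register IDs above are consumed through KVM's one-reg interface. A hedged sketch of reading the guest physical counter from an arm64 VMM (assumes an existing vcpu fd and headers new enough to define KVM_REG_ARM_PTIMER_CNT; error handling kept minimal):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* on arm64 this pulls in the asm/kvm.h definitions above */

static int read_ptimer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PTIMER_CNT,		/* from the synced header */
		.addr = (uint64_t)(unsigned long)cnt,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	return 0;
}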
@@ -6,10 +6,6 @@
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
@@ -4,10 +4,6 @@
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#ifndef __LINUX_KVM_PERF_S390_H
@ -13,173 +13,176 @@
|
|||
/*
|
||||
* Defines x86 CPU feature bits
|
||||
*/
|
||||
#define NCAPINTS 18 /* N 32-bit words worth of info */
|
||||
#define NBUGINTS 1 /* N 32-bit bug flags */
|
||||
#define NCAPINTS 18 /* N 32-bit words worth of info */
|
||||
#define NBUGINTS 1 /* N 32-bit bug flags */
|
||||
|
||||
/*
|
||||
* Note: If the comment begins with a quoted string, that string is used
|
||||
* in /proc/cpuinfo instead of the macro name. If the string is "",
|
||||
* this feature bit is not displayed in /proc/cpuinfo at all.
|
||||
*
|
||||
* When adding new features here that depend on other features,
|
||||
* please update the table in kernel/cpu/cpuid-deps.c as well.
|
||||
*/
|
||||
|
||||
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
|
||||
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
|
||||
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
|
||||
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
|
||||
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
|
||||
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
|
||||
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
|
||||
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
|
||||
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
|
||||
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
|
||||
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
|
||||
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
|
||||
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
|
||||
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
|
||||
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
|
||||
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
|
||||
/* (plus FCMOVcc, FCOMI with FPU) */
|
||||
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
|
||||
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
|
||||
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
|
||||
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
|
||||
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
|
||||
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
|
||||
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
|
||||
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
|
||||
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
|
||||
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
|
||||
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
|
||||
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
|
||||
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
|
||||
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
|
||||
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
|
||||
/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
|
||||
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
|
||||
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
|
||||
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
|
||||
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
|
||||
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
|
||||
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
|
||||
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
|
||||
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
|
||||
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
|
||||
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
|
||||
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
|
||||
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
|
||||
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
|
||||
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
|
||||
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
|
||||
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
|
||||
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
|
||||
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
|
||||
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
|
||||
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
|
||||
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
|
||||
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
|
||||
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
|
||||
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
|
||||
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
|
||||
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
|
||||
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
|
||||
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
|
||||
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
|
||||
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
|
||||
|
||||
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
|
||||
/* Don't duplicate feature flags which are redundant with Intel! */
|
||||
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
|
||||
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
|
||||
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
|
||||
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
|
||||
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
|
||||
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
|
||||
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
|
||||
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
|
||||
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
|
||||
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
|
||||
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
|
||||
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
|
||||
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
|
||||
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
|
||||
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
|
||||
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
|
||||
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
|
||||
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
|
||||
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
|
||||
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
|
||||
|
||||
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
|
||||
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
|
||||
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
|
||||
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
|
||||
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
|
||||
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
|
||||
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
|
||||
|
||||
/* Other features, Linux-defined mapping, word 3 */
|
||||
/* This range is used for feature bits which conflict or are synthesized */
|
||||
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
|
||||
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
|
||||
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
|
||||
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
|
||||
/* cpu types for specific tunings: */
|
||||
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
|
||||
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
|
||||
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
|
||||
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
|
||||
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
|
||||
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
|
||||
#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */
|
||||
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
|
||||
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
|
||||
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
|
||||
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
|
||||
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
|
||||
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
|
||||
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
|
||||
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
|
||||
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
|
||||
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
|
||||
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
|
||||
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
|
||||
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
|
||||
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
|
||||
#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
|
||||
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
|
||||
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
|
||||
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
|
||||
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
|
||||
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
|
||||
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
|
||||
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
|
||||
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
|
||||
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
|
||||
|
||||
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
|
||||
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
|
||||
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
|
||||
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
|
||||
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
|
||||
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
|
||||
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
|
||||
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
|
||||
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
|
||||
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
|
||||
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
|
||||
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
|
||||
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
|
||||
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
|
||||
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
|
||||
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
|
||||
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
|
||||
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
|
||||
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
|
||||
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
|
||||
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
|
||||
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
|
||||
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
|
||||
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
|
||||
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
|
||||
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
|
||||
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
|
||||
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
|
||||
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
|
||||
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
|
||||
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
|
||||
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
|
||||
/* CPU types for specific tunings: */
|
||||
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
|
||||
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
|
||||
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
|
||||
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
|
||||
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
|
||||
#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
|
||||
#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
|
||||
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
|
||||
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
|
||||
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
|
||||
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
|
||||
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
|
||||
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
|
||||
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
|
||||
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
|
||||
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
|
||||
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
|
||||
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
|
||||
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
|
||||
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
|
||||
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
|
||||
#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
|
||||
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
|
||||
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
|
||||
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
|
||||
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
|
||||
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
|
||||
|
||||
/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
|
||||
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
|
||||
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
|
||||
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
|
||||
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
|
||||
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
|
||||
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
|
||||
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
|
||||
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
|
||||
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
|
||||
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
|
||||
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
|
||||
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
|
||||
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
|
||||
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
|
||||
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
|
||||
#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
|
||||
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
|
||||
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
|
||||
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
|
||||
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
|
||||
#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
|
||||
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
|
||||
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
|
||||
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
|
||||
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
|
||||
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
|
||||
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
|
||||
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
|
||||
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
|
||||
#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
|
||||
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
|
||||
|
||||
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
|
||||
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
|
||||
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
|
||||
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
|
||||
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
|
||||
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
|
||||
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
|
||||
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
|
||||
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
|
||||
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
|
||||
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
|
||||
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
|
||||
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
|
||||
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
|
||||
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
|
||||
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
|
||||
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
|
||||
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
|
||||
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
|
||||
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
|
||||
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
|
||||
|
||||
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
|
||||
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
|
||||
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
|
||||
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
|
||||
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
|
||||
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
|
||||
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
|
||||
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
|
||||
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
|
||||
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
|
||||
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
|
||||
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
|
||||
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
|
||||
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
|
||||
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
|
||||
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
|
||||
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
|
||||
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
|
||||
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
|
||||
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
|
||||
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
|
||||
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
|
||||
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
|
||||
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
|
||||
#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */
|
||||
#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
|
||||
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
|
||||
/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
|
||||
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
|
||||
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
|
||||
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
|
||||
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
|
||||
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
|
||||
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
|
||||
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
|
||||
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
|
||||
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
|
||||
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
|
||||
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
|
||||
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
|
||||
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
|
||||
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
|
||||
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
|
||||
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
|
||||
#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
|
||||
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
|
||||
#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
|
||||
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
|
||||
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
|
||||
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
|
||||
#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
|
||||
#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
|
||||
#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
|
||||
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
|
||||
|
||||
/*
|
||||
* Auxiliary flags: Linux defined - For features scattered in various
|
||||
|
@ -187,146 +190,154 @@
|
|||
*
|
||||
* Reuse free bits when adding new feature flags!
|
||||
*/
|
||||
#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
|
||||
#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
|
||||
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
|
||||
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
|
||||
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
|
||||
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
|
||||
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
|
||||
#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
|
||||
#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
|
||||
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
|
||||
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
|
||||
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
|
||||
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
|
||||
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
|
||||
|
||||
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
|
||||
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
|
||||
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
|
||||
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
|
||||
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
|
||||
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
|
||||
|
||||
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
|
||||
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
|
||||
#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
|
||||
#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
|
||||
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
|
||||
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
|
||||
#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
|
||||
#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
|
||||
|
||||
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
|
||||
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
|
||||
|
||||
/* Virtualization flags: Linux defined, word 8 */
|
||||
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
||||
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
|
||||
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
|
||||
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
|
||||
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
|
||||
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
||||
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
|
||||
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
|
||||
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
|
||||
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
|
||||
|
||||
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
|
||||
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
|
||||
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
|
||||
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
|
||||
|
||||
|
||||
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
|
||||
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
|
||||
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
|
||||
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
|
||||
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
|
||||
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
|
||||
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
|
||||
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
|
||||
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
|
||||
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
|
||||
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
|
||||
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
|
||||
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
|
||||
#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
|
||||
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
|
||||
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
|
||||
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
|
||||
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
|
||||
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
|
||||
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
|
||||
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
|
||||
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
|
||||
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
|
||||
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
|
||||
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
|
||||
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
|
||||
#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
|
||||
#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
|
||||
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
|
||||
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
|
||||
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
|
||||
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
|
||||
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
|
||||
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
|
||||
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
|
||||
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
|
||||
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
|
||||
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
|
||||
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
|
||||
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
|
||||
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
|
||||
#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
|
||||
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
|
||||
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
|
||||
#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
|
||||
#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
|
||||
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
|
||||
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
|
||||
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
|
||||
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
|
||||
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
|
||||
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
|
||||
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
|
||||
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
|
||||
#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
|
||||
#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
|
||||
|
||||
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
|
||||
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
|
||||
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
|
||||
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
|
||||
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
|
||||
/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
|
||||
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
|
||||
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
|
||||
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
|
||||
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
|
||||
|
||||
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
|
||||
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
|
||||
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
|
||||
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
|
||||
|
||||
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
|
||||
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
|
||||
#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
|
||||
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
|
||||
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
|
||||
#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
|
||||
|
||||
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
|
||||
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
|
||||
#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
|
||||
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
|
||||
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
|
||||
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
|
||||
|
||||
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
|
||||
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
|
||||
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
|
||||
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
|
||||
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
|
||||
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
|
||||
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
|
||||
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
|
||||
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
|
||||
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
|
||||
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
|
||||
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
|
||||
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
|
||||
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
|
||||
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
|
||||
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
|
||||
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
|
||||
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
|
||||
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
|
||||
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
|
||||
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
|
||||
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
|
||||
|
||||
/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
|
||||
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
|
||||
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
|
||||
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
|
||||
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
|
||||
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
|
||||
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
|
||||
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
|
||||
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
|
||||
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
|
||||
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
|
||||
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
|
||||
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
|
||||
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
|
||||
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
|
||||
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
|
||||
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
|
||||
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
|
||||
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
|
||||
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
|
||||
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
|
||||
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
|
||||
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
|
||||
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
|
||||
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
|
||||
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
|
||||
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
|
||||
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
|
||||
|
||||
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
|
||||
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
|
||||
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
|
||||
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
|
||||
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
|
||||
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
|
||||
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
|
||||
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
|
||||
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
|
||||
#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
|
||||
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
|
||||
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
|
||||
#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
|
||||
#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
|
||||
#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
|
||||
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
|
||||
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
|
||||
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
|
||||
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
|
||||
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
|
||||
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
|
||||
|
||||
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
|
||||
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
|
||||
#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
|
||||
#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
|
||||
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
|
||||
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
|
||||
#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
|
||||
#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
|
||||
|
||||
/*
|
||||
* BUG word(s)
|
||||
*/
|
||||
#define X86_BUG(x) (NCAPINTS*32 + (x))
|
||||
#define X86_BUG(x) (NCAPINTS*32 + (x))
|
||||
|
||||
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
|
||||
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
|
||||
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
|
||||
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
|
||||
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
|
||||
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
|
||||
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
|
||||
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
|
||||
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
|
||||
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
|
||||
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
|
||||
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
|
||||
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
|
||||
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
|
||||
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
|
||||
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
|
||||
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
|
||||
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
|
||||
* to avoid confusion.
|
||||
*/
|
||||
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
|
||||
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
|
||||
#endif
|
||||
#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
|
||||
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
|
||||
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
|
||||
#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
|
||||
#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
|
||||
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
|
||||
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
|
||||
#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
|
|
@@ -16,6 +16,12 @@
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif

#ifdef CONFIG_X86_INTEL_UMIP
# define DISABLE_UMIP 0
#else
# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
#endif

#ifdef CONFIG_X86_64
# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)

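The DISABLE_* masks feed the compile-time side of the kernel's cpu_feature_enabled() machinery: when a feature is configured out (here CONFIG_X86_INTEL_UMIP=n), its bit is folded into the matching DISABLED_MASK word so the check collapses to constant false. A rough sketch of the idea, simplified and not the actual kernel macros:

/* Simplified model of the disabled-features mechanism. */
#define FEATURE_WORD(bit)	((bit) / 32)
#define FEATURE_MASK(bit)	(1u << ((bit) % 32))

/* disabled_masks[16] would carry DISABLE_UMIP when CONFIG_X86_INTEL_UMIP=n. */
static inline int feature_enabled(const unsigned int *disabled_masks,
				  const unsigned int *cpu_caps,
				  unsigned int bit)
{
	if (disabled_masks[FEATURE_WORD(bit)] & FEATURE_MASK(bit))
		return 0;	/* compiled out: always false, no runtime test */
	return !!(cpu_caps[FEATURE_WORD(bit)] & FEATURE_MASK(bit));
}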
@@ -13,6 +13,7 @@
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */

/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */

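MAP_SYNC is only honoured together with MAP_SHARED_VALIDATE on DAX-capable filesystems. A hedged userspace sketch follows; MAP_SHARED_VALIDATE is not part of the hunk above, the file path is illustrative, and on older libc headers the two flags may need to be defined by hand:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/pmem/data", O_RDWR);	/* illustrative DAX-backed file */
	if (fd < 0) { perror("open"); return 1; }

	/* MAP_SYNC: page faults are synchronous, so stores are persistent
	 * without a later fsync/msync; requires MAP_SHARED_VALIDATE. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	/* ... use p ... */
	munmap(p, 4096);
	close(fd);
	return 0;
}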
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
	__u32 pad;
};

/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
	__u32 crtc_id;		/* requested crtc_id */
	__u32 active;		/* return: crtc output is active */
	__u64 sequence;		/* return: most recent vblank sequence */
	__s64 sequence_ns;	/* return: most recent time of first pixel out */
};

/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display
 */
#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001	/* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002	/* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;
	__u32 flags;
	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
	__u64 user_data;	/* user data passed to event */
};

#if defined(__cplusplus)
}
#endif
@@ -819,6 +841,9 @@ extern "C" {

#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)

#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {

#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
#define DRM_EVENT_CRTC_SEQUENCE 0x03

struct drm_event_vblank {
	struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
	__u32 crtc_id; /* 0 on older kernels that do not support this */
};

/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display
 */
struct drm_event_crtc_sequence {
	struct drm_event base;
	__u64 user_data;
	__s64 time_ns;
	__u64 sequence;
};

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
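The new sequence ioctls expose 64-bit vblank counters per CRTC. A hedged sketch of querying the current scanout sequence from userspace (assumes libdrm or kernel headers new enough to carry the definitions above; the device path and CRTC id are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xf86drm.h>	/* brings in the drm.h definitions shown above */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative device node */
	if (fd < 0) { perror("open"); return 1; }

	struct drm_crtc_get_sequence seq;
	memset(&seq, 0, sizeof(seq));
	seq.crtc_id = 42;	/* illustrative CRTC object id from a prior modeset query */

	if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &seq) == 0)
		printf("active=%u sequence=%llu time=%lld ns\n",
		       seq.active, (unsigned long long)seq.sequence,
		       (long long)seq.sequence_ns);
	else
		perror("DRM_IOCTL_CRTC_GET_SEQUENCE");

	close(fd);
	return 0;
}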
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask, nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER 41
#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)

#define I915_PARAM_HUC_STATUS 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
	 * be specified
	 */
	__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)

	__u64 val; /* Return value */
};
/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if using the default
 *   single instruction 8byte read, in order to workaround that use
 *   offset (0x2538 | 1) instead.
 *   single instruction 8byte read, in order to workaround that pass
 *   flag I915_REG_READ_8B_WA in offset field.
 *
 */
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
#define I915_CONTEXT_PARAM_BANNABLE 0x5
#define I915_CONTEXT_PARAM_PRIORITY 0x6
#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
#define I915_CONTEXT_DEFAULT_PRIORITY 0
#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
	__u64 value;
};

@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	__u64 __user mux_regs_ptr;
	__u64 __user boolean_regs_ptr;
	__u64 __user flex_regs_ptr;
	/*
	 * These fields are pointers to tuples of u32 values (register
	 * address, value). For example the expected length of the buffer
	 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};

#if defined(__cplusplus)
@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_KCMP_H
#define _UAPI_LINUX_KCMP_H

@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_SMT_POSSIBLE 147
#define KVM_CAP_HYPERV_SYNIC2 148
#define KVM_CAP_HYPERV_VP_INDEX 149
#define KVM_CAP_S390_AIS_MIGRATION 150

#ifdef KVM_CAP_IRQ_ROUTING

@@ -942,6 +942,7 @@ enum perf_callchain_context {
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */

#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_PRCTL_H
#define _LINUX_PRCTL_H

@@ -197,4 +198,13 @@ struct prctl_mm_map {
# define PR_CAP_AMBIENT_LOWER 3
# define PR_CAP_AMBIENT_CLEAR_ALL 4

/* arm64 Scalable Vector Extension controls */
/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
#define PR_SVE_SET_VL 50 /* set task vector length */
# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
#define PR_SVE_GET_VL 51 /* get task vector length */
/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */

#endif /* _LINUX_PRCTL_H */
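The SVE control values above are used through the prctl() system call. A hedged userspace sketch, assuming an arm64 kernel with SVE support; the fallback defines cover older libc headers that do not yet carry these constants:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL
#define PR_SVE_SET_VL 50
#define PR_SVE_GET_VL 51
#define PR_SVE_VL_LEN_MASK 0xffff
#define PR_SVE_VL_INHERIT (1 << 17)
#endif

int main(void)
{
	/* Request a 32-byte vector length and keep it across exec. */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) == -1)
		perror("PR_SVE_SET_VL");

	int vl = prctl(PR_SVE_GET_VL);	/* returns vl | flags on success */
	if (vl != -1)
		printf("vector length: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);
	return 0;
}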
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
NULL
};

+/*
+ * To get number of numa nodes present.
+ */
+static int nr_numa_nodes(void)
+{
+int i, nr_nodes = 0;
+
+for (i = 0; i < g->p.nr_nodes; i++) {
+if (numa_bitmask_isbitset(numa_nodes_ptr, i))
+nr_nodes++;
+}
+
+return nr_nodes;
+}
+
+/*
+ * To check if given numa node is present.
+ */
+static int is_node_present(int node)
+{
+return numa_bitmask_isbitset(numa_nodes_ptr, node);
+}
+
+/*
+ * To check given numa node has cpus.
+ */
+static bool node_has_cpus(int node)
+{
+struct bitmask *cpu = numa_allocate_cpumask();
+unsigned int i;
+
+if (cpu && !numa_node_to_cpus(node, cpu)) {
+for (i = 0; i < cpu->size; i++) {
+if (numa_bitmask_isbitset(cpu, i))
+return true;
+}
+}
+
+return false; /* lets fall back to nocpus safely */
+}
+
static cpu_set_t bind_to_cpu(int target_cpu)
{
cpu_set_t orig_mask, mask;
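These helpers exist because node IDs can be sparse: the highest node number may exceed the count of nodes actually present, so iterating 0..nr_nodes blindly hits holes. A standalone sketch of the same libnuma checks, assuming libnuma is installed and the program is linked with -lnuma (node numbers in the comments are made up):

    #include <numa.h>
    #include <stdio.h>

    int main(void)
    {
        if (numa_available() < 0)
            return 1;

        int max = numa_max_node();   /* highest node id, e.g. 4 on a sparse box */
        int present = 0;

        for (int i = 0; i <= max; i++)
            if (numa_bitmask_isbitset(numa_nodes_ptr, i))
                present++;           /* nodes that really exist, e.g. 0 and 4 */

        printf("max node id %d, %d nodes present\n", max, present);
        return 0;
    }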
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)

static cpu_set_t bind_to_node(int target_node)
{
-int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
cpu_set_t orig_mask, mask;
int cpu;
int ret;

-BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
BUG_ON(!cpus_per_node);

ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);

@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
int i;

for (i = 0; i < mul; i++) {
-if (t >= g->p.nr_tasks) {
+if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
goto out;
}

@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
sum = 0;

for (node = 0; node < g->p.nr_nodes; node++) {
+if (!is_node_present(node))
+continue;
nr = nodes[node];
nr_min = min(nr, nr_min);
nr_max = max(nr, nr_max);

@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
process_groups = 0;

for (node = 0; node < g->p.nr_nodes; node++) {
-int processes = count_node_processes(node);
+int processes;
+
+if (!is_node_present(node))
+continue;
+processes = count_node_processes(node);
nr = nodes[node];
tprintf(" %2d/%-2d", nr, processes);

@@ -1291,7 +1337,7 @@ static void print_summary(void)

printf("\n ###\n");
printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
-g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
printf(" # %5dx %5ldMB global shared mem operations\n",
g->p.nr_loops, g->p.bytes_global/1024/1024);
printf(" # %5dx %5ldMB process shared mem operations\n",
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
add_man_viewer(value);
return 0;
}
-if (!strstarts(var, "man."))
+if (strstarts(var, "man."))
return add_man_viewer_info(var, value);

return 0;

@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)

if (!perf_cmd)
return "perf";
-else if (!strstarts(perf_cmd, "perf"))
+else if (strstarts(perf_cmd, "perf"))
return perf_cmd;

return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
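For reference, strstarts() in the tools headers returns true when the string begins with the prefix, whereas the old prefixcmp() returned 0 on a match; keeping the '!' during the conversion inverted both tests, which is what the hunks above repair. A minimal sketch of the semantics (the helper below is a local re-implementation, not the perf source):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* True when str begins with prefix, same shape as the tools/ helper. */
    static bool strstarts(const char *str, const char *prefix)
    {
        return strncmp(str, prefix, strlen(prefix)) == 0;
    }

    int main(void)
    {
        printf("%d\n", strstarts("man.viewer", "man.")); /* 1: take the man. branch */
        printf("%d\n", strstarts("report", "perf"));     /* 0: not already a perf-* name */
        return 0;
    }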
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
struct perf_evsel_config_term *err_term;
int rc = 0;

+/*
+ * For initial_delay we need to add a dummy event so that we can track
+ * PERF_RECORD_MMAP while we wait for the initial delay to enable the
+ * real events, the ones asked by the user.
+ */
+if (opts->initial_delay) {
+if (perf_evlist__add_dummy(evlist))
+return -ENOMEM;
+
+pos = perf_evlist__first(evlist);
+pos->tracking = 0;
+pos = perf_evlist__last(evlist);
+pos->tracking = 1;
+pos->attr.enable_on_exec = 1;
+}
+
perf_evlist__config(evlist, opts, &callchain_param);

evlist__for_each_entry(evlist, pos) {
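For context, the dummy event added by perf_evlist__add_dummy() is a software event that counts nothing but can still carry the side-band records (mmap/comm/task) needed to keep address maps accurate while the real events wait for the initial delay. A rough, standalone illustration of such an attribute — the field choices here are illustrative, not copied from perf:

    #include <linux/perf_event.h>
    #include <string.h>

    void init_dummy_tracking_attr(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->type = PERF_TYPE_SOFTWARE;
        attr->config = PERF_COUNT_SW_DUMMY; /* counts nothing */
        attr->size = sizeof(*attr);
        /* side-band records we still want while the real events are disabled */
        attr->mmap = 1;
        attr->comm = 1;
        attr->task = 1;
    }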
@@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
goto out;
}

-err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-machine);
-WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
-"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
-"Check /proc/kallsyms permission or run as root.\n");
+if (!perf_evlist__exclude_kernel(rec->evlist)) {
+err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+machine);
+WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
+"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+"Check /proc/kallsyms permission or run as root.\n");

-err = perf_event__synthesize_modules(tool, process_synthesized_event,
-machine);
-WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
-"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
-"Check /proc/modules permission or run as root.\n");
+err = perf_event__synthesize_modules(tool, process_synthesized_event,
+machine);
+WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
+"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+"Check /proc/modules permission or run as root.\n");
+}

if (perf_guest) {
machines__process_guests(&session->machines,

@@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)

err = -ENOMEM;

-if (symbol_conf.kptr_restrict)
+if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

+if (perf_evlist__exclude_kernel(rep->session->evlist))
+return;
+
if (kernel_map == NULL ||
(kernel_map->dso->hit &&
(kernel_kmap->ref_reloc_sym == NULL ||
@@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
struct perf_evsel *evsel;

evlist__for_each_entry(script->session->evlist, evsel) {
+/*
+ * Already setup? I.e. we may be called twice in cases like
+ * Intel PT, one for the intel_pt// and dummy events, then
+ * for the evsels syntheized from the auxtrace info.
+ *
+ * Ses perf_script__process_auxtrace_info.
+ */
+if (evsel->priv != NULL)
+continue;
+
evsel->priv = perf_evsel_script__new(evsel, script->session->data);
if (evsel->priv == NULL)
goto out_err_fclose;

@@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
return set_maps(script);
}

+#ifdef HAVE_AUXTRACE_SUPPORT
+static int perf_script__process_auxtrace_info(struct perf_tool *tool,
+union perf_event *event,
+struct perf_session *session)
+{
+int ret = perf_event__process_auxtrace_info(tool, event, session);
+
+if (ret == 0) {
+struct perf_script *script = container_of(tool, struct perf_script, tool);
+
+ret = perf_script__setup_per_event_dump(script);
+}
+
+return ret;
+}
+#else
+#define perf_script__process_auxtrace_info 0
+#endif
+
int cmd_script(int argc, const char **argv)
{
bool show_full_info = false;

@@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
.feature = perf_event__process_feature,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
-.auxtrace_info = perf_event__process_auxtrace_info,
+.auxtrace_info = perf_script__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.stat = perf_event__process_stat_event,
@@ -77,6 +77,7 @@
#include "sane_ctype.h"

static volatile int done;
+static volatile int resize;

#define HEADER_LINE_NR 5

@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

-static void perf_top__sig_winch(int sig __maybe_unused,
-siginfo_t *info __maybe_unused, void *arg)
+static void winch_sig(int sig __maybe_unused)
{
-struct perf_top *top = arg;
+resize = 1;
+}
+
+static void perf_top__resize(struct perf_top *top)
+{
get_term_dimensions(&top->winsize);
perf_top__update_print_entries(top);
}

@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
case 'e':
prompt_integer(&top->print_entries, "Enter display entries (lines)");
if (top->print_entries == 0) {
-struct sigaction act = {
-.sa_sigaction = perf_top__sig_winch,
-.sa_flags = SA_SIGINFO,
-};
-perf_top__sig_winch(SIGWINCH, NULL, top);
-sigaction(SIGWINCH, &act, NULL);
+perf_top__resize(top);
+signal(SIGWINCH, winch_sig);
} else {
signal(SIGWINCH, SIG_DFL);
}

@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
-ui__warning(
+if (!perf_evlist__exclude_kernel(top->session->evlist)) {
+ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
" modules" : "");
-if (use_browser <= 0)
-sleep(5);
+if (use_browser <= 0)
+sleep(5);
+}
machine->kptr_restrict_warned = true;
}
@@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)

if (hits == top->samples)
ret = perf_evlist__poll(top->evlist, 100);
+
+if (resize) {
+perf_top__resize(top);
+resize = 0;
+}
}

ret = 0;
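The switch from doing the resize work inside the SIGWINCH handler to merely setting a flag keeps the handler async-signal-safe; the main loop picks the flag up later, as the hunk above shows. A distilled version of that pattern, independent of perf's types:

    #include <signal.h>
    #include <unistd.h>

    /* The handler only sets a flag; the loop does the real work, since
     * terminal queries and friends are not async-signal-safe. */
    static volatile sig_atomic_t resize;

    static void winch_sig(int sig) { (void)sig; resize = 1; }

    int main(void)
    {
        signal(SIGWINCH, winch_sig);
        for (;;) {
            if (resize) {
                /* re-read terminal dimensions here */
                resize = 0;
            }
            pause(); /* wait for the next signal */
        }
    }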
@@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)

get_term_dimensions(&top.winsize);
if (top.print_entries == 0) {
-struct sigaction act = {
-.sa_sigaction = perf_top__sig_winch,
-.sa_flags = SA_SIGINFO,
-};
perf_top__update_print_entries(&top);
-sigaction(SIGWINCH, &act, NULL);
+signal(SIGWINCH, winch_sig);
}

status = __cmd_top(&top);
@@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
if (trace->host == NULL)
return -ENOMEM;

-if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
-return -errno;
+err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
+if (err < 0)
+goto out;

err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->threads, trace__tool_process, false,
trace->opts.proc_map_timeout, 1);
+out:
if (err)
symbol__exit();
@@ -10,8 +10,8 @@

. $(dirname $0)/lib/probe.sh

ld=$(realpath /lib64/ld*.so.* | uniq)
-libc=$(echo $ld | sed 's/ld/libc/g')
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254

trace_libc_inet_pton_backtrace() {
idx=0

@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
done
}

+# Check for IPv6 interface existence
+ip a sh lo | fgrep -q inet6 || exit 2
+
skip_if_no_perf_probe && \
perf probe -q $libc inet_pton && \
trace_libc_inet_pton_backtrace
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2

file=$(mktemp /tmp/temporary_file.XXXXX)

trace_open_vfs_getname() {
-perf trace -e open touch $file 2>&1 | \
-egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
+
+perf trace -e ${svc:-open} touch $file 2>&1 | \
+egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}

@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused

evsel = perf_evlist__first(evlist);
evsel->attr.task = 1;
+#ifdef __s390x__
+evsel->attr.sample_freq = 1000000;
+#else
evsel->attr.sample_freq = 1;
+#endif
evsel->attr.inherit = 0;
evsel->attr.watermark = 0;
evsel->attr.wakeup_events = 1;
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
P_MMAP_FLAG(POPULATE);
P_MMAP_FLAG(STACK);
P_MMAP_FLAG(UNINITIALIZED);
+#ifdef MAP_SYNC
+P_MMAP_FLAG(SYNC);
+#endif
#undef P_MMAP_FLAG

if (flags)
@@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
-return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
+return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
}

int ins__scnprintf(struct ins *ins, char *bf, size_t size,

@@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
if (ops->target.name)
-return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
+return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);

if (ops->target.addr == 0)
return ins__raw_scnprintf(ins, bf, size, ops);

-return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
+return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
}

static struct ins_ops call_ops = {

@@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
c++;
}

-return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}

@@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
if (ops->locked.ins.ops == NULL)
return ins__raw_scnprintf(ins, bf, size, ops);

-printed = scnprintf(bf, size, "%-6.6s ", ins->name);
+printed = scnprintf(bf, size, "%-6s ", ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
size - printed, ops->locked.ops);
}

@@ -448,7 +448,7 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *m
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
-return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
+return scnprintf(bf, size, "%-6s %s,%s", ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}

@@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
-return scnprintf(bf, size, "%-6.6s %s", ins->name,
+return scnprintf(bf, size, "%-6s %s", ins->name,
ops->target.name ?: ops->target.raw);
}

@@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
struct ins_operands *ops __maybe_unused)
{
-return scnprintf(bf, size, "%-6.6s", "nop");
+return scnprintf(bf, size, "%-6s", "nop");
}

static struct ins_ops nop_ops = {

@@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
if (raw || !dl->ins.ops)
-return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
+return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);

return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
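The repeated "%-6.6s" to "%-6s" change matters because the precision part of the format truncates: mnemonics longer than six characters were being cut short in annotate output, while the width alone only pads. A quick standalone illustration of the two format strings:

    #include <stdio.h>

    int main(void)
    {
        /* "%-6.6s" pads to 6 columns but also truncates at 6 characters;
         * "%-6s" only pads, so long mnemonics survive intact. */
        printf("[%-6.6s]\n", "vpmulhrsw");  /* prints [vpmulh] */
        printf("[%-6s]\n",   "vpmulhrsw");  /* prints [vpmulhrsw] */
        return 0;
    }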
@@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
-struct perf_evsel *evsel = perf_evsel__new(&attr);
+struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);

if (evsel == NULL)
return -ENOMEM;

@@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
state_err:
return;
}
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
+{
+struct perf_evsel *evsel;
+
+evlist__for_each_entry(evlist, evsel) {
+if (!evsel->attr.exclude_kernel)
+return false;
+}
+
+return true;
+}
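The new helper reports true only when every event in the evlist has attr.exclude_kernel set, i.e. the user asked for user-space-only samples (for example a cycles:u event), which is why the kptr_restrict warnings elsewhere in this series can be skipped in that case. A rough sketch of the attribute bit it inspects, outside perf's own helpers (illustrative values only):

    #include <linux/perf_event.h>
    #include <string.h>

    /* Roughly what a "cycles:u" event's attr ends up looking like. */
    void init_user_only_cycles(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->type = PERF_TYPE_HARDWARE;
        attr->config = PERF_COUNT_HW_CPU_CYCLES;
        attr->size = sizeof(*attr);
        attr->exclude_kernel = 1; /* the bit perf_evlist__exclude_kernel() checks */
        attr->exclude_hv = 1;
    }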
@@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
union perf_event *event);
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
#endif /* __PERF_EVLIST_H */
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
case PERF_EVSEL__CONFIG_TERM_PERIOD:
-attr->sample_period = term->val.period;
-attr->freq = 0;
+if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
+attr->sample_period = term->val.period;
+attr->freq = 0;
+}
break;
case PERF_EVSEL__CONFIG_TERM_FREQ:
-attr->sample_freq = term->val.freq;
-attr->freq = 1;
+if (!(term->weak && opts->user_freq != UINT_MAX)) {
+attr->sample_freq = term->val.freq;
+attr->freq = 1;
+}
break;
case PERF_EVSEL__CONFIG_TERM_TIME:
if (term->val.time)

@@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
static int
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
-struct perf_stat_evsel *ps = leader->priv;
+struct perf_stat_evsel *ps = leader->stats;
u64 read_format = leader->attr.read_format;
int size = perf_evsel__read_size(leader);
u64 *data = ps->group_data;
@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
bool overwrite;
char *branch;
} val;
+bool weak;
};

struct perf_stat_evsel;
@@ -97,6 +97,16 @@
#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)

+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE 0
+#define INAT_SEG_REG_DEFAULT 1
+#define INAT_SEG_REG_CS 2
+#define INAT_SEG_REG_SS 3
+#define INAT_SEG_REG_DS 4
+#define INAT_SEG_REG_ES 5
+#define INAT_SEG_REG_FS 6
+#define INAT_SEG_REG_GS 7
+
/* Attribute search APIs */
extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
@@ -896,7 +896,7 @@ EndTable

GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
{
int i;

+if (machine == NULL)
+return;
+
machine__destroy_kernel_maps(machine);
map_groups__exit(&machine->kmaps);
dsos__exit(&machine->dsos);
@@ -1116,6 +1116,7 @@ do { \
INIT_LIST_HEAD(&__t->list); \
__t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
__t->val.__name = __val; \
+__t->weak = term->weak; \
list_add_tail(&__t->list, head_terms); \
} while (0)

@@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,

*term = *temp;
INIT_LIST_HEAD(&term->list);
+term->weak = false;

switch (term->type_val) {
case PARSE_EVENTS__TERM_TYPE_NUM:
@@ -101,6 +101,9 @@ struct parse_events_term {
/* error string indexes for within parsed string */
int err_term;
int err_val;
+
+/* Coming from implicit alias */
+bool weak;
};

struct parse_events_error {
@@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
parse_events_terms__purge(&list);
return ret;
}
+/*
+ * Weak terms don't override command line options,
+ * which we don't want for implicit terms in aliases.
+ */
+cloned->weak = true;
list_add_tail(&cloned->list, &list);
}
list_splice(&list, terms);