linux/arch/x86/include/uapi/asm/processor-flags.h


License cleanup: add SPDX license identifier to uapi header files with no license

Many user space API headers are missing licensing information, which makes it hard for compliance tools to determine the correct license.

By default, files without license information fall under the default license of the kernel, which is GPLv2. Marking them GPLv2 would exclude them from being included in non-GPLv2 code, which is obviously not intended. The user space API headers fall under the syscall exception which is in the kernel's COPYING file:

    NOTE! This copyright does *not* cover user programs that use kernel services by normal system calls - this is merely considered normal use of the kernel, and does *not* fall under the heading of "derived work".

Otherwise syscall usage would not be possible.

Update the files which contain no license information with an SPDX license identifier. The chosen identifier is 'GPL-2.0 WITH Linux-syscall-note', which is the officially assigned identifier for the Linux syscall exception. SPDX license identifiers are a legally binding shorthand which can be used instead of the full boilerplate text.

This patch is based on work done by Thomas Gleixner, Kate Stewart and Philippe Ombredanne. See the previous patch in this series for the methodology of how this patch was researched.

Reviewed-by: Kate Stewart <kstewart@linuxfoundation.org>
Reviewed-by: Philippe Ombredanne <pombredanne@nexb.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-11-01 22:08:43 +08:00
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H
#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H
/* Various flags defined: can be included from assembler. */
#include <linux/const.h>
/*
* EFLAGS bits
*/
#define X86_EFLAGS_CF_BIT 0 /* Carry Flag */
#define X86_EFLAGS_CF _BITUL(X86_EFLAGS_CF_BIT)
#define X86_EFLAGS_FIXED_BIT 1 /* Bit 1 - always on */
#define X86_EFLAGS_FIXED _BITUL(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_PF_BIT 2 /* Parity Flag */
#define X86_EFLAGS_PF _BITUL(X86_EFLAGS_PF_BIT)
#define X86_EFLAGS_AF_BIT 4 /* Auxiliary carry Flag */
#define X86_EFLAGS_AF _BITUL(X86_EFLAGS_AF_BIT)
#define X86_EFLAGS_ZF_BIT 6 /* Zero Flag */
#define X86_EFLAGS_ZF _BITUL(X86_EFLAGS_ZF_BIT)
#define X86_EFLAGS_SF_BIT 7 /* Sign Flag */
#define X86_EFLAGS_SF _BITUL(X86_EFLAGS_SF_BIT)
#define X86_EFLAGS_TF_BIT 8 /* Trap Flag */
#define X86_EFLAGS_TF _BITUL(X86_EFLAGS_TF_BIT)
#define X86_EFLAGS_IF_BIT 9 /* Interrupt Flag */
#define X86_EFLAGS_IF _BITUL(X86_EFLAGS_IF_BIT)
#define X86_EFLAGS_DF_BIT 10 /* Direction Flag */
#define X86_EFLAGS_DF _BITUL(X86_EFLAGS_DF_BIT)
#define X86_EFLAGS_OF_BIT 11 /* Overflow Flag */
#define X86_EFLAGS_OF _BITUL(X86_EFLAGS_OF_BIT)
#define X86_EFLAGS_IOPL_BIT 12 /* I/O Privilege Level (2 bits) */
#define X86_EFLAGS_IOPL (_AC(3,UL) << X86_EFLAGS_IOPL_BIT)
#define X86_EFLAGS_NT_BIT 14 /* Nested Task */
#define X86_EFLAGS_NT _BITUL(X86_EFLAGS_NT_BIT)
#define X86_EFLAGS_RF_BIT 16 /* Resume Flag */
#define X86_EFLAGS_RF _BITUL(X86_EFLAGS_RF_BIT)
#define X86_EFLAGS_VM_BIT 17 /* Virtual Mode */
#define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */
#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */
#define X86_EFLAGS_VIP _BITUL(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT 21 /* CPUID detection */
#define X86_EFLAGS_ID _BITUL(X86_EFLAGS_ID_BIT)
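The *_BIT/mask pairs above are meant to be used together. A minimal sketch (not part of the header; the helper names are hypothetical and assume the definitions above are in scope) of decoding a saved EFLAGS image, for example one captured from a signal frame, ptrace, or a VM exit:

/* Extract the 2-bit I/O privilege level field from a saved EFLAGS value. */
static inline int eflags_iopl_example(unsigned long eflags)
{
	return (int)((eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT);
}

/* Test a single flag, e.g. eflags_test_example(fl, X86_EFLAGS_IF). */
static inline int eflags_test_example(unsigned long eflags, unsigned long mask)
{
	return (eflags & mask) != 0;
}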
/*
* Basic CPU control in CR0
*/
#define X86_CR0_PE_BIT 0 /* Protection Enable */
#define X86_CR0_PE _BITUL(X86_CR0_PE_BIT)
#define X86_CR0_MP_BIT 1 /* Monitor Coprocessor */
#define X86_CR0_MP _BITUL(X86_CR0_MP_BIT)
#define X86_CR0_EM_BIT 2 /* Emulation */
#define X86_CR0_EM _BITUL(X86_CR0_EM_BIT)
#define X86_CR0_TS_BIT 3 /* Task Switched */
#define X86_CR0_TS _BITUL(X86_CR0_TS_BIT)
#define X86_CR0_ET_BIT 4 /* Extension Type */
#define X86_CR0_ET _BITUL(X86_CR0_ET_BIT)
#define X86_CR0_NE_BIT 5 /* Numeric Error */
#define X86_CR0_NE _BITUL(X86_CR0_NE_BIT)
#define X86_CR0_WP_BIT 16 /* Write Protect */
#define X86_CR0_WP _BITUL(X86_CR0_WP_BIT)
#define X86_CR0_AM_BIT 18 /* Alignment Mask */
#define X86_CR0_AM _BITUL(X86_CR0_AM_BIT)
#define X86_CR0_NW_BIT 29 /* Not Write-through */
#define X86_CR0_NW _BITUL(X86_CR0_NW_BIT)
#define X86_CR0_CD_BIT 30 /* Cache Disable */
#define X86_CR0_CD _BITUL(X86_CR0_CD_BIT)
#define X86_CR0_PG_BIT 31 /* Paging */
#define X86_CR0_PG _BITUL(X86_CR0_PG_BIT)
/*
* Paging options in CR3
*/
#define X86_CR3_PWT_BIT 3 /* Page Write Through */
#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT)
#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */
#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT)
x86/mm: Use/Fix PCID to optimize user/kernel switches

We can use PCID to retain the TLBs across CR3 switches, including those now part of the user/kernel switch. This increases performance of kernel entry/exit at the cost of more expensive/complicated TLB flushing.

Now that we have two address spaces, one for kernel and one for user space, we need two PCIDs per mm. We use the top PCID bit to indicate a user PCID (just like we use the PFN LSB for the PGD). Since we do TLB invalidation from kernel space, the existing code will only invalidate the kernel PCID; we augment that by marking the corresponding user PCID invalid, and upon switching back to userspace, use a flushing CR3 write for the switch.

In order to access the user_pcid_flush_mask we use PER_CPU storage, which means the previously established SWAPGS vs CR3 ordering is now mandatory and required. Having to do this memory access does require additional registers; most sites have a functioning stack and we can spill one (RAX), while sites without a functional stack need to otherwise provide the second scratch register.

Note: PCID is generally available on Intel Sandybridge and later CPUs.
Note: Up until this point TLB flushing was broken in this series.

Based-on-code-from: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-12-04 22:07:59 +08:00
#define X86_CR3_PCID_BITS 12
#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL))
#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
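A minimal sketch (not part of the header; the function name is hypothetical, and the use of bit 11 as the "user" PCID marker is an assumption taken from the commit message above) of how a CR3 value might be composed from a page-aligned PGD physical address and a PCID:

/* Compose a CR3 value: PGD physical address in the high bits, PCID in bits 0-11. */
static inline unsigned long long cr3_compose_example(unsigned long long pgd_pa,
						     unsigned long long pcid,
						     int user, int noflush)
{
	unsigned long long cr3 = pgd_pa | (pcid & X86_CR3_PCID_MASK);

	if (user)		/* assumed convention: top PCID bit marks the user PCID */
		cr3 |= 1ULL << (X86_CR3_PCID_BITS - 1);
	if (noflush)		/* bit 63: keep the old PCID's TLB entries across the switch */
		cr3 |= X86_CR3_PCID_NOFLUSH;
	return cr3;
}

The real switching logic lives elsewhere in arch/x86; this sketch is only meant to make the bit layout of the definitions above concrete.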
/*
* Intel CPU features in CR4
*/
#define X86_CR4_VME_BIT 0 /* enable vm86 extensions */
#define X86_CR4_VME _BITUL(X86_CR4_VME_BIT)
#define X86_CR4_PVI_BIT 1 /* virtual interrupts flag enable */
#define X86_CR4_PVI _BITUL(X86_CR4_PVI_BIT)
#define X86_CR4_TSD_BIT 2 /* disable time stamp at ipl 3 */
#define X86_CR4_TSD _BITUL(X86_CR4_TSD_BIT)
#define X86_CR4_DE_BIT 3 /* enable debugging extensions */
#define X86_CR4_DE _BITUL(X86_CR4_DE_BIT)
#define X86_CR4_PSE_BIT 4 /* enable page size extensions */
#define X86_CR4_PSE _BITUL(X86_CR4_PSE_BIT)
#define X86_CR4_PAE_BIT 5 /* enable physical address extensions */
#define X86_CR4_PAE _BITUL(X86_CR4_PAE_BIT)
#define X86_CR4_MCE_BIT 6 /* Machine check enable */
#define X86_CR4_MCE _BITUL(X86_CR4_MCE_BIT)
#define X86_CR4_PGE_BIT 7 /* enable global pages */
#define X86_CR4_PGE _BITUL(X86_CR4_PGE_BIT)
#define X86_CR4_PCE_BIT 8 /* enable performance counters at ipl 3 */
#define X86_CR4_PCE _BITUL(X86_CR4_PCE_BIT)
#define X86_CR4_OSFXSR_BIT 9 /* enable fast FPU save and restore */
#define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */
#define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT)
x86/cpufeature: Add User-Mode Instruction Prevention definitions

User-Mode Instruction Prevention is a security feature present in new Intel processors that, when set, prevents the execution of a subset of instructions if such instructions are executed in user mode (CPL > 0). Attempting to execute such instructions causes a general protection exception. The subset of instructions comprises:

 * SGDT - Store Global Descriptor Table
 * SIDT - Store Interrupt Descriptor Table
 * SLDT - Store Local Descriptor Table
 * SMSW - Store Machine Status Word
 * STR  - Store Task Register

This feature is also added to the list of disabled-features to allow a cleaner handling of build-time configuration.

Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: ricardo.neri@intel.com
Link: http://lkml.kernel.org/r/1509935277-22138-7-git-send-email-ricardo.neri-calderon@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-11-06 10:27:51 +08:00
#define X86_CR4_UMIP_BIT 11 /* enable UMIP support */
#define X86_CR4_UMIP _BITUL(X86_CR4_UMIP_BIT)
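To make the UMIP commit message above concrete, here is a hedged user-space sketch of one of the protected instructions: with CR4.UMIP set, executing SGDT at CPL 3 raises a general protection fault instead of returning the descriptor-table base. The struct and function names are illustrative only.

#include <stdint.h>

struct gdt_ptr_example {
	uint16_t limit;
	uint64_t base;
} __attribute__((packed));

/* With X86_CR4_UMIP set, this faults with #GP when run in user mode;
 * without UMIP it leaks the GDT base address to user space. */
static inline void read_gdtr_example(struct gdt_ptr_example *p)
{
	__asm__ volatile("sgdt %0" : "=m" (*p));
}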
#define X86_CR4_LA57_BIT 12 /* enable 5-level page tables */
#define X86_CR4_LA57 _BITUL(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */
#define X86_CR4_VMXE _BITUL(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT 14 /* enable safer mode (TXT) */
#define X86_CR4_SMXE _BITUL(X86_CR4_SMXE_BIT)
#define X86_CR4_FSGSBASE_BIT 16 /* enable RDWRFSGS support */
#define X86_CR4_FSGSBASE _BITUL(X86_CR4_FSGSBASE_BIT)
#define X86_CR4_PCIDE_BIT 17 /* enable PCID support */
#define X86_CR4_PCIDE _BITUL(X86_CR4_PCIDE_BIT)
#define X86_CR4_OSXSAVE_BIT 18 /* enable xsave and xrestore */
#define X86_CR4_OSXSAVE _BITUL(X86_CR4_OSXSAVE_BIT)
#define X86_CR4_SMEP_BIT 20 /* enable SMEP support */
#define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT 21 /* enable SMAP support */
#define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT 22 /* enable Protection Keys support */
#define X86_CR4_PKE _BITUL(X86_CR4_PKE_BIT)
/*
* x86-64 Task Priority Register, CR8
*/
#define X86_CR8_TPR _AC(0x0000000f,UL) /* task priority register */
/*
* AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
*/
/*
* NSC/Cyrix CPU configuration register indexes
*/
#define CX86_PCR0 0x20
#define CX86_GCR 0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
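These indexes are reached through the Cyrix configuration-register protocol: write the index to I/O port 0x22, then read or write the value through port 0x23. A minimal sketch, assuming the definitions above are in scope and the caller already has I/O port access; the helper names are illustrative, mirroring the kernel's own Cyrix accessors:

/* Read a Cyrix configuration register (index port 0x22, data port 0x23). */
static inline unsigned char cyrix_read_example(unsigned char reg)
{
	outb(reg, 0x22);	/* select the register index */
	return inb(0x23);	/* read its current value */
}

/* Write a Cyrix configuration register. */
static inline void cyrix_write_example(unsigned char reg, unsigned char val)
{
	outb(reg, 0x22);
	outb(val, 0x23);
}

For example, cyrix_read_example(CX86_DIR0) would return the device identification register used to distinguish Cyrix models.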
x86/boot: Relocate definition of the initial state of CR0

Both head_32.S and head_64.S utilize the same value to initialize the control register CR0. Also, other parts of the kernel might want to access this initial definition (e.g., emulation code for User-Mode Instruction Prevention uses this state to provide a sane dummy value for CR0 when emulating the smsw instruction). Thus, relocate this definition to a header file from which it can be conveniently accessed.

Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: ricardo.neri@intel.com
Cc: linux-mm@kvack.org
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: linux-arch@vger.kernel.org
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: "Ravi V. Shankar" <ravi.v.shankar@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/1509135945-13762-3-git-send-email-ricardo.neri-calderon@linux.intel.com
2017-10-28 04:25:29 +08:00
#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
X86_CR0_PG)
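As the relocation commit above notes, one consumer of CR0_STATE is the UMIP emulation path, which needs a sane dummy CR0 when emulating SMSW. A minimal sketch (not the kernel's actual implementation; the helper name is hypothetical and assumes the definitions above are in scope), noting that SMSW reports only the low 16 bits of CR0:

/* CR0_STATE expands to 0x80050033; SMSW would see the low word, 0x0033. */
static inline unsigned short umip_dummy_msw_example(void)
{
	return (unsigned short)(CR0_STATE & 0xffff);	/* PE | MP | ET | NE */
}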
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */