linux/arch/x86/include/asm/special_insns.h

#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H
#ifdef __KERNEL__
#include <asm/nops.h>
static inline void native_clts(void)
{
	asm volatile("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
extern unsigned long __force_order;
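
/*
 * Illustrative sketch only (the caller below and its use of X86_CR0_WP are
 * assumptions, not part of this header): because every read accessor writes
 * the dummy variable and every write accessor reads it, the compiler sees a
 * dependency chain and cannot reorder the control register accesses against
 * each other:
 *
 *	unsigned long cr0 = native_read_cr0();	// "=m" (__force_order)
 *
 *	native_write_cr0(cr0 & ~X86_CR0_WP);	// "m" (__force_order)
 *	// ... briefly write to normally write-protected pages ...
 *	native_write_cr0(cr0);			// cannot be hoisted above
 */
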
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/*
	 * This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so the read can never fail. On 32-bit, a faulting read is
	 * fixed up via the exception table entry and returns the initial
	 * value 0 (from the "0" (0) input constraint).
	 */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
/* On 64-bit, CR8 provides access to the local APIC Task Priority Register. */
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 __read_pkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction. Places PKRU contents into EAX,
	 * clears EDX and requires that ecx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void __write_pkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction. Loads contents in EAX to PKRU,
	 * requires that ecx = edx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(ecx), "d"(edx));
}
#else
static inline u32 __read_pkru(void)
{
	return 0;
}

static inline void __write_pkru(u32 pkru)
{
}
#endif
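
/*
 * Illustrative sketch only ('pkey' is an assumed variable, not defined here):
 * each protection key owns two bits in PKRU, the access-disable bit at
 * 2*pkey and the write-disable bit at 2*pkey + 1, so revoking all access for
 * a key is a read-modify-write of the register:
 *
 *	u32 pkru = __read_pkru();
 *
 *	pkru |= 1u << (2 * pkey);	// set the access-disable bit
 *	__write_pkru(pkru);
 */
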
static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

extern asmlinkage void native_load_gs_index(unsigned);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

static inline unsigned long read_cr3(void)
{
	return native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

static inline unsigned long __read_cr4_safe(void)
{
	return native_read_cr4_safe();
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return native_read_cr8();
}

static inline void write_cr8(unsigned long x)
{
	native_write_cr8(x);
}

static inline void load_gs_index(unsigned selector)
{
	native_load_gs_index(selector);
}
#endif

/* Clear the 'TS' bit */
static inline void clts(void)
{
	native_clts();
}

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)
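
/*
 * Illustrative sketch only (the FPU usage around it is an assumption, not
 * code from this header): clts()/stts() toggle CR0.TS, which determines
 * whether the next FPU/SSE instruction raises a device-not-available (#NM)
 * fault, the hook used for lazy FPU context switching:
 *
 *	clts();			// clear TS: FPU instructions run normally
 *	// ... use FPU/SSE state ...
 *	stts();			// set TS: next FPU use traps with #NM again
 */
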
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}
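
/*
 * Note on the encodings used above and in clwb() below: the alternatives need
 * one spare byte in front of the CLFLUSH opcode so that it can be patched
 * into CLFLUSHOPT by turning that byte into a 0x66 prefix. A DS segment
 * prefix (NOP_DS_PREFIX) is used instead of a one-byte NOP; both are
 * functionally no-ops for plain CLFLUSH, but a prefixed CLFLUSH is expected
 * to execute faster than CLFLUSH followed by a NOP. CLWB itself is emitted as
 * hard-coded bytes (a 0x66-prefixed XSAVEOPT encoding) because not every
 * assembler the kernel supports can assemble the CLWB mnemonic.
 */
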
static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}
/**
 * pcommit_sfence() - persistent commit and fence
 *
 * The PCOMMIT instruction ensures that data that has been flushed from the
 * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
 * memory and is durable on the DIMM. The primary use case for this is
 * persistent memory.
 *
 * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
 * with appropriate fencing.
 *
 * Example:
 * void flush_and_commit_buffer(void *vaddr, unsigned int size)
 * {
 *	unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
 *	void *vend = vaddr + size;
 *	void *p;
 *
 *	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
 *	     p < vend; p += boot_cpu_data.x86_clflush_size)
 *		clwb(p);
 *
 *	// SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
 *	// MFENCE via mb() also works
 *	wmb();
 *
 *	// PCOMMIT and the required SFENCE for ordering
 *	pcommit_sfence();
 * }
 *
 * After this function completes, the data pointed to by 'vaddr' has been
 * accepted to memory and will be durable if 'vaddr' points to persistent
 * memory.
 *
 * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
 * things we include both the PCOMMIT and the required SFENCE in the
 * alternatives generated by pcommit_sfence().
 */
static inline void pcommit_sfence(void)
{
	alternative(ASM_NOP7,
		    ".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
		    "sfence",
		    X86_FEATURE_PCOMMIT);
}
#define nop() asm volatile ("nop")
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SPECIAL_INSNS_H */