mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86/cpu' into x86/asm, to merge more patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit cb44d0cfc2
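Most of the diff below is a mechanical conversion: the old cpu_has_* convenience macros are removed and callers test CPU feature bits directly. A minimal sketch of the replacement idiom, assuming only the standard x86 cpufeature API (boot_cpu_has(), static_cpu_has(), cpu_has()); the function and variable names are hypothetical, for illustration only:

	#include <asm/cpufeature.h>

	static bool use_clflush;	/* hypothetical module state */

	static int __init example_init(void)
	{
		/* Old style, removed by this merge: if (!cpu_has_avx2) ... */

		/* boot_cpu_has() reads the boot CPU's capability bits; the
		 * plain form, fine for slow paths such as init functions. */
		if (!boot_cpu_has(X86_FEATURE_AVX2))
			return -ENODEV;

		/* static_cpu_has() is patched via alternatives, so it is
		 * preferred on hot paths (TLB flushes, user-copy routines). */
		use_clflush = static_cpu_has(X86_FEATURE_CLFLUSH);

		return 0;
	}

	/* Per-CPU checks pass an explicit cpuinfo, as in init_amd()/init_intel():
	 *
	 *	if (cpu_has(c, X86_FEATURE_XMM2))
	 *		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	 */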
@@ -1,7 +1,6 @@
 #ifndef _ASM_IA64_IOMMU_H
 #define _ASM_IA64_IOMMU_H 1
 
-#define cpu_has_x2apic 0
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
 
@@ -97,8 +97,7 @@ static int __init early_init_dt_scan_serial(unsigned long node,
 		return 0;
 #endif
 
-	*addr64 = fdt_translate_address((const void *)initial_boot_params,
-					node);
+	*addr64 = of_flat_dt_translate_address(node);
 
 	return *addr64 == OF_BAD_ADDR ? 0 : 1;
 }
@@ -562,7 +562,8 @@ static int __init camellia_aesni_init(void)
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+	if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes ||
+	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX2 or AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
@@ -554,7 +554,7 @@ static int __init camellia_aesni_init(void)
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+	if (!cpu_has_avx || !cpu_has_aes || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX or AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
@@ -179,7 +179,7 @@ static struct shash_alg alg = {
 
 static int __init poly1305_simd_mod_init(void)
 {
-	if (!cpu_has_xmm2)
+	if (!boot_cpu_has(X86_FEATURE_XMM2))
 		return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
@@ -538,7 +538,7 @@ static int __init init(void)
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx2 || !cpu_has_osxsave) {
+	if (!cpu_has_avx2 || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX2 instructions are not detected.\n");
 		return -ENODEV;
 	}
@@ -600,7 +600,7 @@ static struct crypto_alg serpent_algs[10] = { {
 
 static int __init serpent_sse2_init(void)
 {
-	if (!cpu_has_xmm2) {
+	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
 		printk(KERN_INFO "SSE2 instructions are not detected.\n");
 		return -ENODEV;
 	}
@@ -677,7 +677,7 @@ static int __init cstate_pmu_init(void)
 {
 	int err;
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;
 
 	err = cstate_init();
@@ -1383,7 +1383,7 @@ static int __init intel_uncore_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return -ENODEV;
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;
 
 	max_packages = topology_max_packages();
@@ -239,10 +239,10 @@ extern void __init check_x2apic(void);
 extern void x2apic_setup(void);
 static inline int x2apic_enabled(void)
 {
-	return cpu_has_x2apic && apic_is_x2apic_enabled();
+	return boot_cpu_has(X86_FEATURE_X2APIC) && apic_is_x2apic_enabled();
 }
 
-#define x2apic_supported()	(cpu_has_x2apic)
+#define x2apic_supported()	(boot_cpu_has(X86_FEATURE_X2APIC))
 #else /* !CONFIG_X86_X2APIC */
 static inline void check_x2apic(void) { }
 static inline void x2apic_setup(void) { }
@@ -119,25 +119,16 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
-#define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
-#define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
-#define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
-#define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
-#define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 /*
  * Do not add any more of those clumsy macros - use static_cpu_has() for
  * fast paths and boot_cpu_has() otherwise!
@@ -183,7 +183,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
 
 static inline int has_transparent_hugepage(void)
 {
-	return cpu_has_pse;
+	return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 #ifdef __HAVE_ARCH_PTE_DEVMAP
@@ -181,7 +181,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 static inline void __flush_tlb_all(void)
 {
-	if (cpu_has_pge)
+	if (static_cpu_has(X86_FEATURE_PGE))
 		__flush_tlb_global();
 	else
 		__flush_tlb();
@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
 
 #define AVX_XOR_SPEED \
 do { \
-	if (cpu_has_avx && cpu_has_osxsave) \
+	if (cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
 		xor_speed(&xor_block_avx); \
 } while (0)
 
 #define AVX_SELECT(FASTEST) \
-	(cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
+	(cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
 
 #else
 
@@ -1561,7 +1561,7 @@ void __init check_x2apic(void)
 		pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
 		x2apic_mode = 1;
 		x2apic_state = X2APIC_ON;
-	} else if (!cpu_has_x2apic) {
+	} else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
 		x2apic_state = X2APIC_DISABLED;
 	}
 }
@@ -751,7 +751,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 >= 0xf)
 		set_cpu_cap(c, X86_FEATURE_K8);
 
-	if (cpu_has_xmm2) {
+	if (cpu_has(c, X86_FEATURE_XMM2)) {
 		/* MFENCE stops RDTSC speculation */
 		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 	}
@@ -152,9 +152,9 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * the TLB when any changes are made to any of the page table entries.
 	 * The operating system must reload CR3 to cause the TLB to be flushed"
 	 *
-	 * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
-	 * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
-	 * to be modified
+	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
+	 * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+	 * to be modified.
 	 */
 	if (c->x86 == 5 && c->x86_model == 9) {
 		pr_info("Disabling PGE capability bit\n");
@@ -456,7 +456,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
-	if (cpu_has_xmm2)
+	if (cpu_has(c, X86_FEATURE_XMM2))
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 
 	if (boot_cpu_has(X86_FEATURE_DS)) {
@@ -468,7 +468,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
-	if (c->x86 == 6 && cpu_has_clflush &&
+	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
 	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
 
@@ -137,7 +137,7 @@ static void prepare_set(void)
 	u32 cr0;
 
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if (cpu_has_pge) {
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4 = __read_cr4();
 		__write_cr4(cr4 & ~X86_CR4_PGE);
 	}
@@ -170,7 +170,7 @@ static void post_set(void)
 	write_cr0(read_cr0() & ~X86_CR0_CD);
 
 	/* Restore value of CR4 */
-	if (cpu_has_pge)
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		__write_cr4(cr4);
 }
 
@@ -741,7 +741,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 	wbinvd();
 
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if (cpu_has_pge) {
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4 = __read_cr4();
 		__write_cr4(cr4 & ~X86_CR4_PGE);
 	}
@@ -771,7 +771,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 	write_cr0(read_cr0() & ~X86_CR0_CD);
 
 	/* Restore value of CR4 */
-	if (cpu_has_pge)
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		__write_cr4(cr4);
 	raw_spin_unlock(&set_atomicity_lock);
 }
@@ -94,7 +94,7 @@ static void __init vmware_platform_setup(void)
  */
 static uint32_t __init vmware_platform(void)
 {
-	if (cpu_has_hypervisor) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		unsigned int eax;
 		unsigned int hyper_vendor_id[3];
 
@@ -522,7 +522,7 @@ static noinline uint32_t __kvm_cpuid_base(void)
 	if (boot_cpu_data.cpuid_level < 0)
 		return 0;	/* So we don't blow up on old processors */
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
 
 	return 0;
@@ -40,7 +40,7 @@
 static inline void flush_tce(void* tceaddr)
 {
 	/* a single tce can't cross a cache line */
-	if (cpu_has_clflush)
+	if (boot_cpu_has(X86_FEATURE_CLFLUSH))
 		clflush(tceaddr);
 	else
 		wbinvd();
@@ -3836,7 +3836,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true, true);
+					boot_cpu_has(X86_FEATURE_GBPAGES),
+					true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
@@ -612,7 +612,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 {
 	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && cpu_has_xmm2)
+	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
 		__copy_user_zeroing(to, from, n);
@@ -629,7 +629,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 {
 	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && cpu_has_xmm2)
+	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_intel_nocache(to, from, n);
 	else
 		__copy_user(to, from, n);
@@ -162,7 +162,7 @@ static __init int setup_hugepagesz(char *opt)
 	unsigned long ps = memparse(opt, &opt);
 	if (ps == PMD_SIZE) {
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
+	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	} else {
 		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
@@ -177,7 +177,7 @@ __setup("hugepagesz=", setup_hugepagesz);
 static __init int gigantic_pages_init(void)
 {
 	/* With compaction or CMA we can allocate gigantic pages at runtime */
-	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
+	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
 }
@@ -157,23 +157,23 @@ static void __init probe_page_size_mask(void)
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (cpu_has_pse && !debug_pagealloc_enabled())
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
 		page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
 
 	/* Enable PSE if available */
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
-	if (cpu_has_pge) {
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	} else
 		__supported_pte_mask &= ~_PAGE_GLOBAL;
 
 	/* Enable 1 GB linear kernel mappings if available: */
-	if (direct_gbpages && cpu_has_gbpages) {
+	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
 		printk(KERN_INFO "Using GB pages for direct mapping\n");
 		page_size_mask |= 1 << PG_LEVEL_1G;
 	} else {
@@ -284,7 +284,7 @@ kernel_physical_mapping_init(unsigned long start,
 	 */
 	mapping_iter = 1;
 
-	if (!cpu_has_pse)
+	if (!boot_cpu_has(X86_FEATURE_PSE))
 		use_pse = 0;
 
 repeat:
@@ -1295,7 +1295,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	struct vmem_altmap *altmap = to_vmem_altmap(start);
 	int err;
 
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
 		err = vmemmap_populate_hugepages(start, end, node, altmap);
 	else if (altmap) {
 		pr_err_once("%s: no cpu support for altmap allocations\n",
@@ -1338,7 +1338,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 		}
 		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
 
-		if (!cpu_has_pse) {
+		if (!boot_cpu_has(X86_FEATURE_PSE)) {
 			next = (addr + PAGE_SIZE) & PAGE_MASK;
 			pmd = pmd_offset(pud, addr);
 			if (pmd_none(*pmd))
@@ -378,7 +378,7 @@ EXPORT_SYMBOL(iounmap);
 int __init arch_ioremap_pud_supported(void)
 {
 #ifdef CONFIG_X86_64
-	return cpu_has_gbpages;
+	return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
 	return 0;
 #endif
@@ -386,7 +386,7 @@ int __init arch_ioremap_pud_supported(void)
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return cpu_has_pse;
+	return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 /*
@@ -1055,7 +1055,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	/*
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
-	while (cpu_has_gbpages && end - start >= PUD_SIZE) {
+	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
 		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pud_pgprot)));
 
@@ -1460,7 +1460,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	 * error case we fall back to cpa_flush_all (which uses
 	 * WBINVD):
 	 */
-	if (!ret && cpu_has_clflush) {
+	if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
 		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
@@ -636,7 +636,7 @@ static int __init ppro_init(char **cpu_type)
 	__u8 cpu_model = boot_cpu_data.x86_model;
 	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */
 
-	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
+	if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
 		return 0;
 
 	/*
@@ -761,7 +761,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		if (cpu_type)
 			break;
 
-		if (!cpu_has_arch_perfmon)
+		if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
 			return -ENODEV;
 
 		/* use arch perfmon as fallback */
@@ -75,7 +75,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 	u64 val;
 	int i;
 
-	if (cpu_has_arch_perfmon) {
+	if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
 
@@ -106,7 +106,7 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
-			if (cpu_has_pse) {
+			if (boot_cpu_has(X86_FEATURE_PSE)) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
@@ -1469,10 +1469,10 @@ static void xen_pvh_set_cr_flags(int cpu)
 	 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
 	 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
 	 */
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
-	if (cpu_has_pge)
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 }
 
@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
 	int cached_ret = -ENOKEY;
 	int ret;
 
+	*_trusted = false;
+
 	for (p = pkcs7->certs; p; p = p->next)
 		p->seen = false;
 
@@ -72,7 +72,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
 
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
+	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		drm_cache_flush_clflush(pages, num_pages);
 		return;
 	}
@@ -105,7 +105,7 @@ void
 drm_clflush_sg(struct sg_table *st)
 {
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
+	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		struct sg_page_iter sg_iter;
 
 		mb();
@@ -129,7 +129,7 @@ void
 drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
+	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		const int size = boot_cpu_data.x86_clflush_size;
 		void *end = addr + length;
 		addr = (void *)(((unsigned long)addr) & -size);
@@ -488,7 +488,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		ret = relocate_entry_cpu(obj, reloc, target_offset);
 	else if (obj->map_and_fenceable)
 		ret = relocate_entry_gtt(obj, reloc, target_offset);
-	else if (cpu_has_clflush)
+	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
 		ret = relocate_entry_clflush(obj, reloc, target_offset);
 	else {
 		WARN_ONCE(1, "Impossible case in relocation handling\n");
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
 
 int max1111_read_channel(int channel)
 {
+	if (!the_max1111 || !the_max1111->spi)
+		return -ENODEV;
+
 	return max1111_read(&the_max1111->spi->dev, channel);
 }
 EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
 {
 	struct max1111_data *data = spi_get_drvdata(spi);
 
+#ifdef CONFIG_SHARPSL_PM
+	the_max1111 = NULL;
+#endif
 	hwmon_device_unregister(data->hwmon_dev);
 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
@@ -599,7 +599,7 @@ void __init lguest_arch_host_init(void)
 	 * doing this.
 	 */
 	get_online_cpus();
-	if (cpu_has_pge) { /* We have a broader idea of "global". */
+	if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
 		/* Remember that this was originally set (for cleanup). */
 		cpu_had_pge = 1;
 		/*
@@ -2425,7 +2425,7 @@ static __init uint32_t visorutil_spar_detect(void)
 {
 	unsigned int eax, ebx, ecx, edx;
 
-	if (cpu_has_hypervisor) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		/* check the ID */
 		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
 		return (ebx == UNISYS_SPAR_ID_EBX) &&
@@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g,
 	struct dlm_cluster *cl = NULL;
 	struct dlm_spaces *sps = NULL;
 	struct dlm_comms *cms = NULL;
-	void *gps = NULL;
 
 	cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
 	sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
 	cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
 
-	if (!cl || !gps || !sps || !cms)
+	if (!cl || !sps || !cms)
 		goto fail;
 
 	config_group_init_type_name(&cl->group, name, &cluster_type);