x86/microcode: Decrease CPUID use

Get CPUID(1).EAX value once per CPU and propagate value into the callers
instead of conveniently calling it every time.

Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-9-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Borislav Petkov 2017-01-20 21:29:47 +01:00 committed by Thomas Gleixner
parent 8801b3fcb5
commit 309aac7776
2 changed files with 38 additions and 52 deletions

View File

@@ -47,6 +47,7 @@ static struct equiv_cpu_entry *equiv_cpu_table;
*/ */
static struct cont_desc { static struct cont_desc {
struct microcode_amd *mc; struct microcode_amd *mc;
u32 cpuid_1_eax;
u32 psize; u32 psize;
u16 eq_id; u16 eq_id;
u8 *data; u8 *data;
@@ -86,7 +87,6 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
struct equiv_cpu_entry *eq; struct equiv_cpu_entry *eq;
ssize_t orig_size = size; ssize_t orig_size = size;
u32 *hdr = (u32 *)ucode; u32 *hdr = (u32 *)ucode;
u32 eax, ebx, ecx, edx;
u16 eq_id; u16 eq_id;
u8 *buf; u8 *buf;
@@ -102,12 +102,8 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ); eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
eax = 1;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Find the equivalence ID of our CPU in this table: */ /* Find the equivalence ID of our CPU in this table: */
eq_id = find_equiv_id(eq, eax); eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
buf += hdr[2] + CONTAINER_HDR_SZ; buf += hdr[2] + CONTAINER_HDR_SZ;
size -= hdr[2] + CONTAINER_HDR_SZ; size -= hdr[2] + CONTAINER_HDR_SZ;
@@ -205,8 +201,9 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
* *
* Returns true if container found (sets @desc), false otherwise. * Returns true if container found (sets @desc), false otherwise.
*/ */
static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch, static bool
struct cont_desc *ret_desc) apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
bool save_patch, struct cont_desc *ret_desc)
{ {
struct cont_desc desc = { 0 }; struct cont_desc desc = { 0 };
u8 (*patch)[PATCH_MAX_SIZE]; u8 (*patch)[PATCH_MAX_SIZE];
@@ -225,6 +222,8 @@ static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
if (check_current_patch_level(&rev, true)) if (check_current_patch_level(&rev, true))
return false; return false;
desc.cpuid_1_eax = cpuid_1_eax;
scan_containers(ucode, size, &desc); scan_containers(ucode, size, &desc);
if (!desc.eq_id) if (!desc.eq_id)
return ret; return ret;
@@ -267,10 +266,9 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
#endif #endif
} }
void __init load_ucode_amd_bsp(unsigned int family) void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{ {
struct ucode_cpu_info *uci; struct ucode_cpu_info *uci;
u32 eax, ebx, ecx, edx;
struct cpio_data cp; struct cpio_data cp;
const char *path; const char *path;
bool use_pa; bool use_pa;
@@ -285,19 +283,16 @@ void __init load_ucode_amd_bsp(unsigned int family)
use_pa = false; use_pa = false;
} }
if (!get_builtin_microcode(&cp, family)) if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
cp = find_microcode_in_initrd(path, use_pa); cp = find_microcode_in_initrd(path, use_pa);
if (!(cp.data && cp.size)) if (!(cp.data && cp.size))
return; return;
/* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */ /* Needed in load_microcode_amd() */
eax = 1; uci->cpu_sig.sig = cpuid_1_eax;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
uci->cpu_sig.sig = eax;
apply_microcode_early_amd(cp.data, cp.size, true, NULL); apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
} }
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
@@ -308,7 +303,7 @@ void __init load_ucode_amd_bsp(unsigned int family)
* In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch, * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
* which is used upon resume from suspend. * which is used upon resume from suspend.
*/ */
void load_ucode_amd_ap(unsigned int family) void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{ {
struct microcode_amd *mc; struct microcode_amd *mc;
struct cpio_data cp; struct cpio_data cp;
@@ -319,7 +314,7 @@ void load_ucode_amd_ap(unsigned int family)
return; return;
} }
if (!get_builtin_microcode(&cp, family)) if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true); cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
if (!(cp.data && cp.size)) if (!(cp.data && cp.size))
@@ -329,14 +324,14 @@ void load_ucode_amd_ap(unsigned int family)
* This would set amd_ucode_patch above so that the following APs can * This would set amd_ucode_patch above so that the following APs can
* use it directly instead of going down this path again. * use it directly instead of going down this path again.
*/ */
apply_microcode_early_amd(cp.data, cp.size, true, NULL); apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
} }
#else #else
void load_ucode_amd_ap(unsigned int family) void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{ {
struct equiv_cpu_entry *eq; struct equiv_cpu_entry *eq;
struct microcode_amd *mc; struct microcode_amd *mc;
u32 rev, eax; u32 rev;
u16 eq_id; u16 eq_id;
/* 64-bit runs with paging enabled, thus early==false. */ /* 64-bit runs with paging enabled, thus early==false. */
@@ -351,7 +346,7 @@ void load_ucode_amd_ap(unsigned int family)
return; return;
reget: reget:
if (!get_builtin_microcode(&cp, family)) { if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
cp = find_cpio_data(ucode_path, (void *)initrd_start, cp = find_cpio_data(ucode_path, (void *)initrd_start,
initrd_end - initrd_start, NULL); initrd_end - initrd_start, NULL);
@@ -367,17 +362,16 @@ void load_ucode_amd_ap(unsigned int family)
} }
} }
if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) { if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
cont.data = NULL; cont.data = NULL;
cont.size = -1; cont.size = -1;
return; return;
} }
} }
eax = cpuid_eax(0x00000001);
eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ); eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
eq_id = find_equiv_id(eq, eax); eq_id = find_equiv_id(eq, cpuid_1_eax);
if (!eq_id) if (!eq_id)
return; return;
@@ -403,7 +397,7 @@ void load_ucode_amd_ap(unsigned int family)
static enum ucode_state static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size); load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
int __init save_microcode_in_initrd_amd(unsigned int fam) int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{ {
enum ucode_state ret; enum ucode_state ret;
int retval = 0; int retval = 0;
@@ -422,6 +416,8 @@ int __init save_microcode_in_initrd_amd(unsigned int fam)
return -EINVAL; return -EINVAL;
} }
cont.cpuid_1_eax = cpuid_1_eax;
scan_containers(cp.data, cp.size, &cont); scan_containers(cp.data, cp.size, &cont);
if (!cont.eq_id) { if (!cont.eq_id) {
cont.size = -1; cont.size = -1;
@@ -432,7 +428,7 @@ int __init save_microcode_in_initrd_amd(unsigned int fam)
return -EINVAL; return -EINVAL;
} }
ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size); ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), cont.data, cont.size);
if (ret != UCODE_OK) if (ret != UCODE_OK)
retval = -EINVAL; retval = -EINVAL;

View File

@@ -64,10 +64,6 @@ static DEFINE_MUTEX(microcode_mutex);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
/*
* Operations that are run on a target cpu:
*/
struct cpu_info_ctx { struct cpu_info_ctx {
struct cpu_signature *cpu_sig; struct cpu_signature *cpu_sig;
int err; int err;
@@ -76,7 +72,6 @@ struct cpu_info_ctx {
static bool __init check_loader_disabled_bsp(void) static bool __init check_loader_disabled_bsp(void)
{ {
static const char *__dis_opt_str = "dis_ucode_ldr"; static const char *__dis_opt_str = "dis_ucode_ldr";
u32 a, b, c, d;
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
const char *cmdline = (const char *)__pa_nodebug(boot_command_line); const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
@@ -92,16 +87,12 @@ static bool __init check_loader_disabled_bsp(void)
if (!have_cpuid_p()) if (!have_cpuid_p())
return *res; return *res;
a = 1;
c = 0;
native_cpuid(&a, &b, &c, &d);
/* /*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but * completely accurate as xen pv guests don't see that CPUID bit set but
* that's good enough as they don't land on the BSP path anyway. * that's good enough as they don't land on the BSP path anyway.
*/ */
if (c & BIT(31)) if (native_cpuid_ecx(1) & BIT(31))
return *res; return *res;
if (cmdline_find_option_bool(cmdline, option) <= 0) if (cmdline_find_option_bool(cmdline, option) <= 0)
@@ -131,23 +122,22 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
void __init load_ucode_bsp(void) void __init load_ucode_bsp(void)
{ {
int vendor; unsigned int vendor, cpuid_1_eax;
unsigned int family;
if (check_loader_disabled_bsp()) if (check_loader_disabled_bsp())
return; return;
vendor = x86_cpuid_vendor(); vendor = x86_cpuid_vendor();
family = x86_cpuid_family(); cpuid_1_eax = native_cpuid_eax(1);
switch (vendor) { switch (vendor) {
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
if (family >= 6) if (x86_family(cpuid_1_eax) >= 6)
load_ucode_intel_bsp(); load_ucode_intel_bsp();
break; break;
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (family >= 0x10) if (x86_family(cpuid_1_eax) >= 0x10)
load_ucode_amd_bsp(family); load_ucode_amd_bsp(cpuid_1_eax);
break; break;
default: default:
break; break;
@@ -165,22 +155,22 @@ static bool check_loader_disabled_ap(void)
void load_ucode_ap(void) void load_ucode_ap(void)
{ {
int vendor, family; unsigned int vendor, cpuid_1_eax;
if (check_loader_disabled_ap()) if (check_loader_disabled_ap())
return; return;
vendor = x86_cpuid_vendor(); vendor = x86_cpuid_vendor();
family = x86_cpuid_family(); cpuid_1_eax = native_cpuid_eax(1);
switch (vendor) { switch (vendor) {
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
if (family >= 6) if (x86_family(cpuid_1_eax) >= 6)
load_ucode_intel_ap(); load_ucode_intel_ap();
break; break;
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (family >= 0x10) if (x86_family(cpuid_1_eax) >= 0x10)
load_ucode_amd_ap(family); load_ucode_amd_ap(cpuid_1_eax);
break; break;
default: default:
break; break;
@@ -198,7 +188,7 @@ static int __init save_microcode_in_initrd(void)
break; break;
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (c->x86 >= 0x10) if (c->x86 >= 0x10)
return save_microcode_in_initrd_amd(c->x86); return save_microcode_in_initrd_amd(cpuid_eax(1));
break; break;
default: default:
break; break;