i386: Clean up cache CPUID code

Always initialize the CPUCaches structs with cache information, even
if legacy_cache=true.  Use separate CPUCaches structs for
CPUID[2], CPUID[4], and the AMD CPUID leaves.

This considerably simplifies the logic inside cpu_x86_cpuid().

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Babu Moger <babu.moger@amd.com>
Message-Id: <1527176614-26271-2-git-send-email-babu.moger@amd.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Author: Eduardo Habkost
Date:   2018-05-24 11:43:30 -04:00
Commit: a9f27ea9ad
Parent: 0d2fa03dae

2 changed files with 67 additions and 64 deletions
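The shape of the change, for readers skimming the diff: the nullable
CPUX86State.cache_info pointer, and the "env->cache_info &&
!cpu->legacy_cache" test repeated at every CPUID call site, are replaced
by per-leaf CPUCaches values that are always initialized at realize time.
Below is a minimal stand-alone sketch of that pattern (the scaffolding,
EnvSketch, realize(), and the two-field structs, is illustrative only,
not QEMU's actual layout):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct CPUCacheInfo { int level; int size_kb; } CPUCacheInfo;
    typedef struct CPUCaches {
        CPUCacheInfo *l1d_cache;
        CPUCacheInfo *l1i_cache;
    } CPUCaches;

    /* legacy tables, kept so old guests keep seeing the same CPUID data */
    static CPUCacheInfo legacy_l1d = { 1, 16 };
    static CPUCacheInfo legacy_l1i = { 1, 16 };

    /* model-provided table (stands in for epyc_cache_info) */
    static CPUCacheInfo model_l1d = { 1, 32 };
    static CPUCacheInfo model_l1i = { 1, 64 };
    static CPUCaches model_cache_info = {
        .l1d_cache = &model_l1d,
        .l1i_cache = &model_l1i,
    };

    typedef struct EnvSketch {
        bool legacy_cache;
        /* one copy per CPUID consumer; members never NULL after realize() */
        CPUCaches cache_info_cpuid2, cache_info_cpuid4;
    } EnvSketch;

    static void realize(EnvSketch *env, const CPUCaches *model_info)
    {
        if (!env->legacy_cache && model_info) {
            /* one struct copy serves every CPUID leaf */
            env->cache_info_cpuid2 = env->cache_info_cpuid4 = *model_info;
        } else {
            /* build legacy data, which may differ from leaf to leaf */
            env->cache_info_cpuid2.l1d_cache = &legacy_l1d;
            env->cache_info_cpuid2.l1i_cache = &legacy_l1i;
            env->cache_info_cpuid4 = env->cache_info_cpuid2;
        }
    }

    int main(void)
    {
        EnvSketch env = { .legacy_cache = false };
        realize(&env, &model_cache_info);
        /* CPUID emulation can now dereference unconditionally */
        printf("L1D: %d KiB\n", env.cache_info_cpuid4.l1d_cache->size_kb);
        return 0;
    }

The same selection appears for real in the x86_cpu_realizefn() hunk
below; with it in place, every branch of cpu_x86_cpuid() shrinks to a
plain struct access.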

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c

@@ -1112,7 +1112,7 @@ struct X86CPUDefinition {
 };
 
 static CPUCaches epyc_cache_info = {
-    .l1d_cache = {
+    .l1d_cache = &(CPUCacheInfo) {
         .type = DCACHE,
         .level = 1,
         .size = 32 * KiB,
@@ -1124,7 +1124,7 @@ static CPUCaches epyc_cache_info = {
         .self_init = 1,
         .no_invd_sharing = true,
     },
-    .l1i_cache = {
+    .l1i_cache = &(CPUCacheInfo) {
         .type = ICACHE,
         .level = 1,
         .size = 64 * KiB,
@@ -1136,7 +1136,7 @@ static CPUCaches epyc_cache_info = {
         .self_init = 1,
         .no_invd_sharing = true,
     },
-    .l2_cache = {
+    .l2_cache = &(CPUCacheInfo) {
         .type = UNIFIED_CACHE,
         .level = 2,
         .size = 512 * KiB,
@@ -1146,7 +1146,7 @@ static CPUCaches epyc_cache_info = {
         .sets = 1024,
         .lines_per_tag = 1,
     },
-    .l3_cache = {
+    .l3_cache = &(CPUCacheInfo) {
         .type = UNIFIED_CACHE,
         .level = 3,
         .size = 8 * MiB,
@@ -3340,9 +3340,8 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
         env->features[w] = def->features[w];
     }
 
-    /* Store Cache information from the X86CPUDefinition if available */
-    env->cache_info = def->cache_info;
-    cpu->legacy_cache = def->cache_info ? 0 : 1;
+    /* legacy-cache defaults to 'off' if CPU model provides cache info */
+    cpu->legacy_cache = !def->cache_info;
 
     /* Special cases not set in the X86CPUDefinition structs: */
     /* TODO: in-kernel irqchip for hvf */
@@ -3693,21 +3692,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         if (!cpu->enable_l3_cache) {
             *ecx = 0;
         } else {
-            if (env->cache_info && !cpu->legacy_cache) {
-                *ecx = cpuid2_cache_descriptor(&env->cache_info->l3_cache);
-            } else {
-                *ecx = cpuid2_cache_descriptor(&legacy_l3_cache);
-            }
-        }
-        if (env->cache_info && !cpu->legacy_cache) {
-            *edx = (cpuid2_cache_descriptor(&env->cache_info->l1d_cache) << 16) |
-                   (cpuid2_cache_descriptor(&env->cache_info->l1i_cache) << 8) |
-                   (cpuid2_cache_descriptor(&env->cache_info->l2_cache));
-        } else {
-            *edx = (cpuid2_cache_descriptor(&legacy_l1d_cache) << 16) |
-                   (cpuid2_cache_descriptor(&legacy_l1i_cache) << 8) |
-                   (cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2));
+            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
         }
+        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
+               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
+               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
         break;
     case 4:
         /* cache info: needed for Core compatibility */
@@ -3720,35 +3709,27 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
             }
         } else {
             *eax = 0;
-            CPUCacheInfo *l1d, *l1i, *l2, *l3;
-            if (env->cache_info && !cpu->legacy_cache) {
-                l1d = &env->cache_info->l1d_cache;
-                l1i = &env->cache_info->l1i_cache;
-                l2 = &env->cache_info->l2_cache;
-                l3 = &env->cache_info->l3_cache;
-            } else {
-                l1d = &legacy_l1d_cache;
-                l1i = &legacy_l1i_cache;
-                l2 = &legacy_l2_cache;
-                l3 = &legacy_l3_cache;
-            }
             switch (count) {
             case 0: /* L1 dcache info */
-                encode_cache_cpuid4(l1d, 1, cs->nr_cores,
+                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
+                                    1, cs->nr_cores,
                                     eax, ebx, ecx, edx);
                 break;
             case 1: /* L1 icache info */
-                encode_cache_cpuid4(l1i, 1, cs->nr_cores,
+                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
+                                    1, cs->nr_cores,
                                     eax, ebx, ecx, edx);
                 break;
             case 2: /* L2 cache info */
-                encode_cache_cpuid4(l2, cs->nr_threads, cs->nr_cores,
+                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
+                                    cs->nr_threads, cs->nr_cores,
                                     eax, ebx, ecx, edx);
                 break;
             case 3: /* L3 cache info */
                 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                 if (cpu->enable_l3_cache) {
-                    encode_cache_cpuid4(l3, (1 << pkg_offset), cs->nr_cores,
+                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
+                                        (1 << pkg_offset), cs->nr_cores,
                                         eax, ebx, ecx, edx);
                     break;
                 }
@@ -3961,13 +3942,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
         *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
                (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
-        if (env->cache_info && !cpu->legacy_cache) {
-            *ecx = encode_cache_cpuid80000005(&env->cache_info->l1d_cache);
-            *edx = encode_cache_cpuid80000005(&env->cache_info->l1i_cache);
-        } else {
-            *ecx = encode_cache_cpuid80000005(&legacy_l1d_cache_amd);
-            *edx = encode_cache_cpuid80000005(&legacy_l1i_cache_amd);
-        }
+        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
+        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
         break;
     case 0x80000006:
         /* cache info (L2 cache) */
@@ -3983,17 +3959,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                (L2_DTLB_4K_ENTRIES << 16) | \
                (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
                (L2_ITLB_4K_ENTRIES);
-        if (env->cache_info && !cpu->legacy_cache) {
-            encode_cache_cpuid80000006(&env->cache_info->l2_cache,
-                                       cpu->enable_l3_cache ?
-                                       &env->cache_info->l3_cache : NULL,
-                                       ecx, edx);
-        } else {
-            encode_cache_cpuid80000006(&legacy_l2_cache_amd,
-                                       cpu->enable_l3_cache ?
-                                       &legacy_l3_cache : NULL,
-                                       ecx, edx);
-        }
+        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
+                                   cpu->enable_l3_cache ?
+                                   env->cache_info_amd.l3_cache : NULL,
+                                   ecx, edx);
         break;
     case 0x80000007:
         *eax = 0;
@@ -4690,6 +4659,37 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
             cpu->phys_bits = 32;
         }
     }
 
+    /* Cache information initialization */
+    if (!cpu->legacy_cache) {
+        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
+            char *name = x86_cpu_class_get_model_name(xcc);
+            error_setg(errp,
+                       "CPU model '%s' doesn't support legacy-cache=off", name);
+            g_free(name);
+            return;
+        }
+        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
+            *xcc->cpu_def->cache_info;
+    } else {
+        /* Build legacy cache information */
+        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
+        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
+        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
+        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
+
+        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
+        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
+        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
+        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
+
+        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
+        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
+        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
+        env->cache_info_amd.l3_cache = &legacy_l3_cache;
+    }
+
     cpu_exec_realizefn(cs, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);
@@ -5173,11 +5173,10 @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
     DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
     /*
-     * lecacy_cache defaults to CPU model being chosen. This is set in
-     * x86_cpu_load_def based on cache_info which is initialized in
-     * builtin_x86_defs
+     * lecacy_cache defaults to true unless the CPU model provides its
+     * own cache information (see x86_cpu_load_def()).
      */
-    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, false),
+    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
     /*
      * From "Requirements for Implementing the Microsoft

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h

@@ -1098,10 +1098,10 @@ typedef struct CPUCacheInfo {
 } CPUCacheInfo;
 
 typedef struct CPUCaches {
-        CPUCacheInfo l1d_cache;
-        CPUCacheInfo l1i_cache;
-        CPUCacheInfo l2_cache;
-        CPUCacheInfo l3_cache;
+        CPUCacheInfo *l1d_cache;
+        CPUCacheInfo *l1i_cache;
+        CPUCacheInfo *l2_cache;
+        CPUCacheInfo *l3_cache;
 } CPUCaches;
 
 typedef struct CPUX86State {
@@ -1293,7 +1293,11 @@ typedef struct CPUX86State {
     /* Features that were explicitly enabled/disabled */
     FeatureWordArray user_features;
     uint32_t cpuid_model[12];
-    CPUCaches *cache_info;
+    /* Cache information for CPUID.  When legacy-cache=on, the cache data
+     * on each CPUID leaf will be different, because we keep compatibility
+     * with old QEMU versions.
+     */
+    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
 
     /* MTRRs */
     uint64_t mtrr_fixed[11];
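One C detail worth calling out in the epyc_cache_info and CPUCaches
hunks: the new initializers take the address of a compound literal. At
file scope a compound literal has static storage duration (C99
6.5.2.5), so its address is an address constant and may appear in a
static initializer. A minimal demo, independent of QEMU:

    #include <stdio.h>

    typedef struct CPUCacheInfo { int level; int size_kb; } CPUCacheInfo;
    typedef struct CPUCaches { CPUCacheInfo *l1d_cache; } CPUCaches;

    static CPUCaches caches = {
        /* the literal below lives for the whole program, like a named
         * static object, so storing its address here is valid */
        .l1d_cache = &(CPUCacheInfo) { .level = 1, .size_kb = 32 },
    };

    int main(void)
    {
        printf("L1D: level %d, %d KiB\n",
               caches.l1d_cache->level, caches.l1d_cache->size_kb);
        return 0;
    }

Inside a function the same literal would have automatic storage
duration instead, so the pattern is only safe for file-scope tables
like the ones this patch defines.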