cputlb: Move env->vtlb_index to env->tlb_d.vindex

The rest of the tlb victim cache is per-tlb; the next-use index
should be as well.

Tested-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2018-10-19 12:46:18 -07:00
Commit: d5363e5849
Parent: 1308e02671

2 changed files with 5 additions and 5 deletions
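For context, the code touched here implements a small "victim" TLB per MMU index: when a live TLB entry is displaced, it is pushed into a per-mmu_idx ring of CPU_VTLB_SIZE slots, and the slot to reuse next is chosen round-robin by this index. The following is a minimal standalone sketch of that mechanism, not QEMU code; the field name vindex and the constant CPU_VTLB_SIZE mirror the diff below, while NB_MMU_MODES, the entry type and the helpers are simplified stand-ins.

/*
 * Minimal standalone sketch (not QEMU code) of a per-mmu_idx victim TLB
 * with a round-robin next-use index.  "vindex" and CPU_VTLB_SIZE mirror
 * the diff below; everything else is a simplified stand-in.
 */
#include <stddef.h>
#include <stdint.h>

#define NB_MMU_MODES  4
#define CPU_VTLB_SIZE 8

typedef struct {
    uint64_t addr;                  /* stand-in for a real CPUTLBEntry */
} TLBEntry;

typedef struct {
    size_t vindex;                  /* next victim slot to reuse */
} TLBDesc;

typedef struct {
    TLBDesc  desc[NB_MMU_MODES];    /* per-mmu_idx state, like CPUTLBDesc */
    TLBEntry vtable[NB_MMU_MODES][CPU_VTLB_SIZE];
} VictimTLB;

/* Evict a displaced entry into one mmu_idx's victim ring, round-robin. */
static void evict_to_victim(VictimTLB *v, int mmu_idx, TLBEntry old)
{
    size_t vidx = v->desc[mmu_idx].vindex++ % CPU_VTLB_SIZE;
    v->vtable[mmu_idx][vidx] = old;
}

/* Flushing one mmu_idx can now reset its index along with its entries. */
static void flush_one_mmuidx(VictimTLB *v, int mmu_idx)
{
    for (size_t i = 0; i < CPU_VTLB_SIZE; i++) {
        v->vtable[mmu_idx][i].addr = UINT64_MAX;
    }
    v->desc[mmu_idx].vindex = 0;
}

int main(void)
{
    VictimTLB v = {0};
    TLBEntry stale = { .addr = 0x1000 };

    evict_to_victim(&v, 1, stale);  /* lands in slot 0 of mmu_idx 1 */
    flush_one_mmuidx(&v, 1);        /* resets only mmu_idx 1's state */
    return 0;
}

The point of the commit is visible in flush_one_mmuidx above: once the index lives next to the rest of the per-mmu_idx state, a single-MMU-index flush can reset everything it owns, and the global flush no longer has to reach into a shared env->vtlb_index.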

accel/tcg/cputlb.c

@@ -119,6 +119,7 @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
     env->tlb_d[mmu_idx].large_page_addr = -1;
     env->tlb_d[mmu_idx].large_page_mask = -1;
+    env->tlb_d[mmu_idx].vindex = 0;
 }
 
 /* This is OK because CPU architectures generally permit an
@@ -149,8 +150,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
     qemu_spin_unlock(&env->tlb_c.lock);
 
     cpu_tb_jmp_cache_clear(cpu);
-
-    env->vtlb_index = 0;
 }
 
 static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
@@ -667,7 +666,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * different page; otherwise just overwrite the stale data.
      */
     if (!tlb_hit_page_anyprot(te, vaddr_page)) {
-        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
         /* Evict the old entry into the victim tlb.  */

include/exec/cpu-defs.h

@@ -150,6 +150,8 @@ typedef struct CPUTLBDesc {
      */
     target_ulong large_page_addr;
     target_ulong large_page_mask;
+    /* The next index to use in the tlb victim table.  */
+    size_t vindex;
 } CPUTLBDesc;
 
 /*
@@ -178,8 +180,7 @@ typedef struct CPUTLBCommon {
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];      \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];           \
     CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];        \
-    size_t tlb_flush_count;                                    \
-    target_ulong vtlb_index;                                   \
+    size_t tlb_flush_count;
 
 #else