mirror of https://gitee.com/openkylin/qemu.git
translate-all: make l1_map lockless
Groundwork for supporting parallel TCG generation. We never remove
entries from the radix tree, so we can use cmpxchg to implement
lockless insertions.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 1e05197f24
commit 78722ed0b8
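The pattern at the heart of the change, in isolation: allocate the new radix-tree level first, then publish it with a compare-and-swap; whoever loses the race frees its copy and adopts the pointer that won. A minimal standalone sketch of that idea follows (plain C11 atomics and calloc/free stand in for QEMU's atomic_cmpxchg()/g_new0() helpers; all names here are illustrative, not QEMU's):

/*
 * Standalone sketch (not QEMU code) of the insert-or-lose-the-race pattern
 * described above.  Because entries are never removed, a loser can simply
 * free its allocation and adopt the winner's pointer.
 */
#include <stdatomic.h>
#include <stdlib.h>

#define LEVEL_SIZE 512                  /* arbitrary fan-out for this sketch */

typedef struct Level {
    _Atomic(struct Level *) slots[LEVEL_SIZE];
} Level;

static Level *get_or_insert_level(_Atomic(Level *) *slot)
{
    Level *p = atomic_load_explicit(slot, memory_order_acquire);

    if (p == NULL) {
        Level *expected = NULL;
        /* zero-initialised, like g_new0(); failure handling omitted */
        Level *fresh = calloc(1, sizeof(*fresh));

        if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
            p = fresh;          /* we published the new level */
        } else {
            free(fresh);        /* someone beat us to it */
            p = expected;       /* use the pointer they published */
        }
    }
    return p;
}

Losing the race costs one extra allocation and free, which is acceptable because concurrent first-touch of the same slot is rare.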
@@ -469,20 +469,12 @@ static void page_init(void)
 #endif
 }
 
-/* If alloc=1:
- * Called with tb_lock held for system emulation.
- * Called with mmap_lock held for user-mode emulation.
- */
 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 {
     PageDesc *pd;
     void **lp;
     int i;
 
-    if (alloc) {
-        assert_memory_lock();
-    }
-
     /* Level 1. Always allocated. */
     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 
@@ -491,11 +483,17 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
         void **p = atomic_rcu_read(lp);
 
         if (p == NULL) {
+            void *existing;
+
             if (!alloc) {
                 return NULL;
             }
             p = g_new0(void *, V_L2_SIZE);
-            atomic_rcu_set(lp, p);
+            existing = atomic_cmpxchg(lp, NULL, p);
+            if (unlikely(existing)) {
+                g_free(p);
+                p = existing;
+            }
         }
 
         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
@@ -503,11 +501,17 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 
     pd = atomic_rcu_read(lp);
     if (pd == NULL) {
+        void *existing;
+
         if (!alloc) {
             return NULL;
         }
         pd = g_new0(PageDesc, V_L2_SIZE);
-        atomic_rcu_set(lp, pd);
+        existing = atomic_cmpxchg(lp, NULL, pd);
+        if (unlikely(existing)) {
+            g_free(pd);
+            pd = existing;
+        }
     }
 
     return pd + (index & (V_L2_SIZE - 1));
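For context (unchanged by this commit), lookup-only callers in translate-all.c reach this walk with alloc=0, in which case the loop only performs atomic_rcu_read() loads and never touches the tree; a thin wrapper roughly like the sketch below is assumed here:

/* Sketch of the lookup-only helper; the real one in translate-all.c
 * is equivalent in spirit but may differ in detail. */
static PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}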
@@ -134,8 +134,8 @@ tb_set_jmp_target() code. Modification to the linked lists that allow
 searching for linked pages are done under the protect of the
 tb_lock().
 
-The global page table is protected by the tb_lock() in system-mode and
-mmap_lock() in linux-user mode.
+The global page table is a lockless radix tree; cmpxchg is used
+to atomically insert new elements.
 
 The lookup caches are updated atomically and the lookup hash uses QHT
 which is designed for concurrent safe lookup.
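Because entries are never removed, a pointer observed once stays valid forever, so readers need nothing beyond the acquire-style load. A hypothetical stress test (plain C11 atomics plus pthreads, not QEMU code) illustrating how the cmpxchg insertion resolves a race on one slot:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Node { int dummy; } Node;

static _Atomic(Node *) shared_slot;          /* one slot of a notional tree */

static void *worker(void *arg)
{
    Node *p;

    (void)arg;
    p = atomic_load_explicit(&shared_slot, memory_order_acquire);
    if (p == NULL) {
        Node *expected = NULL;
        Node *fresh = calloc(1, sizeof(*fresh));

        if (atomic_compare_exchange_strong(&shared_slot, &expected, fresh)) {
            p = fresh;                       /* this thread won the race */
        } else {
            free(fresh);                     /* lost: discard our copy */
            p = expected;
        }
    }
    return p;                                /* same pointer for every thread */
}

int main(void)
{
    enum { NTHREADS = 8 };
    pthread_t th[NTHREADS];
    void *res[NTHREADS];

    for (int i = 0; i < NTHREADS; i++) {
        pthread_create(&th[i], NULL, worker, NULL);
    }
    for (int i = 0; i < NTHREADS; i++) {
        pthread_join(th[i], &res[i]);
        assert(res[i] == res[0]);            /* all agree on one node */
    }
    printf("all %d threads agree on %p\n", NTHREADS, res[0]);
    return 0;
}

Every thread ends up with the same node; the only cost of losing the race is one extra allocation that is immediately freed.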