shmem: Convert shmem_add_to_page_cache to XArray
We can use xas_find_conflict() instead of radix_tree_gang_lookup_slot()
to find any conflicting entry and combine the three paths through this
function into one.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent e21a29552f
commit 552446a416

 mm/shmem.c | 81
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -577,9 +577,11 @@ static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
-				   pgoff_t index, void *expected)
+				   pgoff_t index, void *expected, gfp_t gfp)
 {
-	int error, nr = hpage_nr_pages(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
+	unsigned long i = 0;
+	unsigned long nr = 1UL << compound_order(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -591,46 +593,39 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->mapping = mapping;
 	page->index = index;
 
-	xa_lock_irq(&mapping->i_pages);
-	if (PageTransHuge(page)) {
-		void __rcu **results;
-		pgoff_t idx;
-		int i;
-
-		error = 0;
-		if (radix_tree_gang_lookup_slot(&mapping->i_pages,
-					&results, &idx, index, 1) &&
-				idx < index + HPAGE_PMD_NR) {
-			error = -EEXIST;
+	do {
+		void *entry;
+		xas_lock_irq(&xas);
+		entry = xas_find_conflict(&xas);
+		if (entry != expected)
+			xas_set_err(&xas, -EEXIST);
+		xas_create_range(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+next:
+		xas_store(&xas, page + i);
+		if (++i < nr) {
+			xas_next(&xas);
+			goto next;
 		}
-
-		if (!error) {
-			for (i = 0; i < HPAGE_PMD_NR; i++) {
-				error = radix_tree_insert(&mapping->i_pages,
-						index + i, page + i);
-				VM_BUG_ON(error);
-			}
+		if (PageTransHuge(page)) {
 			count_vm_event(THP_FILE_ALLOC);
+			__inc_node_page_state(page, NR_SHMEM_THPS);
 		}
-	} else if (!expected) {
-		error = radix_tree_insert(&mapping->i_pages, index, page);
-	} else {
-		error = shmem_replace_entry(mapping, index, expected, page);
-	}
-
-	if (!error) {
-		mapping->nrpages += nr;
-		if (PageTransHuge(page))
-			__inc_node_page_state(page, NR_SHMEM_THPS);
-		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
-		xa_unlock_irq(&mapping->i_pages);
-	} else {
+		mapping->nrpages += nr;
+		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
+		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	if (xas_error(&xas)) {
 		page->mapping = NULL;
-		xa_unlock_irq(&mapping->i_pages);
 		page_ref_sub(page, nr);
+		return xas_error(&xas);
 	}
 
-	return error;
+	return 0;
 }
 
 /*
@@ -1183,7 +1178,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	 */
 	if (!error)
 		error = shmem_add_to_page_cache(*pagep, mapping, index,
-						radswap);
+						radswap, gfp);
 	if (error != -ENOMEM) {
 		/*
 		 * Truncation and eviction use free_swap_and_cache(), which
@@ -1700,7 +1695,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 				false);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
-						swp_to_radix_entry(swap));
+						swp_to_radix_entry(swap), gfp);
 			/*
 			 * We already confirmed swap under page lock, and make
 			 * no memory allocation here, so usually no possibility
@@ -1806,13 +1801,8 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
 				PageTransHuge(page));
 		if (error)
 			goto unacct;
-		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
-				compound_order(page));
-		if (!error) {
-			error = shmem_add_to_page_cache(page, mapping, hindex,
-							NULL);
-			radix_tree_preload_end();
-		}
+		error = shmem_add_to_page_cache(page, mapping, hindex,
+						NULL, gfp & GFP_RECLAIM_MASK);
 		if (error) {
 			mem_cgroup_cancel_charge(page, memcg,
 						 PageTransHuge(page));
@@ -2281,11 +2271,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (ret)
 		goto out_release;
 
-	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
-	if (!ret) {
-		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
-		radix_tree_preload_end();
-	}
+	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+				      gfp & GFP_RECLAIM_MASK);
 	if (ret)
 		goto out_release_uncharge;
 
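For readers new to the XArray API, the following is a minimal sketch (not the kernel code itself) of the insertion pattern the converted function uses: detect any conflicting entry in the range with xas_find_conflict(), store one entry per slot, and let xas_nomem() allocate memory outside the lock and retry on -ENOMEM. The store_range() helper name is hypothetical; the real function stores page + i into each slot and also updates the shmem statistics shown in the diff above.

	/*
	 * Minimal sketch of the XArray multi-slot insertion pattern;
	 * store_range() is a hypothetical helper, not a kernel API.
	 */
	#include <linux/xarray.h>

	static int store_range(struct xarray *xa, unsigned long index,
			       unsigned int order, void *expected, void *item,
			       gfp_t gfp)
	{
		/* A multi-order cursor covering 2^order slots starting at index */
		XA_STATE_ORDER(xas, xa, index, order);
		unsigned long i, nr = 1UL << order;

		do {
			xas_lock_irq(&xas);
			/* Any entry in the range other than 'expected' is a conflict */
			if (xas_find_conflict(&xas) != expected)
				xas_set_err(&xas, -EEXIST);
			/* Ensure nodes exist for every slot; may set -ENOMEM */
			xas_create_range(&xas);
			if (xas_error(&xas))
				goto unlock;
			/* Walk the range, storing one entry per slot */
			for (i = 0; i < nr; i++) {
				xas_store(&xas, item);
				if (i + 1 < nr)
					xas_next(&xas);
			}
	unlock:
			xas_unlock_irq(&xas);
			/*
			 * On -ENOMEM, xas_nomem() allocates with 'gfp' outside
			 * the lock, clears the error, and asks us to try again.
			 */
		} while (xas_nomem(&xas, gfp));

		return xas_error(&xas);
	}

Because xas_nomem() performs the allocation-and-retry dance internally, callers no longer need to preload the tree: as the call-site hunks show, radix_tree_maybe_preload()/radix_tree_preload_end() pairs collapse into a single shmem_add_to_page_cache() call that passes gfp & GFP_RECLAIM_MASK straight down.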