mm/page_alloc: combine __alloc_pages and __alloc_pages_nodemask

There are only two callers of __alloc_pages() so prune the thicket of
alloc_page variants by combining the two functions together.  Current
callers of __alloc_pages() simply add an extra 'NULL' parameter and
current callers of __alloc_pages_nodemask() call __alloc_pages() instead.
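
As a quick illustration of the conversion (a sketch assembled from the hunks
below, not a verbatim caller):

	/* Before: two entry points. */
	page = __alloc_pages(gfp_mask, order, nid);
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);

	/* After: one entry point; pass NULL when no nodemask is wanted. */
	page = __alloc_pages(gfp_mask, order, nid, NULL);
	page = __alloc_pages(gfp_mask, order, nid, nmask);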

Link: https://lkml.kernel.org/r/20210225150642.2582252-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 84172f4bb7 (parent 6e5e0f286e)
Author: Matthew Wilcox (Oracle) <willy@infradead.org>, 2021-04-29 23:01:15 -07:00
Committed by: Linus Torvalds
7 changed files with 13 additions and 21 deletions

Documentation/admin-guide/mm/transhuge.rst

@@ -402,7 +402,7 @@ compact_fail
 	but failed.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.

include/linux/gfp.h

@@ -515,15 +515,8 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+							nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -535,7 +528,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*
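
With the declaration above, a caller that wants to constrain which nodes an
allocation may fall back to passes the nodemask directly. A minimal sketch
(the nodemask setup and the chosen nodes are illustrative, not from this
patch):

	nodemask_t allowed = NODE_MASK_NONE;
	struct page *page;

	node_set(0, &allowed);
	node_set(1, &allowed);
	/* Prefer node 0; fall back only within 'allowed'. NULL would permit all nodes. */
	page = __alloc_pages(GFP_KERNEL, 0, 0, &allowed);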

mm/hugetlb.c

@@ -1616,7 +1616,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else

mm/internal.h

@@ -145,10 +145,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
  * in __alloc_pages_slowpath(). All other functions pass the whole structure
  * by a const pointer.
  */
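
For reference, the structure this comment documents is struct alloc_context
in the same header; at the time its layout was roughly the following
(reproduced from memory as a sketch, not part of this patch):

	struct alloc_context {
		struct zonelist *zonelist;
		nodemask_t *nodemask;
		struct zoneref *preferred_zoneref;
		int migratetype;
		enum zone_type highest_zoneidx;
		bool spread_dirty_pages;
	};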

mm/mempolicy.c

@@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));

mm/migrate.c

@@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);

mm/page_alloc.c

@@ -5013,8 +5013,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -5076,7 +5075,7 @@ __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned