mm/page_owner: introduce split_page_owner and replace manual handling
split_page() calls set_page_owner() to set up page_owner for each page. But it has a drawback: the head page and the other pages end up with different stacktraces because the callsite of set_page_owner() is slightly different. To avoid this problem, this patch copies the head page's page_owner to the others. It needs to introduce a new function, split_page_owner(), but it also removes another function, get_page_owner_gfp(), so it looks like a worthwhile trade. Link: http://lkml.kernel.org/r/1464230275-25791-4-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Minchan Kim <minchan@kernel.org> Cc: Alexander Potapenko <glider@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a8efe1c982
commit
a9627bc5e3
|
@ -10,7 +10,7 @@ extern struct page_ext_operations page_owner_ops;
|
||||||
extern void __reset_page_owner(struct page *page, unsigned int order);
|
extern void __reset_page_owner(struct page *page, unsigned int order);
|
||||||
extern void __set_page_owner(struct page *page,
|
extern void __set_page_owner(struct page *page,
|
||||||
unsigned int order, gfp_t gfp_mask);
|
unsigned int order, gfp_t gfp_mask);
|
||||||
extern gfp_t __get_page_owner_gfp(struct page *page);
|
extern void __split_page_owner(struct page *page, unsigned int order);
|
||||||
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
|
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
|
||||||
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
|
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
|
||||||
extern void __dump_page_owner(struct page *page);
|
extern void __dump_page_owner(struct page *page);
|
||||||
|
@ -28,12 +28,10 @@ static inline void set_page_owner(struct page *page,
|
||||||
__set_page_owner(page, order, gfp_mask);
|
__set_page_owner(page, order, gfp_mask);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline gfp_t get_page_owner_gfp(struct page *page)
|
static inline void split_page_owner(struct page *page, unsigned int order)
|
||||||
{
|
{
|
||||||
if (static_branch_unlikely(&page_owner_inited))
|
if (static_branch_unlikely(&page_owner_inited))
|
||||||
return __get_page_owner_gfp(page);
|
__split_page_owner(page, order);
|
||||||
else
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
|
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
|
||||||
{
|
{
|
||||||
|
@ -58,9 +56,9 @@ static inline void set_page_owner(struct page *page,
|
||||||
unsigned int order, gfp_t gfp_mask)
|
unsigned int order, gfp_t gfp_mask)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
static inline gfp_t get_page_owner_gfp(struct page *page)
|
static inline void split_page_owner(struct page *page,
|
||||||
|
unsigned int order)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
|
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
|
||||||
{
|
{
|
||||||
|
|
|
@ -2461,7 +2461,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
|
||||||
void split_page(struct page *page, unsigned int order)
|
void split_page(struct page *page, unsigned int order)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
gfp_t gfp_mask;
|
|
||||||
|
|
||||||
VM_BUG_ON_PAGE(PageCompound(page), page);
|
VM_BUG_ON_PAGE(PageCompound(page), page);
|
||||||
VM_BUG_ON_PAGE(!page_count(page), page);
|
VM_BUG_ON_PAGE(!page_count(page), page);
|
||||||
|
@ -2475,12 +2474,9 @@ void split_page(struct page *page, unsigned int order)
|
||||||
split_page(virt_to_page(page[0].shadow), order);
|
split_page(virt_to_page(page[0].shadow), order);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
gfp_mask = get_page_owner_gfp(page);
|
for (i = 1; i < (1 << order); i++)
|
||||||
set_page_owner(page, 0, gfp_mask);
|
|
||||||
for (i = 1; i < (1 << order); i++) {
|
|
||||||
set_page_refcounted(page + i);
|
set_page_refcounted(page + i);
|
||||||
set_page_owner(page + i, 0, gfp_mask);
|
split_page_owner(page, order);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(split_page);
|
EXPORT_SYMBOL_GPL(split_page);
|
||||||
|
|
||||||
|
|
|
@ -94,17 +94,17 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
|
||||||
page_ext->last_migrate_reason = reason;
|
page_ext->last_migrate_reason = reason;
|
||||||
}
|
}
|
||||||
|
|
||||||
gfp_t __get_page_owner_gfp(struct page *page)
|
void __split_page_owner(struct page *page, unsigned int order)
|
||||||
{
|
{
|
||||||
|
int i;
|
||||||
struct page_ext *page_ext = lookup_page_ext(page);
|
struct page_ext *page_ext = lookup_page_ext(page);
|
||||||
if (unlikely(!page_ext))
|
|
||||||
/*
|
|
||||||
* The caller just returns 0 if no valid gfp
|
|
||||||
* So return 0 here too.
|
|
||||||
*/
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
return page_ext->gfp_mask;
|
if (unlikely(!page_ext))
|
||||||
|
return;
|
||||||
|
|
||||||
|
page_ext->order = 0;
|
||||||
|
for (i = 1; i < (1 << order); i++)
|
||||||
|
__copy_page_owner(page, page + i);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __copy_page_owner(struct page *oldpage, struct page *newpage)
|
void __copy_page_owner(struct page *oldpage, struct page *newpage)
|
||||||
|
|
Loading…
Reference in New Issue