mm, migrate: remove reason argument from new_page_t
No allocation callback is using this argument anymore. new_page_node used to use this parameter to convey the node id, respectively a migration error, up to the move_pages code (do_move_page_to_node_array). The error status never made it into the final status field, and we now have a better way to communicate the node id to the status field. All other allocation callbacks simply ignored the argument, so we can finally drop it.

[mhocko@suse.com: fix migration callback]
  Link: http://lkml.kernel.org/r/20180105085259.GH2801@dhcp22.suse.cz
[akpm@linux-foundation.org: fix alloc_misplaced_dst_page()]
[mhocko@kernel.org: fix build]
  Link: http://lkml.kernel.org/r/20180103091134.GB11319@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20180103082555.14592-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: Andrea Reale <ar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 666feb21a0
parent a49bd4d716
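The interface change itself is small; a before/after sketch of the callback type, drawn from the hunks below:

/* Before: every new_page_t implementation had to take a result pointer
 * which (almost) nobody used.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);

/* After: the callback gets only the page and the caller's private cookie;
 * the returned page pointer alone reports success or failure.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private);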
@@ -75,8 +75,7 @@ EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 /*
  * Taken from alloc_migrate_target with changes to remove CMA allocations
  */
-struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
-			int **resultp)
+struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
 {
 	gfp_t gfp_mask = GFP_USER;
 	struct page *new_page;
@@ -7,8 +7,7 @@
 #include <linux/migrate_mode.h>
 #include <linux/hugetlb.h>
 
-typedef struct page *new_page_t(struct page *page, unsigned long private,
-				int **reason);
+typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
 /*
@@ -63,7 +63,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 			bool skip_hwpoisoned_pages);
 
-struct page *alloc_migrate_target(struct page *page, unsigned long private,
-				  int **resultp);
+struct page *alloc_migrate_target(struct page *page, unsigned long private);
 
 #endif
@@ -1166,8 +1166,7 @@ static void isolate_freepages(struct compact_control *cc)
  * from the isolated freelists in the block we are migrating to.
  */
 static struct page *compaction_alloc(struct page *migratepage,
-					unsigned long data,
-					int **result)
+					unsigned long data)
 {
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
@@ -538,5 +538,5 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 }
 
 void setup_zone_pageset(struct zone *zone);
-extern struct page *alloc_new_node_page(struct page *page, unsigned long node, int **x);
+extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
 #endif	/* __MM_INTERNAL_H */
@@ -1487,7 +1487,7 @@ int unpoison_memory(unsigned long pfn)
 }
 EXPORT_SYMBOL(unpoison_memory);
 
-static struct page *new_page(struct page *p, unsigned long private, int **x)
+static struct page *new_page(struct page *p, unsigned long private)
 {
 	int nid = page_to_nid(p);
 
@@ -1329,8 +1329,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 	return 0;
 }
 
-static struct page *new_node_page(struct page *page, unsigned long private,
-		int **result)
+static struct page *new_node_page(struct page *page, unsigned long private)
 {
 	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
@@ -943,7 +943,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 }
 
 /* page allocation callback for NUMA node migration */
-struct page *alloc_new_node_page(struct page *page, unsigned long node, int **x)
+struct page *alloc_new_node_page(struct page *page, unsigned long node)
 {
 	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
@@ -1108,7 +1108,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_page(struct page *page, unsigned long start, int **x)
+static struct page *new_page(struct page *page, unsigned long start)
 {
 	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
@@ -1153,7 +1153,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_page(struct page *page, unsigned long start, int **x)
+static struct page *new_page(struct page *page, unsigned long start)
 {
 	return NULL;
 }
mm/migrate.c (21 lines changed)
@@ -1137,10 +1137,9 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 				   enum migrate_reason reason)
 {
 	int rc = MIGRATEPAGE_SUCCESS;
-	int *result = NULL;
 	struct page *newpage;
 
-	newpage = get_new_page(page, private, &result);
+	newpage = get_new_page(page, private);
 	if (!newpage)
 		return -ENOMEM;
 
@@ -1231,12 +1230,6 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 		put_page(newpage);
 	}
 
-	if (result) {
-		if (rc)
-			*result = rc;
-		else
-			*result = page_to_nid(newpage);
-	}
 	return rc;
 }
 
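Callers of migrate_pages() need no change beyond their callbacks now matching the two-argument signature. As a rough usage sketch (modelled on the compaction call site of this kernel era; illustrative, not part of this diff):

	/* Hand compaction's callbacks and its compact_control cookie to
	 * migrate_pages(); compaction_alloc() now has the two-argument
	 * new_page_t signature shown above.
	 */
	err = migrate_pages(&cc->migratepages, compaction_alloc,
			    compaction_free, (unsigned long)cc, cc->mode,
			    MR_COMPACTION);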
@@ -1264,7 +1257,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				enum migrate_mode mode, int reason)
 {
 	int rc = -EAGAIN;
-	int *result = NULL;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
 	struct anon_vma *anon_vma = NULL;
@@ -1281,7 +1273,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOSYS;
 	}
 
-	new_hpage = get_new_page(hpage, private, &result);
+	new_hpage = get_new_page(hpage, private);
 	if (!new_hpage)
 		return -ENOMEM;
 
@@ -1345,12 +1337,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	else
 		putback_active_hugepage(new_hpage);
 
-	if (result) {
-		if (rc)
-			*result = rc;
-		else
-			*result = page_to_nid(new_hpage);
-	}
 	return rc;
 }
 
@@ -1828,8 +1814,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
 }
 
 static struct page *alloc_misplaced_dst_page(struct page *page,
-					   unsigned long data,
-					   int **result)
+					   unsigned long data)
 {
 	int nid = (int) data;
 	struct page *newpage;
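Any other new_page_t implementation converts the same way: drop the third parameter and any store through it. A hypothetical example (the callback name and allocation policy below are made up, not taken from this commit):

/* Hypothetical callback after the change: allocate the destination page on
 * the node passed in via the private cookie and report the outcome through
 * the return value alone.
 */
static struct page *my_alloc_dst_page(struct page *page, unsigned long private)
{
	int nid = (int)private;		/* caller-chosen target node */

	return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}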
@@ -309,8 +309,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	return pfn < end_pfn ? -EBUSY : 0;
 }
 
-struct page *alloc_migrate_target(struct page *page, unsigned long private,
-				  int **resultp)
+struct page *alloc_migrate_target(struct page *page, unsigned long private)
 {
 	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
 }