mm/migrate: Add folio_migrate_mapping()
Reimplement migrate_page_move_mapping() as a wrapper around
folio_migrate_mapping(). Saves 193 bytes of kernel text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit 3417013e0d
parent d9c08e2232
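For reference, a minimal sketch (not part of this commit) of how a caller that still holds struct page pointers maps onto the new API, mirroring what the compatibility wrapper in the diff below does; the example_migratepage name is illustrative only:

	/* Illustrative only: convert both pages to folios, then use the new entry point. */
	static int example_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page)
	{
		struct folio *newfolio = page_folio(newpage);
		struct folio *folio = page_folio(page);

		return folio_migrate_mapping(mapping, newfolio, folio, 0);
	}

The legacy migrate_page_move_mapping() keeps its exported signature, so existing callers need no source change.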
include/linux/migrate.h

@@ -57,6 +57,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
mm/folio-compat.c

@@ -4,6 +4,7 @@
  * eventually.
  */
 
+#include <linux/migrate.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 
@@ -48,3 +49,13 @@ void mark_page_accessed(struct page *page)
 	folio_mark_accessed(page_folio(page));
 }
 EXPORT_SYMBOL(mark_page_accessed);
+
+#ifdef CONFIG_MIGRATION
+int migrate_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page, int extra_count)
+{
+	return folio_migrate_mapping(mapping, page_folio(newpage),
+					page_folio(page), extra_count);
+}
+EXPORT_SYMBOL(migrate_page_move_mapping);
+#endif
mm/migrate.c (85 lines changed)
@@ -364,7 +364,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
 	 */
 	expected_count += is_device_private_page(page);
 	if (mapping)
-		expected_count += thp_nr_pages(page) + page_has_private(page);
+		expected_count += compound_nr(page) + page_has_private(page);
 
 	return expected_count;
 }
@@ -377,74 +377,75 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-int migrate_page_move_mapping(struct address_space *mapping,
-		struct page *newpage, struct page *page, int extra_count)
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count)
 {
-	XA_STATE(xas, &mapping->i_pages, page_index(page));
+	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = expected_page_refs(mapping, page) + extra_count;
-	int nr = thp_nr_pages(page);
+	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
+	long nr = folio_nr_pages(folio);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
-		if (page_count(page) != expected_count)
+		if (folio_ref_count(folio) != expected_count)
 			return -EAGAIN;
 
 		/* No turning back from here */
-		newpage->index = page->index;
-		newpage->mapping = page->mapping;
-		if (PageSwapBacked(page))
-			__SetPageSwapBacked(newpage);
+		newfolio->index = folio->index;
+		newfolio->mapping = folio->mapping;
+		if (folio_test_swapbacked(folio))
+			__folio_set_swapbacked(newfolio);
 
 		return MIGRATEPAGE_SUCCESS;
 	}
 
-	oldzone = page_zone(page);
-	newzone = page_zone(newpage);
+	oldzone = folio_zone(folio);
+	newzone = folio_zone(newfolio);
 
 	xas_lock_irq(&xas);
-	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+	if (folio_ref_count(folio) != expected_count ||
+	    xas_load(&xas) != folio) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
-	if (!page_ref_freeze(page, expected_count)) {
+	if (!folio_ref_freeze(folio, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	/*
-	 * Now we know that no one else is looking at the page:
+	 * Now we know that no one else is looking at the folio:
 	 * no turning back from here.
 	 */
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
-	page_ref_add(newpage, nr); /* add cache reference */
-	if (PageSwapBacked(page)) {
-		__SetPageSwapBacked(newpage);
-		if (PageSwapCache(page)) {
-			SetPageSwapCache(newpage);
-			set_page_private(newpage, page_private(page));
+	newfolio->index = folio->index;
+	newfolio->mapping = folio->mapping;
+	folio_ref_add(newfolio, nr); /* add cache reference */
+	if (folio_test_swapbacked(folio)) {
+		__folio_set_swapbacked(newfolio);
+		if (folio_test_swapcache(folio)) {
+			folio_set_swapcache(newfolio);
+			newfolio->private = folio_get_private(folio);
 		}
 	} else {
-		VM_BUG_ON_PAGE(PageSwapCache(page), page);
+		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 	}
 
 	/* Move dirty while page refs frozen and newpage not yet exposed */
-	dirty = PageDirty(page);
+	dirty = folio_test_dirty(folio);
 	if (dirty) {
-		ClearPageDirty(page);
-		SetPageDirty(newpage);
+		folio_clear_dirty(folio);
+		folio_set_dirty(newfolio);
 	}
 
-	xas_store(&xas, newpage);
-	if (PageTransHuge(page)) {
+	xas_store(&xas, newfolio);
+	if (nr > 1) {
 		int i;
 
 		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
-			xas_store(&xas, newpage);
+			xas_store(&xas, newfolio);
 		}
 	}
@@ -453,7 +454,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - nr);
+	folio_ref_unfreeze(folio, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -472,18 +473,18 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		struct lruvec *old_lruvec, *new_lruvec;
 		struct mem_cgroup *memcg;
 
-		memcg = page_memcg(page);
+		memcg = folio_memcg(folio);
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
-		if (PageSwapBacked(page) && !PageSwapCache(page)) {
+		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 #ifdef CONFIG_SWAP
-		if (PageSwapCache(page)) {
+		if (folio_test_swapcache(folio)) {
 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 		}
@@ -499,11 +500,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
 	return MIGRATEPAGE_SUCCESS;
 }
-EXPORT_SYMBOL(migrate_page_move_mapping);
+EXPORT_SYMBOL(folio_migrate_mapping);
 
 /*
  * The expected number of remaining references is the same as that
- * of migrate_page_move_mapping().
+ * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page)
@@ -564,7 +565,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
 	if (PageMappedToDisk(page))
 		SetPageMappedToDisk(newpage);
 
-	/* Move dirty on pages not done by migrate_page_move_mapping() */
+	/* Move dirty on pages not done by folio_migrate_mapping() */
 	if (PageDirty(page))
 		SetPageDirty(newpage);
 
@@ -640,11 +641,13 @@ int migrate_page(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		enum migrate_mode mode)
 {
+	struct folio *newfolio = page_folio(newpage);
+	struct folio *folio = page_folio(page);
 	int rc;
 
-	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
+	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
+	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -2470,7 +2473,7 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
  * @page: struct page to check
  *
  * Pinned pages cannot be migrated. This is the same test as in
- * migrate_page_move_mapping(), except that here we allow migration of a
+ * folio_migrate_mapping(), except that here we allow migration of a
  * ZONE_DEVICE page.
  */
 static bool migrate_vma_check_page(struct page *page)