mm: move accounting updates before page_cache_tree_delete()

Move updates of various counters before page_cache_tree_delete() call.
It will be easier to batch things this way and there is no difference
whether the counters get updated before or after removal from the radix
tree.

Link: http://lkml.kernel.org/r/20171010151937.26984-5-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit 76253fbc8f
parent 59c66c5f8c
Author: Jan Kara <jack@suse.cz>
Date: 2017-11-15 17:37:22 -08:00
Committed by: Linus Torvalds <torvalds@linux-foundation.org>

1 changed file with 25 additions and 24 deletions

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -224,15 +224,8 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 		}
 	}
 
-	page_cache_tree_delete(mapping, page, shadow);
-
-	page->mapping = NULL;
-	/* Leave page->index set: truncation lookup relies upon it */
-
 	/* hugetlb pages do not participate in page cache accounting. */
-	if (PageHuge(page))
-		return;
-
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
-	if (PageSwapBacked(page)) {
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+	if (!PageHuge(page)) {
+		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+		if (PageSwapBacked(page)) {
+			__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
@@ -243,15 +236,23 @@ void __delete_from_page_cache(struct page *page, void *shadow)
-	}
-
-	/*
-	 * At this point page must be either written or cleaned by truncate.
-	 * Dirty page here signals a bug and loss of unwritten data.
-	 *
-	 * This fixes dirty accounting after removing the page entirely but
-	 * leaves PageDirty set: it has no effect for truncated page and
-	 * anyway will be cleared before returning page into buddy allocator.
-	 */
-	if (WARN_ON_ONCE(PageDirty(page)))
-		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
+		}
+
+		/*
+		 * At this point page must be either written or cleaned by
+		 * truncate.  Dirty page here signals a bug and loss of
+		 * unwritten data.
+		 *
+		 * This fixes dirty accounting after removing the page entirely
+		 * but leaves PageDirty set: it has no effect for truncated
+		 * page and anyway will be cleared before returning page into
+		 * buddy allocator.
+		 */
+		if (WARN_ON_ONCE(PageDirty(page)))
+			account_page_cleaned(page, mapping,
+					     inode_to_wb(mapping->host));
+	}
+	page_cache_tree_delete(mapping, page, shadow);
+
+	page->mapping = NULL;
+	/* Leave page->index set: truncation lookup relies upon it */
 }
 
 static void page_cache_free_page(struct address_space *mapping,
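
For context, the reason this ordering makes batching easier can be sketched in a few lines: once the counter updates no longer care whether the page is still in the radix tree, a batched delete can account each page in a plain loop and then drop the whole batch from the tree in one operation. The sketch below is illustrative only; unaccount_page() and page_cache_tree_delete_batch() are hypothetical helper names standing in for the per-page accounting and the batched tree removal, and the locking is simplified.

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/*
 * Illustrative sketch only, not the in-tree implementation:
 * unaccount_page() and page_cache_tree_delete_batch() are hypothetical
 * helpers for per-page accounting and batched radix tree removal.
 */
static void delete_from_page_cache_batch_sketch(struct address_space *mapping,
						struct pagevec *pvec)
{
	int i;

	spin_lock_irq(&mapping->tree_lock);
	for (i = 0; i < pagevec_count(pvec); i++) {
		/* Counter updates no longer depend on radix tree state. */
		unaccount_page(mapping, pvec->pages[i]);
	}
	/* One pass over the radix tree covers the whole batch. */
	page_cache_tree_delete_batch(mapping, pvec);
	spin_unlock_irq(&mapping->tree_lock);

	for (i = 0; i < pagevec_count(pvec); i++)
		page_cache_free_page(mapping, pvec->pages[i]);
}

Nothing inside the locked section depends on a page having already been removed from the tree, which is exactly the property the reordering above establishes.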