mm/page_owner: initialize page owner without holding the zone lock

It's not necessary to initialize page_owner while holding the zone lock.
Doing so only adds contention on the zone lock; that is not a big problem
since page_owner is just a debugging feature, but avoiding it is still an
improvement.  This is also a preparation step for using stackdepot in the
page owner feature: stackdepot allocates new pages when it has no reserved
space left, and holding the zone lock at that point would cause a deadlock.
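
The hazard being prepared for is the classic self-deadlock of calling,
while holding a spinlock, code that may itself need that same lock:
set_page_owner() will go through stackdepot, and stackdepot may have to
allocate pages, which can take zone->lock again.  Below is a minimal
user-space model of the old versus new ordering (illustrative only;
zone_lock and record_owner() are stand-ins, not kernel APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for set_page_owner()/stackdepot: may need the allocator. */
static void record_owner(void)
{
	if (pthread_mutex_trylock(&zone_lock) != 0) {
		printf("would deadlock: zone_lock already held\n");
		return;
	}
	/* ... allocate backing storage for the stack trace ... */
	pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	/* Old ordering: owner recorded while the lock is held. */
	pthread_mutex_lock(&zone_lock);
	record_owner();			/* may need zone_lock -> deadlock */
	pthread_mutex_unlock(&zone_lock);

	/* New ordering: drop the lock first, then record the owner. */
	pthread_mutex_lock(&zone_lock);
	pthread_mutex_unlock(&zone_lock);
	record_owner();			/* safe: lock no longer held */
	return 0;
}

In the kernel the failure is not recoverable the way the trylock above is:
the CPU would spin forever on a zone->lock it already holds, so the only
fix is to reorder the work, which is what the hunks below do.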

Link: http://lkml.kernel.org/r/1464230275-25791-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 83358ece26
parent 66c64223ad
Author: Joonsoo Kim, 2016-07-26 15:23:43 -07:00 (committed by Linus Torvalds)
3 changed files with 9 additions and 5 deletions

mm/compaction.c

@@ -19,6 +19,7 @@
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"

 #ifdef CONFIG_COMPACTION
@@ -79,6 +80,8 @@ static void map_pages(struct list_head *list)
 		arch_alloc_page(page, order);
 		kernel_map_pages(page, nr_pages, 1);
 		kasan_alloc_pages(page, order);
+		set_page_owner(page, order, __GFP_MOVABLE);
+
 		if (order)
 			split_page(page, order);

mm/page_alloc.c

@@ -2509,8 +2509,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);

-	set_page_owner(page, order, __GFP_MOVABLE);
-
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;

mm/page_isolation.c

@@ -7,6 +7,7 @@
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
+#include <linux/page_owner.h>
 #include "internal.h"

 #define CREATE_TRACE_POINTS
@@ -108,8 +109,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		if (pfn_valid_within(page_to_pfn(buddy)) &&
 		    !is_migrate_isolate_page(buddy)) {
 			__isolate_free_page(page, order);
-			kernel_map_pages(page, (1 << order), 1);
-			set_page_refcounted(page);
 			isolated_page = page;
 		}
 	}
@@ -128,8 +127,12 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (isolated_page)
+	if (isolated_page) {
+		kernel_map_pages(page, (1 << order), 1);
+		set_page_refcounted(page);
+		set_page_owner(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
+	}
 }

 static inline struct page *
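
For readability, here is how the tail of unset_migratetype_isolate() reads
once the hunk above is applied (a consolidated excerpt of the lines shown,
not a standalone buildable snippet): everything that maps the page, sets
its refcount or initializes page_owner now runs only after zone->lock has
been released.

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		/* zone->lock is no longer held here. */
		kernel_map_pages(page, (1 << order), 1);
		set_page_refcounted(page);
		set_page_owner(page, order, __GFP_MOVABLE);
		__free_pages(isolated_page, order);
	}
}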