kmemcheck: move hook into __alloc_pages_nodemask() for the page allocator
Currently kmemcheck_pagealloc_alloc() is only called from __alloc_pages_slowpath():

__alloc_pages_nodemask()
	__alloc_pages_slowpath()
		kmemcheck_pagealloc_alloc()

So pages allocated through the following path are not tracked by kmemcheck:

__alloc_pages_nodemask()
	get_page_from_freelist()

Move kmemcheck_pagealloc_alloc() into __alloc_pages_nodemask(), like this:

__alloc_pages_nodemask()
	...
	get_page_from_freelist()
	if (!page)
		__alloc_pages_slowpath()
	kmemcheck_pagealloc_alloc()
	...

Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Li Zefan <lizefan@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 23f086f962
parent 91fbdc0f89
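Purely as illustration, here is a minimal standalone userspace C model of the hook placement after the move. The *_model names are hypothetical stand-ins for the real mm/page_alloc.c symbols; the exact kernel change is in the hunks below.

/*
 * Standalone model (not kernel code) of the hook placement described above.
 * All names are illustrative stand-ins for the real mm/page_alloc.c symbols.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool kmemcheck_enabled = true;

/* Fast path: succeeds most of the time. */
static void *get_page_from_freelist_model(void)
{
	return malloc(64);
}

/* Slow path: only tried when the fast path fails. */
static void *alloc_pages_slowpath_model(void)
{
	return malloc(64);
}

/* The tracking hook; before the patch it only ran on the slow path. */
static void kmemcheck_pagealloc_alloc_model(void *page)
{
	printf("kmemcheck: tracking page %p\n", page);
}

/*
 * Top-level entry point: with the hook here, pages returned by both the
 * fast path and the slow path are tracked.
 */
static void *alloc_pages_nodemask_model(void)
{
	void *page = get_page_from_freelist_model();

	if (!page)
		page = alloc_pages_slowpath_model();

	if (kmemcheck_enabled && page)
		kmemcheck_pagealloc_alloc_model(page);

	return page;
}

int main(void)
{
	void *page = alloc_pages_nodemask_model();

	free(page);
	return 0;
}

With the hook in the common entry point, the fast path no longer escapes tracking, and the slow path does not need its own call.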
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2842,11 +2842,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 nopage:
 	warn_alloc_failed(gfp_mask, order, NULL);
-	return page;
 got_pg:
-	if (kmemcheck_enabled)
-		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
 	return page;
 }
 
@@ -2916,6 +2912,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 				preferred_zone, classzone_idx, migratetype);
 	}
 
+	if (kmemcheck_enabled && page)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
 	trace_mm_page_alloc(page, order, alloc_mask, migratetype);
 
 out: