mm/thp: make is_huge_zero_pmd() safe and quicker

Most callers of is_huge_zero_pmd() supply a pmd already verified
present; but a few (notably zap_huge_pmd()) do not: it might be a pmd
migration entry, in which case the pfn is encoded differently from a
present pmd, so it might pass the is_huge_zero_pmd() test (though not
on x86, since L1TF forced us to protect against that); or perhaps even
crash in pmd_page() applied to a swap-like entry.
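
The hazard in rough sketch (zap_huge_pmd() really fetches the entry
while clearing it, but the shape is the same; this is not the patch's
code):

        pmd_t orig_pmd = *pmd;           /* may be a pmd migration entry */
        if (is_huge_zero_pmd(orig_pmd))  /* old helper did pmd_page() here:
                                            a false match, or a crash */
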
Make it safe by adding a pmd_present() check into is_huge_zero_pmd()
itself; and make it quicker by saving huge_zero_pfn, so that
is_huge_zero_pmd() will not need to do that pmd_page() lookup each time.
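
For reference, the reworked helper as it lands in include/linux/huge_mm.h
(taken from the first hunk below; the explanatory comments here are mine,
not part of the patch):

        /* pfn of the huge zero page, or ~0UL while none is allocated */
        extern unsigned long huge_zero_pfn;

        static inline bool is_huge_zero_pmd(pmd_t pmd)
        {
                /*
                 * pmd_pfn() is plain bit extraction, safe even on a
                 * non-present (e.g. migration) entry where pmd_page()
                 * might crash; and if such an entry's bits happen to
                 * match huge_zero_pfn, pmd_present() rejects the false
                 * positive.  READ_ONCE() pairs with the WRITE_ONCE()s
                 * that publish and invalidate huge_zero_pfn.
                 */
                return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
        }
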
__split_huge_pmd_locked() used to check pmd_trans_huge() first: that
worked, but the check is unnecessary now that is_huge_zero_pmd() itself
checks pmd_present().
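
Hence the guard there collapses to a single test, excerpted from the
final mm/huge_memory.c hunk below:

        -       if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
        +       if (is_huge_zero_pmd(*pmd)) {
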
Link: https://lkml.kernel.org/r/21ea9ca-a1f5-8b90-5e88-95fb1c49bbfa@google.com
Fixes: e71769ae52 ("mm: enable thp migration for shmem thp")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jue Wang <juew@google.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Wang Yugui <wangyugui@e16-tech.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit 3b77e8c8cd
parent 99fa8a4820
---
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
 
 static inline bool is_huge_zero_page(struct page *page)
 {
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-        return is_huge_zero_page(pmd_page(pmd));
+        return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
 }
 
 static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
         return false;
 }
 
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+        return false;
+}
+
 static inline bool is_huge_zero_pud(pud_t pud)
 {
         return false;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
 bool transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
@@ -98,6 +99,7 @@ static bool get_huge_zero_page(void)
                 __free_pages(zero_page, compound_order(zero_page));
                 goto retry;
         }
+        WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
 
         /* We take additional reference here. It will be put back by shrinker */
         atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                 struct page *zero_page = xchg(&huge_zero_page, NULL);
                 BUG_ON(zero_page == NULL);
+                WRITE_ONCE(huge_zero_pfn, ~0UL);
                 __free_pages(zero_page, compound_order(zero_page));
                 return HPAGE_PMD_NR;
         }
@@ -2071,7 +2074,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                 return;
         }
 
-        if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+        if (is_huge_zero_pmd(*pmd)) {
                 /*
                  * FIXME: Do we want to invalidate secondary mmu by calling
                  * mmu_notifier_invalidate_range() see comments below inside