arm64: switch to generic version of pte allocation
The PTE allocations in arm64 are identical to the generic ones modulo the GFP flags.

Using the generic pte_alloc_one() functions ensures that the user page tables are allocated with __GFP_ACCOUNT set.

The arm64 definition of PGALLOC_GFP is removed and replaced with GFP_PGTABLE_USER for p[gum]d_alloc_one() for the user page tables and GFP_PGTABLE_KERNEL for the kernel page tables. The KVM memory cache is now using GFP_PGTABLE_USER.

The mappings created with create_pgd_mapping() are now using GFP_PGTABLE_KERNEL.

The conversion to the generic version of pte_free_kernel() removes the NULL check for pte.

The pte_free() version on arm64 is identical to the generic one and can be simply dropped.

[cai@lca.pw: fix a bogus GFP flag in pgd_alloc()]
  Link: https://lore.kernel.org/r/1559656836-24940-1-git-send-email-cai@lca.pw/
[and fix it more]
  Link: https://lore.kernel.org/linux-mm/20190617151252.GF16810@rapoport-lnx/

Link: http://lkml.kernel.org/r/1557296232-15361-5-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
28bcf59375
commit
50f11a8a46
|
@ -13,18 +13,23 @@
|
||||||
#include <asm/cacheflush.h>
|
#include <asm/cacheflush.h>
|
||||||
#include <asm/tlbflush.h>
|
#include <asm/tlbflush.h>
|
||||||
|
|
||||||
|
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
|
||||||
|
|
||||||
#define check_pgt_cache() do { } while (0)
|
#define check_pgt_cache() do { } while (0)
|
||||||
|
|
||||||
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
|
|
||||||
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
|
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
|
||||||
|
|
||||||
#if CONFIG_PGTABLE_LEVELS > 2
|
#if CONFIG_PGTABLE_LEVELS > 2
|
||||||
|
|
||||||
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
||||||
{
|
{
|
||||||
|
gfp_t gfp = GFP_PGTABLE_USER;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
|
|
||||||
page = alloc_page(PGALLOC_GFP);
|
if (mm == &init_mm)
|
||||||
|
gfp = GFP_PGTABLE_KERNEL;
|
||||||
|
|
||||||
|
page = alloc_page(gfp);
|
||||||
if (!page)
|
if (!page)
|
||||||
return NULL;
|
return NULL;
|
||||||
if (!pgtable_pmd_page_ctor(page)) {
|
if (!pgtable_pmd_page_ctor(page)) {
|
||||||
|
@ -61,7 +66,7 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
|
||||||
|
|
||||||
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
||||||
{
|
{
|
||||||
return (pud_t *)__get_free_page(PGALLOC_GFP);
|
return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
|
static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
|
||||||
|
@ -89,42 +94,6 @@ static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
|
||||||
extern pgd_t *pgd_alloc(struct mm_struct *mm);
|
extern pgd_t *pgd_alloc(struct mm_struct *mm);
|
||||||
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
|
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
|
||||||
|
|
||||||
static inline pte_t *
|
|
||||||
pte_alloc_one_kernel(struct mm_struct *mm)
|
|
||||||
{
|
|
||||||
return (pte_t *)__get_free_page(PGALLOC_GFP);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline pgtable_t
|
|
||||||
pte_alloc_one(struct mm_struct *mm)
|
|
||||||
{
|
|
||||||
struct page *pte;
|
|
||||||
|
|
||||||
pte = alloc_pages(PGALLOC_GFP, 0);
|
|
||||||
if (!pte)
|
|
||||||
return NULL;
|
|
||||||
if (!pgtable_page_ctor(pte)) {
|
|
||||||
__free_page(pte);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
return pte;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Free a PTE table.
|
|
||||||
*/
|
|
||||||
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
|
|
||||||
{
|
|
||||||
if (ptep)
|
|
||||||
free_page((unsigned long)ptep);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
|
|
||||||
{
|
|
||||||
pgtable_page_dtor(pte);
|
|
||||||
__free_page(pte);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
|
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
|
||||||
pmdval_t prot)
|
pmdval_t prot)
|
||||||
{
|
{
|
||||||
|
|
|
@ -362,7 +362,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
|
||||||
|
|
||||||
static phys_addr_t __pgd_pgtable_alloc(int shift)
|
static phys_addr_t __pgd_pgtable_alloc(int shift)
|
||||||
{
|
{
|
||||||
void *ptr = (void *)__get_free_page(PGALLOC_GFP);
|
void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
|
||||||
BUG_ON(!ptr);
|
BUG_ON(!ptr);
|
||||||
|
|
||||||
/* Ensure the zeroed page is visible to the page table walker */
|
/* Ensure the zeroed page is visible to the page table walker */
|
||||||
|
|
|
@ -19,10 +19,12 @@ static struct kmem_cache *pgd_cache __ro_after_init;
|
||||||
|
|
||||||
pgd_t *pgd_alloc(struct mm_struct *mm)
|
pgd_t *pgd_alloc(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
|
gfp_t gfp = GFP_PGTABLE_USER;
|
||||||
|
|
||||||
if (PGD_SIZE == PAGE_SIZE)
|
if (PGD_SIZE == PAGE_SIZE)
|
||||||
return (pgd_t *)__get_free_page(PGALLOC_GFP);
|
return (pgd_t *)__get_free_page(gfp);
|
||||||
else
|
else
|
||||||
return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
|
return kmem_cache_alloc(pgd_cache, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
||||||
|
|
|
@ -129,7 +129,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
|
||||||
if (cache->nobjs >= min)
|
if (cache->nobjs >= min)
|
||||||
return 0;
|
return 0;
|
||||||
while (cache->nobjs < max) {
|
while (cache->nobjs < max) {
|
||||||
page = (void *)__get_free_page(PGALLOC_GFP);
|
page = (void *)__get_free_page(GFP_PGTABLE_USER);
|
||||||
if (!page)
|
if (!page)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
cache->objects[cache->nobjs++] = page;
|
cache->objects[cache->nobjs++] = page;
|
||||||
|
|
Loading…
Reference in New Issue