m68k: mm: Use table allocator for pgtables

With the new page-table layout, using full (4k) pages for (256 byte)
pte-tables is immensely wasteful. Move the pte-tables over to the
same allocator already used for the (512 byte) higher level tables
(pgd/pmd).

This reduces the pte-table waste from 15x to 2x.

Due to no longer being bound to 16 consecutive tables, this might
actually already be more efficient than the old code for sparse
tables.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.825295149@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
This commit is contained in:
Peter Zijlstra 2020-01-31 13:45:38 +01:00 committed by Geert Uytterhoeven
parent ef9285f69f
commit 61c64a25ae
3 changed files with 21 additions and 36 deletions

View File

@ -13,54 +13,28 @@ extern int free_pointer_table(pmd_t *);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (pte)
mmu_page_ctor(pte);
return pte;
return (pte_t *)get_pointer_table();
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
mmu_page_dtor(pte);
free_page((unsigned long) pte);
free_pointer_table((void *)pte);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
mmu_page_ctor(kmap(page));
kunmap(page);
return page;
return (pte_t *)get_pointer_table();
}
static inline void pte_free(struct mm_struct *mm, pgtable_t page)
static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
{
pgtable_pte_page_dtor(page);
mmu_page_dtor(kmap(page));
kunmap(page);
__free_page(page);
free_pointer_table((void *)pgtable);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
{
pgtable_pte_page_dtor(page);
mmu_page_dtor(kmap(page));
kunmap(page);
__free_page(page);
free_pointer_table((void *)pgtable);
}
@ -99,9 +73,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
{
pmd_set(pmd, page_address(page));
pmd_set(pmd, page);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{

View File

@ -144,7 +144,13 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
#define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pmd_present(pmd) (pmd_val(pmd) & _PAGE_TABLE)
#define pmd_clear(pmdp) ({ pmd_val(*pmdp) = 0; })
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
/*
* m68k does not have huge pages (020/030 actually could), but generic code
* expects pmd_page() to exists, only to then DCE it all. Provide a dummy to
* make the compiler happy.
*/
#define pmd_page(pmd) NULL
#define pud_none(pud) (!pud_val(pud))

View File

@ -30,7 +30,12 @@ typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
typedef struct page *pgtable_t;
#else
typedef pte_t *pgtable_t;
#endif
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)