powerpc/mm: Use page fragments for allocation page table at PMD level

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Authored by Aneesh Kumar K.V on 2018-04-16 16:57:23 +05:30, committed by Michael Ellerman
parent 8a6c697b99
commit 738f964555
7 changed files with 6 additions and 23 deletions
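
In short: with this change, PMD-level page tables on book3s64 stop coming from a dedicated kmem_cache (PGT_CACHE(PMD_CACHE_INDEX)) and are instead carved out of whole pages as fixed-size fragments, the same scheme already used for PTE pages. Several PMD tables then share one backing page, and the page is released once its last fragment is freed. The following self-contained userspace C program is a minimal sketch of the fragment idea only, not the kernel implementation: the names (frag_page, frag_alloc, frag_free) and the 64K-page/4K-table sizing are illustrative assumptions, and the kernel keeps the equivalent state per-mm and in struct page rather than in a global, as the pmd_fragment_alloc(mm, addr) signature below suggests.

/*
 * Minimal userspace sketch of page-fragment allocation (illustrative
 * only; sizes and names are assumptions, not the kernel's).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 65536u		/* 64K base page */
#define FRAG_SIZE  4096u		/* one PMD-table-sized fragment (assumed) */
#define FRAGS_PER_PAGE (PAGE_SIZE / FRAG_SIZE)

struct frag_page {			/* header occupies fragment slot 0 */
	unsigned int next;		/* next never-used fragment index */
	unsigned int live;		/* fragments handed out, not yet freed */
};

static struct frag_page *cur;		/* page currently being carved */

static void *frag_alloc(void)
{
	if (!cur || cur->next == FRAGS_PER_PAGE) {
		/* Page-aligned so frag_free() can find the header by masking. */
		cur = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!cur)
			return NULL;
		cur->next = 1;		/* slot 0 holds the header */
		cur->live = 0;
	}
	cur->live++;
	return (char *)cur + FRAG_SIZE * cur->next++;
}

static void frag_free(void *frag)
{
	struct frag_page *pg =
		(void *)((uintptr_t)frag & ~(uintptr_t)(PAGE_SIZE - 1));

	if (--pg->live)
		return;
	/* No live fragments left: free the page, unless more fragments
	 * may still be carved from it. */
	if (pg != cur) {
		free(pg);
	} else if (pg->next == FRAGS_PER_PAGE) {
		cur = NULL;
		free(pg);
	}
}

int main(void)
{
	void *a = frag_alloc(), *b = frag_alloc();

	printf("two tables share one page: %p %p\n", a, b);
	frag_free(b);
	frag_free(a);	/* page kept: it is still being carved from */
	return 0;
}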

@@ -23,16 +23,6 @@
 #define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
-#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
-	defined(CONFIG_PPC_64K_PAGES)
-/*
- * only with hash 64k we need to use the second half of pmd page table
- * to store pointer to deposited pgtable_t
- */
-#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
-#else
-#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
-#endif
 /*
  * We store the slot details in the second half of page table.
  * Increase the pud level table so that hugetlb ptes can be stored

@@ -90,8 +90,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
	 * need to do this for 4k.
	 */
 #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
-	((H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) || \
-	 (H_PGD_INDEX_SIZE == H_PMD_CACHE_INDEX))
+	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
 #endif
	return pgd;
@@ -138,13 +137,12 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				pgtable_gfp_flags(mm, GFP_KERNEL));
+	return pmd_fragment_alloc(mm, addr);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+	pmd_fragment_free((unsigned long *)pmd);
 }

 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
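
With fragments in place, the two allocator hooks reduce to thin wrappers around the fragment API introduced earlier in this series. Reassembled from the hunk above, the post-patch code reads:

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	/* Carve one PMD-table-sized fragment out of a per-mm page. */
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	/* Release one fragment; its backing page goes with the last user. */
	pmd_fragment_free((unsigned long *)pmd);
}

Note that the free side no longer needs the mm: the signature suggests the fragment bookkeeping lives with the backing page itself.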

@@ -212,13 +212,13 @@ extern unsigned long __pte_index_size;
 extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
-extern unsigned long __pmd_cache_index;
 extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
-#define PMD_CACHE_INDEX __pmd_cache_index
+/* pmd table use page table fragments */
+#define PMD_CACHE_INDEX 0
 #define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table

@@ -1020,7 +1020,6 @@ void __init hash__early_init_mmu(void)
	__pud_index_size = H_PUD_INDEX_SIZE;
	__pgd_index_size = H_PGD_INDEX_SIZE;
	__pud_cache_index = H_PUD_CACHE_INDEX;
-	__pmd_cache_index = H_PMD_CACHE_INDEX;
	__pte_table_size = H_PTE_TABLE_SIZE;
	__pmd_table_size = H_PMD_TABLE_SIZE;
	__pud_table_size = H_PUD_TABLE_SIZE;

@@ -400,7 +400,7 @@ static inline void pgtable_free(void *table, int index)
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
-		kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), table);
+		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
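
For readability, here is how pgtable_free() plausibly reads once this hunk is applied, reassembled from the visible context; the PTE_INDEX label at the top and the default branch are assumptions filled in from context, not shown in the diff:

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
	default:
		BUG();	/* assumed: no other table level reaches here */
	}
}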
@@ -431,7 +431,6 @@ void __tlb_remove_table(void *_table)
 #else
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
-
	return pgtable_free(table, index);
 }
 #endif

@@ -617,7 +617,6 @@ void __init radix__early_init_mmu(void)
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
-	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;

@@ -72,8 +72,6 @@ unsigned long __pud_index_size;
 EXPORT_SYMBOL(__pud_index_size);
 unsigned long __pgd_index_size;
 EXPORT_SYMBOL(__pgd_index_size);
-unsigned long __pmd_cache_index;
-EXPORT_SYMBOL(__pmd_cache_index);
 unsigned long __pud_cache_index;
 EXPORT_SYMBOL(__pud_cache_index);
 unsigned long __pte_table_size;