powerpc: Replace find_linux_pte with find_linux_pte_or_hugepte
Replace find_linux_pte with find_linux_pte_or_hugepte and explicitly
document why we don't need to handle transparent hugepages at callsites.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit 12bc9f6fc1
parent ac52ae4721
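Note: every converted callsite below lands on the same pattern — walk the page
tables with find_linux_pte_or_hugepte(), bail on a NULL pte, then either assert
that no huge page can appear (WARN_ON(hugepage_shift)) or branch on the returned
shift. A minimal sketch of that pattern, using the signature visible in the
hunks; the helper name ea_to_phys_sketch is hypothetical and kernel context
(pgd_t, pte_pfn, PAGE_SHIFT, WARN_ON) is assumed:

	/*
	 * Sketch of the callsite pattern this commit converts to.
	 * find_linux_pte_or_hugepte() walks the page tables and, when the
	 * mapping is a huge page, reports its size through the shift argument.
	 */
	static unsigned long ea_to_phys_sketch(pgd_t *pgdir, unsigned long ea)
	{
		unsigned hugepage_shift;
		pte_t *ptep;

		ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
		if (!ptep)
			return 0;		/* no translation */

		/* callers that can never see huge pages assert instead of handling them */
		WARN_ON(hugepage_shift);

		return pte_pfn(*ptep) << PAGE_SHIFT;
	}

Callers that have no use for the shift at all pass NULL for the third argument
instead, as the real_vmalloc_addr() and huge_pte_offset() hunks below show.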
@@ -344,30 +344,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 
-/*
- * find_linux_pte returns the address of a linux pte for a given
- * effective address and directory. If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
-{
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	pte_t *pt = NULL;
-
-	pg = pgdir + pgd_index(ea);
-	if (!pgd_none(*pg)) {
-		pu = pud_offset(pg, ea);
-		if (!pud_none(*pu)) {
-			pm = pmd_offset(pu, ea);
-			if (pmd_present(*pm))
-				pt = pte_offset_kernel(pm, ea);
-		}
-	}
-	return pt;
-}
-
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -260,10 +260,15 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
 	pte_t *ptep;
 	unsigned long pa;
+	int hugepage_shift;
 
-	ptep = find_linux_pte(init_mm.pgd, token);
+	/*
+	 * We won't find hugepages here, iomem
+	 */
+	ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
 	if (!ptep)
 		return token;
+	WARN_ON(hugepage_shift);
 	pa = pte_pfn(*ptep) << PAGE_SHIFT;
 
 	return pa | (token & (PAGE_SIZE-1));
@@ -55,6 +55,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
+	unsigned hugepage_shift;
 	struct iowa_bus *bus;
 	int token;
 
@@ -70,11 +71,17 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 	if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
 		return NULL;
 
-	ptep = find_linux_pte(init_mm.pgd, vaddr);
+	ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
+					 &hugepage_shift);
 	if (ptep == NULL)
 		paddr = 0;
-	else
+	else {
+		/*
+		 * we don't have hugepages backing iomem
+		 */
+		WARN_ON(hugepage_shift);
 		paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+	}
 	bus = iowa_pci_find(vaddr, paddr);
 
 	if (bus == NULL)
@@ -27,7 +27,7 @@ static void *real_vmalloc_addr(void *x)
 	unsigned long addr = (unsigned long) x;
 	pte_t *p;
 
-	p = find_linux_pte(swapper_pg_dir, addr);
+	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
 	if (!p || !pte_present(*p))
 		return NULL;
 	/* assume we don't have huge pages in vmalloc space... */
@@ -1145,6 +1145,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
 {
+	int hugepage_shift;
 	unsigned long vsid;
 	pgd_t *pgdir;
 	pte_t *ptep;
@@ -1166,10 +1167,15 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
 		return;
-	ptep = find_linux_pte(pgdir, ea);
+	/*
+	 * THP pages use update_mmu_cache_pmd. We don't do
+	 * hash preload there. Hence can ignore THP here
+	 */
+	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
 	if (!ptep)
 		return;
 
+	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
@@ -105,6 +105,7 @@ int pgd_huge(pgd_t pgd)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
+	/* Only called for hugetlbfs pages, hence can ignore THP */
 	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }
 
@@ -673,11 +674,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	struct page *page;
 	unsigned shift;
 	unsigned long mask;
-
+	/*
+	 * Transparent hugepages are handled by generic code. We can skip them
+	 * here.
+	 */
 	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 
 	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift)
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
 		return ERR_PTR(-EINVAL);
 
 	mask = (1UL << shift) - 1;
@@ -189,6 +189,7 @@ void tlb_flush(struct mmu_gather *tlb)
 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
+	int hugepage_shift;
 	unsigned long flags;
 
 	start = _ALIGN_DOWN(start, PAGE_SIZE);
@@ -206,7 +207,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_linux_pte(mm->pgd, start);
+		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
+							&hugepage_shift);
 		unsigned long pte;
 
 		if (ptep == NULL)
@@ -214,7 +216,10 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 		pte = pte_val(*ptep);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
-		hpte_need_flush(mm, start, ptep, pte, 0);
+		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+		else
+			hpte_need_flush(mm, start, ptep, pte, 0);
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);