powerpc/mm/hugetlb: remove follow_huge_addr for powerpc

With generic code now handling hugetlb entries at pgd level and also
supporting hugepage directory format, we can now remove the powerpc
specific follow_huge_addr implementation.

Link: http://lkml.kernel.org/r/1494926612-23928-9-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Aneesh Kumar K.V 2017-07-06 15:39:02 -07:00 committed by Linus Torvalds
parent 50791e6de0
commit 28c057160e
1 changed file with 0 additions and 64 deletions

View File

@@ -619,11 +619,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
} while (addr = next, addr != end);
}
/*
* 64 bit book3s use generic follow_page_mask
*/
#ifdef CONFIG_PPC_BOOK3S_64
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
@@ -657,65 +652,6 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
	return page;
}
#else /* !CONFIG_PPC_BOOK3S_64 */
/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irq.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	bool is_thp;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long mask, flags;
	/* Default: -EINVAL tells the caller this is not a huge mapping. */
	struct page *page = ERR_PTR(-EINVAL);

	/*
	 * IRQs stay disabled for the entire lockless walk so the huge
	 * page cannot be split underneath us (see comment above).
	 */
	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
	if (!ptep)
		goto no_page;
	/* Snapshot the pte once; it may change concurrently otherwise. */
	pte = READ_ONCE(*ptep);
	/*
	 * Verify it is a huge page else bail.
	 * Transparent hugepages are handled by generic code. We can skip them
	 * here.
	 */
	if (!shift || is_thp)
		goto no_page;

	if (!pte_present(pte)) {
		/*
		 * Huge pte exists but is not present (e.g. swapped out):
		 * return NULL ("no page") rather than -EINVAL.
		 */
		page = NULL;
		goto no_page;
	}
	/* mask covers the offset of 'address' inside the huge page. */
	mask = (1UL << shift) - 1;
	page = pte_page(pte);
	if (page)
		/* Advance from the head page to the exact subpage. */
		page += (address & mask) / PAGE_SIZE;

no_page:
	local_irq_restore(flags);
	return page;
}
/*
 * PMD-level huge-page following is not reachable on these powerpc
 * configurations (hugepd / generic paths handle it), so any call here
 * is a kernel bug.
 */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	BUG();
	return NULL;	/* never reached; keeps the non-void return well-formed */
}
/*
 * PUD-level huge-page following is likewise never used on these powerpc
 * configurations; reaching this stub indicates a kernel bug.
 */
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write)
{
	BUG();
	return NULL;	/* never reached; keeps the non-void return well-formed */
}
#endif
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{