/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
	[HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
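
/*
 * Worked example (an illustrative sketch, not from the original source):
 * on a build with 64 KB base pages, ADDITIONAL_HUGE_SIZE of 1 MB gives
 * ADDITIONAL_HUGE_SHIFT = __builtin_ctzl(0x100000 / 0x10000) = 4, i.e.
 * each "additional" huge page spans 2^4 = 16 consecutive L2 PTEs.
 */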

#endif

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;

	addr &= -sz;   /* Mask off any low bits in the address. */

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	if (sz >= PGDIR_SIZE) {
		BUG_ON(sz != PGDIR_SIZE &&
		       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
		return (pte_t *)pud;
	} else {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);
		if (sz >= PMD_SIZE) {
			BUG_ON(sz != PMD_SIZE &&
			       sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
			return (pte_t *)pmd;
		} else {
			if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
				panic("Unexpected page size %#lx\n", sz);
			return pte_alloc_map(mm, pmd, addr);
		}
	}
#else
	BUG_ON(sz != PMD_SIZE);
	return (pte_t *)pmd_alloc(mm, pud, addr);
#endif
}
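
/*
 * Worked example (illustrative, not from the original source): with the
 * optional 1 MB size from ADDITIONAL_HUGE_SIZE on a 64 KB-page build,
 * huge_shift[HUGE_SHIFT_PAGE] is 4, so the final branch above accepts
 * sz == PAGE_SIZE << 4 == 1 MB and hands back an ordinary L2 PTE slot;
 * PMD_SIZE and PGDIR_SIZE requests are satisfied one and two levels up.
 */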

static pte_t *get_pte(pte_t *base, int index, int level)
{
	pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	if (!pte_present(*ptep) && huge_shift[level] != 0) {
		unsigned long mask = -1UL << huge_shift[level];
		pte_t *super_ptep = base + (index & mask);
		pte_t pte = *super_ptep;
		if (pte_present(pte) && pte_super(pte))
			ptep = super_ptep;
	}
#endif
	return ptep;
}
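
/*
 * Worked example (illustrative, not from the original source): with
 * huge_shift[level] == 4, mask is ~0xfUL, so a lookup at index 0x13
 * falls back to the super-page entry at index 0x10 when that entry is
 * present and has the super bit set; all 16 indexes 0x10..0x1f share it.
 */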

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	pte_t *pte;
#endif

	/* Get the top-level page table entry. */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* We don't have four levels. */
	pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
	if (!pud_present(*pud))
		return NULL;

	/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_huge(*pud))
		return (pte_t *)pud;

	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
			       pmd_index(addr), 1);
	if (!pmd_present(*pmd))
		return NULL;
#else
	pmd = pmd_offset(pud, addr);
#endif

	/* Check for an L1 huge PTE. */
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	/* Check for an L2 huge PTE. */
	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
	if (!pte_present(*pte))
		return NULL;
	if (pte_super(*pte))
		return pte;
#endif

	return NULL;
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
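
/*
 * Illustrative arithmetic (not from the original source): for a 16 MB
 * huge page size on a 64 KB-page build, huge_page_mask(h) is ~0xffffff
 * and PAGE_MASK is ~0xffff, so align_mask becomes 0xff0000; passing it
 * to vm_unmapped_area() forces the chosen address to be 16 MB aligned.
 */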

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
static __init int __setup_hugepagesz(unsigned long ps)
{
	int log_ps = __builtin_ctzl(ps);
	int level, base_shift;

	if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
		pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
			ps);
		return -EINVAL;
	}

	if (ps > 64*1024*1024*1024UL) {
		pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
			ps >> 20);
		return -EINVAL;
	} else if (ps >= PUD_SIZE) {
		static long hv_jpage_size;
		if (hv_jpage_size == 0)
			hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
		if (hv_jpage_size != PUD_SIZE) {
			pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
				PUD_SIZE >> 20, hv_jpage_size);
			return -EINVAL;
		}
		level = 0;
		base_shift = PUD_SHIFT;
	} else if (ps >= PMD_SIZE) {
		level = 1;
		base_shift = PMD_SHIFT;
	} else if (ps > PAGE_SIZE) {
		level = 2;
		base_shift = PAGE_SHIFT;
	} else {
		pr_err("hugepagesz: huge page size %ld too small\n", ps);
		return -EINVAL;
	}

	if (log_ps != base_shift) {
		int shift_val = log_ps - base_shift;
		if (huge_shift[level] != 0) {
			int old_shift = base_shift + huge_shift[level];
			pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
				ps >> 20, (1UL << old_shift) >> 20);
			return -EINVAL;
		}
		if (hv_set_pte_super_shift(level, shift_val) != 0) {
			pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
				ps >> 20);
			return -EINVAL;
		}
		printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
		huge_shift[level] = shift_val;
	}

	hugetlb_add_hstate(log_ps - PAGE_SHIFT);

	return 0;
}
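
/*
 * Worked example (illustrative, not from the original source): on a
 * 64 KB-page build, "hugepagesz=256K" gives ps = 0x40000 and log_ps = 18
 * (even, so the power-of-four check passes); then level = 2 and
 * base_shift = PAGE_SHIFT = 16, so shift_val = 2 is handed to the
 * hypervisor and an hstate of order log_ps - PAGE_SHIFT = 2 is added.
 */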

static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
	int rc;

	if (!saw_hugepagesz) {
		saw_hugepagesz = true;
		memset(huge_shift, 0, sizeof(huge_shift));
	}
	rc = __setup_hugepagesz(memparse(opt, NULL));
	if (rc)
		hugetlb_bad_size();
	return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
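
/*
 * Usage sketch (illustrative, not from the original source): booting with
 * something like "hugepagesz=256K hugepages=512 hugepagesz=16M hugepages=8"
 * would register both sizes; the first "hugepagesz=" argument seen clears
 * the compiled-in ADDITIONAL_HUGE_SIZE default via the memset() above.
 */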

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
	if (!saw_hugepagesz) {
		BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
			     ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
		BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
			     ADDITIONAL_HUGE_SIZE);
		BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
		hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
	}
	return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */