mm: remove CONFIG_TRANSPARENT_HUGE_PAGECACHE
Commit e496cf3d78 ("thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE") notes
that it should be reverted when the PowerPC problem was fixed. The commit that
fixed the PowerPC problem (953c66c2b2) did not revert it; instead, it set
CONFIG_TRANSPARENT_HUGE_PAGECACHE to the same value as
CONFIG_TRANSPARENT_HUGEPAGE. Checking with Kirill and Aneesh, this was an
oversight, so remove the Kconfig symbol and undo the work of commit
e496cf3d78.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Link: http://lkml.kernel.org/r/20200318140253.6141-6-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 396bcc5299
parent a0650604a7
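The bulk of the diff below is a mechanical rename at #ifdef and IS_ENABLED()
sites, which is safe because the removed symbol was only ever defined as
def_bool y depending on TRANSPARENT_HUGEPAGE, so the two symbols were always
equal. As a rough standalone sketch of the IS_ENABLED() idiom these call sites
rely on (simplified from include/linux/kconfig.h; the real macro also handles
tristate =m options, and this self-contained approximation is for illustration
only, not kernel code):

	/*
	 * Kconfig defines CONFIG_FOO to 1 when a bool option is enabled
	 * and leaves it undefined otherwise; IS_ENABLED() maps both cases
	 * to an integer constant usable in an ordinary C `if`.
	 */
	#include <stdio.h>

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x)			___is_defined(x)
	#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option)		__is_defined(option)

	#define CONFIG_TRANSPARENT_HUGEPAGE 1	/* pretend Kconfig enabled it */
	/* an option left undefined simply evaluates to 0 below */

	int main(void)
	{
		/*
		 * Both expand to compile-time constants, so branches like
		 * `if (!IS_ENABLED(...))` are folded away by the compiler,
		 * which is why the rename in this commit is purely mechanical.
		 */
		printf("HUGEPAGE:  %d\n", IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE));
		printf("PAGECACHE: %d\n", IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE));
		return 0;
	}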
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -78,6 +78,7 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(unsigned int type, bool frontswap,
 		       unsigned long *fs_pages_to_unuse);
 
+extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
@@ -114,15 +115,6 @@ static inline bool shmem_file(struct file *file)
 extern bool shmem_charge(struct inode *inode, long pages);
 extern void shmem_uncharge(struct inode *inode, long pages);
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
-#else
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
-{
-	return false;
-}
-#endif
-
 #ifdef CONFIG_SHMEM
 extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 				  struct vm_area_struct *dst_vma,
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -420,10 +420,6 @@ config THP_SWAP
 
 	  For selection by architectures with reasonable THP sizes.
 
-config TRANSPARENT_HUGE_PAGECACHE
-	def_bool y
-	depends on TRANSPARENT_HUGEPAGE
-
 #
 # UP and nommu archs use km based percpu allocator
 #
@@ -714,7 +710,7 @@ config GUP_GET_PTE_LOW_HIGH
 
 config READ_ONLY_THP_FOR_FS
 	bool "Read-only THP for filesystems (EXPERIMENTAL)"
-	depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM
+	depends on TRANSPARENT_HUGEPAGE && SHMEM
 
 	help
 	  Allow khugepaged to put read-only file-backed pages in THP.
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -326,7 +326,7 @@ static struct attribute *hugepage_attr[] = {
 	&defrag_attr.attr,
 	&use_zero_page_attr.attr,
 	&hpage_pmd_size_attr.attr,
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
 	&shmem_enabled_attr.attr,
 #endif
 #ifdef CONFIG_DEBUG_VM
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -414,8 +414,6 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
 	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 	     vma->vm_file &&
 	     (vm_flags & VM_DENYWRITE))) {
-		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
-			return false;
 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 				HPAGE_PMD_NR);
 	}
@@ -1258,7 +1256,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 	}
 }
 
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
 /*
  * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
  * khugepaged should try to collapse the page table.
@@ -1973,6 +1971,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 		if (khugepaged_scan.address < hstart)
 			khugepaged_scan.address = hstart;
 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
+			goto skip;
 
 		while (khugepaged_scan.address < hend) {
 			int ret;
@@ -1984,14 +1984,10 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
-				struct file *file;
+				struct file *file = get_file(vma->vm_file);
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
 
-				if (shmem_file(vma->vm_file)
-				    && !shmem_huge_enabled(vma))
-					goto skip;
-				file = get_file(vma->vm_file);
 				up_read(&mm->mmap_sem);
 				ret = 1;
 				khugepaged_scan_file(mm, file, pgoff, hpage);
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3373,7 +3373,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
 	return 0;
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void deposit_prealloc_pte(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -3475,8 +3475,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	pte_t entry;
 	vm_fault_t ret;
 
-	if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
-			IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
 		/* THP on COW? */
 		VM_BUG_ON_PAGE(memcg, page);
 
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -933,7 +933,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			set_pte_at(vma->vm_mm, address, pte, entry);
 			ret = 1;
 		} else {
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			pmd_t *pmd = pvmw.pmd;
 			pmd_t entry;
 
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -410,7 +410,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 #define SHMEM_HUGE_DENY		(-1)
 #define SHMEM_HUGE_FORCE	(-2)
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* ifdef here to avoid bloating shmem.o when not necessary */
 
 static int shmem_huge __read_mostly;
@@ -580,7 +580,7 @@ static long shmem_unused_huge_count(struct super_block *sb,
 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 	return READ_ONCE(sbinfo->shrinklist_len);
 }
-#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
 
 #define shmem_huge SHMEM_HUGE_DENY
 
@@ -589,11 +589,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 {
 	return 0;
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
 {
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
 	    shmem_huge != SHMEM_HUGE_DENY)
 		return true;
@@ -1059,7 +1059,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 			 * Part of the huge page can be beyond i_size: subject
 			 * to shrink under memory pressure.
 			 */
-			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 				spin_lock(&sbinfo->shrinklist_lock);
 				/*
 				 * _careful to defend against unlocked access to
@@ -1510,7 +1510,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	int nr;
 	int err = -ENOSPC;
 
-	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		huge = false;
 	nr = huge ? HPAGE_PMD_NR : 1;
 
@@ -2093,7 +2093,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 	get_area = current->mm->get_unmapped_area;
 	addr = get_area(file, uaddr, len, pgoff, flags);
 
-	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		return addr;
 	if (IS_ERR_VALUE(addr))
 		return addr;
@@ -2232,7 +2232,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
 			(vma->vm_end & HPAGE_PMD_MASK)) {
 		khugepaged_enter(vma, vma->vm_flags);
@@ -3459,7 +3459,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
 	case Opt_huge:
 		ctx->huge = result.uint_32;
 		if (ctx->huge != SHMEM_HUGE_NEVER &&
-		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 		      has_transparent_hugepage()))
 			goto unsupported_parameter;
 		ctx->seen |= SHMEM_SEEN_HUGE;
@@ -3605,7 +3605,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
 		seq_printf(seq, ",gid=%u",
 				from_kgid_munged(&init_user_ns, sbinfo->gid));
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
 	if (sbinfo->huge)
 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
@@ -3850,7 +3850,7 @@ static const struct super_operations shmem_ops = {
 	.evict_inode	= shmem_evict_inode,
 	.drop_inode	= generic_delete_inode,
 	.put_super	= shmem_put_super,
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	.nr_cached_objects	= shmem_unused_huge_count,
 	.free_cached_objects	= shmem_unused_huge_scan,
 #endif
@@ -3912,7 +3912,7 @@ int __init shmem_init(void)
 		goto out1;
 	}
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
@@ -3928,7 +3928,7 @@ int __init shmem_init(void)
 	return error;
 }
 
-#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
 static ssize_t shmem_enabled_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
@@ -3980,9 +3980,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(vma->vm_file);
@@ -4017,7 +4017,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
 			return false;
 	}
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #else /* !CONFIG_SHMEM */
 
@@ -4186,7 +4186,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_file = file;
 	vma->vm_ops = &shmem_vm_ops;
 
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
 			(vma->vm_end & HPAGE_PMD_MASK)) {
 		khugepaged_enter(vma, vma->vm_flags);