From 70323545e557eb853457288492be0bb2196c2733 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:24:31 -0700 Subject: [PATCH 001/164] mn10300: use Kbuild logic to include Signed-off-by: Geert Uytterhoeven Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mn10300/include/asm/Kbuild | 1 + arch/mn10300/include/asm/sections.h | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 arch/mn10300/include/asm/sections.h diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 77eb1a68d13b..54a062cb9f2c 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -8,4 +8,5 @@ generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += scatterlist.h +generic-y += sections.h generic-y += trace_clock.h diff --git a/arch/mn10300/include/asm/sections.h b/arch/mn10300/include/asm/sections.h deleted file mode 100644 index 2b8c5160388f..000000000000 --- a/arch/mn10300/include/asm/sections.h +++ /dev/null @@ -1 +0,0 @@ -#include From 20882185dab2978952a705905284d2a9790b2bae Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:24:33 -0700 Subject: [PATCH 002/164] cris: use Kbuild logic to include Signed-off-by: Geert Uytterhoeven Acked-by: Jesper Nilsson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/cris/include/asm/Kbuild | 1 + arch/cris/include/asm/sections.h | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 arch/cris/include/asm/sections.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 802b94c4ca86..2ca489eaadd3 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -15,6 +15,7 @@ generic-y += mcs_spinlock.h generic-y += module.h generic-y += preempt.h generic-y += scatterlist.h +generic-y += sections.h generic-y += trace_clock.h generic-y += vga.h generic-y += xor.h diff --git a/arch/cris/include/asm/sections.h b/arch/cris/include/asm/sections.h deleted file mode 100644 index 2c998ce8967b..000000000000 --- a/arch/cris/include/asm/sections.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _CRIS_SECTIONS_H -#define _CRIS_SECTIONS_H - -/* nothing to see, move along */ -#include - -#endif From cafbaae8afdb1e6cf9f4715aea2c897cf407ddfd Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:24:35 -0700 Subject: [PATCH 003/164] fs/notify/group.c: make fsnotify_final_destroy_group() static No callers outside this file. 
Cc: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/notify/fsnotify.h | 3 --- fs/notify/group.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index 85e7d2b431d9..9c0898c4cfe1 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h @@ -23,9 +23,6 @@ extern int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, struct vfsmount *mnt, int allow_dups); -/* final kfree of a group */ -extern void fsnotify_final_destroy_group(struct fsnotify_group *group); - /* vfsmount specific destruction of a mark */ extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark); /* inode specific destruction of a mark */ diff --git a/fs/notify/group.c b/fs/notify/group.c index ad1995980456..d16b62cb2854 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -31,7 +31,7 @@ /* * Final freeing of a group */ -void fsnotify_final_destroy_group(struct fsnotify_group *group) +static void fsnotify_final_destroy_group(struct fsnotify_group *group) { if (group->ops->free_group_priv) group->ops->free_group_priv(group); From 105d1b425303120c7681abc0761b6fc6c3f8a8e8 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:24:37 -0700 Subject: [PATCH 004/164] fsnotify: don't put user context if it was never assigned On some failure paths we may attempt to free user context even if it wasn't assigned yet. This will cause a NULL ptr deref and a kernel BUG. The path I was looking at is in inotify_new_group(): oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL); if (unlikely(!oevent)) { fsnotify_destroy_group(group); return ERR_PTR(-ENOMEM); } fsnotify_destroy_group() would get called here, but group->inotify_data.user is only getting assigned later: group->inotify_data.user = get_current_user(); Signed-off-by: Sasha Levin Cc: John McCutchan Cc: Robert Love Cc: Eric Paris Reviewed-by: Heinrich Schuchardt Reviewed-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/notify/inotify/inotify_fsnotify.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 0f88bc0b4e6c..7d888d77d59a 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -165,8 +165,10 @@ static void inotify_free_group_priv(struct fsnotify_group *group) /* ideally the idr is empty and we won't hit the BUG in the callback */ idr_for_each(&group->inotify_data.idr, idr_callback, group); idr_destroy(&group->inotify_data.idr); - atomic_dec(&group->inotify_data.user->inotify_devs); - free_uid(group->inotify_data.user); + if (group->inotify_data.user) { + atomic_dec(&group->inotify_data.user->inotify_devs); + free_uid(group->inotify_data.user); + } } static void inotify_free_event(struct fsnotify_event *fsn_event) From 0b37e097a648aa71d4db1ad108001e95b69a2da4 Mon Sep 17 00:00:00 2001 From: Yann Droneaud Date: Thu, 9 Oct 2014 15:24:40 -0700 Subject: [PATCH 005/164] fanotify: enable close-on-exec on events' fd when requested in fanotify_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to commit 80af258867648 ("fanotify: groups can specify their f_flags for new fd"), file descriptors created as part of file access notification events inherit flags from the event_f_flags argument passed to syscall fanotify_init(2)[1]. Unfortunately O_CLOEXEC is currently silently ignored. 
Indeed, event_f_flags are only given to dentry_open(), which only seems to care about O_ACCMODE and O_PATH in do_dentry_open(), O_DIRECT in open_check_o_direct() and O_LARGEFILE in generic_file_open(). It's a pity, since, according to some lookup on various search engines and http://codesearch.debian.net/, there's already some userspace code which use O_CLOEXEC: - in systemd's readahead[2]: fanotify_fd = fanotify_init(FAN_CLOEXEC|FAN_NONBLOCK, O_RDONLY|O_LARGEFILE|O_CLOEXEC|O_NOATIME); - in clsync[3]: #define FANOTIFY_EVFLAGS (O_LARGEFILE|O_RDONLY|O_CLOEXEC) int fanotify_d = fanotify_init(FANOTIFY_FLAGS, FANOTIFY_EVFLAGS); - in examples [4] from "Filesystem monitoring in the Linux kernel" article[5] by Aleksander Morgado: if ((fanotify_fd = fanotify_init (FAN_CLOEXEC, O_RDONLY | O_CLOEXEC | O_LARGEFILE)) < 0) Additionally, since commit 48149e9d3a7e ("fanotify: check file flags passed in fanotify_init"). having O_CLOEXEC as part of fanotify_init() second argument is expressly allowed. So it seems expected to set close-on-exec flag on the file descriptors if userspace is allowed to request it with O_CLOEXEC. But Andrew Morton raised[6] the concern that enabling now close-on-exec might break existing applications which ask for O_CLOEXEC but expect the file descriptor to be inherited across exec(). In the other hand, as reported by Mihai Dontu[7] close-on-exec on the file descriptor returned as part of file access notify can break applications due to deadlock. So close-on-exec is needed for most applications. More, applications asking for close-on-exec are likely expecting it to be enabled, relying on O_CLOEXEC being effective. If not, it might weaken their security, as noted by Jan Kara[8]. So this patch replaces call to macro get_unused_fd() by a call to function get_unused_fd_flags() with event_f_flags value as argument. This way O_CLOEXEC flag in the second argument of fanotify_init(2) syscall is interpreted and close-on-exec get enabled when requested. 
[1] http://man7.org/linux/man-pages/man2/fanotify_init.2.html [2] http://cgit.freedesktop.org/systemd/systemd/tree/src/readahead/readahead-collect.c?id=v208#n294 [3] https://github.com/xaionaro/clsync/blob/v0.2.1/sync.c#L1631 https://github.com/xaionaro/clsync/blob/v0.2.1/configuration.h#L38 [4] http://www.lanedo.com/~aleksander/fanotify/fanotify-example.c [5] http://www.lanedo.com/2013/filesystem-monitoring-linux-kernel/ [6] http://lkml.kernel.org/r/20141001153621.65e9258e65a6167bf2e4cb50@linux-foundation.org [7] http://lkml.kernel.org/r/20141002095046.3715eb69@mdontu-l [8] http://lkml.kernel.org/r/20141002104410.GB19748@quack.suse.cz Link: http://lkml.kernel.org/r/cover.1411562410.git.ydroneaud@opteya.com Signed-off-by: Yann Droneaud Reviewed-by: Jan Kara Reviewed by: Heinrich Schuchardt Tested-by: Heinrich Schuchardt Cc: Mihai Don\u021bu Cc: Pádraig Brady Cc: Heinrich Schuchardt Cc: Jan Kara Cc: Valdis Kletnieks Cc: Michael Kerrisk-manpages Cc: Lino Sanfilippo Cc: Richard Guy Briggs Cc: Eric Paris Cc: Al Viro Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/notify/fanotify/fanotify_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index b13992a41bd9..c991616acca9 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -78,7 +78,7 @@ static int create_fd(struct fsnotify_group *group, pr_debug("%s: group=%p event=%p\n", __func__, group, event); - client_fd = get_unused_fd(); + client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); if (client_fd < 0) return client_fd; From 3a28663696569624e36161e2100fd61e38bd9824 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:24:42 -0700 Subject: [PATCH 006/164] m32r: use Kbuild logic to include Signed-off-by: Geert Uytterhoeven Cc: Hirokazu Takata Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m32r/include/asm/Kbuild | 1 + arch/m32r/include/asm/sections.h | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 arch/m32r/include/asm/sections.h diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index e02448b0648b..3796801d6e0c 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -8,4 +8,5 @@ generic-y += mcs_spinlock.h generic-y += module.h generic-y += preempt.h generic-y += scatterlist.h +generic-y += sections.h generic-y += trace_clock.h diff --git a/arch/m32r/include/asm/sections.h b/arch/m32r/include/asm/sections.h deleted file mode 100644 index 5e5d21c4908a..000000000000 --- a/arch/m32r/include/asm/sections.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _M32R_SECTIONS_H -#define _M32R_SECTIONS_H - -/* nothing to see, move along */ -#include - -#endif /* _M32R_SECTIONS_H */ From 2f82df0f3e6e89867695956aa061b464fe2c8958 Mon Sep 17 00:00:00 2001 From: Michael Opdenacker Date: Thu, 9 Oct 2014 15:24:44 -0700 Subject: [PATCH 007/164] m32r: remove deprecated IRQF_DISABLED This patch removes the use of the IRQF_DISABLED flag from arch/m32r/kernel/time.c It's a NOOP since 2.6.35 and it will be removed one day. 
Signed-off-by: Michael Opdenacker Cc: Hirokazu Takata Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m32r/kernel/time.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index 1a15f81ea1bd..093f2761aa51 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -134,7 +134,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction irq0 = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "MFT2", }; From 5272d036b280d43a06ab790030609225480d2009 Mon Sep 17 00:00:00 2001 From: Anton Altaparmakov Date: Thu, 9 Oct 2014 15:24:46 -0700 Subject: [PATCH 008/164] ntfs: use find_get_page_flags() to mark page accessed as it is no longer marked later on Mel Gorman's commit 2457aec63745 ("mm: non-atomically mark page accessed during page cache allocation where possible") removed mark_page_accessed() calls from NTFS without updating the matching find_lock_page() to find_get_page_flags(GFP_LOCK | FGP_ACCESSED) thus causing the page to never be marked accessed. This patch fixes that. Signed-off-by: Anton Altaparmakov Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ntfs/file.c | 5 +++-- fs/ntfs/super.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index f5ec1ce7a532..643faa44f22b 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1,7 +1,7 @@ /* * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. + * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc. * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -410,7 +410,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping, BUG_ON(!nr_pages); err = nr = 0; do { - pages[nr] = find_lock_page(mapping, index); + pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | + FGP_ACCESSED); if (!pages[nr]) { if (!*cached_page) { *cached_page = page_cache_alloc(mapping); diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 6c3296e546c3..9e1e112074fb 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -3208,7 +3208,7 @@ static void __exit exit_ntfs_fs(void) } MODULE_AUTHOR("Anton Altaparmakov "); -MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc."); +MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc."); MODULE_VERSION(NTFS_VERSION); MODULE_LICENSE("GPL"); #ifdef DEBUG From 7143e494414f25c6209f94155d20b796e0f36626 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Thu, 9 Oct 2014 15:24:48 -0700 Subject: [PATCH 009/164] ntfs: remove bogus space fs/ntfs/debug.c:124: WARNING: space prohibited between function name and open parenthesis '(' Signed-off-by: Andrea Gelmini Signed-off-by: Anton Altaparmakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ntfs/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ntfs/debug.c b/fs/ntfs/debug.c index dd6103cc93c1..825a54e8f490 100644 --- a/fs/ntfs/debug.c +++ b/fs/ntfs/debug.c @@ -112,7 +112,7 @@ void __ntfs_error(const char *function, const struct super_block *sb, /* If 1, output debug messages, and if 0, don't. 
*/ int debug_msgs = 0; -void __ntfs_debug (const char *file, int line, const char *function, +void __ntfs_debug(const char *file, int line, const char *function, const char *fmt, ...) { struct va_format vaf; From e2cabe1d426fb10cd8e04c26a49e70f1c864d25c Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:24:50 -0700 Subject: [PATCH 010/164] score: use Kbuild logic to include Signed-off-by: Geert Uytterhoeven Acked-by: Lennox Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/score/include/asm/Kbuild | 1 + arch/score/include/asm/sections.h | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) delete mode 100644 arch/score/include/asm/sections.h diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 3fe5681744f1..46461c19f284 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -10,6 +10,7 @@ generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += scatterlist.h +generic-y += sections.h generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h diff --git a/arch/score/include/asm/sections.h b/arch/score/include/asm/sections.h deleted file mode 100644 index 9441d23af005..000000000000 --- a/arch/score/include/asm/sections.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_SCORE_SECTIONS_H -#define _ASM_SCORE_SECTIONS_H - -#include - -#endif /* _ASM_SCORE_SECTIONS_H */ From 98acbf63d63c83e847c5cbe454b36a53cfbbc7a5 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:24:52 -0700 Subject: [PATCH 011/164] fs/ocfs2/stack_user.c: fix typo in ocfs2_control_release() It is supposed to zero pv_minor. Reported-by: Himangi Saraogi Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/stack_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 13a8537d8e8b..720aa389e0ea 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -591,7 +591,7 @@ static int ocfs2_control_release(struct inode *inode, struct file *file) */ ocfs2_control_this_node = -1; running_proto.pv_major = 0; - running_proto.pv_major = 0; + running_proto.pv_minor = 0; } out: From 190a7721ac865744a59fdf2f291c2a211cab6217 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 9 Oct 2014 15:24:54 -0700 Subject: [PATCH 012/164] ocfs2/dlm: refactor error handling in dlm_alloc_ctxt Refactoring error handling in dlm_alloc_ctxt to simplify code. 
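The shape of the refactoring, independent of the dlm specifics, is the usual single-exit-label pattern: every failing allocation just records an error and jumps to one label, and the cleanup there frees whatever was already set up. A small self-contained sketch of the pattern (the structure and fields are invented for illustration):

#include <stdlib.h>
#include <string.h>

struct ctxt {
	char *name;
	void *table;
};

static struct ctxt *ctxt_alloc(const char *name)
{
	struct ctxt *c;
	int ret = 0;

	c = calloc(1, sizeof(*c));
	if (!c) {
		ret = -1;
		goto leave;
	}

	c->name = strdup(name);
	if (!c->name) {
		ret = -1;
		goto leave;
	}

	c->table = calloc(128, sizeof(void *));
	if (!c->table) {
		ret = -1;
		goto leave;
	}

leave:
	if (ret < 0 && c) {
		/* free only what was set up; free(NULL) is a no-op */
		free(c->table);
		free(c->name);
		free(c);
		c = NULL;
	}
	return c;
}

int main(void)
{
	struct ctxt *c = ctxt_alloc("example");

	if (c) {
		free(c->table);
		free(c->name);
		free(c);
	}
	return 0;
}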
Signed-off-by: Joseph Qi Reviewed-by: Alex Chen Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlm/dlmdomain.c | 42 +++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 3fcf205ee900..257a6dfe3f13 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -1975,24 +1975,22 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, dlm = kzalloc(sizeof(*dlm), GFP_KERNEL); if (!dlm) { - mlog_errno(-ENOMEM); + ret = -ENOMEM; + mlog_errno(ret); goto leave; } dlm->name = kstrdup(domain, GFP_KERNEL); if (dlm->name == NULL) { - mlog_errno(-ENOMEM); - kfree(dlm); - dlm = NULL; + ret = -ENOMEM; + mlog_errno(ret); goto leave; } dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES); if (!dlm->lockres_hash) { - mlog_errno(-ENOMEM); - kfree(dlm->name); - kfree(dlm); - dlm = NULL; + ret = -ENOMEM; + mlog_errno(ret); goto leave; } @@ -2002,11 +2000,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, dlm->master_hash = (struct hlist_head **) dlm_alloc_pagevec(DLM_HASH_PAGES); if (!dlm->master_hash) { - mlog_errno(-ENOMEM); - dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); - kfree(dlm->name); - kfree(dlm); - dlm = NULL; + ret = -ENOMEM; + mlog_errno(ret); goto leave; } @@ -2017,14 +2012,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, dlm->node_num = o2nm_this_node(); ret = dlm_create_debugfs_subroot(dlm); - if (ret < 0) { - dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES); - dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); - kfree(dlm->name); - kfree(dlm); - dlm = NULL; + if (ret < 0) goto leave; - } spin_lock_init(&dlm->spinlock); spin_lock_init(&dlm->master_lock); @@ -2085,6 +2074,19 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, atomic_read(&dlm->dlm_refs.refcount)); leave: + if (ret < 0 && dlm) { + if (dlm->master_hash) + dlm_free_pagevec((void **)dlm->master_hash, + DLM_HASH_PAGES); + + if (dlm->lockres_hash) + dlm_free_pagevec((void **)dlm->lockres_hash, + DLM_HASH_PAGES); + + kfree(dlm->name); + kfree(dlm); + dlm = NULL; + } return dlm; } From 7fa05c6e46d76378d4403669ba6ea38364f910ac Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 9 Oct 2014 15:24:56 -0700 Subject: [PATCH 013/164] ocfs2: fix shift left operations overflow ocfs2_inode_info->ip_clusters and ocfs2_dinode->id1.bitmap1.i_total are defined as type u32, so the shift left operations may overflow if volume size is large, for example, 2TB and cluster size is 1MB. 
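The underlying C detail: both operands of << stay 32-bit, so the high bits are lost before the result is widened by the assignment; casting the left operand to u64 first makes the shift happen in 64-bit arithmetic. A standalone demonstration using the 2TB / 1MB-cluster numbers (2097152 clusters, 2048 sectors per cluster, i.e. a shift by 11):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clusters = 2097152;	/* 2TB volume with 1MB clusters */
	int c_to_s_bits = 11;		/* 1MB cluster -> 512-byte sectors */

	/* shift performed in 32 bits, then widened: high bits already gone */
	uint64_t wrong = (uint64_t)(clusters << c_to_s_bits);
	/* shift performed in 64 bits: the intended value */
	uint64_t right = (uint64_t)clusters << c_to_s_bits;

	printf("wrong: %llu sectors\n", (unsigned long long)wrong);
	printf("right: %llu sectors\n", (unsigned long long)right);
	return 0;
}

This prints 0 for the unfixed form and 4294967296 for the fixed one.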
Signed-off-by: Joseph Qi Reviewed-by: Alex Chen Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/inode.h | 2 +- fs/ocfs2/move_extents.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index a6c991c0fc98..a9b76de46047 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -162,7 +162,7 @@ static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode) { int c_to_s_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits - 9; - return (blkcnt_t)(OCFS2_I(inode)->ip_clusters << c_to_s_bits); + return (blkcnt_t)OCFS2_I(inode)->ip_clusters << c_to_s_bits; } /* Validate that a bh contains a valid inode */ diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 6219aaadeb08..74caffeeee1d 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -404,7 +404,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode, * 'vict_blkno' was out of the valid range. */ if ((vict_blkno < le64_to_cpu(rec->c_blkno)) || - (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) << + (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) << bits_per_unit))) { ret = -EINVAL; goto out; From 4a4e07c1bdbbc24d905e4c266b92cada9371db5d Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 9 Oct 2014 15:24:58 -0700 Subject: [PATCH 014/164] ocfs2: call o2quo_exit() if malloc failed in o2net_init() In o2net_init, if malloc failed, it directly returns -ENOMEM. Then o2quo_exit won't be called in init_o2nm. Signed-off-by: Joseph Qi Reviewed-by: joyce.xue Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/cluster/tcp.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index ea34952f9496..56cebba2390c 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -2146,17 +2146,13 @@ int o2net_init(void) o2quo_init(); if (o2net_debugfs_init()) - return -ENOMEM; + goto out; o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL); o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL); o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL); - if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) { - kfree(o2net_hand); - kfree(o2net_keep_req); - kfree(o2net_keep_resp); - return -ENOMEM; - } + if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) + goto out; o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION); o2net_hand->connector_id = cpu_to_be64(1); @@ -2181,6 +2177,14 @@ int o2net_init(void) } return 0; + +out: + kfree(o2net_hand); + kfree(o2net_keep_req); + kfree(o2net_keep_resp); + + o2quo_exit(); + return -ENOMEM; } void o2net_exit(void) From 9a7e6b5a0ad1a554ca982c555a34ce8086d5b994 Mon Sep 17 00:00:00 2001 From: alex chen Date: Thu, 9 Oct 2014 15:25:00 -0700 Subject: [PATCH 015/164] ocfs2/dlm: call dlm_lockres_put without resource spinlock dlm_lockres_put() should be called without &res->spinlock, otherwise a deadlock case may happen. spin_lock(&res->spinlock) ... 
dlm_lockres_put ->dlm_lockres_release ->dlm_print_one_lock_resource ->spin_lock(&res->spinlock) Signed-off-by: Alex Chen Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlm/dlmrecovery.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 45067faf5695..3365839d2971 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1710,9 +1710,12 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, BUG(); } else __dlm_lockres_grab_inflight_worker(dlm, res); - } else /* put.. incase we are not the master */ + spin_unlock(&res->spinlock); + } else { + /* put.. incase we are not the master */ + spin_unlock(&res->spinlock); dlm_lockres_put(res); - spin_unlock(&res->spinlock); + } } spin_unlock(&dlm->spinlock); From 6ae075485e2d91921bdd64e49896b1bae87d1ba2 Mon Sep 17 00:00:00 2001 From: Xue jiufei Date: Thu, 9 Oct 2014 15:25:03 -0700 Subject: [PATCH 016/164] ocfs2: remove unused code in dlm_new_lockres() Remove the branch that free res->lockname.name because the condition is never satisfied when jump to label error. Signed-off-by: joyce.xue Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlm/dlmmaster.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 12ba682fc53c..215e41abf101 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -625,9 +625,6 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, return res; error: - if (res && res->lockname.name) - kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); - if (res) kmem_cache_free(dlm_lockres_cache, res); return NULL; From 8f9ac032322b855ff9f578efcb5de891dcf85e9b Mon Sep 17 00:00:00 2001 From: Rob Jones Date: Thu, 9 Oct 2014 15:25:05 -0700 Subject: [PATCH 017/164] fs/ocfs2/dlm/dlmdebug.c: use seq_open_private() not seq_open() Reduce boilerplate code by using seq_open_private() instead of seq_open() Signed-off-by: Rob Jones Cc: Joel Becker Cc: Mark Fasheh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlm/dlmdebug.c | 39 ++++++++++++++------------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 18f13c2e4a10..149eb556b8c6 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -647,41 +647,30 @@ static const struct seq_operations debug_lockres_ops = { static int debug_lockres_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; - int ret = -ENOMEM; - struct seq_file *seq; - struct debug_lockres *dl = NULL; + struct debug_lockres *dl; + void *buf; - dl = kzalloc(sizeof(struct debug_lockres), GFP_KERNEL); - if (!dl) { - mlog_errno(ret); + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) goto bail; - } + + dl = __seq_open_private(file, &debug_lockres_ops, sizeof(*dl)); + if (!dl) + goto bailfree; dl->dl_len = PAGE_SIZE; - dl->dl_buf = kmalloc(dl->dl_len, GFP_KERNEL); - if (!dl->dl_buf) { - mlog_errno(ret); - goto bail; - } - - ret = seq_open(file, &debug_lockres_ops); - if (ret) { - mlog_errno(ret); - goto bail; - } - - seq = file->private_data; - seq->private = dl; + dl->dl_buf = buf; dlm_grab(dlm); dl->dl_ctxt = dlm; return 0; + +bailfree: + kfree(buf); bail: - if (dl) - kfree(dl->dl_buf); - kfree(dl); - return ret; + mlog_errno(-ENOMEM); + 
return -ENOMEM; } static int debug_lockres_release(struct inode *inode, struct file *file) From f32883384846e1d4aa941c60dd8adb44093359c6 Mon Sep 17 00:00:00 2001 From: Rob Jones Date: Thu, 9 Oct 2014 15:25:07 -0700 Subject: [PATCH 018/164] fs/ocfs2/cluster/netdebug.c: use seq_open_private() not seq_open() Reduce boilerplate code by using seq_open_private() instead of seq_open() Note that the code in and using sc_common_open() has been quite extensively changed. Not least because there was a latent memory leak in the code as was: if sc_common_open() failed, the previously allocated buffer was not freed. Signed-off-by: Rob Jones Cc: Joel Becker Cc: Mark Fasheh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/cluster/netdebug.c | 78 +++++++++---------------------------- 1 file changed, 19 insertions(+), 59 deletions(-) diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 73ba81928bce..27d1242c8383 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -185,29 +185,13 @@ static const struct seq_operations nst_seq_ops = { static int nst_fop_open(struct inode *inode, struct file *file) { struct o2net_send_tracking *dummy_nst; - struct seq_file *seq; - int ret; - dummy_nst = kmalloc(sizeof(struct o2net_send_tracking), GFP_KERNEL); - if (dummy_nst == NULL) { - ret = -ENOMEM; - goto out; - } - dummy_nst->st_task = NULL; - - ret = seq_open(file, &nst_seq_ops); - if (ret) - goto out; - - seq = file->private_data; - seq->private = dummy_nst; + dummy_nst = __seq_open_private(file, &nst_seq_ops, sizeof(*dummy_nst)); + if (!dummy_nst) + return -ENOMEM; o2net_debug_add_nst(dummy_nst); - dummy_nst = NULL; - -out: - kfree(dummy_nst); - return ret; + return 0; } static int nst_fop_release(struct inode *inode, struct file *file) @@ -412,33 +396,27 @@ static const struct seq_operations sc_seq_ops = { .show = sc_seq_show, }; -static int sc_common_open(struct file *file, struct o2net_sock_debug *sd) +static int sc_common_open(struct file *file, int ctxt) { + struct o2net_sock_debug *sd; struct o2net_sock_container *dummy_sc; - struct seq_file *seq; - int ret; - dummy_sc = kmalloc(sizeof(struct o2net_sock_container), GFP_KERNEL); - if (dummy_sc == NULL) { - ret = -ENOMEM; - goto out; + dummy_sc = kzalloc(sizeof(*dummy_sc), GFP_KERNEL); + if (!dummy_sc) + return -ENOMEM; + + sd = __seq_open_private(file, &sc_seq_ops, sizeof(*sd)); + if (!sd) { + kfree(dummy_sc); + return -ENOMEM; } - dummy_sc->sc_page = NULL; - ret = seq_open(file, &sc_seq_ops); - if (ret) - goto out; - - seq = file->private_data; - seq->private = sd; + sd->dbg_ctxt = ctxt; sd->dbg_sock = dummy_sc; + o2net_debug_add_sc(dummy_sc); - dummy_sc = NULL; - -out: - kfree(dummy_sc); - return ret; + return 0; } static int sc_fop_release(struct inode *inode, struct file *file) @@ -453,16 +431,7 @@ static int sc_fop_release(struct inode *inode, struct file *file) static int stats_fop_open(struct inode *inode, struct file *file) { - struct o2net_sock_debug *sd; - - sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); - if (sd == NULL) - return -ENOMEM; - - sd->dbg_ctxt = SHOW_SOCK_STATS; - sd->dbg_sock = NULL; - - return sc_common_open(file, sd); + return sc_common_open(file, SHOW_SOCK_STATS); } static const struct file_operations stats_seq_fops = { @@ -474,16 +443,7 @@ static const struct file_operations stats_seq_fops = { static int sc_fop_open(struct inode *inode, struct file *file) { - struct o2net_sock_debug *sd; - - sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); - if 
(sd == NULL) - return -ENOMEM; - - sd->dbg_ctxt = SHOW_SOCK_CONTAINERS; - sd->dbg_sock = NULL; - - return sc_common_open(file, sd); + return sc_common_open(file, SHOW_SOCK_CONTAINERS); } static const struct file_operations sc_seq_fops = { From 1848cb5530d3bada86c7b54f4f8b053b2081eb00 Mon Sep 17 00:00:00 2001 From: Rob Jones Date: Thu, 9 Oct 2014 15:25:09 -0700 Subject: [PATCH 019/164] fs/ocfs2/dlmglue.c: use __seq_open_private() not seq_open() Reduce boilerplate code by using seq_open_private() instead of seq_open() Signed-off-by: Rob Jones Cc: Joel Becker Cc: Mark Fasheh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlmglue.c | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 52cfe99ae056..21262f2b1654 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2892,37 +2892,24 @@ static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file) static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file) { - int ret; struct ocfs2_dlm_seq_priv *priv; - struct seq_file *seq; struct ocfs2_super *osb; - priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL); + priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv)); if (!priv) { - ret = -ENOMEM; - mlog_errno(ret); - goto out; + mlog_errno(-ENOMEM); + return -ENOMEM; } + osb = inode->i_private; ocfs2_get_dlm_debug(osb->osb_dlm_debug); priv->p_dlm_debug = osb->osb_dlm_debug; INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list); - ret = seq_open(file, &ocfs2_dlm_seq_ops); - if (ret) { - kfree(priv); - mlog_errno(ret); - goto out; - } - - seq = file->private_data; - seq->private = priv; - ocfs2_add_lockres_tracking(&priv->p_iter_res, priv->p_dlm_debug); -out: - return ret; + return 0; } static const struct file_operations ocfs2_dlm_debug_fops = { From 5046f18d5bd9ad7638b32c3b304ff39a74c064df Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Thu, 9 Oct 2014 15:25:11 -0700 Subject: [PATCH 020/164] ocfs2: don't fire quorum before connection established Firing quorum before connection established can cause unexpected node to reboot. Assume there are 3 nodes in the cluster, Node 1, 2, 3. Node 2 and 3 have wrong ip address of Node 1 in cluster.conf and global heartbeat is enabled in the cluster. After the heatbeats are started on these three nodes, Node 1 will reboot due to quorum fencing. It is similar case if Node 1's networking is not ready when starting the global heartbeat. The reboot is not friendly as customer is not fully ready for ocfs2 to work. Fix it by not allowing firing quorum before the connection is established. In this case, ocfs2 will wait until the wrong configuration is fixed or networking is up to continue. Also update the log to guide the user where to check when connection is not built for a long time. 
Signed-off-by: Junxiao Bi Reviewed-by: Srinivas Eeda Cc: Joel Becker Cc: Mark Fasheh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/cluster/tcp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 56cebba2390c..509e6d5415e2 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -536,7 +536,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, if (nn->nn_persistent_error || nn->nn_sc_valid) wake_up(&nn->nn_sc_wq); - if (!was_err && nn->nn_persistent_error) { + if (was_valid && !was_err && nn->nn_persistent_error) { o2quo_conn_err(o2net_num_from_nn(nn)); queue_delayed_work(o2net_wq, &nn->nn_still_up, msecs_to_jiffies(O2NET_QUORUM_DELAY_MS)); @@ -1721,7 +1721,8 @@ static void o2net_connect_expired(struct work_struct *work) spin_lock(&nn->nn_lock); if (!nn->nn_sc_valid) { printk(KERN_NOTICE "o2net: No connection established with " - "node %u after %u.%u seconds, giving up.\n", + "node %u after %u.%u seconds, check network and" + " cluster configuration.\n", o2net_num_from_nn(nn), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); From 70e82a12dbfa3acbff41be08a36e8be4578878c9 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 9 Oct 2014 15:25:13 -0700 Subject: [PATCH 021/164] ocfs2: fix deadlock between o2hb thread and o2net_wq The following case may lead to o2net_wq and o2hb thread deadlock on o2hb_callback_sem. Currently there are 2 nodes say N1, N2 in the cluster. And N2 down, at the same time, N3 tries to join the cluster. So N1 will handle node down (N2) and join (N3) simultaneously. o2hb o2net_wq ->o2hb_do_disk_heartbeat ->o2hb_check_slot ->o2hb_run_event_list ->o2hb_fire_callbacks ->down_write(&o2hb_callback_sem) ->o2net_hb_node_down_cb ->flush_workqueue(o2net_wq) ->o2net_process_message ->dlm_query_join_handler ->o2hb_check_node_heartbeating ->o2hb_fill_node_map ->down_read(&o2hb_callback_sem) No need to take o2hb_callback_sem in dlm_query_join_handler, o2hb_live_lock is enough to protect live node map. 
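As a rough userspace analogue of the new helper (the names and sizes below are invented; build with -pthread), the idea is simply to snapshot the live map under a spinlock and test the bit locally, so the handler never touches the rwsem that the heartbeat path holds while flushing the workqueue:

#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_NODES	64
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static pthread_spinlock_t live_lock;
static unsigned long live_map[BITS_TO_LONGS(MAX_NODES)];

static void set_live(unsigned int node)
{
	pthread_spin_lock(&live_lock);
	live_map[node / BITS_PER_LONG] |= 1UL << (node % BITS_PER_LONG);
	pthread_spin_unlock(&live_lock);
}

/* safe to call from a work item: no rwsem is taken here */
static int node_is_live_no_sem(unsigned int node)
{
	unsigned long snapshot[BITS_TO_LONGS(MAX_NODES)];

	pthread_spin_lock(&live_lock);
	memcpy(snapshot, live_map, sizeof(snapshot));
	pthread_spin_unlock(&live_lock);

	return !!(snapshot[node / BITS_PER_LONG] & (1UL << (node % BITS_PER_LONG)));
}

int main(void)
{
	pthread_spin_init(&live_lock, PTHREAD_PROCESS_PRIVATE);
	set_live(3);
	printf("node 3 live: %d, node 5 live: %d\n",
	       node_is_live_no_sem(3), node_is_live_no_sem(5));
	return 0;
}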
Signed-off-by: Joseph Qi Cc: xMark Fasheh Cc: Joel Becker Cc: jiangyiwen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/cluster/heartbeat.c | 19 +++++++++++++++++++ fs/ocfs2/cluster/heartbeat.h | 1 + fs/ocfs2/dlm/dlmdomain.c | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 73039295d0d1..d13385448168 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2572,6 +2572,25 @@ int o2hb_check_node_heartbeating(u8 node_num) } EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating); +int o2hb_check_node_heartbeating_no_sem(u8 node_num) +{ + unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long flags; + + spin_lock_irqsave(&o2hb_live_lock, flags); + o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map)); + spin_unlock_irqrestore(&o2hb_live_lock, flags); + if (!test_bit(node_num, testing_map)) { + mlog(ML_HEARTBEAT, + "node (%u) does not have heartbeating enabled.\n", + node_num); + return 0; + } + + return 1; +} +EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_no_sem); + int o2hb_check_node_heartbeating_from_callback(u8 node_num) { unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h index 00ad8e8fea51..3ef5137dc362 100644 --- a/fs/ocfs2/cluster/heartbeat.h +++ b/fs/ocfs2/cluster/heartbeat.h @@ -80,6 +80,7 @@ void o2hb_fill_node_map(unsigned long *map, void o2hb_exit(void); int o2hb_init(void); int o2hb_check_node_heartbeating(u8 node_num); +int o2hb_check_node_heartbeating_no_sem(u8 node_num); int o2hb_check_node_heartbeating_from_callback(u8 node_num); int o2hb_check_local_node_heartbeating(void); void o2hb_stop_all_regions(void); diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 257a6dfe3f13..02d315fef432 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -839,7 +839,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, * to back off and try again. This gives heartbeat a chance * to catch up. */ - if (!o2hb_check_node_heartbeating(query->node_idx)) { + if (!o2hb_check_node_heartbeating_no_sem(query->node_idx)) { mlog(0, "node %u is not in our live map yet\n", query->node_idx); From f775da2fc2a8e42aa49eddbf5186ac3df8961a71 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Thu, 9 Oct 2014 15:25:15 -0700 Subject: [PATCH 022/164] ocfs2: fix deadlock due to wrong locking order For commit ocfs2 journal, ocfs2 journal thread will acquire the mutex osb->journal->j_trans_barrier and wake up jbd2 commit thread, then it will wait until jbd2 commit thread done. In order journal mode, jbd2 needs flushing dirty data pages first, and this needs get page lock. So osb->journal->j_trans_barrier should be got before page lock. But ocfs2_write_zero_page() and ocfs2_write_begin_inline() obey this locking order, and this will cause deadlock and hung the whole cluster. 
One deadlock catched is the following: PID: 13449 TASK: ffff8802e2f08180 CPU: 31 COMMAND: "oracle" #0 [ffff8802ee3f79b0] __schedule at ffffffff8150a524 #1 [ffff8802ee3f7a58] schedule at ffffffff8150acbf #2 [ffff8802ee3f7a68] rwsem_down_failed_common at ffffffff8150cb85 #3 [ffff8802ee3f7ad8] rwsem_down_read_failed at ffffffff8150cc55 #4 [ffff8802ee3f7ae8] call_rwsem_down_read_failed at ffffffff812617a4 #5 [ffff8802ee3f7b50] ocfs2_start_trans at ffffffffa0498919 [ocfs2] #6 [ffff8802ee3f7ba0] ocfs2_zero_start_ordered_transaction at ffffffffa048b2b8 [ocfs2] #7 [ffff8802ee3f7bf0] ocfs2_write_zero_page at ffffffffa048e9bd [ocfs2] #8 [ffff8802ee3f7c80] ocfs2_zero_extend_range at ffffffffa048ec83 [ocfs2] #9 [ffff8802ee3f7ce0] ocfs2_zero_extend at ffffffffa048edfd [ocfs2] #10 [ffff8802ee3f7d50] ocfs2_extend_file at ffffffffa049079e [ocfs2] #11 [ffff8802ee3f7da0] ocfs2_setattr at ffffffffa04910ed [ocfs2] #12 [ffff8802ee3f7e70] notify_change at ffffffff81187d29 #13 [ffff8802ee3f7ee0] do_truncate at ffffffff8116bbc1 #14 [ffff8802ee3f7f50] sys_ftruncate at ffffffff8116bcbd #15 [ffff8802ee3f7f80] system_call_fastpath at ffffffff81515142 RIP: 00007f8de750c6f7 RSP: 00007fffe786e478 RFLAGS: 00000206 RAX: 000000000000004d RBX: ffffffff81515142 RCX: 0000000000000000 RDX: 0000000000000200 RSI: 0000000000028400 RDI: 000000000000000d RBP: 00007fffe786e040 R8: 0000000000000000 R9: 000000000000000d R10: 0000000000000000 R11: 0000000000000206 R12: 000000000000000d R13: 00007fffe786e710 R14: 00007f8de70f8340 R15: 0000000000028400 ORIG_RAX: 000000000000004d CS: 0033 SS: 002b crash64> bt PID: 7610 TASK: ffff88100fd56140 CPU: 1 COMMAND: "ocfs2cmt" #0 [ffff88100f4d1c50] __schedule at ffffffff8150a524 #1 [ffff88100f4d1cf8] schedule at ffffffff8150acbf #2 [ffff88100f4d1d08] jbd2_log_wait_commit at ffffffffa01274fd [jbd2] #3 [ffff88100f4d1d98] jbd2_journal_flush at ffffffffa01280b4 [jbd2] #4 [ffff88100f4d1dd8] ocfs2_commit_cache at ffffffffa0499b14 [ocfs2] #5 [ffff88100f4d1e38] ocfs2_commit_thread at ffffffffa0499d38 [ocfs2] #6 [ffff88100f4d1ee8] kthread at ffffffff81090db6 #7 [ffff88100f4d1f48] kernel_thread_helper at ffffffff81516284 crash64> bt PID: 7609 TASK: ffff88100f2d4480 CPU: 0 COMMAND: "jbd2/dm-20-86" #0 [ffff88100def3920] __schedule at ffffffff8150a524 #1 [ffff88100def39c8] schedule at ffffffff8150acbf #2 [ffff88100def39d8] io_schedule at ffffffff8150ad6c #3 [ffff88100def39f8] sleep_on_page at ffffffff8111069e #4 [ffff88100def3a08] __wait_on_bit_lock at ffffffff8150b30a #5 [ffff88100def3a58] __lock_page at ffffffff81110687 #6 [ffff88100def3ab8] write_cache_pages at ffffffff8111b752 #7 [ffff88100def3be8] generic_writepages at ffffffff8111b901 #8 [ffff88100def3c48] journal_submit_data_buffers at ffffffffa0120f67 [jbd2] #9 [ffff88100def3cf8] jbd2_journal_commit_transaction at ffffffffa0121372[jbd2] #10 [ffff88100def3e68] kjournald2 at ffffffffa0127a86 [jbd2] #11 [ffff88100def3ee8] kthread at ffffffff81090db6 #12 [ffff88100def3f48] kernel_thread_helper at ffffffff81516284 Signed-off-by: Junxiao Bi Cc: Mark Fasheh Cc: Joel Becker Cc: Alex Chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/aops.c | 15 ++++++++------- fs/ocfs2/file.c | 47 +++++++++++++++++++++++------------------------ 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 4a231a166cf8..1ef547e49373 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1481,8 +1481,16 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, handle_t *handle; struct ocfs2_dinode 
*di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + page = find_or_create_page(mapping, 0, GFP_NOFS); if (!page) { + ocfs2_commit_trans(osb, handle); ret = -ENOMEM; mlog_errno(ret); goto out; @@ -1494,13 +1502,6 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, wc->w_pages[0] = wc->w_target_page = page; wc->w_num_pages = 1; - handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - mlog_errno(ret); - goto out; - } - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 2930e231f3f9..682732f3f0d8 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -760,7 +760,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, struct address_space *mapping = inode->i_mapping; struct page *page; unsigned long index = abs_from >> PAGE_CACHE_SHIFT; - handle_t *handle = NULL; + handle_t *handle; int ret = 0; unsigned zero_from, zero_to, block_start, block_end; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; @@ -769,11 +769,17 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); BUG_ON(abs_from & (inode->i_blkbits - 1)); + handle = ocfs2_zero_start_ordered_transaction(inode, di_bh); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } + page = find_or_create_page(mapping, index, GFP_NOFS); if (!page) { ret = -ENOMEM; mlog_errno(ret); - goto out; + goto out_commit_trans; } /* Get the offsets within the page that we want to zero */ @@ -805,15 +811,6 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, goto out_unlock; } - if (!handle) { - handle = ocfs2_zero_start_ordered_transaction(inode, - di_bh); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; - break; - } - } /* must not update i_size! */ ret = block_commit_write(page, block_start + 1, @@ -824,27 +821,29 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, ret = 0; } + /* + * fs-writeback will release the dirty pages without page lock + * whose offset are over inode size, the release happens at + * block_write_full_page(). + */ + i_size_write(inode, abs_to); + inode->i_blocks = ocfs2_inode_sector_count(inode); + di->i_size = cpu_to_le64((u64)i_size_read(inode)); + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); + di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); + di->i_mtime_nsec = di->i_ctime_nsec; if (handle) { - /* - * fs-writeback will release the dirty pages without page lock - * whose offset are over inode size, the release happens at - * block_write_full_page(). 
- */ - i_size_write(inode, abs_to); - inode->i_blocks = ocfs2_inode_sector_count(inode); - di->i_size = cpu_to_le64((u64)i_size_read(inode)); - inode->i_mtime = inode->i_ctime = CURRENT_TIME; - di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); - di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); - di->i_mtime_nsec = di->i_ctime_nsec; ocfs2_journal_dirty(handle, di_bh); ocfs2_update_inode_fsync_trans(handle, inode, 1); - ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); } out_unlock: unlock_page(page); page_cache_release(page); +out_commit_trans: + if (handle) + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: return ret; } From b1a8de1f534337b398c7778578a56ec4f018cb27 Mon Sep 17 00:00:00 2001 From: chai wen Date: Thu, 9 Oct 2014 15:25:17 -0700 Subject: [PATCH 023/164] softlockup: make detector be aware of task switch of processes hogging cpu For now, soft lockup detector warns once for each case of process softlockup. But the thread 'watchdog/n' may not always get the cpu at the time slot between the task switch of two processes hogging that cpu to reset soft_watchdog_warn. An example would be two processes hogging the cpu. Process A causes the softlockup warning and is killed manually by a user. Process B immediately becomes the new process hogging the cpu preventing the softlockup code from resetting the soft_watchdog_warn variable. This case is a false negative of "warn only once for a process", as there may be a different process that is going to hog the cpu. Resolve this by saving/checking the task pointer of the hogging process and use that to reset soft_watchdog_warn too. [dzickus@redhat.com: update comment] Signed-off-by: chai wen Signed-off-by: Don Zickus Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index a8d6914030fe..7b223b212683 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -47,6 +47,7 @@ static DEFINE_PER_CPU(bool, softlockup_touch_sync); static DEFINE_PER_CPU(bool, soft_watchdog_warn); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); +static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); #ifdef CONFIG_HARDLOCKUP_DETECTOR static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); @@ -333,8 +334,22 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) return HRTIMER_RESTART; /* only warn once */ - if (__this_cpu_read(soft_watchdog_warn) == true) + if (__this_cpu_read(soft_watchdog_warn) == true) { + /* + * When multiple processes are causing softlockups the + * softlockup detector only warns on the first one + * because the code relies on a full quiet cycle to + * re-arm. The second process prevents the quiet cycle + * and never gets reported. Use task pointers to detect + * this. + */ + if (__this_cpu_read(softlockup_task_ptr_saved) != + current) { + __this_cpu_write(soft_watchdog_warn, false); + __touch_watchdog(); + } return HRTIMER_RESTART; + } if (softlockup_all_cpu_backtrace) { /* Prevent multiple soft-lockup reports if one cpu is already @@ -350,6 +365,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! 
[%s:%d]\n", smp_processor_id(), duration, current->comm, task_pid_nr(current)); + __this_cpu_write(softlockup_task_ptr_saved, current); print_modules(); print_irqtrace_events(current); if (regs) From 46c298cf69d0e9a27d33ff992a81bd7b441c7933 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:19 -0700 Subject: [PATCH 024/164] fs/proc/task_mmu.c: don't use task->mm in m_start() and show_*map() get_gate_vma(priv->task->mm) looks ugly and wrong, task->mm can be NULL or it can changed by exec right after mm_access(). And in theory this race is not harmless, the task can exec and then later exit and free the new mm_struct. In this case get_task_mm(oldmm) can't help, get_gate_vma(task->mm) can read the freed/unmapped memory. I think that priv->task should simply die and hold_task_mempolicy() logic can be simplified. tail_vma logic asks for cleanups too. Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c34156888d70..289dfdc0ec09 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -170,7 +170,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) return mm; down_read(&mm->mmap_sem); - tail_vma = get_gate_vma(priv->task->mm); + tail_vma = get_gate_vma(mm); priv->tail_vma = tail_vma; hold_task_mempolicy(priv); /* Start with last addr hint */ @@ -351,12 +351,11 @@ static int show_map(struct seq_file *m, void *v, int is_pid) { struct vm_area_struct *vma = v; struct proc_maps_private *priv = m->private; - struct task_struct *task = priv->task; show_map_vma(m, vma, is_pid); if (m->count < m->size) /* vma is copied successfully */ - m->version = (vma != get_gate_vma(task->mm)) + m->version = (vma != priv->tail_vma) ? vma->vm_start : 0; return 0; } @@ -584,7 +583,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) static int show_smap(struct seq_file *m, void *v, int is_pid) { struct proc_maps_private *priv = m->private; - struct task_struct *task = priv->task; struct vm_area_struct *vma = v; struct mem_size_stats mss; struct mm_walk smaps_walk = { @@ -639,7 +637,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) show_smap_vma_flags(m, vma); if (m->count < m->size) /* vma is copied successfully */ - m->version = (vma != get_gate_vma(task->mm)) + m->version = (vma != priv->tail_vma) ? vma->vm_start : 0; return 0; } From 4db7d0ee198d417f4144c58048fcb173d90096ea Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:21 -0700 Subject: [PATCH 025/164] fs/proc/task_mmu.c: unify/simplify do_maps_open() and numa_maps_open() do_maps_open() and numa_maps_open() are overcomplicated, they could use __seq_open_private(). Plus they do the same, just sizeof(*priv) Change them to use a new simple helper, proc_maps_open(ops, psize). This simplifies the code and allows us to do the next changes. Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 44 ++++++++++++++++---------------------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 289dfdc0ec09..4d716a09d500 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -231,23 +231,23 @@ static void m_stop(struct seq_file *m, void *v) put_task_struct(priv->task); } +static int proc_maps_open(struct inode *inode, struct file *file, + const struct seq_operations *ops, int psize) +{ + struct proc_maps_private *priv = __seq_open_private(file, ops, psize); + + if (!priv) + return -ENOMEM; + + priv->pid = proc_pid(inode); + return 0; +} + static int do_maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops) { - struct proc_maps_private *priv; - int ret = -ENOMEM; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (priv) { - priv->pid = proc_pid(inode); - ret = seq_open(file, ops); - if (!ret) { - struct seq_file *m = file->private_data; - m->private = priv; - } else { - kfree(priv); - } - } - return ret; + return proc_maps_open(inode, file, ops, + sizeof(struct proc_maps_private)); } static void @@ -1526,20 +1526,8 @@ static const struct seq_operations proc_tid_numa_maps_op = { static int numa_maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops) { - struct numa_maps_private *priv; - int ret = -ENOMEM; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (priv) { - priv->proc_maps.pid = proc_pid(inode); - ret = seq_open(file, ops); - if (!ret) { - struct seq_file *m = file->private_data; - m->private = priv; - } else { - kfree(priv); - } - } - return ret; + return proc_maps_open(inode, file, ops, + sizeof(struct numa_maps_private)); } static int pid_numa_maps_open(struct inode *inode, struct file *file) From 5381e169e78405bd54256860f151596f5a887617 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:24 -0700 Subject: [PATCH 026/164] proc: introduce proc_mem_open() Extract the mm_access() code from __mem_open() into the new helper, proc_mem_open(), the next patch will add another caller. Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 38 ++++++++++++++++++++++---------------- fs/proc/internal.h | 2 ++ 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/fs/proc/base.c b/fs/proc/base.c index baf852b648ad..4c542b907754 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -632,29 +632,35 @@ static const struct file_operations proc_single_file_operations = { .release = single_release, }; + +struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode) +{ + struct task_struct *task = get_proc_task(inode); + struct mm_struct *mm = ERR_PTR(-ESRCH); + + if (task) { + mm = mm_access(task, mode); + put_task_struct(task); + + if (!IS_ERR_OR_NULL(mm)) { + /* ensure this mm_struct can't be freed */ + atomic_inc(&mm->mm_count); + /* but do not pin its memory */ + mmput(mm); + } + } + + return mm; +} + static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) { - struct task_struct *task = get_proc_task(file_inode(file)); - struct mm_struct *mm; - - if (!task) - return -ESRCH; - - mm = mm_access(task, mode); - put_task_struct(task); + struct mm_struct *mm = proc_mem_open(inode, mode); if (IS_ERR(mm)) return PTR_ERR(mm); - if (mm) { - /* ensure this mm_struct can't be freed */ - atomic_inc(&mm->mm_count); - /* but do not pin its memory */ - mmput(mm); - } - file->private_data = mm; - return 0; } diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 7da13e49128a..3c685563406f 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -278,6 +278,8 @@ struct proc_maps_private { #endif }; +struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode); + extern const struct file_operations proc_pid_maps_operations; extern const struct file_operations proc_tid_maps_operations; extern const struct file_operations proc_pid_numa_maps_operations; From 29a40ace841cba9b661711f042d1821cdc4ad47c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:26 -0700 Subject: [PATCH 027/164] fs/proc/task_mmu.c: shift mm_access() from m_start() to proc_maps_open() A simple test-case from Kirill Shutemov cat /proc/self/maps >/dev/null chmod +x /proc/self/net/packet exec /proc/self/net/packet makes lockdep unhappy, cat/exec take seq_file->lock + cred_guard_mutex in the opposite order. It's a false positive and probably we should not allow "chmod +x" on proc files. Still I think that we should avoid mm_access() and cred_guard_mutex in sys_read() paths, security checking should happen at open time. Besides, this doesn't even look right if the task changes its ->mm between m_stop() and m_start(). Add the new "mm_struct *mm" member into struct proc_maps_private and change proc_maps_open() to initialize it using proc_mem_open(). Change m_start() to use priv->mm if atomic_inc_not_zero(mm_users) succeeds or return NULL (eof) otherwise. The only complication is that proc_maps_open() users should additionally do mmdrop() in fop->release(), add the new proc_map_release() helper for that. Note: this is the user-visible change, if the task execs after open("maps") the new ->mm won't be visible via this file. I hope this is fine, and this matches /proc/pid/mem bahaviour. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Oleg Nesterov Reported-by: "Kirill A. Shutemov" Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/internal.h | 1 + fs/proc/task_mmu.c | 37 ++++++++++++++++++++++++++++--------- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 3c685563406f..d27182854a28 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -270,6 +270,7 @@ extern int proc_remount(struct super_block *, int *, char *); struct proc_maps_private { struct pid *pid; struct task_struct *task; + struct mm_struct *mm; #ifdef CONFIG_MMU struct vm_area_struct *tail_vma; #endif diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4d716a09d500..a1454dac7e0a 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -165,9 +165,9 @@ static void *m_start(struct seq_file *m, loff_t *pos) if (!priv->task) return ERR_PTR(-ESRCH); - mm = mm_access(priv->task, PTRACE_MODE_READ); - if (!mm || IS_ERR(mm)) - return mm; + mm = priv->mm; + if (!mm || !atomic_inc_not_zero(&mm->mm_users)) + return NULL; down_read(&mm->mmap_sem); tail_vma = get_gate_vma(mm); @@ -240,9 +240,28 @@ static int proc_maps_open(struct inode *inode, struct file *file, return -ENOMEM; priv->pid = proc_pid(inode); + priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); + if (IS_ERR(priv->mm)) { + int err = PTR_ERR(priv->mm); + + seq_release_private(inode, file); + return err; + } + return 0; } +static int proc_map_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct proc_maps_private *priv = seq->private; + + if (priv->mm) + mmdrop(priv->mm); + + return seq_release_private(inode, file); +} + static int do_maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops) { @@ -398,14 +417,14 @@ const struct file_operations proc_pid_maps_operations = { .open = pid_maps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = proc_map_release, }; const struct file_operations proc_tid_maps_operations = { .open = tid_maps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = proc_map_release, }; /* @@ -680,14 +699,14 @@ const struct file_operations proc_pid_smaps_operations = { .open = pid_smaps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = proc_map_release, }; const struct file_operations proc_tid_smaps_operations = { .open = tid_smaps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = proc_map_release, }; /* @@ -1544,13 +1563,13 @@ const struct file_operations proc_pid_numa_maps_operations = { .open = pid_numa_maps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = proc_map_release, }; const struct file_operations proc_tid_numa_maps_operations = { .open = tid_numa_maps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = proc_map_release, }; #endif /* CONFIG_NUMA */ From 59b4bf12d4776c0e4f3fd5c02d942f3a0596da97 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:28 -0700 Subject: [PATCH 028/164] fs/proc/task_mmu.c: simplify the vma_stop() logic m_start() drops ->mmap_sem and does mmput() if it retuns vsyscall vma. This is because in this case m_stop()->vma_stop() obviously can't use gate_vma->vm_mm. Now that we have proc_maps_private->mm we can simplify this logic: - Change m_start() to return with ->mmap_sem held unless it returns IS_ERR_OR_NULL(). 
- Change vma_stop() to use priv->mm and avoid the ugly vma checks, this makes "vm_area_struct *vma" unnecessary. - This also allows m_start() to use vm_stop(). - Cleanup m_next() to follow the new locking rule. Note: m_stop() looks very ugly, and this temporary uglifies it even more. Fixed by the next change. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index a1454dac7e0a..fdbed8370db6 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -129,14 +129,13 @@ static void release_task_mempolicy(struct proc_maps_private *priv) } #endif -static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma) +static void vma_stop(struct proc_maps_private *priv) { - if (vma && vma != priv->tail_vma) { - struct mm_struct *mm = vma->vm_mm; - release_task_mempolicy(priv); - up_read(&mm->mmap_sem); - mmput(mm); - } + struct mm_struct *mm = priv->mm; + + release_task_mempolicy(priv); + up_read(&mm->mmap_sem); + mmput(mm); } static void *m_start(struct seq_file *m, loff_t *pos) @@ -199,12 +198,13 @@ static void *m_start(struct seq_file *m, loff_t *pos) if (vma) return vma; - release_task_mempolicy(priv); /* End of vmas has been reached */ m->version = (tail_vma != NULL)? 0: -1UL; - up_read(&mm->mmap_sem); - mmput(mm); - return tail_vma; + if (tail_vma) + return tail_vma; + + vma_stop(priv); + return NULL; } static void *m_next(struct seq_file *m, void *v, loff_t *pos) @@ -212,21 +212,24 @@ static void *m_next(struct seq_file *m, void *v, loff_t *pos) struct proc_maps_private *priv = m->private; struct vm_area_struct *vma = v; struct vm_area_struct *tail_vma = priv->tail_vma; + struct vm_area_struct *next; (*pos)++; if (vma && (vma != tail_vma) && vma->vm_next) return vma->vm_next; - vma_stop(priv, vma); - return (vma != tail_vma)? tail_vma: NULL; + + next = (vma != tail_vma) ? tail_vma : NULL; + if (!next) + vma_stop(priv); + return next; } static void m_stop(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; - struct vm_area_struct *vma = v; - if (!IS_ERR(vma)) - vma_stop(priv, vma); + if (!IS_ERR_OR_NULL(v)) + vma_stop(priv); if (priv->task) put_task_struct(priv->task); } From 23d54837e4f3a44ad4514d5eae8245c2250217ff Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:30 -0700 Subject: [PATCH 029/164] fs/proc/task_mmu.c: cleanup the "tail_vma" horror in m_next() 1. Kill the first "vma != NULL" check. Firstly this is not possible, m_next() won't be called if ->start() or the previous ->next() returns NULL. And if it was possible the 2nd "vma != tail_vma" check is buggy, we should not wrongly return ->tail_vma. 2. Make this function readable. The logic is very simple, we should return check "vma != tail" once and return "vm_next || tail_vma". Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index fdbed8370db6..b7e31836a005 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -210,15 +210,13 @@ static void *m_start(struct seq_file *m, loff_t *pos) static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_maps_private *priv = m->private; - struct vm_area_struct *vma = v; struct vm_area_struct *tail_vma = priv->tail_vma; - struct vm_area_struct *next; + struct vm_area_struct *vma = v, *next = NULL; (*pos)++; - if (vma && (vma != tail_vma) && vma->vm_next) - return vma->vm_next; + if (vma != tail_vma) + next = vma->vm_next ?: tail_vma; - next = (vma != tail_vma) ? tail_vma : NULL; if (!next) vma_stop(priv); return next; From 0d5f5f45f9a4f1f6b694c37f5142ebea893f0a15 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:32 -0700 Subject: [PATCH 030/164] fs/proc/task_mmu.c: shift "priv->task = NULL" from m_start() to m_stop() 1. There is no reason to reset ->tail_vma in m_start(), if we return IS_ERR_OR_NULL() it won't be used. 2. m_start() also clears priv->task to ensure that m_stop() won't use the stale pointer if we fail before get_task_struct(). But this is ugly and confusing, move this initialization in m_stop(). Signed-off-by: Oleg Nesterov Acked-by: Kirill A. Shutemov Acked-by: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index b7e31836a005..30aa2dd3e6f5 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -146,17 +146,12 @@ static void *m_start(struct seq_file *m, loff_t *pos) struct vm_area_struct *vma, *tail_vma = NULL; loff_t l = *pos; - /* Clear the per syscall fields in priv */ - priv->task = NULL; - priv->tail_vma = NULL; - /* * We remember last_addr rather than next_addr to hit with * vmacache most of the time. We have zero last_addr at * the beginning and also after lseek. We will have -1 last_addr * after the end of the vmas. */ - if (last_addr == -1UL) return NULL; @@ -228,8 +223,10 @@ static void m_stop(struct seq_file *m, void *v) if (!IS_ERR_OR_NULL(v)) vma_stop(priv); - if (priv->task) + if (priv->task) { put_task_struct(priv->task); + priv->task = NULL; + } } static int proc_maps_open(struct inode *inode, struct file *file, From ebb6cdde1a50c3cd2a0a4668dfb571ecb3213449 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:34 -0700 Subject: [PATCH 031/164] fs/proc/task_mmu.c: kill the suboptimal and confusing m->version logic m_start() carefully documents, checks, and sets "m->version = -1" if we are going to return NULL. The only problem is that we will be never called again if m_start() returns NULL, so this is simply pointless and misleading. Otoh, ->show() methods m->version = 0 if vma == tail_vma and this is just wrong, we want -1 in this case. And in fact we also want -1 if ->vm_next == NULL and ->tail_vma == NULL. And it is not used consistently, the "scan vmas" loop in m_start() should update last_addr too. Finally, imo the whole "last_addr" logic in m_start() looks horrible. find_vma(last_addr) is called unconditionally even if we are not going to use the result. But the main problem is that this code participates in tail_vma-or-NULL mess, and this looks simply unfixable. 
Remove this optimization. We will add it back after some cleanups. Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 30aa2dd3e6f5..e182fc51ec2b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -141,20 +141,10 @@ static void vma_stop(struct proc_maps_private *priv) static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_maps_private *priv = m->private; - unsigned long last_addr = m->version; struct mm_struct *mm; struct vm_area_struct *vma, *tail_vma = NULL; loff_t l = *pos; - /* - * We remember last_addr rather than next_addr to hit with - * vmacache most of the time. We have zero last_addr at - * the beginning and also after lseek. We will have -1 last_addr - * after the end of the vmas. - */ - if (last_addr == -1UL) - return NULL; - priv->task = get_pid_task(priv->pid, PIDTYPE_PID); if (!priv->task) return ERR_PTR(-ESRCH); @@ -167,12 +157,6 @@ static void *m_start(struct seq_file *m, loff_t *pos) tail_vma = get_gate_vma(mm); priv->tail_vma = tail_vma; hold_task_mempolicy(priv); - /* Start with last addr hint */ - vma = find_vma(mm, last_addr); - if (last_addr && vma) { - vma = vma->vm_next; - goto out; - } /* * Check the vma index is within the range and do @@ -193,8 +177,6 @@ static void *m_start(struct seq_file *m, loff_t *pos) if (vma) return vma; - /* End of vmas has been reached */ - m->version = (tail_vma != NULL)? 0: -1UL; if (tail_vma) return tail_vma; @@ -366,14 +348,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) static int show_map(struct seq_file *m, void *v, int is_pid) { - struct vm_area_struct *vma = v; - struct proc_maps_private *priv = m->private; - - show_map_vma(m, vma, is_pid); - - if (m->count < m->size) /* vma is copied successfully */ - m->version = (vma != priv->tail_vma) - ? vma->vm_start : 0; + show_map_vma(m, v, is_pid); return 0; } @@ -599,7 +574,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) static int show_smap(struct seq_file *m, void *v, int is_pid) { - struct proc_maps_private *priv = m->private; struct vm_area_struct *vma = v; struct mem_size_stats mss; struct mm_walk smaps_walk = { @@ -652,10 +626,6 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) mss.nonlinear >> 10); show_smap_vma_flags(m, vma); - - if (m->count < m->size) /* vma is copied successfully */ - m->version = (vma != priv->tail_vma) - ? vma->vm_start : 0; return 0; } @@ -1510,9 +1480,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) seq_printf(m, " N%d=%lu", nid, md->node[nid]); out: seq_putc(m, '\n'); - - if (m->count < m->size) - m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0; return 0; } From 0c255321f879c36bd74f58f9c7ed235ea6b919cb Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:36 -0700 Subject: [PATCH 032/164] fs/proc/task_mmu.c: simplify m_start() to make it readable Now that m->version is gone we can cleanup m_start(). In particular, - Remove the "unsigned long" typecast, m->index can't be negative or exceed ->map_count. But lets use "unsigned int pos" to make it clear that "pos < map_count" is safe. - Remove the unnecessary "vma != NULL" check in the main loop. It can't be NULL unless we have a vm bug. 
- This also means that "pos < map_count" case can simply return the valid vma and avoid "goto" and subsequent checks. Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e182fc51ec2b..bb16c967eefd 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -138,12 +138,12 @@ static void vma_stop(struct proc_maps_private *priv) mmput(mm); } -static void *m_start(struct seq_file *m, loff_t *pos) +static void *m_start(struct seq_file *m, loff_t *ppos) { struct proc_maps_private *priv = m->private; struct mm_struct *mm; - struct vm_area_struct *vma, *tail_vma = NULL; - loff_t l = *pos; + struct vm_area_struct *vma; + unsigned int pos = *ppos; priv->task = get_pid_task(priv->pid, PIDTYPE_PID); if (!priv->task) @@ -152,33 +152,19 @@ static void *m_start(struct seq_file *m, loff_t *pos) mm = priv->mm; if (!mm || !atomic_inc_not_zero(&mm->mm_users)) return NULL; + down_read(&mm->mmap_sem); - - tail_vma = get_gate_vma(mm); - priv->tail_vma = tail_vma; hold_task_mempolicy(priv); + priv->tail_vma = get_gate_vma(mm); - /* - * Check the vma index is within the range and do - * sequential scan until m_index. - */ - vma = NULL; - if ((unsigned long)l < mm->map_count) { - vma = mm->mmap; - while (l-- && vma) + if (pos < mm->map_count) { + for (vma = mm->mmap; pos; pos--) vma = vma->vm_next; - goto out; + return vma; } - if (l != mm->map_count) - tail_vma = NULL; /* After gate vma */ - -out: - if (vma) - return vma; - - if (tail_vma) - return tail_vma; + if (pos == mm->map_count && priv->tail_vma) + return priv->tail_vma; vma_stop(priv); return NULL; From ad2a00e4b7e20ab03700b0bb13270b6cee45c6e0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:39 -0700 Subject: [PATCH 033/164] fs/proc/task_mmu.c: introduce m_next_vma() helper Extract the tail_vma/vm_next calculation from m_next() into the new trivial helper, m_next_vma(). Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. 
Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index bb16c967eefd..fef398948462 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -138,6 +138,14 @@ static void vma_stop(struct proc_maps_private *priv) mmput(mm); } +static struct vm_area_struct * +m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) +{ + if (vma == priv->tail_vma) + return NULL; + return vma->vm_next ?: priv->tail_vma; +} + static void *m_start(struct seq_file *m, loff_t *ppos) { struct proc_maps_private *priv = m->private; @@ -173,13 +181,10 @@ static void *m_start(struct seq_file *m, loff_t *ppos) static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_maps_private *priv = m->private; - struct vm_area_struct *tail_vma = priv->tail_vma; - struct vm_area_struct *vma = v, *next = NULL; + struct vm_area_struct *next; (*pos)++; - if (vma != tail_vma) - next = vma->vm_next ?: tail_vma; - + next = m_next_vma(priv, v); if (!next) vma_stop(priv); return next; From b8c20a9b85b057c850f63ee4c63531a356d8596a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:41 -0700 Subject: [PATCH 034/164] fs/proc/task_mmu.c: reintroduce m->version logic Add the "last_addr" optimization back. Like before, every ->show() method checks !seq_overflow() and sets m->version = vma->vm_start. However, it also checks that m_next_vma(vma) != NULL, otherwise it sets m->version = -1 for the lockless "EOF" fast-path in m_start(). m_start() can simply do find_vma() + m_next_vma() if last_addr is not zero, the code looks clear and simple and this case is clearly separated from "scan vmas" path. Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index fef398948462..c7228c2326d1 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -146,13 +146,24 @@ m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) return vma->vm_next ?: priv->tail_vma; } +static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) +{ + if (m->count < m->size) /* vma is copied successfully */ + m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; +} + static void *m_start(struct seq_file *m, loff_t *ppos) { struct proc_maps_private *priv = m->private; + unsigned long last_addr = m->version; struct mm_struct *mm; struct vm_area_struct *vma; unsigned int pos = *ppos; + /* See m_cache_vma(). Zero at the start or after lseek. 
*/ + if (last_addr == -1UL) + return NULL; + priv->task = get_pid_task(priv->pid, PIDTYPE_PID); if (!priv->task) return ERR_PTR(-ESRCH); @@ -165,6 +176,13 @@ static void *m_start(struct seq_file *m, loff_t *ppos) hold_task_mempolicy(priv); priv->tail_vma = get_gate_vma(mm); + if (last_addr) { + vma = find_vma(mm, last_addr); + if (vma && (vma = m_next_vma(priv, vma))) + return vma; + } + + m->version = 0; if (pos < mm->map_count) { for (vma = mm->mmap; pos; pos--) vma = vma->vm_next; @@ -340,6 +358,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) static int show_map(struct seq_file *m, void *v, int is_pid) { show_map_vma(m, v, is_pid); + m_cache_vma(m, v); return 0; } @@ -617,6 +636,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) mss.nonlinear >> 10); show_smap_vma_flags(m, vma); + m_cache_vma(m, vma); return 0; } @@ -1471,6 +1491,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) seq_printf(m, " N%d=%lu", nid, md->node[nid]); out: seq_putc(m, '\n'); + m_cache_vma(m, vma); return 0; } From 557c2d8a73dc078817ba6949697ceb8c0f3f7362 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:43 -0700 Subject: [PATCH 035/164] fs/proc/task_mmu.c: update m->version in the main loop in m_start() Change the main loop in m_start() to update m->version. Mostly for consistency, but this can help to avoid the same loop if the very 1st ->show() fails due to seq_overflow(). Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c7228c2326d1..34d93a1cdeec 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -184,11 +184,14 @@ static void *m_start(struct seq_file *m, loff_t *ppos) m->version = 0; if (pos < mm->map_count) { - for (vma = mm->mmap; pos; pos--) + for (vma = mm->mmap; pos; pos--) { + m->version = vma->vm_start; vma = vma->vm_next; + } return vma; } + /* we do not bother to update m->version in this case */ if (pos == mm->map_count && priv->tail_vma) return priv->tail_vma; From ce34fddb5bafb424a4aaa9f403feb7dbe776c7d1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:45 -0700 Subject: [PATCH 036/164] fs/proc/task_nommu.c: change maps_open() to use __seq_open_private() Cleanup and preparation. maps_open() can use __seq_open_private() like proc_maps_open() does. [akpm@linux-foundation.org: deuglify] Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. 
Biederman" Acked-by: Greg Ungerer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_nommu.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 678455d2d683..98c95d2833ea 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -269,20 +269,13 @@ static int maps_open(struct inode *inode, struct file *file, const struct seq_operations *ops) { struct proc_maps_private *priv; - int ret = -ENOMEM; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (priv) { - priv->pid = proc_pid(inode); - ret = seq_open(file, ops); - if (!ret) { - struct seq_file *m = file->private_data; - m->private = priv; - } else { - kfree(priv); - } - } - return ret; + priv = __seq_open_private(file, ops, sizeof(struct proc_maps_private)); + if (!priv) + return -ENOMEM; + + priv->pid = proc_pid(inode); + return 0; } static int pid_maps_open(struct inode *inode, struct file *file) From 27692cd56e2aa6924b49f4361247d707a023484a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:47 -0700 Subject: [PATCH 037/164] fs/proc/task_nommu.c: shift mm_access() from m_start() to proc_maps_open() Copy-and-paste the changes from "fs/proc/task_mmu.c: shift mm_access() from m_start() to proc_maps_open()" into task_nommu.c. Change maps_open() to initialize priv->mm using proc_mem_open(), m_start() can rely on atomic_inc_not_zero(mm_users) like task_mmu.c does. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Acked-by: Greg Ungerer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_nommu.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 98c95d2833ea..9019f1de3f72 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -216,11 +216,11 @@ static void *m_start(struct seq_file *m, loff_t *pos) if (!priv->task) return ERR_PTR(-ESRCH); - mm = mm_access(priv->task, PTRACE_MODE_READ); - if (!mm || IS_ERR(mm)) { + mm = priv->mm; + if (!mm || !atomic_inc_not_zero(&mm->mm_users)) { put_task_struct(priv->task); priv->task = NULL; - return mm; + return NULL; } down_read(&mm->mmap_sem); @@ -270,14 +270,34 @@ static int maps_open(struct inode *inode, struct file *file, { struct proc_maps_private *priv; - priv = __seq_open_private(file, ops, sizeof(struct proc_maps_private)); + priv = __seq_open_private(file, ops, sizeof(*priv)); if (!priv) return -ENOMEM; priv->pid = proc_pid(inode); + priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); + if (IS_ERR(priv->mm)) { + int err = PTR_ERR(priv->mm); + + seq_release_private(inode, file); + return err; + } + return 0; } + +static int map_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct proc_maps_private *priv = seq->private; + + if (priv->mm) + mmdrop(priv->mm); + + return seq_release_private(inode, file); +} + static int pid_maps_open(struct inode *inode, struct file *file) { return maps_open(inode, file, &proc_pid_maps_ops); @@ -292,13 +312,13 @@ const struct file_operations proc_pid_maps_operations = { .open = pid_maps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = map_release, }; const struct file_operations proc_tid_maps_operations = { .open = tid_maps_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = 
map_release, }; From 47fecca15c0944924423390fe21b956eea57da30 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:49 -0700 Subject: [PATCH 038/164] fs/proc/task_nommu.c: don't use priv->task->mm I do not know if CONFIG_PREEMPT/SMP is possible without CONFIG_MMU, but the usage of task->mm in m_stop() is wrong in any case. The task can exit/exec before we take mmap_sem, in this case m_stop() can hit NULL or unlock the wrong rw_semaphore. Also, this code uses priv->task != NULL to decide whether we need up_read/mmput. This is correct, but we will probably kill priv->task. Change m_start/m_stop to rely on IS_ERR_OR_NULL() like task_mmu.c does. Signed-off-by: Oleg Nesterov Cc: Kirill A. Shutemov Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Acked-by: Greg Ungerer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_nommu.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 9019f1de3f72..429cb7a5419e 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -217,17 +217,17 @@ static void *m_start(struct seq_file *m, loff_t *pos) return ERR_PTR(-ESRCH); mm = priv->mm; - if (!mm || !atomic_inc_not_zero(&mm->mm_users)) { - put_task_struct(priv->task); - priv->task = NULL; + if (!mm || !atomic_inc_not_zero(&mm->mm_users)) return NULL; - } - down_read(&mm->mmap_sem); + down_read(&mm->mmap_sem); /* start from the Nth VMA */ for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) if (n-- == 0) return p; + + up_read(&mm->mmap_sem); + mmput(mm); return NULL; } @@ -235,11 +235,13 @@ static void m_stop(struct seq_file *m, void *_vml) { struct proc_maps_private *priv = m->private; + if (!IS_ERR_OR_NULL(_vml)) { + up_read(&priv->mm->mmap_sem); + mmput(priv->mm); + } if (priv->task) { - struct mm_struct *mm = priv->task->mm; - up_read(&mm->mmap_sem); - mmput(mm); put_task_struct(priv->task); + priv->task = NULL; } } From 2c03376d2db005869b1d4449097d51c96196529e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:51 -0700 Subject: [PATCH 039/164] proc/maps: replace proc_maps_private->pid with "struct inode *inode" m_start() can use get_proc_task() instead, and "struct inode *" provides more potentially useful info, see the next changes. Signed-off-by: Oleg Nesterov Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: Greg Ungerer Cc: "Kirill A. 
Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/internal.h | 2 +- fs/proc/task_mmu.c | 4 ++-- fs/proc/task_nommu.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/proc/internal.h b/fs/proc/internal.h index d27182854a28..aa7a0ee182e1 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -268,7 +268,7 @@ extern int proc_remount(struct super_block *, int *, char *); * task_[no]mmu.c */ struct proc_maps_private { - struct pid *pid; + struct inode *inode; struct task_struct *task; struct mm_struct *mm; #ifdef CONFIG_MMU diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 34d93a1cdeec..4793e4a843b0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -164,7 +164,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos) if (last_addr == -1UL) return NULL; - priv->task = get_pid_task(priv->pid, PIDTYPE_PID); + priv->task = get_proc_task(priv->inode); if (!priv->task) return ERR_PTR(-ESRCH); @@ -231,7 +231,7 @@ static int proc_maps_open(struct inode *inode, struct file *file, if (!priv) return -ENOMEM; - priv->pid = proc_pid(inode); + priv->inode = inode; priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); if (IS_ERR(priv->mm)) { int err = PTR_ERR(priv->mm); diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 429cb7a5419e..f36e213835cc 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -212,7 +212,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) loff_t n = *pos; /* pin the task and mm whilst we play with them */ - priv->task = get_pid_task(priv->pid, PIDTYPE_PID); + priv->task = get_proc_task(priv->inode); if (!priv->task) return ERR_PTR(-ESRCH); @@ -276,7 +276,7 @@ static int maps_open(struct inode *inode, struct file *file, if (!priv) return -ENOMEM; - priv->pid = proc_pid(inode); + priv->inode = inode; priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); if (IS_ERR(priv->mm)) { int err = PTR_ERR(priv->mm); From 58cb65487e92b47448d00a711c9f5922137d5678 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:54 -0700 Subject: [PATCH 040/164] proc/maps: make vm_is_stack() logic namespace-friendly - Rename vm_is_stack() to task_of_stack() and change it to return "struct task_struct *" rather than the global (and thus wrong in general) pid_t. - Add the new pid_of_stack() helper which calls task_of_stack() and uses the right namespace to report the correct pid_t. Unfortunately we need to define this helper twice, in task_mmu.c and in task_nommu.c. perhaps it makes sense to add fs/proc/util.c and move at least pid_of_stack/task_of_stack there to avoid the code duplication. - Change show_map_vma() and show_numa_map() to use the new helper. Signed-off-by: Oleg Nesterov Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: Greg Ungerer Cc: "Kirill A. 
Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 25 +++++++++++++++++++++---- fs/proc/task_nommu.c | 21 ++++++++++++++++++++- include/linux/mm.h | 4 ++-- mm/util.c | 23 ++++++++--------------- 4 files changed, 51 insertions(+), 22 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4793e4a843b0..adddf697c4ea 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -261,13 +261,31 @@ static int do_maps_open(struct inode *inode, struct file *file, sizeof(struct proc_maps_private)); } +static pid_t pid_of_stack(struct proc_maps_private *priv, + struct vm_area_struct *vma, bool is_pid) +{ + struct inode *inode = priv->inode; + struct task_struct *task; + pid_t ret = 0; + + rcu_read_lock(); + task = pid_task(proc_pid(inode), PIDTYPE_PID); + if (task) { + task = task_of_stack(task, vma, is_pid); + if (task) + ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); + } + rcu_read_unlock(); + + return ret; +} + static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) { struct mm_struct *mm = vma->vm_mm; struct file *file = vma->vm_file; struct proc_maps_private *priv = m->private; - struct task_struct *task = priv->task; vm_flags_t flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; @@ -332,8 +350,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) goto done; } - tid = vm_is_stack(task, vma, is_pid); - + tid = pid_of_stack(priv, vma, is_pid); if (tid != 0) { /* * Thread stack in /proc/PID/task/TID/maps or @@ -1446,7 +1463,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_puts(m, " heap"); } else { - pid_t tid = vm_is_stack(task, vma, is_pid); + pid_t tid = pid_of_stack(proc_priv, vma, is_pid); if (tid != 0) { /* * Thread stack in /proc/PID/task/TID/maps or diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index f36e213835cc..599ec2e20104 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -123,6 +123,25 @@ unsigned long task_statm(struct mm_struct *mm, return size; } +static pid_t pid_of_stack(struct proc_maps_private *priv, + struct vm_area_struct *vma, bool is_pid) +{ + struct inode *inode = priv->inode; + struct task_struct *task; + pid_t ret = 0; + + rcu_read_lock(); + task = pid_task(proc_pid(inode), PIDTYPE_PID); + if (task) { + task = task_of_stack(task, vma, is_pid); + if (task) + ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); + } + rcu_read_unlock(); + + return ret; +} + /* * display a single VMA to a sequenced file */ @@ -163,7 +182,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, seq_pad(m, ' '); seq_path(m, &file->f_path, ""); } else if (mm) { - pid_t tid = vm_is_stack(priv->task, vma, is_pid); + pid_t tid = pid_of_stack(priv, vma, is_pid); if (tid != 0) { seq_pad(m, ' '); diff --git a/include/linux/mm.h b/include/linux/mm.h index 0f4196a0bc20..28df70774b81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1247,8 +1247,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, !vma_growsup(vma->vm_next, addr); } -extern pid_t -vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); +extern struct task_struct *task_of_stack(struct task_struct *task, + struct vm_area_struct *vma, bool in_group); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, diff --git a/mm/util.c b/mm/util.c 
index 093c973f1697..fec39d4509a9 100644 --- a/mm/util.c +++ b/mm/util.c @@ -170,32 +170,25 @@ static int vm_is_stack_for_task(struct task_struct *t, /* * Check if the vma is being used as a stack. * If is_group is non-zero, check in the entire thread group or else - * just check in the current task. Returns the pid of the task that - * the vma is stack for. + * just check in the current task. Returns the task_struct of the task + * that the vma is stack for. Must be called under rcu_read_lock(). */ -pid_t vm_is_stack(struct task_struct *task, - struct vm_area_struct *vma, int in_group) +struct task_struct *task_of_stack(struct task_struct *task, + struct vm_area_struct *vma, bool in_group) { - pid_t ret = 0; - if (vm_is_stack_for_task(task, vma)) - return task->pid; + return task; if (in_group) { struct task_struct *t; - rcu_read_lock(); for_each_thread(task, t) { - if (vm_is_stack_for_task(t, vma)) { - ret = t->pid; - goto done; - } + if (vm_is_stack_for_task(t, vma)) + return t; } -done: - rcu_read_unlock(); } - return ret; + return NULL; } #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) From bf3e2692468fe46eed57d18b3dd1af5b30049122 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Thu, 9 Oct 2014 15:25:56 -0700 Subject: [PATCH 041/164] fs/proc/kcore.c: don't add modules range to kcore if it's equal to vmcore range On some ARCHs the modules range is equal to the vmalloc range. E.g. on i686 "#define MODULES_VADDR VMALLOC_START" "#define MODULES_END VMALLOC_END" This will cause 2 duplicate program segments in /proc/kcore, and no flag to indicate they are different. This is confusing. And usually people who need to check the ELF header or read the content of kcore will check memory ranges. Two program segments which are the same are unnecessary. So check if the modules range is equal to the vmalloc range. If so, just skip adding the modules range. 
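As an illustrative check (not part of this patch), the duplicate segments can be seen by listing the program headers of the kcore ELF image with "readelf -l /proc/kcore" on an affected configuration: without this change an i686 kernel shows two identical LOAD entries covering the vmalloc/modules region, and with the check below only one should remain.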
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Baoquan He Cc: Xishi Qiu Cc: Paul Gortmaker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/kcore.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 6df8d0722c97..91a4e6426321 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -610,8 +610,10 @@ static void __init proc_kcore_text_init(void) struct kcore_list kcore_modules; static void __init add_modules_range(void) { - kclist_add(&kcore_modules, (void *)MODULES_VADDR, + if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { + kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_END - MODULES_VADDR, KCORE_VMALLOC); + } } #else static void __init add_modules_range(void) From 3aa24f519e48e0db0ccf198d1b766a61d9463ce6 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:25:58 -0700 Subject: [PATCH 042/164] mm/slab_common.c: suppress warning False positive: mm/slab_common.c: In function 'kmem_cache_create': mm/slab_common.c:204: warning: 's' may be used uninitialized in this function Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab_common.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/slab_common.c b/mm/slab_common.c index d319502b2403..cabb842c4e7c 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -211,8 +211,10 @@ kmem_cache_create(const char *name, size_t size, size_t align, mutex_lock(&slab_mutex); err = kmem_cache_sanity_check(name, size); - if (err) + if (err) { + s = NULL; /* suppress uninit var warning */ goto out_unlock; + } /* * Some allocators will constraint the set of valid flags to a subset From 07f361b2bee38896df8be17d8c3f8af3f3610606 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:00 -0700 Subject: [PATCH 043/164] mm/slab_common: move kmem_cache definition to internal header We don't need to keep the kmem_cache definition in include/linux/slab.h if we don't need to inline kmem_cache_size(). According to my code inspection, this function is only called from lc_create() in lib/lru_cache.c, which may be called at the initialization phase of something, so we don't need to inline it. Therefore, move it to slab_common.c and move the kmem_cache definition to the internal header. After this change, we can change the kmem_cache definition easily without a full kernel build. For instance, we can turn on/off CONFIG_SLUB_STATS without a full kernel build. 
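A minimal sketch of the kind of caller that keeps working (hypothetical example code, not taken from this patch): since kmem_cache_size() is now an exported function rather than an inline that reads s->object_size directly, a caller that only includes linux/slab.h no longer depends on the layout of struct kmem_cache:

	struct kmem_cache *c = kmem_cache_create("demo", 64, 0, 0, NULL);

	if (c)
		pr_info("demo object size: %u\n", kmem_cache_size(c));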
[akpm@linux-foundation.org: export kmem_cache_size() to modules] [rdunlap@infradead.org: add header files to fix kmemcheck.c build errors] Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 42 +----------------------------------------- mm/kmemcheck.c | 1 + mm/slab.h | 35 +++++++++++++++++++++++++++++++++++ mm/slab_common.c | 9 +++++++++ 4 files changed, 46 insertions(+), 41 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 1d9abb7d22a0..9062e4ad1787 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -158,31 +158,6 @@ size_t ksize(const void *); #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #endif -#ifdef CONFIG_SLOB -/* - * Common fields provided in kmem_cache by all slab allocators - * This struct is either used directly by the allocator (SLOB) - * or the allocator must include definitions for all fields - * provided in kmem_cache_common in their definition of kmem_cache. - * - * Once we can do anonymous structs (C11 standard) we could put a - * anonymous struct definition in these allocators so that the - * separate allocations in the kmem_cache structure of SLAB and - * SLUB is no longer needed. - */ -struct kmem_cache { - unsigned int object_size;/* The original size of the object */ - unsigned int size; /* The aligned/padded/added on size */ - unsigned int align; /* Alignment as calculated */ - unsigned long flags; /* Active flags on the slab */ - const char *name; /* Slab name for sysfs */ - int refcount; /* Use counter */ - void (*ctor)(void *); /* Called on object slot creation */ - struct list_head list; /* List of all slab caches on the system */ -}; - -#endif /* CONFIG_SLOB */ - /* * Kmalloc array related definitions */ @@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -#ifdef CONFIG_SLAB -#include -#endif - -#ifdef CONFIG_SLUB -#include -#endif - extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); #ifdef CONFIG_TRACING @@ -650,14 +617,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } -/* - * Determine the size of a slab object - */ -static inline unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} - +unsigned int kmem_cache_size(struct kmem_cache *s); void __init kmem_cache_init_late(void); #endif /* _LINUX_SLAB_H */ diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index fd814fd61319..cab58bb592d8 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -2,6 +2,7 @@ #include #include #include +#include "slab.h" #include void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) diff --git a/mm/slab.h b/mm/slab.h index 0e0fdd365840..026e7c393f0b 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -4,6 +4,41 @@ * Internal slab definitions */ +#ifdef CONFIG_SLOB +/* + * Common fields provided in kmem_cache by all slab allocators + * This struct is either used directly by the allocator (SLOB) + * or the allocator must include definitions for all fields + * provided in kmem_cache_common in their definition of kmem_cache. + * + * Once we can do anonymous structs (C11 standard) we could put a + * anonymous struct definition in these allocators so that the + * separate allocations in the kmem_cache structure of SLAB and + * SLUB is no longer needed. 
+ */ +struct kmem_cache { + unsigned int object_size;/* The original size of the object */ + unsigned int size; /* The aligned/padded/added on size */ + unsigned int align; /* Alignment as calculated */ + unsigned long flags; /* Active flags on the slab */ + const char *name; /* Slab name for sysfs */ + int refcount; /* Use counter */ + void (*ctor)(void *); /* Called on object slot creation */ + struct list_head list; /* List of all slab caches on the system */ +}; + +#endif /* CONFIG_SLOB */ + +#ifdef CONFIG_SLAB +#include +#endif + +#ifdef CONFIG_SLUB +#include +#endif + +#include + /* * State of the slab allocator. * diff --git a/mm/slab_common.c b/mm/slab_common.c index cabb842c4e7c..d7d8ffd0c306 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -30,6 +30,15 @@ LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; +/* + * Determine the size of a slab object + */ +unsigned int kmem_cache_size(struct kmem_cache *s) +{ + return s->object_size; +} +EXPORT_SYMBOL(kmem_cache_size); + #ifdef CONFIG_DEBUG_VM static int kmem_cache_sanity_check(const char *name, size_t size) { From 61f47105a2c9c60e950ca808b7560f776f9bfa31 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:02 -0700 Subject: [PATCH 044/164] mm/sl[ao]b: always track caller in kmalloc_(node_)track_caller() Now, we track caller if tracing or slab debugging is enabled. If they are disabled, we could save one argument passing overhead by calling __kmalloc(_node)(). But, I think that it would be marginal. Furthermore, default slab allocator, SLUB, doesn't use this technique so I think that it's okay to change this situation. After this change, we can turn on/off CONFIG_DEBUG_SLAB without full kernel build and remove some complicated '#if' defintion. It looks more benefitial to me. Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 22 ---------------------- mm/slab.c | 18 ------------------ mm/slob.c | 2 -- 3 files changed, 42 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 9062e4ad1787..c265bec6a57d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -549,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) * allocator where we care about the real place the memory allocation * request comes from. */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -#else -#define kmalloc_track_caller(size, flags) \ - __kmalloc(size, flags) -#endif /* DEBUG_SLAB */ #ifdef CONFIG_NUMA -/* - * kmalloc_node_track_caller is a special version of kmalloc_node that - * records the calling function of the routine calling it for slab leak - * tracking instead of just the calling function (confusing, eh?). - * It's useful when the call to kmalloc_node comes from a widely-used - * standard allocator where we care about the real place the memory - * allocation request comes from. 
- */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ _RET_IP_) -#else -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node(size, flags, node) -#endif #else /* CONFIG_NUMA */ diff --git a/mm/slab.c b/mm/slab.c index 7c52b3890d25..c52bc5aa6ba0 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3496,7 +3496,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) return kmem_cache_alloc_node_trace(cachep, flags, node, size); } -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __do_kmalloc_node(size, flags, node, _RET_IP_); @@ -3509,13 +3508,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, return __do_kmalloc_node(size, flags, node, caller); } EXPORT_SYMBOL(__kmalloc_node_track_caller); -#else -void *__kmalloc_node(size_t size, gfp_t flags, int node) -{ - return __do_kmalloc_node(size, flags, node, 0); -} -EXPORT_SYMBOL(__kmalloc_node); -#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ #endif /* CONFIG_NUMA */ /** @@ -3541,8 +3533,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, return ret; } - -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) void *__kmalloc(size_t size, gfp_t flags) { return __do_kmalloc(size, flags, _RET_IP_); @@ -3555,14 +3545,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) } EXPORT_SYMBOL(__kmalloc_track_caller); -#else -void *__kmalloc(size_t size, gfp_t flags) -{ - return __do_kmalloc(size, flags, 0); -} -EXPORT_SYMBOL(__kmalloc); -#endif - /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. diff --git a/mm/slob.c b/mm/slob.c index 21980e0f39a8..96a86206a26b 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -468,7 +468,6 @@ void *__kmalloc(size_t size, gfp_t gfp) } EXPORT_SYMBOL(__kmalloc); -#ifdef CONFIG_TRACING void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) { return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); @@ -481,7 +480,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, return __do_kmalloc_node(size, gfp, node, caller); } #endif -#endif void kfree(const void *block) { From 3d88019408d6fbff1a38a58e694d56b7fd465408 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:04 -0700 Subject: [PATCH 045/164] mm/slab: move cache_flusharray() out of unlikely.text section Now, due to the likely keyword, the compiled code of cache_flusharray() ends up in the unlikely.text section. Although this case is uncommon compared to the free-to-cpu-cache case, it is more common than free_block(). But free_block() is in the normal text section. This patch fixes this odd situation by removing the likely keyword. 
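For background (a generic illustration, not code from this patch), likely()/unlikely() are, in their basic form, thin wrappers around __builtin_expect():

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

The compiler uses the hint to move the path it considers cold out of line, which is presumably how the cache_flusharray() call site ended up in the unlikely text section; dropping the annotation leaves both branches to the compiler's default layout.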
Signed-off-by: Joonsoo Kim Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slab.c b/mm/slab.c index c52bc5aa6ba0..fa178e07d673 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3399,7 +3399,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) return; - if (likely(ac->avail < ac->limit)) { + if (ac->avail < ac->limit) { STATS_INC_FREEHIT(cachep); } else { STATS_INC_FREEMISS(cachep); From d3aec34466d9d6c8ceaa7f95088ced5705823735 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:06 -0700 Subject: [PATCH 046/164] mm/slab: noinline __ac_put_obj() The intention behind __ac_put_obj() is that it should not affect anything if sk_memalloc_socks() is disabled. But, because __ac_put_obj() is so small, the compiler inlines it into ac_put_obj() and this grows the code size of the free path. This patch adds the noinline keyword to __ac_put_obj() so that it does not disturb the normal free path at all. nm -S slab-orig.o | grep -e "t cache_alloc_refill" -e "T kfree" -e "T kmem_cache_free" 0000000000001e80 00000000000002f5 t cache_alloc_refill 0000000000001230 0000000000000258 T kfree 0000000000000690 000000000000024c T kmem_cache_free nm -S slab-patched.o | grep -e "t cache_alloc_refill" -e "T kfree" -e "T kmem_cache_free" 0000000000001e00 00000000000002e5 t cache_alloc_refill 00000000000011e0 0000000000000228 T kfree 0000000000000670 0000000000000216 T kmem_cache_free cache_alloc_refill: 0x2f5->0x2e5 kfree: 0x256->0x228 kmem_cache_free: 0x24c->0x216 The code size of each function is reduced slightly. Signed-off-by: Joonsoo Kim Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index fa178e07d673..7c9ca82f6be9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -785,8 +785,8 @@ static inline void *ac_get_obj(struct kmem_cache *cachep, return objp; } -static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, - void *objp) +static noinline void *__ac_put_obj(struct kmem_cache *cachep, + struct array_cache *ac, void *objp) { if (unlikely(pfmemalloc_active)) { /* Some pfmemalloc slabs exist, check if this is one */ From 25c4f304be8cd6831105d3a2876028e4ecd254a1 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:09 -0700 Subject: [PATCH 047/164] mm/slab: factor out unlikely part of cache_free_alien() cache_free_alien() is a rarely used function, needed only when there is a node mismatch. But it is defined with the inline attribute, so it is inlined into __cache_free(), which is the core free function of the slab allocator. This uselessly makes the kmem_cache_free()/kfree() functions large. What we really need to inline is just the node-match check, so this patch factors out the other parts of cache_free_alien() to reduce the code size of kmem_cache_free()/kfree(). nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free" 00000000000011e0 0000000000000228 T kfree 0000000000000670 0000000000000216 T kmem_cache_free nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free" 0000000000001110 00000000000001b5 T kfree 0000000000000750 0000000000000181 T kmem_cache_free You can see the slightly reduced text size: 0x228->0x1b5, 0x216->0x181. 
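Reduced to a generic sketch (illustrative only, with made-up names, not the slab code itself), the shape of the change is: keep only the cheap node-match predicate in the inline function so every caller stays small, and move the rarely executed body out of line:

	static void remote_free_slow(struct kmem_cache *cachep, void *objp,
				     int node, int page_node)
	{
		/* cold path: alien-cache / free_block handling, one copy in .text */
	}

	static inline int remote_free(struct kmem_cache *cachep, void *objp)
	{
		int page_node = page_to_nid(virt_to_page(objp));
		int node = numa_mem_id();

		if (likely(node == page_node))	/* hot check, inlined into every caller */
			return 0;

		remote_free_slow(cachep, objp, node, page_node);
		return 1;
	}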
Signed-off-by: Joonsoo Kim Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index 7c9ca82f6be9..f989af87b72c 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep, } } -static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +static int __cache_free_alien(struct kmem_cache *cachep, void *objp, + int node, int page_node) { - int nodeid = page_to_nid(virt_to_page(objp)); struct kmem_cache_node *n; struct alien_cache *alien = NULL; struct array_cache *ac; - int node; LIST_HEAD(list); - node = numa_mem_id(); - - /* - * Make sure we are not freeing a object from another node to the array - * cache on this cpu. - */ - if (likely(nodeid == node)) - return 0; - n = get_node(cachep, node); STATS_INC_NODEFREES(cachep); - if (n->alien && n->alien[nodeid]) { - alien = n->alien[nodeid]; + if (n->alien && n->alien[page_node]) { + alien = n->alien[page_node]; ac = &alien->ac; spin_lock(&alien->lock); if (unlikely(ac->avail == ac->limit)) { STATS_INC_ACOVERFLOW(cachep); - __drain_alien_cache(cachep, ac, nodeid, &list); + __drain_alien_cache(cachep, ac, page_node, &list); } ac_put_obj(cachep, ac, objp); spin_unlock(&alien->lock); slabs_destroy(cachep, &list); } else { - n = get_node(cachep, nodeid); + n = get_node(cachep, page_node); spin_lock(&n->list_lock); - free_block(cachep, &objp, 1, nodeid, &list); + free_block(cachep, &objp, 1, page_node, &list); spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); } return 1; } + +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +{ + int page_node = page_to_nid(virt_to_page(objp)); + int node = numa_mem_id(); + /* + * Make sure we are not freeing a object from another node to the array + * cache on this cpu. + */ + if (likely(node == page_node)) + return 0; + + return __cache_free_alien(cachep, objp, node, page_node); +} #endif /* From c9e16131d6e39bddd183f0b9d787ec0a62bf0eeb Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 9 Oct 2014 15:26:11 -0700 Subject: [PATCH 048/164] slub: disable tracing and failslab for merged slabs Tracing of mergeable slabs as well as uses of failslab are confusing since the objects of multiple slab caches will be affected. Moreover this creates a situation where a mergeable slab will become unmergeable. If tracing or failslab testing is desired then it may be best to switch merging off for starters. Signed-off-by: Christoph Lameter Tested-by: WANG Chao Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slub.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/mm/slub.c b/mm/slub.c index 3e8afcc07a76..fa86e5845093 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4604,6 +4604,14 @@ static ssize_t trace_show(struct kmem_cache *s, char *buf) static ssize_t trace_store(struct kmem_cache *s, const char *buf, size_t length) { + /* + * Tracing a merged cache is going to give confusing results + * as well as cause other issues like converting a mergeable + * cache into an umergeable one. 
+ */ + if (s->refcount > 1) + return -EINVAL; + s->flags &= ~SLAB_TRACE; if (buf[0] == '1') { s->flags &= ~__CMPXCHG_DOUBLE; @@ -4721,6 +4729,9 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf) static ssize_t failslab_store(struct kmem_cache *s, const char *buf, size_t length) { + if (s->refcount > 1) + return -EINVAL; + s->flags &= ~SLAB_FAILSLAB; if (buf[0] == '1') s->flags |= SLAB_FAILSLAB; From ad2c8144418c6a81cefe65379fd47bbe8344cef2 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:13 -0700 Subject: [PATCH 049/164] topology: add support for node_to_mem_node() to determine the fallback node Anton noticed (http://www.spinics.net/lists/linux-mm/msg67489.html) that on ppc LPARs with memoryless nodes, a large amount of memory was consumed by slabs and was marked unreclaimable. He tracked it down to slab deactivations in the SLUB core when we allocate remotely, leading to poor efficiency always when memoryless nodes are present. After much discussion, Joonsoo provided a few patches that help significantly. They don't resolve the problem altogether: - memory hotplug still needs testing, that is when a memoryless node becomes memory-ful, we want to dtrt - there are other reasons for going off-node than memoryless nodes, e.g., fully exhausted local nodes Neither case is resolved with this series, but I don't think that should block their acceptance, as they can be explored/resolved with follow-on patches. The series consists of: [1/3] topology: add support for node_to_mem_node() to determine the fallback node [2/3] slub: fallback to node_to_mem_node() node if allocating on memoryless node - Joonsoo's patches to cache the nearest node with memory for each NUMA node [3/3] Partial revert of 81c98869faa5 (""kthread: ensure locality of task_struct allocations") - At Tejun's request, keep the knowledge of memoryless node fallback to the allocator core. This patch (of 3): We need to determine the fallback node in slub allocator if the allocation target node is memoryless node. Without it, the SLUB wrongly select the node which has no memory and can't use a partial slab, because of node mismatch. Introduced function, node_to_mem_node(X), will return a node Y with memory that has the nearest distance. If X is memoryless node, it will return nearest distance node, but, if X is normal node, it will return itself. We will use this function in following patch to determine the fallback node. Signed-off-by: Joonsoo Kim Signed-off-by: Nishanth Aravamudan Cc: David Rientjes Cc: Han Pingtian Cc: Pekka Enberg Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Anton Blanchard Cc: Christoph Lameter Cc: Wanpeng Li Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/topology.h | 17 +++++++++++++++++ mm/page_alloc.c | 1 + 2 files changed, 18 insertions(+) diff --git a/include/linux/topology.h b/include/linux/topology.h index dda6ee521e74..909b6e43b694 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -119,11 +119,20 @@ static inline int numa_node_id(void) * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). 
*/ DECLARE_PER_CPU(int, _numa_mem_); +extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); + _node_numa_mem_[numa_node_id()] = node; +} +#endif + +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return _node_numa_mem_[node]; } #endif @@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu) static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; + _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif @@ -159,6 +169,13 @@ static inline int numa_mem_id(void) } #endif +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return node; +} +#endif + #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eee961958021..f3bc59f2ed52 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -85,6 +85,7 @@ EXPORT_PER_CPU_SYMBOL(numa_node); */ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ EXPORT_PER_CPU_SYMBOL(_numa_mem_); +int _node_numa_mem_[MAX_NUMNODES]; #endif /* From a561ce00b09e1545953340deb5bef1036d7442de Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:15 -0700 Subject: [PATCH 050/164] slub: fall back to node_to_mem_node() node if allocating on memoryless node Update the SLUB code to search for partial slabs on the nearest node with memory in the presence of memoryless nodes. Additionally, do not consider it to be an ALLOC_NODE_MISMATCH (and deactivate the slab) when a memoryless-node specified allocation goes off-node. Signed-off-by: Joonsoo Kim Signed-off-by: Nishanth Aravamudan Cc: David Rientjes Cc: Han Pingtian Cc: Pekka Enberg Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Anton Blanchard Cc: Christoph Lameter Cc: Wanpeng Li Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slub.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index fa86e5845093..1050d7db5734 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1699,7 +1699,12 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, struct kmem_cache_cpu *c) { void *object; - int searchnode = (node == NUMA_NO_NODE) ? 
numa_mem_id() : node; + int searchnode = node; + + if (node == NUMA_NO_NODE) + searchnode = numa_mem_id(); + else if (!node_present_pages(node)) + searchnode = node_to_mem_node(node); object = get_partial_node(s, get_node(s, searchnode), c, flags); if (object || node != NUMA_NO_NODE) @@ -2280,11 +2285,18 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, redo: if (unlikely(!node_match(page, node))) { - stat(s, ALLOC_NODE_MISMATCH); - deactivate_slab(s, page, c->freelist); - c->page = NULL; - c->freelist = NULL; - goto new_slab; + int searchnode = node; + + if (node != NUMA_NO_NODE && !node_present_pages(node)) + searchnode = node_to_mem_node(node); + + if (unlikely(!node_match(page, searchnode))) { + stat(s, ALLOC_NODE_MISMATCH); + deactivate_slab(s, page, c->freelist); + c->page = NULL; + c->freelist = NULL; + goto new_slab; + } } /* From 109228389a943edd7e5c6ae94a7fda119691baec Mon Sep 17 00:00:00 2001 From: Nishanth Aravamudan Date: Thu, 9 Oct 2014 15:26:18 -0700 Subject: [PATCH 051/164] kernel/kthread.c: partial revert of 81c98869faa5 ("kthread: ensure locality of task_struct allocations") After discussions with Tejun, we don't want to spread the use of cpu_to_mem() (and thus knowledge of allocators/NUMA topology details) into callers, but would rather ensure the callees correctly handle memoryless nodes. With the previous patches ("topology: add support for node_to_mem_node() to determine the fallback node" and "slub: fallback to node_to_mem_node() node if allocating on memoryless node") adding and using node_to_mem_node(), we can safely undo part of the change to the kthread logic from 81c98869faa5. Signed-off-by: Nishanth Aravamudan Cc: Joonsoo Kim Cc: David Rientjes Cc: Han Pingtian Cc: Pekka Enberg Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Anton Blanchard Cc: Christoph Lameter Cc: Wanpeng Li Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kthread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index ef483220e855..10e489c448fe 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -369,7 +369,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), { struct task_struct *p; - p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt, + p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, cpu); if (IS_ERR(p)) return p; From 9163582c3f22cfba90a78749751ac70b127a9167 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 9 Oct 2014 15:26:20 -0700 Subject: [PATCH 052/164] slab: fix for_each_kmem_cache_node() Fix a bug (discovered with kmemcheck) in for_each_kmem_cache_node(). The for loop reads the array "node" before verifying that the index is within the range. This results in kmemcheck warning. 
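To spell out the ordering problem (an illustrative expansion, assuming the usual for_each_kmem_cache_node(s, node, n) usage): in the old macro the loop condition is the comma expression "__n = get_node(__s, __node), __node < nr_node_ids", so get_node() is evaluated before the bound is checked and the final pass reads s->node[] with an out-of-range index before the loop stops:

	/* last evaluation of the old condition, with __node == nr_node_ids */
	__n = get_node(__s, __node);	/* reads the array out of range */
	if (!(__node < nr_node_ids))	/* ...and only then does the loop terminate */
		break;

The fixed version below tests the index in the for-condition and calls get_node() only inside the body's if(), so the array is never read with an out-of-range index.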
Signed-off-by: Mikulas Patocka Reviewed-by: Pekka Enberg Acked-by: Christoph Lameter Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/slab.h b/mm/slab.h index 026e7c393f0b..6599f2084e80 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -338,8 +338,8 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) * a kmem_cache_node structure allocated (which is true for all online nodes) */ #define for_each_kmem_cache_node(__s, __node, __n) \ - for (__node = 0; __n = get_node(__s, __node), __node < nr_node_ids; __node++) \ - if (__n) + for (__node = 0; __node < nr_node_ids; __node++) \ + if ((__n = get_node(__s, __node))) #endif From 423c929cbbecc60e9c407f9048e58f5422f7995d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:22 -0700 Subject: [PATCH 053/164] mm/slab_common: commonize slab merge logic Slab merge is good feature to reduce fragmentation. Now, it is only applied to SLUB, but, it would be good to apply it to SLAB. This patch is preparation step to apply slab merge to SLAB by commonizing slab merge logic. Signed-off-by: Joonsoo Kim Cc: Randy Dunlap Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kernel-parameters.txt | 14 +++-- mm/slab.h | 15 +++++ mm/slab_common.c | 91 +++++++++++++++++++++++++++++ mm/slub.c | 91 +---------------------------- 4 files changed, 117 insertions(+), 94 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index d9a452e8fb9b..a126a31dde02 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3158,6 +3158,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. slram= [HW,MTD] + slab_nomerge [MM] + Disable merging of slabs with similar size. May be + necessary if there is some reason to distinguish + allocs to different slabs. Debug options disable + merging on their own. + For more information see Documentation/vm/slub.txt. + slab_max_order= [MM, SLAB] Determines the maximum allowed order for slabs. A high setting may cause OOMs due to memory @@ -3193,11 +3200,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. For more information see Documentation/vm/slub.txt. slub_nomerge [MM, SLUB] - Disable merging of slabs with similar size. May be - necessary if there is some reason to distinguish - allocs to different slabs. Debug options disable - merging on their own. - For more information see Documentation/vm/slub.txt. + Same with slab_nomerge. This is supported for legacy. + See slab_nomerge for more information. 
smart2= [HW] Format: [,[,...,]] diff --git a/mm/slab.h b/mm/slab.h index 6599f2084e80..c44d28b60609 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -88,15 +88,30 @@ extern void create_boot_cache(struct kmem_cache *, const char *name, size_t size, unsigned long flags); struct mem_cgroup; + +int slab_unmergeable(struct kmem_cache *s); +struct kmem_cache *find_mergeable(size_t size, size_t align, + unsigned long flags, const char *name, void (*ctor)(void *)); #ifdef CONFIG_SLUB struct kmem_cache * __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); + +unsigned long kmem_cache_flags(unsigned long object_size, + unsigned long flags, const char *name, + void (*ctor)(void *)); #else static inline struct kmem_cache * __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { return NULL; } + +static inline unsigned long kmem_cache_flags(unsigned long object_size, + unsigned long flags, const char *name, + void (*ctor)(void *)) +{ + return flags; +} #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index d7d8ffd0c306..f206cb10a544 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -30,6 +30,34 @@ LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; +/* + * Set of flags that will prevent slab merging + */ +#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ + SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ + SLAB_FAILSLAB) + +#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ + SLAB_CACHE_DMA | SLAB_NOTRACK) + +/* + * Merge control. If this is set then no merging of slab caches will occur. + * (Could be removed. This was introduced to pacify the merge skeptics.) + */ +static int slab_nomerge; + +static int __init setup_slab_nomerge(char *str) +{ + slab_nomerge = 1; + return 1; +} + +#ifdef CONFIG_SLUB +__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0); +#endif + +__setup("slab_nomerge", setup_slab_nomerge); + /* * Determine the size of a slab object */ @@ -115,6 +143,69 @@ int memcg_update_all_caches(int num_memcgs) } #endif +/* + * Find a mergeable slab cache + */ +int slab_unmergeable(struct kmem_cache *s) +{ + if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) + return 1; + + if (!is_root_cache(s)) + return 1; + + if (s->ctor) + return 1; + + /* + * We may have set a slab to be unmergeable during bootstrap. + */ + if (s->refcount < 0) + return 1; + + return 0; +} + +struct kmem_cache *find_mergeable(size_t size, size_t align, + unsigned long flags, const char *name, void (*ctor)(void *)) +{ + struct kmem_cache *s; + + if (slab_nomerge || (flags & SLAB_NEVER_MERGE)) + return NULL; + + if (ctor) + return NULL; + + size = ALIGN(size, sizeof(void *)); + align = calculate_alignment(flags, align, size); + size = ALIGN(size, align); + flags = kmem_cache_flags(size, flags, name, NULL); + + list_for_each_entry(s, &slab_caches, list) { + if (slab_unmergeable(s)) + continue; + + if (size > s->size) + continue; + + if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME)) + continue; + /* + * Check if alignment is compatible. + * Courtesy of Adrian Drzewiecki + */ + if ((s->size & ~(align - 1)) != s->size) + continue; + + if (s->size - size >= sizeof(void *)) + continue; + + return s; + } + return NULL; +} + /* * Figure out what the alignment of the objects will be given a set of * flags, a user specified alignment and the size of the objects. 
diff --git a/mm/slub.c b/mm/slub.c index 1050d7db5734..ae7b9f1ad394 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -169,16 +169,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) */ #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) -/* - * Set of flags that will prevent slab merging - */ -#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ - SLAB_FAILSLAB) - -#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ - SLAB_CACHE_DMA | SLAB_NOTRACK) - #define OO_SHIFT 16 #define OO_MASK ((1 << OO_SHIFT) - 1) #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */ @@ -1176,7 +1166,7 @@ static int __init setup_slub_debug(char *str) __setup("slub_debug", setup_slub_debug); -static unsigned long kmem_cache_flags(unsigned long object_size, +unsigned long kmem_cache_flags(unsigned long object_size, unsigned long flags, const char *name, void (*ctor)(void *)) { @@ -1208,7 +1198,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) {} static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) {} -static inline unsigned long kmem_cache_flags(unsigned long object_size, +unsigned long kmem_cache_flags(unsigned long object_size, unsigned long flags, const char *name, void (*ctor)(void *)) { @@ -2718,12 +2708,6 @@ static int slub_min_order; static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; static int slub_min_objects; -/* - * Merge control. If this is set then no merging of slab caches will occur. - * (Could be removed. This was introduced to pacify the merge skeptics.) - */ -static int slub_nomerge; - /* * Calculate the order of allocation given an slab object size. * @@ -3252,14 +3236,6 @@ static int __init setup_slub_min_objects(char *str) __setup("slub_min_objects=", setup_slub_min_objects); -static int __init setup_slub_nomerge(char *str) -{ - slub_nomerge = 1; - return 1; -} - -__setup("slub_nomerge", setup_slub_nomerge); - void *__kmalloc(size_t size, gfp_t flags) { struct kmem_cache *s; @@ -3637,69 +3613,6 @@ void __init kmem_cache_init_late(void) { } -/* - * Find a mergeable slab cache - */ -static int slab_unmergeable(struct kmem_cache *s) -{ - if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) - return 1; - - if (!is_root_cache(s)) - return 1; - - if (s->ctor) - return 1; - - /* - * We may have set a slab to be unmergeable during bootstrap. - */ - if (s->refcount < 0) - return 1; - - return 0; -} - -static struct kmem_cache *find_mergeable(size_t size, size_t align, - unsigned long flags, const char *name, void (*ctor)(void *)) -{ - struct kmem_cache *s; - - if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) - return NULL; - - if (ctor) - return NULL; - - size = ALIGN(size, sizeof(void *)); - align = calculate_alignment(flags, align, size); - size = ALIGN(size, align); - flags = kmem_cache_flags(size, flags, name, NULL); - - list_for_each_entry(s, &slab_caches, list) { - if (slab_unmergeable(s)) - continue; - - if (size > s->size) - continue; - - if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) - continue; - /* - * Check if alignment is compatible. 
- * Courtesy of Adrian Drzewiecki - */ - if ((s->size & ~(align - 1)) != s->size) - continue; - - if (s->size - size >= sizeof(void *)) - continue; - - return s; - } - return NULL; -} - struct kmem_cache * __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) From 12220dea07f1ac6ac717707104773d771c3f3077 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:24 -0700 Subject: [PATCH 054/164] mm/slab: support slab merge Slab merge is good feature to reduce fragmentation. If new creating slab have similar size and property with exsitent slab, this feature reuse it rather than creating new one. As a result, objects are packed into fewer slabs so that fragmentation is reduced. Below is result of my testing. * After boot, sleep 20; cat /proc/meminfo | grep Slab Slab: 25136 kB Slab: 24364 kB We can save 3% memory used by slab. For supporting this feature in SLAB, we need to implement SLAB specific kmem_cache_flag() and __kmem_cache_alias(), because SLUB implements some SLUB specific processing related to debug flag and object size change on these functions. Signed-off-by: Joonsoo Kim Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 26 ++++++++++++++++++++++++++ mm/slab.h | 2 +- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/mm/slab.c b/mm/slab.c index f989af87b72c..328233a724af 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2104,6 +2104,32 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) return 0; } +unsigned long kmem_cache_flags(unsigned long object_size, + unsigned long flags, const char *name, + void (*ctor)(void *)) +{ + return flags; +} + +struct kmem_cache * +__kmem_cache_alias(const char *name, size_t size, size_t align, + unsigned long flags, void (*ctor)(void *)) +{ + struct kmem_cache *cachep; + + cachep = find_mergeable(size, align, flags, name, ctor); + if (cachep) { + cachep->refcount++; + + /* + * Adjust the object sizes so that we clear + * the complete object on kzalloc. + */ + cachep->object_size = max_t(int, cachep->object_size, size); + } + return cachep; +} + /** * __kmem_cache_create - Create a cache. * @cachep: cache management descriptor diff --git a/mm/slab.h b/mm/slab.h index c44d28b60609..50d29d716db4 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -92,7 +92,7 @@ struct mem_cgroup; int slab_unmergeable(struct kmem_cache *s); struct kmem_cache *find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(void *)); -#ifdef CONFIG_SLUB +#ifndef CONFIG_SLOB struct kmem_cache * __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); From bf0dea23a9c094ae869a88bb694fbe966671bf6d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:27 -0700 Subject: [PATCH 055/164] mm/slab: use percpu allocator for cpu cache Because of chicken and egg problem, initialization of SLAB is really complicated. We need to allocate cpu cache through SLAB to make the kmem_cache work, but before initialization of kmem_cache, allocation through SLAB is impossible. On the other hand, SLUB does initialization in a more simple way. It uses percpu allocator to allocate cpu cache so there is no chicken and egg problem. So, this patch try to use percpu allocator in SLAB. This simplifies the initialization step in SLAB so that we could maintain SLAB code more easily. In my testing there is no performance difference. 
This implementation relies on percpu allocator. Because percpu allocator uses vmalloc address space, vmalloc address space could be exhausted by this change on many cpu system with *32 bit* kernel. This implementation can cover 1024 cpus in worst case by following calculation. Worst: 1024 cpus * 4 bytes for pointer * 300 kmem_caches * 120 objects per cpu_cache = 140 MB Normal: 1024 cpus * 4 bytes for pointer * 150 kmem_caches(slab merge) * 80 objects per cpu_cache = 46 MB Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Jeremiah Mahler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab_def.h | 20 +--- mm/slab.c | 239 ++++++++++++--------------------------- mm/slab.h | 1 - 3 files changed, 78 insertions(+), 182 deletions(-) diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8235dfbb3b05..b869d1662ba3 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -8,6 +8,8 @@ */ struct kmem_cache { + struct array_cache __percpu *cpu_cache; + /* 1) Cache tunables. Protected by slab_mutex */ unsigned int batchcount; unsigned int limit; @@ -71,23 +73,7 @@ struct kmem_cache { struct memcg_cache_params *memcg_params; #endif -/* 6) per-cpu/per-node data, touched during every alloc/free */ - /* - * We put array[] at the end of kmem_cache, because we want to size - * this array to nr_cpu_ids slots instead of NR_CPUS - * (see kmem_cache_init()) - * We still use [NR_CPUS] and not [1] or [0] because cache_cache - * is statically defined, so we reserve the max number of cpus. - * - * We also need to guarantee that the list is able to accomodate a - * pointer for each node since "nodelists" uses the remainder of - * available pointers. - */ - struct kmem_cache_node **node; - struct array_cache *array[NR_CPUS + MAX_NUMNODES]; - /* - * Do not add fields after array[] - */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; #endif /* _LINUX_SLAB_DEF_H */ diff --git a/mm/slab.c b/mm/slab.c index 328233a724af..655d65c3f010 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -237,11 +237,10 @@ struct arraycache_init { /* * Need this for bootstrapping a per node allocator. 
*/ -#define NUM_INIT_LISTS (3 * MAX_NUMNODES) +#define NUM_INIT_LISTS (2 * MAX_NUMNODES) static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; #define CACHE_CACHE 0 -#define SIZE_AC MAX_NUMNODES -#define SIZE_NODE (2 * MAX_NUMNODES) +#define SIZE_NODE (MAX_NUMNODES) static int drain_freelist(struct kmem_cache *cache, struct kmem_cache_node *n, int tofree); @@ -253,7 +252,6 @@ static void cache_reap(struct work_struct *unused); static int slab_early_init = 1; -#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init)) #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) static void kmem_cache_node_init(struct kmem_cache_node *parent) @@ -458,9 +456,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, return reciprocal_divide(offset, cache->reciprocal_buffer_size); } -static struct arraycache_init initarray_generic = - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; - /* internal cache of cache description objs */ static struct kmem_cache kmem_cache_boot = { .batchcount = 1, @@ -476,7 +471,7 @@ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { - return cachep->array[smp_processor_id()]; + return this_cpu_ptr(cachep->cpu_cache); } static size_t calculate_freelist_size(int nr_objs, size_t align) @@ -1096,24 +1091,25 @@ static void cpuup_canceled(long cpu) struct alien_cache **alien; LIST_HEAD(list); - /* cpu is dead; no one can alloc from it. */ - nc = cachep->array[cpu]; - cachep->array[cpu] = NULL; n = get_node(cachep, node); - if (!n) - goto free_array_cache; + continue; spin_lock_irq(&n->list_lock); /* Free limit for this kmem_cache_node */ n->free_limit -= cachep->batchcount; - if (nc) + + /* cpu is dead; no one can alloc from it. */ + nc = per_cpu_ptr(cachep->cpu_cache, cpu); + if (nc) { free_block(cachep, nc->entry, nc->avail, node, &list); + nc->avail = 0; + } if (!cpumask_empty(mask)) { spin_unlock_irq(&n->list_lock); - goto free_array_cache; + goto free_slab; } shared = n->shared; @@ -1133,9 +1129,9 @@ static void cpuup_canceled(long cpu) drain_alien_cache(cachep, alien); free_alien_cache(alien); } -free_array_cache: + +free_slab: slabs_destroy(cachep, &list); - kfree(nc); } /* * In the previous loop, all the objects were freed to @@ -1172,32 +1168,23 @@ static int cpuup_prepare(long cpu) * array caches */ list_for_each_entry(cachep, &slab_caches, list) { - struct array_cache *nc; struct array_cache *shared = NULL; struct alien_cache **alien = NULL; - nc = alloc_arraycache(node, cachep->limit, - cachep->batchcount, GFP_KERNEL); - if (!nc) - goto bad; if (cachep->shared) { shared = alloc_arraycache(node, cachep->shared * cachep->batchcount, 0xbaadf00d, GFP_KERNEL); - if (!shared) { - kfree(nc); + if (!shared) goto bad; - } } if (use_alien_caches) { alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL); if (!alien) { kfree(shared); - kfree(nc); goto bad; } } - cachep->array[cpu] = nc; n = get_node(cachep, node); BUG_ON(!n); @@ -1388,15 +1375,6 @@ static void __init set_up_node(struct kmem_cache *cachep, int index) } } -/* - * The memory after the last cpu cache pointer is used for the - * the node pointer. - */ -static void setup_node_pointer(struct kmem_cache *cachep) -{ - cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids]; -} - /* * Initialisation. Called after the page allocator have been initialised and * before smp_init(). 
@@ -1408,7 +1386,6 @@ void __init kmem_cache_init(void) BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)); kmem_cache = &kmem_cache_boot; - setup_node_pointer(kmem_cache); if (num_possible_nodes() == 1) use_alien_caches = 0; @@ -1416,8 +1393,6 @@ void __init kmem_cache_init(void) for (i = 0; i < NUM_INIT_LISTS; i++) kmem_cache_node_init(&init_kmem_cache_node[i]); - set_up_node(kmem_cache, CACHE_CACHE); - /* * Fragmentation resistance on low memory - only use bigger * page orders on machines with more than 32MB of memory if @@ -1452,49 +1427,22 @@ void __init kmem_cache_init(void) * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ create_boot_cache(kmem_cache, "kmem_cache", - offsetof(struct kmem_cache, array[nr_cpu_ids]) + + offsetof(struct kmem_cache, node) + nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN); list_add(&kmem_cache->list, &slab_caches); - - /* 2+3) create the kmalloc caches */ + slab_state = PARTIAL; /* - * Initialize the caches that provide memory for the array cache and the - * kmem_cache_node structures first. Without this, further allocations will - * bug. + * Initialize the caches that provide memory for the kmem_cache_node + * structures first. Without this, further allocations will bug. */ - - kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", - kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); - - if (INDEX_AC != INDEX_NODE) - kmalloc_caches[INDEX_NODE] = - create_kmalloc_cache("kmalloc-node", + kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node", kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); + slab_state = PARTIAL_NODE; slab_early_init = 0; - /* 4) Replace the bootstrap head arrays */ - { - struct array_cache *ptr; - - ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - - memcpy(ptr, cpu_cache_get(kmem_cache), - sizeof(struct arraycache_init)); - - kmem_cache->array[smp_processor_id()] = ptr; - - ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - - BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC]) - != &initarray_generic.cache); - memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]), - sizeof(struct arraycache_init)); - - kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr; - } /* 5) Replace the bootstrap kmem_cache_node */ { int nid; @@ -1502,13 +1450,8 @@ void __init kmem_cache_init(void) for_each_online_node(nid) { init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); - init_list(kmalloc_caches[INDEX_AC], - &init_kmem_cache_node[SIZE_AC + nid], nid); - - if (INDEX_AC != INDEX_NODE) { - init_list(kmalloc_caches[INDEX_NODE], + init_list(kmalloc_caches[INDEX_NODE], &init_kmem_cache_node[SIZE_NODE + nid], nid); - } } } @@ -2041,56 +1984,53 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, return left_over; } +static struct array_cache __percpu *alloc_kmem_cache_cpus( + struct kmem_cache *cachep, int entries, int batchcount) +{ + int cpu; + size_t size; + struct array_cache __percpu *cpu_cache; + + size = sizeof(void *) * entries + sizeof(struct array_cache); + cpu_cache = __alloc_percpu(size, 0); + + if (!cpu_cache) + return NULL; + + for_each_possible_cpu(cpu) { + init_arraycache(per_cpu_ptr(cpu_cache, cpu), + entries, batchcount); + } + + return cpu_cache; +} + static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) { if (slab_state >= FULL) return enable_cpucache(cachep, gfp); + cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); + if (!cachep->cpu_cache) + return 1; + if (slab_state == DOWN) { - /* - * Note: 
Creation of first cache (kmem_cache). - * The setup_node is taken care - * of by the caller of __kmem_cache_create - */ - cachep->array[smp_processor_id()] = &initarray_generic.cache; - slab_state = PARTIAL; + /* Creation of first cache (kmem_cache). */ + set_up_node(kmem_cache, CACHE_CACHE); } else if (slab_state == PARTIAL) { - /* - * Note: the second kmem_cache_create must create the cache - * that's used by kmalloc(24), otherwise the creation of - * further caches will BUG(). - */ - cachep->array[smp_processor_id()] = &initarray_generic.cache; - - /* - * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is - * the second cache, then we need to set up all its node/, - * otherwise the creation of further caches will BUG(). - */ - set_up_node(cachep, SIZE_AC); - if (INDEX_AC == INDEX_NODE) - slab_state = PARTIAL_NODE; - else - slab_state = PARTIAL_ARRAYCACHE; + /* For kmem_cache_node */ + set_up_node(cachep, SIZE_NODE); } else { - /* Remaining boot caches */ - cachep->array[smp_processor_id()] = - kmalloc(sizeof(struct arraycache_init), gfp); + int node; - if (slab_state == PARTIAL_ARRAYCACHE) { - set_up_node(cachep, SIZE_NODE); - slab_state = PARTIAL_NODE; - } else { - int node; - for_each_online_node(node) { - cachep->node[node] = - kmalloc_node(sizeof(struct kmem_cache_node), - gfp, node); - BUG_ON(!cachep->node[node]); - kmem_cache_node_init(cachep->node[node]); - } + for_each_online_node(node) { + cachep->node[node] = kmalloc_node( + sizeof(struct kmem_cache_node), gfp, node); + BUG_ON(!cachep->node[node]); + kmem_cache_node_init(cachep->node[node]); } } + cachep->node[numa_mem_id()]->next_reap = jiffies + REAPTIMEOUT_NODE + ((unsigned long)cachep) % REAPTIMEOUT_NODE; @@ -2213,7 +2153,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) else gfp = GFP_NOWAIT; - setup_node_pointer(cachep); #if DEBUG /* @@ -2470,8 +2409,7 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) if (rc) return rc; - for_each_online_cpu(i) - kfree(cachep->array[i]); + free_percpu(cachep->cpu_cache); /* NUMA: free the node structures */ for_each_kmem_cache_node(cachep, i, n) { @@ -3719,72 +3657,45 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) return -ENOMEM; } -struct ccupdate_struct { - struct kmem_cache *cachep; - struct array_cache *new[0]; -}; - -static void do_ccupdate_local(void *info) -{ - struct ccupdate_struct *new = info; - struct array_cache *old; - - check_irq_off(); - old = cpu_cache_get(new->cachep); - - new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; - new->new[smp_processor_id()] = old; -} - /* Always called with the slab_mutex held */ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) { - struct ccupdate_struct *new; - int i; + struct array_cache __percpu *cpu_cache, *prev; + int cpu; - new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *), - gfp); - if (!new) + cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); + if (!cpu_cache) return -ENOMEM; - for_each_online_cpu(i) { - new->new[i] = alloc_arraycache(cpu_to_mem(i), limit, - batchcount, gfp); - if (!new->new[i]) { - for (i--; i >= 0; i--) - kfree(new->new[i]); - kfree(new); - return -ENOMEM; - } - } - new->cachep = cachep; - - on_each_cpu(do_ccupdate_local, (void *)new, 1); + prev = cachep->cpu_cache; + cachep->cpu_cache = cpu_cache; + kick_all_cpus_sync(); check_irq_on(); cachep->batchcount = batchcount; cachep->limit = limit; cachep->shared = shared; - 
for_each_online_cpu(i) { + if (!prev) + goto alloc_node; + + for_each_online_cpu(cpu) { LIST_HEAD(list); - struct array_cache *ccold = new->new[i]; int node; struct kmem_cache_node *n; + struct array_cache *ac = per_cpu_ptr(prev, cpu); - if (!ccold) - continue; - - node = cpu_to_mem(i); + node = cpu_to_mem(cpu); n = get_node(cachep, node); spin_lock_irq(&n->list_lock); - free_block(cachep, ccold->entry, ccold->avail, node, &list); + free_block(cachep, ac->entry, ac->avail, node, &list); spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); - kfree(ccold); } - kfree(new); + free_percpu(prev); + +alloc_node: return alloc_kmem_cache_node(cachep, gfp); } diff --git a/mm/slab.h b/mm/slab.h index 50d29d716db4..ab019e63e3c2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -50,7 +50,6 @@ struct kmem_cache { enum slab_state { DOWN, /* No slab functionality yet */ PARTIAL, /* SLUB: kmem_cache_node available */ - PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */ UP, /* Slab caches usable but not all extras yet */ FULL /* Everything is working */ From cc71aba348906ff93a4ad2f600045ee2d1ecc291 Mon Sep 17 00:00:00 2001 From: "vishnu.ps" Date: Thu, 9 Oct 2014 15:26:29 -0700 Subject: [PATCH 056/164] mm/mmap.c: whitespace fixes Signed-off-by: vishnu.ps Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index c0a3637cdb64..2814189f501e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -70,7 +70,7 @@ static void unmap_region(struct mm_struct *mm, * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (yes) yes w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * + * * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes @@ -741,7 +741,7 @@ again: remove_next = 1 + (end > next->vm_end); * split_vma inserting another: so it must be * mprotect case 4 shifting the boundary down. */ - adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); + adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); exporter = vma; importer = next; } @@ -1010,7 +1010,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, + struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; @@ -1036,7 +1036,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, * Can it merge with the predecessor? */ if (prev && prev->vm_end == addr && - mpol_equal(vma_policy(prev), policy) && + mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff)) { /* @@ -1064,7 +1064,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, * Can this new request be merged in front of next? 
*/ if (next && end == next->vm_start && - mpol_equal(policy, vma_policy(next)) && + mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen)) { if (prev && addr < prev->vm_end) /* case 4 */ @@ -1235,7 +1235,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long flags, unsigned long pgoff, unsigned long *populate) { - struct mm_struct * mm = current->mm; + struct mm_struct *mm = current->mm; vm_flags_t vm_flags; *populate = 0; @@ -1263,7 +1263,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) - return -EOVERFLOW; + return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) @@ -1921,7 +1921,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.align_mask = 0; return vm_unmapped_area(&info); } -#endif +#endif /* * This mmap-allocator allocates new areas top-down from below the @@ -2321,13 +2321,13 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address) } struct vm_area_struct * -find_extend_vma(struct mm_struct * mm, unsigned long addr) +find_extend_vma(struct mm_struct *mm, unsigned long addr) { - struct vm_area_struct * vma; + struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; - vma = find_vma(mm,addr); + vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) @@ -2376,7 +2376,7 @@ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) { - struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; + struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; struct mmu_gather tlb; lru_add_drain(); @@ -2423,7 +2423,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, * __split_vma() bypasses sysctl_max_map_count checking. We use this on the * munmap path where it doesn't make sense to fail. */ -static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; @@ -2512,7 +2512,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; - if ((len = PAGE_ALIGN(len)) == 0) + len = PAGE_ALIGN(len); + if (len == 0) return -EINVAL; /* Find the first overlapping VMA */ @@ -2558,7 +2559,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) if (error) return error; } - vma = prev? prev->vm_next: mm->mmap; + vma = prev ? prev->vm_next : mm->mmap; /* * unlock any mlock()ed ranges before detaching vmas @@ -2621,10 +2622,10 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) */ static unsigned long do_brk(unsigned long addr, unsigned long len) { - struct mm_struct * mm = current->mm; - struct vm_area_struct * vma, * prev; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma, *prev; unsigned long flags; - struct rb_node ** rb_link, * rb_parent; + struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; From ed2f240094f900833ac06f533ab8bbcf0a1e8199 Mon Sep 17 00:00:00 2001 From: Zhang Zhen Date: Thu, 9 Oct 2014 15:26:31 -0700 Subject: [PATCH 057/164] memory-hotplug: add sysfs valid_zones attribute Currently memory-hotplug has two limits: 1. 
If the memory block is in ZONE_NORMAL, you can change it to ZONE_MOVABLE, but this memory block must be adjacent to ZONE_MOVABLE. 2. If the memory block is in ZONE_MOVABLE, you can change it to ZONE_NORMAL, but this memory block must be adjacent to ZONE_NORMAL. With this patch, we can easy to know a memory block can be onlined to which zone, and don't need to know the above two limits. Updated the related Documentation. [akpm@linux-foundation.org: use conventional comment layout] [akpm@linux-foundation.org: fix build with CONFIG_MEMORY_HOTREMOVE=n] [akpm@linux-foundation.org: remove unused local zone_prev] Signed-off-by: Zhang Zhen Cc: Dave Hansen Cc: David Rientjes Cc: Toshi Kani Cc: Yasuaki Ishimatsu Cc: Naoya Horiguchi Cc: Wang Nan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- .../ABI/testing/sysfs-devices-memory | 8 ++++ Documentation/memory-hotplug.txt | 11 ++++- drivers/base/memory.c | 42 +++++++++++++++++++ include/linux/memory_hotplug.h | 1 + mm/memory_hotplug.c | 2 +- 5 files changed, 62 insertions(+), 2 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index 7405de26ee60..deef3b5723cf 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -61,6 +61,14 @@ Users: hotplug memory remove tools http://www.ibm.com/developerworks/wikis/display/LinuxP/powerpc-utils +What: /sys/devices/system/memory/memoryX/valid_zones +Date: July 2014 +Contact: Zhang Zhen +Description: + The file /sys/devices/system/memory/memoryX/valid_zones is + read-only and is designed to show which zone this memory + block can be onlined to. + What: /sys/devices/system/memoryX/nodeY Date: October 2009 Contact: Linux Memory Management list diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt index 45134dc23854..ea03abfc97e9 100644 --- a/Documentation/memory-hotplug.txt +++ b/Documentation/memory-hotplug.txt @@ -155,6 +155,7 @@ Under each memory block, you can see 4 files: /sys/devices/system/memory/memoryXXX/phys_device /sys/devices/system/memory/memoryXXX/state /sys/devices/system/memory/memoryXXX/removable +/sys/devices/system/memory/memoryXXX/valid_zones 'phys_index' : read-only and contains memory block id, same as XXX. 'state' : read-write @@ -170,6 +171,15 @@ Under each memory block, you can see 4 files: block is removable and a value of 0 indicates that it is not removable. A memory block is removable only if every section in the block is removable. +'valid_zones' : read-only: designed to show which zones this memory block + can be onlined to. + The first column shows it's default zone. + "memory6/valid_zones: Normal Movable" shows this memoryblock + can be onlined to ZONE_NORMAL by default and to ZONE_MOVABLE + by online_movable. + "memory7/valid_zones: Movable Normal" shows this memoryblock + can be onlined to ZONE_MOVABLE by default and to ZONE_NORMAL + by online_kernel. NOTE: These directories/files appear after physical memory hotplug phase. @@ -408,7 +418,6 @@ node if necessary. - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like sysctl or new control file. - showing memory block and physical device relationship. - - showing memory block is under ZONE_MOVABLE or not - test and make it better memory offlining. - support HugeTLB page migration and offlining. - memmap removing at memory offline. 
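As a usage illustration only (not part of the patch), a small userspace sketch that reads the new attribute; the block number used here is hypothetical:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/memory/memory6/valid_zones", "r");

	if (!f) {
		perror("valid_zones");
		return 1;
	}
	/* First zone listed is the default online target. */
	if (fgets(buf, sizeof(buf), f))
		printf("memory6 can be onlined to: %s", buf);
	fclose(f);
	return 0;
}
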
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index a2e13e250bba..7c5d87191b28 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -373,6 +373,45 @@ static ssize_t show_phys_device(struct device *dev, return sprintf(buf, "%d\n", mem->phys_device); } +#ifdef CONFIG_MEMORY_HOTREMOVE +static ssize_t show_valid_zones(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + unsigned long start_pfn, end_pfn; + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + struct page *first_page; + struct zone *zone; + + start_pfn = section_nr_to_pfn(mem->start_section_nr); + end_pfn = start_pfn + nr_pages; + first_page = pfn_to_page(start_pfn); + + /* The block contains more than one zone can not be offlined. */ + if (!test_pages_in_a_zone(start_pfn, end_pfn)) + return sprintf(buf, "none\n"); + + zone = page_zone(first_page); + + if (zone_idx(zone) == ZONE_MOVABLE - 1) { + /*The mem block is the last memoryblock of this zone.*/ + if (end_pfn == zone_end_pfn(zone)) + return sprintf(buf, "%s %s\n", + zone->name, (zone + 1)->name); + } + + if (zone_idx(zone) == ZONE_MOVABLE) { + /*The mem block is the first memoryblock of ZONE_MOVABLE.*/ + if (start_pfn == zone->zone_start_pfn) + return sprintf(buf, "%s %s\n", + zone->name, (zone - 1)->name); + } + + return sprintf(buf, "%s\n", zone->name); +} +static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); +#endif + static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); @@ -523,6 +562,9 @@ static struct attribute *memory_memblk_attrs[] = { &dev_attr_state.attr, &dev_attr_phys_device.attr, &dev_attr_removable.attr, +#ifdef CONFIG_MEMORY_HOTREMOVE + &dev_attr_valid_zones.attr, +#endif NULL }; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d9524c49d767..8f1a41951df9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long, int); +extern int test_pages_in_a_zone(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); typedef void (*online_page_callback_t)(struct page *page); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2ff8c2325e96..29d8693d0c61 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1307,7 +1307,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) /* * Confirm all pages in a range [start, end) is belongs to the same zone. */ -static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) +int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; struct zone *zone = NULL; From 6a33979d5bd7521497121c5ae4435d7003115a0f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 9 Oct 2014 15:26:33 -0700 Subject: [PATCH 058/164] mm: remove misleading ARCH_USES_NUMA_PROT_NONE ARCH_USES_NUMA_PROT_NONE was defined for architectures that implemented _PAGE_NUMA using _PROT_NONE. This saved using an additional PTE bit and relied on the fact that PROT_NONE vmas were skipped by the NUMA hinting fault scanner. 
This was found to be conceptually confusing with a lot of implicit assumptions and it was asked that an alternative be found. Commit c46a7c81 "x86: define _PAGE_NUMA by reusing software bits on the PMD and PTE levels" redefined _PAGE_NUMA on x86 to be one of the swap PTE bits and shrunk the maximum possible swap size but it did not go far enough. There are no architectures that reuse _PROT_NONE as _PROT_NUMA but the relics still exist. This patch removes ARCH_USES_NUMA_PROT_NONE and removes some unnecessary duplication in powerpc vs the generic implementation by defining the types the core NUMA helpers expected to exist from x86 with their ppc64 equivalent. This necessitated that a PTE bit mask be created that identified the bits that distinguish present from NUMA pte entries but it is expected this will only differ between arches based on _PAGE_PROTNONE. The naming for the generic helpers was taken from x86 originally but ppc64 has types that are equivalent for the purposes of the helper so they are mapped instead of duplicating code. Signed-off-by: Mel Gorman Cc: Hugh Dickins Cc: "Kirill A. Shutemov" Cc: Rik van Riel Cc: Johannes Weiner Cc: Cyrill Gorcunov Reviewed-by: Aneesh Kumar K.V Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pgtable.h | 57 ++++++--------------------- arch/powerpc/include/asm/pte-common.h | 5 +++ arch/x86/Kconfig | 1 - arch/x86/include/asm/pgtable_types.h | 14 +++++++ include/asm-generic/pgtable.h | 27 +++++-------- init/Kconfig | 11 ------ 6 files changed, 40 insertions(+), 75 deletions(-) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index d98c1ecc3266..f60d4ea8b50c 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -38,10 +38,9 @@ static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } #ifdef CONFIG_NUMA_BALANCING - static inline int pte_present(pte_t pte) { - return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA); + return pte_val(pte) & _PAGE_NUMA_MASK; } #define pte_present_nonuma pte_present_nonuma @@ -50,37 +49,6 @@ static inline int pte_present_nonuma(pte_t pte) return pte_val(pte) & (_PAGE_PRESENT); } -#define pte_numa pte_numa -static inline int pte_numa(pte_t pte) -{ - return (pte_val(pte) & - (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; -} - -#define pte_mknonnuma pte_mknonnuma -static inline pte_t pte_mknonnuma(pte_t pte) -{ - pte_val(pte) &= ~_PAGE_NUMA; - pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED; - return pte; -} - -#define pte_mknuma pte_mknuma -static inline pte_t pte_mknuma(pte_t pte) -{ - /* - * We should not set _PAGE_NUMA on non present ptes. Also clear the - * present bit so that hash_page will return 1 and we collect this - * as numa fault. 
- */ - if (pte_present(pte)) { - pte_val(pte) |= _PAGE_NUMA; - pte_val(pte) &= ~_PAGE_PRESENT; - } else - VM_BUG_ON(1); - return pte; -} - #define ptep_set_numa ptep_set_numa static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -92,12 +60,6 @@ static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, return; } -#define pmd_numa pmd_numa -static inline int pmd_numa(pmd_t pmd) -{ - return pte_numa(pmd_pte(pmd)); -} - #define pmdp_set_numa pmdp_set_numa static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -109,16 +71,21 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, return; } -#define pmd_mknonnuma pmd_mknonnuma -static inline pmd_t pmd_mknonnuma(pmd_t pmd) +/* + * Generic NUMA pte helpers expect pteval_t and pmdval_t types to exist + * which was inherited from x86. For the purposes of powerpc pte_basic_t and + * pmd_t are equivalent + */ +#define pteval_t pte_basic_t +#define pmdval_t pmd_t +static inline pteval_t ptenuma_flags(pte_t pte) { - return pte_pmd(pte_mknonnuma(pmd_pte(pmd))); + return pte_val(pte) & _PAGE_NUMA_MASK; } -#define pmd_mknuma pmd_mknuma -static inline pmd_t pmd_mknuma(pmd_t pmd) +static inline pmdval_t pmdnuma_flags(pmd_t pmd) { - return pte_pmd(pte_mknuma(pmd_pte(pmd))); + return pmd_val(pmd) & _PAGE_NUMA_MASK; } # else diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 8d1569c29042..e040c3595129 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -98,6 +98,11 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); _PAGE_USER | _PAGE_ACCESSED | \ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC) +#ifdef CONFIG_NUMA_BALANCING +/* Mask of bits that distinguish present and numa ptes */ +#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PRESENT) +#endif + /* * We define 2 sets of base prot bits, one for basic pages (ie, * cacheable kernel and user pages) and one for non cacheable diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e4b1f431c7ed..3eb8a41509b3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -30,7 +30,6 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 select ARCH_SUPPORTS_INT128 if X86_64 - select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_IDE select HAVE_OPROFILE select HAVE_PCSPKR_PLATFORM diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index f216963760e5..0f9724c9c510 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -325,6 +325,20 @@ static inline pteval_t pte_flags(pte_t pte) return native_pte_val(pte) & PTE_FLAGS_MASK; } +#ifdef CONFIG_NUMA_BALANCING +/* Set of bits that distinguishes present, prot_none and numa ptes */ +#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT) +static inline pteval_t ptenuma_flags(pte_t pte) +{ + return pte_flags(pte) & _PAGE_NUMA_MASK; +} + +static inline pmdval_t pmdnuma_flags(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_NUMA_MASK; +} +#endif /* CONFIG_NUMA_BALANCING */ + #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 53b2acc38213..281870f56450 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -660,11 +660,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd) } #ifdef CONFIG_NUMA_BALANCING -#ifdef 
CONFIG_ARCH_USES_NUMA_PROT_NONE /* - * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the - * same bit too). It's set only when _PAGE_PRESET is not set and it's - * never set if _PAGE_PRESENT is set. + * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that + * is protected for PROT_NONE and a NUMA hinting fault entry. If the + * architecture defines __PAGE_PROTNONE then it should take that into account + * but those that do not can rely on the fact that the NUMA hinting scanner + * skips inaccessible VMAs. * * pte/pmd_present() returns true if pte/pmd_numa returns true. Page * fault triggers on those regions if pte/pmd_numa returns true @@ -673,16 +674,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd) #ifndef pte_numa static inline int pte_numa(pte_t pte) { - return (pte_flags(pte) & - (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; + return ptenuma_flags(pte) == _PAGE_NUMA; } #endif #ifndef pmd_numa static inline int pmd_numa(pmd_t pmd) { - return (pmd_flags(pmd) & - (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; + return pmdnuma_flags(pmd) == _PAGE_NUMA; } #endif @@ -722,6 +721,8 @@ static inline pte_t pte_mknuma(pte_t pte) { pteval_t val = pte_val(pte); + VM_BUG_ON(!(val & _PAGE_PRESENT)); + val &= ~_PAGE_PRESENT; val |= _PAGE_NUMA; @@ -765,16 +766,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, } #endif #else -extern int pte_numa(pte_t pte); -extern int pmd_numa(pmd_t pmd); -extern pte_t pte_mknonnuma(pte_t pte); -extern pmd_t pmd_mknonnuma(pmd_t pmd); -extern pte_t pte_mknuma(pte_t pte); -extern pmd_t pmd_mknuma(pmd_t pmd); -extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); -#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ -#else static inline int pmd_numa(pmd_t pmd) { return 0; diff --git a/init/Kconfig b/init/Kconfig index e25a82a291a6..d2355812ba48 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -889,17 +889,6 @@ config ARCH_SUPPORTS_INT128 config ARCH_WANT_NUMA_VARIABLE_LOCALITY bool -# -# For architectures that are willing to define _PAGE_NUMA as _PAGE_PROTNONE -config ARCH_WANTS_PROT_NUMA_PROT_NONE - bool - -config ARCH_USES_NUMA_PROT_NONE - bool - default y - depends on ARCH_WANTS_PROT_NUMA_PROT_NONE - depends on NUMA_BALANCING - config NUMA_BALANCING_DEFAULT_ENABLED bool "Automatically enable NUMA aware memory/task placement" default y From 505e3be6c082489a32a88e042f930d047b6415bc Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:35 -0700 Subject: [PATCH 059/164] lib/genalloc.c: add power aligned algorithm One of the more common algorithms used for allocation is to align the start address of the allocation to the order of size requested. Add this as an algorithm option for genalloc. 
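As a hedged usage sketch (the pool, its backing region and the init caller below are hypothetical, not part of this patch), selecting the new algorithm looks like this:

#include <linux/errno.h>
#include <linux/genalloc.h>

static struct gen_pool *example_pool;

static int example_pool_init(void *virt, phys_addr_t phys, size_t size)
{
	example_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!example_pool)
		return -ENOMEM;

	/* Align each allocation to the power of two covering its size. */
	gen_pool_set_algo(example_pool, gen_pool_first_fit_order_align, NULL);

	return gen_pool_add_virt(example_pool, (unsigned long)virt, phys,
				 size, -1);
}

Without the gen_pool_set_algo() call the pool keeps the default first-fit behaviour.
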
Signed-off-by: Laura Abbott Acked-by: Will Deacon Acked-by: Olof Johansson Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/genalloc.h | 4 ++++ lib/genalloc.c | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1c2fdaa2ffc3..3cd0934d62ba 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); +extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data); + extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); diff --git a/lib/genalloc.c b/lib/genalloc.c index 38d2db82228c..166f17b9f169 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -480,6 +480,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, } EXPORT_SYMBOL(gen_pool_first_fit); +/** + * gen_pool_first_fit_order_align - find the first available region + * of memory matching the size requirement. The region will be aligned + * to the order of the size specified. + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @data: additional data - unused + */ +unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, + unsigned int nr, void *data) +{ + unsigned long align_mask = roundup_pow_of_two(nr) - 1; + + return bitmap_find_next_zero_area(map, size, start, nr, align_mask); +} +EXPORT_SYMBOL(gen_pool_first_fit_order_align); + /** * gen_pool_best_fit - find the best fitting region of memory * macthing the size requirement (no alignment constraint) From 9efb3a421d55d30b65fb0dbee05108d15c6c55f7 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:38 -0700 Subject: [PATCH 060/164] lib/genalloc.c: add genpool range check function After allocating an address from a particular genpool, there is no good way to verify if that address actually belongs to a genpool. Introduce addr_in_gen_pool which will return if an address plus size falls completely within the genpool range. 
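A hedged sketch of the intended use (the helper and its caller are hypothetical): only hand an address back to the pool if it actually came from there.

#include <linux/genalloc.h>

static bool example_free_from_pool(struct gen_pool *pool, void *vaddr,
				   size_t size)
{
	/* Reject addresses that do not lie entirely inside the pool. */
	if (!pool || !addr_in_gen_pool(pool, (unsigned long)vaddr, size))
		return false;

	gen_pool_free(pool, (unsigned long)vaddr, size);
	return true;
}
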
Signed-off-by: Laura Abbott Acked-by: Will Deacon Reviewed-by: Olof Johansson Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/genalloc.h | 3 +++ lib/genalloc.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 3cd0934d62ba..1ccaab44abcc 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -121,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid); extern struct gen_pool *dev_get_gen_pool(struct device *dev); +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size); + #ifdef CONFIG_OF extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, const char *propname, int index); diff --git a/lib/genalloc.c b/lib/genalloc.c index 166f17b9f169..cce4dd68c40d 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -402,6 +402,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool, } EXPORT_SYMBOL(gen_pool_for_each_chunk); +/** + * addr_in_gen_pool - checks if an address falls within the range of a pool + * @pool: the generic memory pool + * @start: start address + * @size: size of the region + * + * Check if the range of addresses falls within the specified pool. Returns + * true if the entire range is contained in the pool and false otherwise. + */ +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size) +{ + bool found = false; + unsigned long end = start + size; + struct gen_pool_chunk *chunk; + + rcu_read_lock(); + list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { + if (start >= chunk->start_addr && start <= chunk->end_addr) { + if (end <= chunk->end_addr) { + found = true; + break; + } + } + } + rcu_read_unlock(); + return found; +} + /** * gen_pool_avail - get available free space of the pool * @pool: pool to get available free space From 513510ddba9650fc7da456eefeb0ead7632324f6 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:40 -0700 Subject: [PATCH 061/164] common: dma-mapping: introduce common remapping functions For architectures without coherent DMA, memory for DMA may need to be remapped with coherent attributes. Factor out the the remapping code from arm and put it in a common location to reduce code duplication. As part of this, the arm APIs are now migrated away from ioremap_page_range to the common APIs which use map_vm_area for remapping. This should be an equivalent change and using map_vm_area is more correct as ioremap_page_range is intended to bring in io addresses into the cpu space and not regular kernel managed memory. 
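A hedged sketch of how an architecture might sit on top of the new helpers; the function names and the VM_USERMAP-only flag choice are illustrative, not taken from any in-tree user:

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

static void *example_remap_coherent(struct page *page, size_t size,
				    pgprot_t prot, const void *caller)
{
	/* Remap the contiguous allocation with the requested attributes. */
	return dma_common_contiguous_remap(page, size, VM_USERMAP,
					   prot, caller);
}

static void example_unmap_coherent(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size, VM_USERMAP);
}
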
Signed-off-by: Laura Abbott Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Olof Johansson Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Cc: Will Deacon Cc: James Hogan Cc: Laura Abbott Cc: Mitchel Humpherys Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/dma-mapping.c | 57 +++---------------- drivers/base/dma-mapping.c | 72 ++++++++++++++++++++++++ include/asm-generic/dma-mapping-common.h | 9 +++ 3 files changed, 90 insertions(+), 48 deletions(-) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7a996aaa061e..eecc8e60deea 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -298,37 +298,19 @@ static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { - struct vm_struct *area; - unsigned long addr; - /* * DMA allocation can be mapped to user space, so lets * set VM_USERMAP flags too. */ - area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, - caller); - if (!area) - return NULL; - addr = (unsigned long)area->addr; - area->phys_addr = __pfn_to_phys(page_to_pfn(page)); - - if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { - vunmap((void *)addr); - return NULL; - } - return (void *)addr; + return dma_common_contiguous_remap(page, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP, + prot, caller); } static void __dma_free_remap(void *cpu_addr, size_t size) { - unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; - struct vm_struct *area = find_vm_area(cpu_addr); - if (!area || (area->flags & flags) != flags) { - WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); - return; - } - unmap_kernel_range((unsigned long)cpu_addr, size); - vunmap(cpu_addr); + dma_common_free_remap(cpu_addr, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP); } #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K @@ -1271,29 +1253,8 @@ static void * __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { - unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; - struct vm_struct *area; - unsigned long p; - - area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, - caller); - if (!area) - return NULL; - - area->pages = pages; - area->nr_pages = nr_pages; - p = (unsigned long)area->addr; - - for (i = 0; i < nr_pages; i++) { - phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); - if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) - goto err; - p += PAGE_SIZE; - } - return area->addr; -err: - unmap_kernel_range((unsigned long)area->addr, size); - vunmap(area->addr); + return dma_common_pages_remap(pages, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller); return NULL; } @@ -1501,8 +1462,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, } if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { - unmap_kernel_range((unsigned long)cpu_addr, size); - vunmap(cpu_addr); + dma_common_free_remap(cpu_addr, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP); } __iommu_remove_mapping(dev, handle, size); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 6cd08e145bfa..9e8bbdd470ca 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include /* @@ -267,3 +269,73 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, return ret; } EXPORT_SYMBOL(dma_common_mmap); + +#ifdef CONFIG_MMU +/* + * remaps an array of PAGE_SIZE pages into 
another vm_area + * Cannot be used in non-sleeping contexts + */ +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller) +{ + struct vm_struct *area; + + area = get_vm_area_caller(size, vm_flags, caller); + if (!area) + return NULL; + + area->pages = pages; + + if (map_vm_area(area, prot, pages)) { + vunmap(area->addr); + return NULL; + } + + return area->addr; +} + +/* + * remaps an allocated contiguous region into another vm_area. + * Cannot be used in non-sleeping contexts + */ + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller) +{ + int i; + struct page **pages; + void *ptr; + unsigned long pfn; + + pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + if (!pages) + return NULL; + + for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++) + pages[i] = pfn_to_page(pfn + i); + + ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller); + + kfree(pages); + + return ptr; +} + +/* + * unmaps a range previously mapped by dma_common_*_remap + */ +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + + if (!area || (area->flags & vm_flags) != vm_flags) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + return; + } + + unmap_kernel_range((unsigned long)cpu_addr, size); + vunmap(cpu_addr); +} +#endif diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index de8bf89940f8..a9fd248f5d48 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + /** * dma_mmap_attrs - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices From 36d0fd2198da3fd16b0e0da50ece05b4d295d2f1 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:42 -0700 Subject: [PATCH 062/164] arm: use genalloc for the atomic pool ARM currently uses a bitmap for tracking atomic allocations. genalloc already handles this type of memory pool allocation so switch to using that instead. 
Signed-off-by: Laura Abbott Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Olof Johansson Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/Kconfig | 1 + arch/arm/mm/dma-mapping.c | 147 ++++++++++++-------------------------- 2 files changed, 47 insertions(+), 101 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d9d32de9628c..36d47987a9e0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -14,6 +14,7 @@ config ARM select CLONE_BACKWARDS select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS + select GENERIC_ALLOCATOR select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI) select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_IDLE_POLL_SETUP diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index eecc8e60deea..c245d903927f 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -314,23 +315,13 @@ static void __dma_free_remap(void *cpu_addr, size_t size) } #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K +static struct gen_pool *atomic_pool; -struct dma_pool { - size_t size; - spinlock_t lock; - unsigned long *bitmap; - unsigned long nr_pages; - void *vaddr; - struct page **pages; -}; - -static struct dma_pool atomic_pool = { - .size = DEFAULT_DMA_COHERENT_POOL_SIZE, -}; +static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE; static int __init early_coherent_pool(char *p) { - atomic_pool.size = memparse(p, &p); + atomic_pool_size = memparse(p, &p); return 0; } early_param("coherent_pool", early_coherent_pool); @@ -340,14 +331,14 @@ void __init init_dma_coherent_pool_size(unsigned long size) /* * Catch any attempt to set the pool size too late. */ - BUG_ON(atomic_pool.vaddr); + BUG_ON(atomic_pool); /* * Set architecture specific coherent pool size only if * it has not been changed by kernel command line parameter. 
*/ - if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) - atomic_pool.size = size; + if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE) + atomic_pool_size = size; } /* @@ -355,52 +346,44 @@ void __init init_dma_coherent_pool_size(unsigned long size) */ static int __init atomic_pool_init(void) { - struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL); gfp_t gfp = GFP_KERNEL | GFP_DMA; - unsigned long nr_pages = pool->size >> PAGE_SHIFT; - unsigned long *bitmap; struct page *page; - struct page **pages; void *ptr; - int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); - bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!bitmap) - goto no_bitmap; - - pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); - if (!pages) - goto no_pages; + atomic_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!atomic_pool) + goto out; if (dev_get_cma_area(NULL)) - ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, - atomic_pool_init); + ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot, + &page, atomic_pool_init); else - ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, - atomic_pool_init); + ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot, + &page, atomic_pool_init); if (ptr) { - int i; + int ret; - for (i = 0; i < nr_pages; i++) - pages[i] = page + i; + ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr, + page_to_phys(page), + atomic_pool_size, -1); + if (ret) + goto destroy_genpool; - spin_lock_init(&pool->lock); - pool->vaddr = ptr; - pool->pages = pages; - pool->bitmap = bitmap; - pool->nr_pages = nr_pages; - pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", - (unsigned)pool->size / 1024); + gen_pool_set_algo(atomic_pool, + gen_pool_first_fit_order_align, + (void *)PAGE_SHIFT); + pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n", + atomic_pool_size / 1024); return 0; } - kfree(pages); -no_pages: - kfree(bitmap); -no_bitmap: - pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", - (unsigned)pool->size / 1024); +destroy_genpool: + gen_pool_destroy(atomic_pool); + atomic_pool = NULL; +out: + pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n", + atomic_pool_size / 1024); return -ENOMEM; } /* @@ -504,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, static void *__alloc_from_pool(size_t size, struct page **ret_page) { - struct dma_pool *pool = &atomic_pool; - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned int pageno; - unsigned long flags; + unsigned long val; void *ptr = NULL; - unsigned long align_mask; - if (!pool->vaddr) { + if (!atomic_pool) { WARN(1, "coherent pool not initialised!\n"); return NULL; } - /* - * Align the region allocation - allocations from pool are rather - * small, so align them to their order in pages, minimum is a page - * size. This helps reduce fragmentation of the DMA space. 
- */ - align_mask = (1 << get_order(size)) - 1; + val = gen_pool_alloc(atomic_pool, size); + if (val) { + phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); - spin_lock_irqsave(&pool->lock, flags); - pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, - 0, count, align_mask); - if (pageno < pool->nr_pages) { - bitmap_set(pool->bitmap, pageno, count); - ptr = pool->vaddr + PAGE_SIZE * pageno; - *ret_page = pool->pages[pageno]; - } else { - pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" - "Please increase it with coherent_pool= kernel parameter!\n", - (unsigned)pool->size / 1024); + *ret_page = phys_to_page(phys); + ptr = (void *)val; } - spin_unlock_irqrestore(&pool->lock, flags); return ptr; } static bool __in_atomic_pool(void *start, size_t size) { - struct dma_pool *pool = &atomic_pool; - void *end = start + size; - void *pool_start = pool->vaddr; - void *pool_end = pool->vaddr + pool->size; - - if (start < pool_start || start >= pool_end) - return false; - - if (end <= pool_end) - return true; - - WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", - start, end - 1, pool_start, pool_end - 1); - - return false; + return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); } static int __free_from_pool(void *start, size_t size) { - struct dma_pool *pool = &atomic_pool; - unsigned long pageno, count; - unsigned long flags; - if (!__in_atomic_pool(start, size)) return 0; - pageno = (start - pool->vaddr) >> PAGE_SHIFT; - count = size >> PAGE_SHIFT; - - spin_lock_irqsave(&pool->lock, flags); - bitmap_clear(pool->bitmap, pageno, count); - spin_unlock_irqrestore(&pool->lock, flags); + gen_pool_free(atomic_pool, (unsigned long)start, size); return 1; } @@ -1316,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si static struct page **__atomic_get_pages(void *addr) { - struct dma_pool *pool = &atomic_pool; - struct page **pages = pool->pages; - int offs = (addr - pool->vaddr) >> PAGE_SHIFT; + struct page *page; + phys_addr_t phys; - return pages + offs; + phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); + page = phys_to_page(phys); + + return (struct page **)page; } static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) From d4932f9e81ae7a7bf3c3967e48373909b9c98ee5 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:44 -0700 Subject: [PATCH 063/164] arm64: add atomic pool for non-coherent and CMA allocations Neither CMA nor noncoherent allocations support atomic allocations. Add a dedicated atomic pool to support this. 
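A hedged sketch of the allocation-path split the arm64 patch adds: callers that cannot sleep (no __GFP_WAIT in this kernel's GFP vocabulary) are served from the pre-mapped atomic pool, while sleepable callers keep using the existing CMA/swiotlb path. noncoherent_alloc(), alloc_from_pool() and alloc_blocking() are illustrative stand-ins, not names added by the patch.

/* stand-ins for the pool and blocking allocators in the patch */
static void *alloc_from_pool(size_t size, struct page **ret_page);
static void *alloc_blocking(struct device *dev, size_t size, gfp_t flags,
			    dma_addr_t *dma_handle);

static void *noncoherent_alloc(struct device *dev, size_t size, gfp_t flags,
			       dma_addr_t *dma_handle)
{
	if (!(flags & __GFP_WAIT)) {		/* atomic context */
		struct page *page = NULL;
		void *vaddr = alloc_from_pool(size, &page);

		if (vaddr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return vaddr;
	}
	return alloc_blocking(dev, size, flags, dma_handle);
}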
Reviewed-by: Catalin Marinas Signed-off-by: Laura Abbott Cc: Arnd Bergmann Cc: David Riley Cc: Olof Johansson Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 1 + arch/arm64/mm/dma-mapping.c | 164 +++++++++++++++++++++++++++++++----- 2 files changed, 146 insertions(+), 19 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9746dc24a117..e96cbe84d5ae 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -18,6 +18,7 @@ config ARM64 select COMMON_CLK select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS + select GENERIC_ALLOCATOR select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CPU_AUTOPROBE diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 2c71077cacfd..d92094203913 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, return prot; } +static struct gen_pool *atomic_pool; + +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K +static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE; + +static int __init early_coherent_pool(char *p) +{ + atomic_pool_size = memparse(p, &p); + return 0; +} +early_param("coherent_pool", early_coherent_pool); + +static void *__alloc_from_pool(size_t size, struct page **ret_page) +{ + unsigned long val; + void *ptr = NULL; + + if (!atomic_pool) { + WARN(1, "coherent pool not initialised!\n"); + return NULL; + } + + val = gen_pool_alloc(atomic_pool, size); + if (val) { + phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); + + *ret_page = phys_to_page(phys); + ptr = (void *)val; + } + + return ptr; +} + +static bool __in_atomic_pool(void *start, size_t size) +{ + return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); +} + +static int __free_from_pool(void *start, size_t size) +{ + if (!__in_atomic_pool(start, size)) + return 0; + + gen_pool_free(atomic_pool, (unsigned long)start, size); + + return 1; +} + static void *__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) @@ -50,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_ZONE_DMA) && dev->coherent_dma_mask <= DMA_BIT_MASK(32)) flags |= GFP_DMA; - if (IS_ENABLED(CONFIG_DMA_CMA)) { + if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { struct page *page; size = PAGE_ALIGN(size); @@ -70,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { + bool freed; + phys_addr_t paddr = dma_to_phys(dev, dma_handle); + if (dev == NULL) { WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); return; } - if (IS_ENABLED(CONFIG_DMA_CMA)) { - phys_addr_t paddr = dma_to_phys(dev, dma_handle); - - dma_release_from_contiguous(dev, + freed = dma_release_from_contiguous(dev, phys_to_page(paddr), size >> PAGE_SHIFT); - } else { + if (!freed) swiotlb_free_coherent(dev, size, vaddr, dma_handle); - } } static void *__dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs) { - struct page *page, **map; + struct page *page; void *ptr, *coherent_ptr; - int order, i; size = PAGE_ALIGN(size); - order = get_order(size); + + if (!(flags & __GFP_WAIT)) { + 
struct page *page = NULL; + void *addr = __alloc_from_pool(size, &page); + + if (addr) + *dma_handle = phys_to_dma(dev, page_to_phys(page)); + + return addr; + + } ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); if (!ptr) goto no_mem; - map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA); - if (!map) - goto no_map; /* remove any dirty cache lines on the kernel alias */ __dma_flush_range(ptr, ptr + size); /* create a coherent mapping */ page = virt_to_page(ptr); - for (i = 0; i < (size >> PAGE_SHIFT); i++) - map[i] = page + i; - coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP, - __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false)); - kfree(map); + coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, + __get_dma_pgprot(attrs, + __pgprot(PROT_NORMAL_NC), false), + NULL); if (!coherent_ptr) goto no_map; @@ -132,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size, { void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); + if (__free_from_pool(vaddr, size)) + return; vunmap(vaddr); __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); } @@ -307,6 +362,67 @@ EXPORT_SYMBOL(coherent_swiotlb_dma_ops); extern int swiotlb_late_init_with_default_size(size_t default_size); +static int __init atomic_pool_init(void) +{ + pgprot_t prot = __pgprot(PROT_NORMAL_NC); + unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; + struct page *page; + void *addr; + unsigned int pool_size_order = get_order(atomic_pool_size); + + if (dev_get_cma_area(NULL)) + page = dma_alloc_from_contiguous(NULL, nr_pages, + pool_size_order); + else + page = alloc_pages(GFP_DMA, pool_size_order); + + if (page) { + int ret; + void *page_addr = page_address(page); + + memset(page_addr, 0, atomic_pool_size); + __dma_flush_range(page_addr, page_addr + atomic_pool_size); + + atomic_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!atomic_pool) + goto free_page; + + addr = dma_common_contiguous_remap(page, atomic_pool_size, + VM_USERMAP, prot, atomic_pool_init); + + if (!addr) + goto destroy_genpool; + + ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr, + page_to_phys(page), + atomic_pool_size, -1); + if (ret) + goto remove_mapping; + + gen_pool_set_algo(atomic_pool, + gen_pool_first_fit_order_align, + (void *)PAGE_SHIFT); + + pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", + atomic_pool_size / 1024); + return 0; + } + goto out; + +remove_mapping: + dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP); +destroy_genpool: + gen_pool_destroy(atomic_pool); + atomic_pool = NULL; +free_page: + if (!dma_release_from_contiguous(NULL, page, nr_pages)) + __free_pages(page, pool_size_order); +out: + pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", + atomic_pool_size / 1024); + return -ENOMEM; +} + static int __init swiotlb_late_init(void) { size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); @@ -315,7 +431,17 @@ static int __init swiotlb_late_init(void) return swiotlb_late_init_with_default_size(swiotlb_size); } -arch_initcall(swiotlb_late_init); + +static int __init arm64_dma_init(void) +{ + int ret = 0; + + ret |= swiotlb_late_init(); + ret |= atomic_pool_init(); + + return ret; +} +arch_initcall(arm64_dma_init); #define PREALLOC_DMA_DEBUG_ENTRIES 4096 From f7426b983a6a353cf21e5733e84458219c4a817e Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 9 Oct 2014 15:26:47 -0700 Subject: [PATCH 064/164] mm: cma: adjust address limit to avoid hitting low/high memory 
boundary Russell King recently noticed that limiting the default CMA region only to low memory on the ARM architecture causes serious memory management issues with machines having a lot of memory (which is mainly available as high memory). More information can be found in the following thread: http://thread.gmane.org/gmane.linux.ports.arm.kernel/348441/ These two patches remove this limit, letting the kernel put the default CMA region into high memory when this is possible (there is enough high memory available and the architecture-specific DMA limit fits). This should solve strange OOM issues on systems with lots of RAM (i.e. >1GiB) and large (>256M) CMA area. This patch (of 2): Automatically allocated regions should not cross the low/high memory boundary, because such regions cannot be later correctly initialized due to spanning across two memory zones. This patch adds a check for this case and simple code for moving the region to low memory if the automatically selected address might not fit completely into high memory. Signed-off-by: Marek Szyprowski Acked-by: Michal Nazarewicz Cc: Daniel Drake Cc: Minchan Kim Cc: Russell King Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/cma.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mm/cma.c b/mm/cma.c index c17751c0dcaf..474c644a0dc6 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -32,6 +32,7 @@ #include #include #include +#include struct cma { unsigned long base_pfn; @@ -163,6 +164,8 @@ int __init cma_declare_contiguous(phys_addr_t base, bool fixed, struct cma **res_cma) { struct cma *cma; + phys_addr_t memblock_end = memblock_end_of_DRAM(); + phys_addr_t highmem_start = __pa(high_memory); int ret = 0; pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n", @@ -196,6 +199,24 @@ int __init cma_declare_contiguous(phys_addr_t base, if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) return -EINVAL; + /* + * adjust limit to avoid crossing low/high memory boundary for + * automatically allocated regions + */ + if (((limit == 0 || limit > memblock_end) && + (memblock_end - size < highmem_start && + memblock_end > highmem_start)) || + (!fixed && limit > highmem_start && limit - size < highmem_start)) { + limit = highmem_start; + } + + if (fixed && base < highmem_start && base+size > highmem_start) { + ret = -EINVAL; + pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n", + (unsigned long)base, (unsigned long)highmem_start); + goto err; + } + /* Reserve memory */ if (base && fixed) { if (memblock_is_region_reserved(base, size) || From 95b0e655f9148881907fdbe5baba6a9f5d094fee Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 9 Oct 2014 15:26:49 -0700 Subject: [PATCH 065/164] ARM: mm: don't limit default CMA region only to low memory DMA-mapping supports CMA regions placed either in low or high memory, so it is no longer necessary to limit the default CMA region to low memory only. The real limit is still defined by the architecture-specific DMA limit.
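A hedged sketch of the boundary handling added in the previous patch, pulled out as a standalone helper with made-up example numbers: with low memory ending at 768 MiB (highmem_start) and RAM ending at 2 GiB (memblock_end), a 512 MiB region with limit == 0 fits entirely in high memory and is left alone, while a region larger than the 1.25 GiB of high memory has its limit clamped to highmem_start, so memblock is asked to place it entirely below the boundary rather than letting it straddle both zones. cma_clamp_limit() is an invented name; the real logic lives inline in cma_declare_contiguous() above.

/* "fixed" means the caller requested an exact base address. */
static phys_addr_t cma_clamp_limit(phys_addr_t limit, phys_addr_t size,
				   phys_addr_t memblock_end,
				   phys_addr_t highmem_start, bool fixed)
{
	/* same condition as in the mm/cma.c hunk above */
	if (((limit == 0 || limit > memblock_end) &&
	     (memblock_end - size < highmem_start &&
	      memblock_end > highmem_start)) ||
	    (!fixed && limit > highmem_start && limit - size < highmem_start))
		return highmem_start;	/* keep the region below the boundary */
	return limit;
}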
Signed-off-by: Marek Szyprowski Reported-by: Russell King - ARM Linux Acked-by: Michal Nazarewicz Cc: Daniel Drake Cc: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9221645dd192..92bba32d9230 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -322,7 +322,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) * reserve memory for DMA contigouos allocations, * must come from DMA area inside low memory */ - dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit)); + dma_contiguous_reserve(arm_dma_limit); arm_memblock_steal_permitted = false; memblock_dump_all(); From 21bb9bd19430a43e6462ce75030fd7fac4b766ef Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:26:51 -0700 Subject: [PATCH 066/164] mm: page_alloc: determine migratetype only once The check for ALLOC_CMA in __alloc_pages_nodemask() derives migratetype from gfp_mask in each retry pass, although the migratetype variable already has the value determined and it does not change. Use the variable and perform the check only once. Also convert #ifdef CONFIG_CMA to IS_ENABLED. Signed-off-by: Vlastimil Babka Acked-by: David Rientjes Cc: Mel Gorman Cc: Rik van Riel Cc: Johannes Weiner Cc: "Srivatsa S. Bhat" Cc: Hugh Dickins Cc: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f3bc59f2ed52..e63bf7744a0c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2776,6 +2776,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!zonelist->_zonerefs->zone)) return NULL; + if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; + retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); @@ -2787,10 +2790,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, goto out; classzone_idx = zonelist_zone_idx(preferred_zoneref); -#ifdef CONFIG_CMA - if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) - alloc_flags |= ALLOC_CMA; -#endif /* First allocation attempt */ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, zonelist, high_zoneidx, alloc_flags, From 59d43914ed7b96255271ad6b7b735344beffa3c0 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Thu, 9 Oct 2014 15:26:53 -0700 Subject: [PATCH 067/164] vfs: make guard_bh_eod() more generic This patchset implements the readpages() operation for block devices by using mpage_readpages(), which can create multipage BIOs instead of a BIO for each page and reduce system CPU time consumption. This patch (of 3): guard_bh_eod() is used in submit_bh() to allow us to do IO even on the odd last sectors of a device, even if the block size is some multiple of the physical sector size. This makes guard_bh_eod() more generic and renames it guard_bio_eod() so that we can use it without a struct buffer_head argument. The reason for this change is that using mpage_readpages() for block devices requires adding this guard check in the mpage code.
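To make the end-of-device arithmetic in the reworked guard concrete, here is a hedged, standalone sketch of the truncation calculation; eod_truncated_bytes() is an invented name, and the real code in fs/buffer.c below operates on struct bio fields directly. Example numbers: a device of 1000 512-byte sectors and a 4096-byte bio starting at sector 996 has only 4 backed sectors, so 2048 bytes are truncated and, for a read, zero-filled.

static unsigned int eod_truncated_bytes(sector_t dev_sectors,
					sector_t bio_sector,
					unsigned int bio_bytes)
{
	sector_t maxsector;

	if (bio_sector >= dev_sectors)
		return 0;			/* let the I/O fail normally */
	maxsector = dev_sectors - bio_sector;	/* sectors left on the device */
	if ((bio_bytes >> 9) <= maxsector)
		return 0;			/* bio fits, nothing to do */
	return bio_bytes - (maxsector << 9);	/* bytes hanging past the end */
}

The returned byte count is subtracted from both bi_size and the last bvec's bv_len, and for reads the now-unbacked tail of the page is zeroed so callers never see stale data past the end of the device.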
Signed-off-by: Akinobu Mita Cc: Jens Axboe Cc: Alexander Viro Cc: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/buffer.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/fs/buffer.c b/fs/buffer.c index 3588a80854b2..e442a26e80f7 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2956,7 +2956,7 @@ static void end_bio_bh_io_sync(struct bio *bio, int err) /* * This allows us to do IO even on the odd last sectors - * of a device, even if the bh block size is some multiple + * of a device, even if the block size is some multiple * of the physical sector size. * * We'll just truncate the bio to the size of the device, @@ -2966,10 +2966,11 @@ static void end_bio_bh_io_sync(struct bio *bio, int err) * errors, this only handles the "we need to be able to * do IO at the final sector" case. */ -static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) +static void guard_bio_eod(int rw, struct bio *bio) { sector_t maxsector; - unsigned bytes; + struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; + unsigned truncated_bytes; maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; if (!maxsector) @@ -2984,23 +2985,20 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) return; maxsector -= bio->bi_iter.bi_sector; - bytes = bio->bi_iter.bi_size; - if (likely((bytes >> 9) <= maxsector)) + if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) return; - /* Uhhuh. We've got a bh that straddles the device size! */ - bytes = maxsector << 9; + /* Uhhuh. We've got a bio that straddles the device size! */ + truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); /* Truncate the bio.. */ - bio->bi_iter.bi_size = bytes; - bio->bi_io_vec[0].bv_len = bytes; + bio->bi_iter.bi_size -= truncated_bytes; + bvec->bv_len -= truncated_bytes; /* ..and clear the end of the buffer for reads */ if ((rw & RW_MASK) == READ) { - void *kaddr = kmap_atomic(bh->b_page); - memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes); - kunmap_atomic(kaddr); - flush_dcache_page(bh->b_page); + zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, + truncated_bytes); } } @@ -3041,7 +3039,7 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) bio->bi_flags |= bio_flags; /* Take care of bh's that straddle the end of the device */ - guard_bh_eod(rw, bio, bh); + guard_bio_eod(rw, bio); if (buffer_meta(bh)) rw |= REQ_META; From 4db96b71e3caea5bb39053d57683129e0682c66f Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Thu, 9 Oct 2014 15:26:55 -0700 Subject: [PATCH 068/164] vfs: guard end of device for mpage interface Add guard_bio_eod() check for mpage code in order to allow us to do IO even on the odd last sectors of a device, even if the block size is some multiple of the physical sector size. Using mpage_readpages() for block device requires this guard check. Signed-off-by: Akinobu Mita Cc: Jens Axboe Cc: Alexander Viro Cc: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/buffer.c | 2 +- fs/internal.h | 5 +++++ fs/mpage.c | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/buffer.c b/fs/buffer.c index e442a26e80f7..7bd5c4685e98 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2966,7 +2966,7 @@ static void end_bio_bh_io_sync(struct bio *bio, int err) * errors, this only handles the "we need to be able to * do IO at the final sector" case. 
*/ -static void guard_bio_eod(int rw, struct bio *bio) +void guard_bio_eod(int rw, struct bio *bio) { sector_t maxsector; struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; diff --git a/fs/internal.h b/fs/internal.h index e325b4f9c799..b2623200107b 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -34,6 +34,11 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) } #endif +/* + * buffer.c + */ +extern void guard_bio_eod(int rw, struct bio *bio); + /* * char_dev.c */ diff --git a/fs/mpage.c b/fs/mpage.c index 5f9ed622274f..3e79220babac 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -28,6 +28,7 @@ #include #include #include +#include "internal.h" /* * I/O completion handler for multipage BIOs. @@ -57,6 +58,7 @@ static void mpage_end_io(struct bio *bio, int err) static struct bio *mpage_bio_submit(int rw, struct bio *bio) { bio->bi_end_io = mpage_end_io; + guard_bio_eod(rw, bio); submit_bio(rw, bio); return NULL; } From 447f05bb488bff4282088259b04f47f0f9f76760 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Thu, 9 Oct 2014 15:26:58 -0700 Subject: [PATCH 069/164] block_dev: implement readpages() to optimize sequential read Sequential read from a block device is expected to be equal or faster than from the file on a filesystem. But it is not correct due to the lack of effective readpages() in the address space operations for block device. This implements readpages() operation for block device by using mpage_readpages() which can create multipage BIOs instead of BIOs for each page and reduce system CPU time consumption. Install 1GB of RAM disk storage: # modprobe scsi_debug dev_size_mb=1024 delay=0 Sequential read from file on a filesystem: # mkfs.ext4 /dev/$DEV # mount /dev/$DEV /mnt # fio --name=t --size=512m --rw=read --filename=/mnt/file ... read : io=524288KB, bw=2133.4MB/s, iops=546133, runt= 240msec Sequential read from a block device: # fio --name=t --size=512m --rw=read --filename=/dev/$DEV ... 
(Without this commit) read : io=524288KB, bw=1700.2MB/s, iops=435455, runt= 301msec (With this commit) read : io=524288KB, bw=2160.4MB/s, iops=553046, runt= 237msec Signed-off-by: Akinobu Mita Cc: Jens Axboe Cc: Alexander Viro Cc: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/block_dev.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/block_dev.c b/fs/block_dev.c index 6d7274619bf9..e2f3ad0879ce 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -304,6 +304,12 @@ static int blkdev_readpage(struct file * file, struct page * page) return block_read_full_page(page, blkdev_get_block); } +static int blkdev_readpages(struct file *file, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block); +} + static int blkdev_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) @@ -1622,6 +1628,7 @@ static int blkdev_releasepage(struct page *page, gfp_t wait) static const struct address_space_operations def_blk_aops = { .readpage = blkdev_readpage, + .readpages = blkdev_readpages, .writepage = blkdev_writepage, .write_begin = blkdev_write_begin, .write_end = blkdev_write_end, From 8b1645685acf3c7e0b93611fb4b328ef45c47e92 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:00 -0700 Subject: [PATCH 070/164] mm, THP: don't hold mmap_sem in khugepaged when allocating THP When allocating a huge page for collapsing, khugepaged currently holds mmap_sem for reading on the mm where collapsing occurs. Afterwards the read lock is dropped before the write lock is taken on the same mmap_sem. Holding mmap_sem during the whole huge page allocation is therefore useless; the vma needs to be rechecked after taking the write lock anyway. Furthermore, huge page allocation might involve a rather long sync compaction, and thus block any mmap_sem writers, i.e. affect workloads that perform frequent m(un)map or mprotect operations. This patch simply releases the read lock before allocating a huge page. It also deletes an outdated comment that assumed the vma must be stable, as it was using alloc_hugepage_vma(). This is no longer true since commit 9f1b868a13ac ("mm: thp: khugepaged: add policy for finding target node"). Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f8ffd9412ec5..55ab569c31b4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2322,23 +2322,17 @@ static struct page int node) { VM_BUG_ON_PAGE(*hpage, *hpage); + /* - * Allocate the page while the vma is still valid and under - * the mmap_sem read mode so there is no memory allocation - * later when we take the mmap_sem in write mode. This is more - * friendly behavior (OTOH it may actually hide bugs) to - * filesystems in userland with daemons allocating memory in - * the userland I/O paths. Allocating memory with the - * mmap_sem in read mode is good idea also to allow greater - * scalability.
- */ - *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask( - khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER); - /* - * After allocating the hugepage, release the mmap_sem read lock in - * preparation for taking it in write mode. + * Before allocating the hugepage, release the mmap_sem read lock. + * The allocation can take potentially a long time if it involves + * sync compaction, and we do not need to hold the mmap_sem during + * that. We will recheck the vma after taking it again in write mode. */ up_read(&mm->mmap_sem); + + *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask( + khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); From 53853e2d2bfb748a8b5aa2fd1de15699266865e0 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:02 -0700 Subject: [PATCH 071/164] mm, compaction: defer each zone individually instead of preferred zone When direct sync compaction is often unsuccessful, it may become deferred for some time to avoid further useless attempts, both sync and async. Successful high-order allocations un-defer compaction, while further unsuccessful compaction attempts prolong the compaction deferred period. Currently the checking and setting of deferred status is performed only on the preferred zone of the allocation that invoked direct compaction. But compaction itself is attempted on all eligible zones in the zonelist, so the behavior is suboptimal and may lead both to scenarios where 1) compaction is attempted uselessly, or 2) where it's not attempted despite good chances of succeeding, as shown in the examples below: 1) A direct compaction with Normal preferred zone failed and set deferred compaction for the Normal zone. Another unrelated direct compaction with DMA32 as preferred zone will attempt to compact DMA32 zone even though the first compaction attempt also included DMA32 zone. In another scenario, compaction with Normal preferred zone failed to compact Normal zone, but succeeded in the DMA32 zone, so it will not defer compaction. In the next attempt, it will try Normal zone which will fail again, instead of skipping Normal zone and trying DMA32 directly. 2) Kswapd will balance DMA32 zone and reset defer status based on watermarks looking good. A direct compaction with preferred Normal zone will skip compaction of all zones including DMA32 because Normal was still deferred. The allocation might have succeeded in DMA32, but won't. This patch makes compaction deferring work on an individual zone basis instead of just the preferred zone. For each zone, it checks compaction_deferred() to decide if the zone should be skipped. If watermarks fail after compacting the zone, defer_compaction() is called. The zone where watermarks passed can still be deferred when the allocation attempt is unsuccessful. When allocation is successful, compaction_defer_reset() is called for the zone containing the allocated page. This approach should approximate calling defer_compaction() only on zones where compaction was attempted and did not yield an allocated page. There might be corner cases but that is inevitable as long as the decision to stop compacting does not guarantee that a page will be allocated. Due to a new COMPACT_DEFERRED return value, some functions relying implicitly on COMPACT_SKIPPED = 0 had to be updated, with comments made more accurate.
The did_some_progress output parameter of __alloc_pages_direct_compact() is removed completely, as the caller actually does not use it after compaction sets it - it is only considered when direct reclaim sets it. During testing on a two-node machine with a single very small Normal zone on node 1, this patch has improved success rates in the stress-highalloc mmtests benchmark. The success rates here were previously made worse by commit 3a025760fc15 ("mm: page_alloc: spill to remote nodes before waking kswapd") as kswapd was no longer resetting the deferred compaction for the Normal zone often enough, and DMA32 zones on both nodes were thus not considered for compaction. On a different machine, success rates were improved with __GFP_NO_KSWAPD allocations. [akpm@linux-foundation.org: fix CONFIG_COMPACTION=n build] Signed-off-by: Vlastimil Babka Acked-by: Minchan Kim Reviewed-by: Zhang Yanfei Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 16 +++++++---- mm/compaction.c | 32 ++++++++++++++++----- mm/page_alloc.c | 57 +++++++++++++++++++++----------------- mm/vmscan.c | 14 +++++++--- 4 files changed, 76 insertions(+), 43 deletions(-) diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 01e3132820da..b2e4c92d0445 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -2,14 +2,16 @@ #define _LINUX_COMPACTION_H /* Return values for compact_zone() and try_to_compact_pages() */ +/* compaction didn't start as it was deferred due to past failures */ +#define COMPACT_DEFERRED 0 /* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 0 +#define COMPACT_SKIPPED 1 /* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 1 +#define COMPACT_CONTINUE 2 /* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 2 +#define COMPACT_PARTIAL 3 /* The full zone was compacted */ -#define COMPACT_COMPLETE 3 +#define COMPACT_COMPLETE 4 #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; @@ -22,7 +24,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - enum migrate_mode mode, bool *contended); + enum migrate_mode mode, bool *contended, + struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -91,7 +94,8 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended) + enum migrate_mode mode, bool *contended, + struct zone **candidate_zone) { return COMPACT_CONTINUE; } diff --git a/mm/compaction.c b/mm/compaction.c index 21bf292b642a..1c7195d42e83 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1125,27 +1125,26 @@ int sysctl_extfrag_threshold = 500; * @nodemask: The allowed nodes to allocate from * @mode: The migration mode for async, sync light, or sync migration * @contended: Return value that is true if compaction was
aborted due to lock contention - * @page: Optionally capture a free page of the requested order during compaction + * @candidate_zone: Return the zone where we think allocation should succeed * * This is the main entry point for direct page compaction. */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended) + enum migrate_mode mode, bool *contended, + struct zone **candidate_zone) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; int may_perform_io = gfp_mask & __GFP_IO; struct zoneref *z; struct zone *zone; - int rc = COMPACT_SKIPPED; + int rc = COMPACT_DEFERRED; int alloc_flags = 0; /* Check if the GFP flags allow compaction */ if (!order || !may_enter_fs || !may_perform_io) - return rc; - - count_compact_event(COMPACTSTALL); + return COMPACT_SKIPPED; #ifdef CONFIG_CMA if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) @@ -1156,14 +1155,33 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; + if (compaction_deferred(zone, order)) + continue; + status = compact_zone_order(zone, order, gfp_mask, mode, contended); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, - alloc_flags)) + alloc_flags)) { + *candidate_zone = zone; + /* + * We think the allocation will succeed in this zone, + * but it is not certain, hence the false. The caller + * will repeat this with true if allocation indeed + * succeeds in this zone. + */ + compaction_defer_reset(zone, order, false); break; + } else if (mode != MIGRATE_ASYNC) { + /* + * We think that allocation won't succeed in this zone + * so we defer compaction there. If it ends up + * succeeding after all, it will be reset. 
+ */ + defer_compaction(zone, order); + } } return rc; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e63bf7744a0c..514fd8008114 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2297,24 +2297,28 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int classzone_idx, int migratetype, enum migrate_mode mode, - bool *contended_compaction, bool *deferred_compaction, - unsigned long *did_some_progress) + bool *contended_compaction, bool *deferred_compaction) { + struct zone *last_compact_zone = NULL; + unsigned long compact_result; + + if (!order) return NULL; - if (compaction_deferred(preferred_zone, order)) { - *deferred_compaction = true; - return NULL; - } - current->flags |= PF_MEMALLOC; - *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, + compact_result = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, mode, - contended_compaction); + contended_compaction, + &last_compact_zone); current->flags &= ~PF_MEMALLOC; - if (*did_some_progress != COMPACT_SKIPPED) { + if (compact_result > COMPACT_DEFERRED) + count_vm_event(COMPACTSTALL); + else + *deferred_compaction = true; + + if (compact_result > COMPACT_SKIPPED) { struct page *page; /* Page migration frees to the PCP lists but we want merging */ @@ -2325,13 +2329,24 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, order, zonelist, high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, preferred_zone, classzone_idx, migratetype); + if (page) { - preferred_zone->compact_blockskip_flush = false; - compaction_defer_reset(preferred_zone, order, true); + struct zone *zone = page_zone(page); + + zone->compact_blockskip_flush = false; + compaction_defer_reset(zone, order, true); count_vm_event(COMPACTSUCCESS); return page; } + /* + * last_compact_zone is where try_to_compact_pages thought + * allocation should succeed, so it did not defer compaction. + * But now we know that it didn't succeed, so we do the defer. + */ + if (last_compact_zone && mode != MIGRATE_ASYNC) + defer_compaction(last_compact_zone, order); + /* * It's bad if compaction run occurs and fails. * The most likely reason is that pages exist, @@ -2339,13 +2354,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, */ count_vm_event(COMPACTFAIL); - /* - * As async compaction considers a subset of pageblocks, only - * defer if the failure was a sync compaction failure. 
- */ - if (mode != MIGRATE_ASYNC) - defer_compaction(preferred_zone, order); - cond_resched(); } @@ -2356,9 +2364,8 @@ static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, - int classzone_idx, int migratetype, - enum migrate_mode mode, bool *contended_compaction, - bool *deferred_compaction, unsigned long *did_some_progress) + int classzone_idx, int migratetype, enum migrate_mode mode, + bool *contended_compaction, bool *deferred_compaction) { return NULL; } @@ -2634,8 +2641,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, preferred_zone, classzone_idx, migratetype, migration_mode, &contended_compaction, - &deferred_compaction, - &did_some_progress); + &deferred_compaction); if (page) goto got_pg; @@ -2727,8 +2733,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, preferred_zone, classzone_idx, migratetype, migration_mode, &contended_compaction, - &deferred_compaction, - &did_some_progress); + &deferred_compaction); if (page) goto got_pg; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 2836b5373b2e..1a71b8b1ea34 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2315,7 +2315,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc) return reclaimable; } -/* Returns true if compaction should go ahead for a high-order request */ +/* + * Returns true if compaction should go ahead for a high-order request, or + * the high-order allocation would succeed without compaction. + */ static inline bool compaction_ready(struct zone *zone, int order) { unsigned long balance_gap, watermark; @@ -2339,8 +2342,11 @@ static inline bool compaction_ready(struct zone *zone, int order) if (compaction_deferred(zone, order)) return watermark_ok; - /* If compaction is not ready to start, keep reclaiming */ - if (!compaction_suitable(zone, order)) + /* + * If compaction is not ready to start and allocation is not likely + * to succeed without it, then keep reclaiming. + */ + if (compaction_suitable(zone, order) == COMPACT_SKIPPED) return false; return watermark_ok; @@ -2818,7 +2824,7 @@ static bool zone_balanced(struct zone *zone, int order, return false; if (IS_ENABLED(CONFIG_COMPACTION) && order && - !compaction_suitable(zone, order)) + compaction_suitable(zone, order) == COMPACT_SKIPPED) return false; return true; From 98dd3b48a7b8e8277f14c2b7d879477efc1ed0d0 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:04 -0700 Subject: [PATCH 072/164] mm, compaction: do not count compact_stall if all zones skipped compaction The compact_stall vmstat counter counts the number of allocations stalled by direct compaction. It does not count when all attempted zones had deferred compaction, but it does count when all zones skipped compaction. The skipping is decided based on very early check of compaction_suitable(), based on watermarks and memory fragmentation. Therefore it makes sense not to count skipped compactions as stalls. Moreover, compact_success or compact_fail is also already not being counted when compaction was skipped, so this patch changes the compact_stall counting to match the other two. Additionally, restructure __alloc_pages_direct_compact() code for better readability. 
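A hedged sketch of the rule the restructured code implements. The two COMPACT_* values are repeated from the earlier patch in this series only to keep the snippet self-contained, and compaction_counts_as_stall() is an invented helper name, not something this patch adds; the real code open-codes the same comparison in a switch statement.

#define COMPACT_DEFERRED 0	/* all zones had compaction deferred          */
#define COMPACT_SKIPPED  1	/* compaction not possible or not worthwhile  */

static inline bool compaction_counts_as_stall(unsigned long compact_result)
{
	/* deferred or skipped everywhere: no work was attempted, no stall */
	return compact_result > COMPACT_SKIPPED;
}

This keeps compact_stall consistent with compact_success and compact_fail, which are likewise only incremented once compaction has actually run.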
Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 90 ++++++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 42 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 514fd8008114..822babd808fe 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2301,7 +2301,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, { struct zone *last_compact_zone = NULL; unsigned long compact_result; - + struct page *page; if (!order) return NULL; @@ -2313,50 +2313,56 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, &last_compact_zone); current->flags &= ~PF_MEMALLOC; - if (compact_result > COMPACT_DEFERRED) - count_vm_event(COMPACTSTALL); - else + switch (compact_result) { + case COMPACT_DEFERRED: *deferred_compaction = true; - - if (compact_result > COMPACT_SKIPPED) { - struct page *page; - - /* Page migration frees to the PCP lists but we want merging */ - drain_pages(get_cpu()); - put_cpu(); - - page = get_page_from_freelist(gfp_mask, nodemask, - order, zonelist, high_zoneidx, - alloc_flags & ~ALLOC_NO_WATERMARKS, - preferred_zone, classzone_idx, migratetype); - - if (page) { - struct zone *zone = page_zone(page); - - zone->compact_blockskip_flush = false; - compaction_defer_reset(zone, order, true); - count_vm_event(COMPACTSUCCESS); - return page; - } - - /* - * last_compact_zone is where try_to_compact_pages thought - * allocation should succeed, so it did not defer compaction. - * But now we know that it didn't succeed, so we do the defer. - */ - if (last_compact_zone && mode != MIGRATE_ASYNC) - defer_compaction(last_compact_zone, order); - - /* - * It's bad if compaction run occurs and fails. - * The most likely reason is that pages exist, - * but not enough to satisfy watermarks. - */ - count_vm_event(COMPACTFAIL); - - cond_resched(); + /* fall-through */ + case COMPACT_SKIPPED: + return NULL; + default: + break; } + /* + * At least in one zone compaction wasn't deferred or skipped, so let's + * count a compaction stall + */ + count_vm_event(COMPACTSTALL); + + /* Page migration frees to the PCP lists but we want merging */ + drain_pages(get_cpu()); + put_cpu(); + + page = get_page_from_freelist(gfp_mask, nodemask, + order, zonelist, high_zoneidx, + alloc_flags & ~ALLOC_NO_WATERMARKS, + preferred_zone, classzone_idx, migratetype); + + if (page) { + struct zone *zone = page_zone(page); + + zone->compact_blockskip_flush = false; + compaction_defer_reset(zone, order, true); + count_vm_event(COMPACTSUCCESS); + return page; + } + + /* + * last_compact_zone is where try_to_compact_pages thought allocation + * should succeed, so it did not defer compaction. But here we know + * that it didn't succeed, so we do the defer. + */ + if (last_compact_zone && mode != MIGRATE_ASYNC) + defer_compaction(last_compact_zone, order); + + /* + * It's bad if compaction run occurs and fails. The most likely reason + * is that pages exist, but not enough to satisfy watermarks. 
+ */ + count_vm_event(COMPACTFAIL); + + cond_resched(); + return NULL; } #else From f8224aa5a0a4627926019bba7511926393fbee3b Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:07 -0700 Subject: [PATCH 073/164] mm, compaction: do not recheck suitable_migration_target under lock isolate_freepages_block() rechecks if the pageblock is suitable to be a target for migration after it has taken the zone->lock. However, the check has been optimized to occur only once per pageblock, and compact_checklock_irqsave() might be dropping and reacquiring the lock, which means somebody else might have changed the pageblock's migratetype meanwhile. Furthermore, nothing prevents the migratetype from changing right after isolate_freepages_block() has finished isolating. Given how imperfect this is, it's simpler to just rely on the check done in isolate_freepages() without the lock, and not pretend that the recheck under lock guarantees anything. It is just a heuristic after all. Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 1c7195d42e83..7bf150d4e1c8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -276,7 +276,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, struct page *cursor, *valid_page = NULL; unsigned long flags; bool locked = false; - bool checked_pageblock = false; cursor = pfn_to_page(blockpfn); @@ -307,18 +306,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (!locked) break; - /* Recheck this is a suitable migration target under lock */ - if (!strict && !checked_pageblock) { - /* - * We need to check suitability of pageblock only once - * and this isolate_freepages_block() is called with - * pageblock range, so just check once is sufficient. - */ - checked_pageblock = true; - if (!suitable_migration_target(page)) - break; - } - /* Recheck this is a buddy page under lock */ if (!PageBuddy(page)) goto isolate_fail; From edc2ca61249679298c1f343cd9c549964b8df4b4 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:09 -0700 Subject: [PATCH 074/164] mm, compaction: move pageblock checks up from isolate_migratepages_range() isolate_migratepages_range() is the main function of the compaction scanner, called either on a single pageblock by isolate_migratepages() during regular compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range(). It currently performs two pageblock-wide compaction suitability checks, and because of the CMA callpath, it tracks if it crossed a pageblock boundary in order to repeat those checks. However, closer inspection shows that those checks are always true for CMA: - isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true - migrate_async_suitable() check is skipped because CMA uses sync compaction We can therefore move the compaction-specific checks to isolate_migratepages() and simplify isolate_migratepages_range(). Furthermore, we can mimic the freepage scanner family of functions, which has the isolate_freepages_block() function called both by compaction from isolate_freepages() and by CMA from isolate_freepages_range(), where each use case adds its own specific glue code.
This allows further code simplification. Thus, we rename isolate_migratepages_range() to isolate_migratepages_block() and limit its functionality to a single pageblock (or its subset). For CMA, a new, different isolate_migratepages_range() is created as a CMA-specific wrapper for the _block() function. The checks specific to compaction are moved to isolate_migratepages(). As part of the unification of these two families of functions, we remove the redundant zone parameter where applicable, since the zone pointer is already passed in cc->zone. Furthermore, going back to compact_zone() and compact_finished() when a pageblock is found unsuitable (now by isolate_migratepages()) is wasteful - the checks are meant to skip pageblocks quickly. The patch therefore also introduces a simple loop into isolate_migratepages() so that it does not return immediately on failed pageblock checks, but keeps going until isolate_migratepages_range() gets called once. Similarly to isolate_freepages(), the function periodically checks if it needs to reschedule or abort async compaction. [iamjoonsoo.kim@lge.com: fix isolated page counting bug in compaction] Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 254 ++++++++++++++++++++++++++++-------------------- mm/internal.h | 4 +- mm/page_alloc.c | 3 +- 3 files changed, 149 insertions(+), 112 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 7bf150d4e1c8..8058e3f98f08 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat) */ static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long nr_isolated, - bool set_unsuitable, bool migrate_scanner) + bool migrate_scanner) { struct zone *zone = cc->zone; unsigned long pfn; @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc, if (nr_isolated) return; - /* - * Only skip pageblocks when all forms of compaction will be known to - * fail in the near future.
- */ - if (set_unsuitable) - set_pageblock_skip(page); + set_pageblock_skip(page); pfn = page_to_pfn(page); @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc, static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long nr_isolated, - bool set_unsuitable, bool migrate_scanner) + bool migrate_scanner) { } #endif /* CONFIG_COMPACTION */ @@ -348,8 +343,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, /* Update the pageblock-skip if the whole pageblock was scanned */ if (blockpfn == end_pfn) - update_pageblock_skip(cc, valid_page, total_isolated, true, - false); + update_pageblock_skip(cc, valid_page, total_isolated, false); count_compact_events(COMPACTFREE_SCANNED, nr_scanned); if (total_isolated) @@ -420,22 +414,19 @@ isolate_freepages_range(struct compact_control *cc, } /* Update the number of anon and file isolated pages in the zone */ -static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc) +static void acct_isolated(struct zone *zone, struct compact_control *cc) { struct page *page; unsigned int count[2] = { 0, }; + if (list_empty(&cc->migratepages)) + return; + list_for_each_entry(page, &cc->migratepages, lru) count[!!page_is_file_cache(page)]++; - /* If locked we can use the interrupt unsafe versions */ - if (locked) { - __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); - __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); - } else { - mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); - mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); - } + mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); + mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); } /* Similar to reclaim, but different enough that they don't share logic */ @@ -454,40 +445,34 @@ static bool too_many_isolated(struct zone *zone) } /** - * isolate_migratepages_range() - isolate all migrate-able pages in range. - * @zone: Zone pages are in. + * isolate_migratepages_block() - isolate all migrate-able pages within + * a single pageblock * @cc: Compaction control structure. - * @low_pfn: The first PFN of the range. - * @end_pfn: The one-past-the-last PFN of the range. - * @unevictable: true if it allows to isolate unevictable pages + * @low_pfn: The first PFN to isolate + * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock + * @isolate_mode: Isolation mode to be used. * * Isolate all pages that can be migrated from the range specified by - * [low_pfn, end_pfn). Returns zero if there is a fatal signal - * pending), otherwise PFN of the first page that was not scanned - * (which may be both less, equal to or more then end_pfn). + * [low_pfn, end_pfn). The range is expected to be within same pageblock. + * Returns zero if there is a fatal signal pending, otherwise PFN of the + * first page that was not scanned (which may be both less, equal to or more + * than end_pfn). * - * Assumes that cc->migratepages is empty and cc->nr_migratepages is - * zero. - * - * Apart from cc->migratepages and cc->nr_migratetypes this function - * does not modify any cc's fields, in particular it does not modify - * (or read for that matter) cc->migrate_pfn. + * The pages are isolated on cc->migratepages list (not required to be empty), + * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field + * is neither read nor updated. 
*/ -unsigned long -isolate_migratepages_range(struct zone *zone, struct compact_control *cc, - unsigned long low_pfn, unsigned long end_pfn, bool unevictable) +static unsigned long +isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, + unsigned long end_pfn, isolate_mode_t isolate_mode) { - unsigned long last_pageblock_nr = 0, pageblock_nr; + struct zone *zone = cc->zone; unsigned long nr_scanned = 0, nr_isolated = 0; struct list_head *migratelist = &cc->migratepages; struct lruvec *lruvec; unsigned long flags; bool locked = false; struct page *page = NULL, *valid_page = NULL; - bool set_unsuitable = true; - const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ? - ISOLATE_ASYNC_MIGRATE : 0) | - (unevictable ? ISOLATE_UNEVICTABLE : 0); /* * Ensure that there are not too many pages isolated from the LRU @@ -518,19 +503,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, } } - /* - * migrate_pfn does not necessarily start aligned to a - * pageblock. Ensure that pfn_valid is called when moving - * into a new MAX_ORDER_NR_PAGES range in case of large - * memory holes within the zone - */ - if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { - if (!pfn_valid(low_pfn)) { - low_pfn += MAX_ORDER_NR_PAGES - 1; - continue; - } - } - if (!pfn_valid_within(low_pfn)) continue; nr_scanned++; @@ -548,28 +520,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, if (!valid_page) valid_page = page; - /* If isolation recently failed, do not retry */ - pageblock_nr = low_pfn >> pageblock_order; - if (last_pageblock_nr != pageblock_nr) { - int mt; - - last_pageblock_nr = pageblock_nr; - if (!isolation_suitable(cc, page)) - goto next_pageblock; - - /* - * For async migration, also only scan in MOVABLE - * blocks. Async migration is optimistic to see if - * the minimum amount of work satisfies the allocation - */ - mt = get_pageblock_migratetype(page); - if (cc->mode == MIGRATE_ASYNC && - !migrate_async_suitable(mt)) { - set_unsuitable = false; - goto next_pageblock; - } - } - /* * Skip if free. page_order cannot be used without zone->lock * as nothing prevents parallel allocations or buddy merging. @@ -604,8 +554,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, */ if (PageTransHuge(page)) { if (!locked) - goto next_pageblock; - low_pfn += (1 << compound_order(page)) - 1; + low_pfn = ALIGN(low_pfn + 1, + pageblock_nr_pages) - 1; + else + low_pfn += (1 << compound_order(page)) - 1; + continue; } @@ -635,7 +588,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, lruvec = mem_cgroup_page_lruvec(page, zone); /* Try isolate the page */ - if (__isolate_lru_page(page, mode) != 0) + if (__isolate_lru_page(page, isolate_mode) != 0) continue; VM_BUG_ON_PAGE(PageTransCompound(page), page); @@ -654,15 +607,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, ++low_pfn; break; } - - continue; - -next_pageblock: - low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1; } - acct_isolated(zone, locked, cc); - if (locked) spin_unlock_irqrestore(&zone->lru_lock, flags); @@ -671,8 +617,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, * if the whole pageblock was scanned without isolating any page. 
*/ if (low_pfn == end_pfn) - update_pageblock_skip(cc, valid_page, nr_isolated, - set_unsuitable, true); + update_pageblock_skip(cc, valid_page, nr_isolated, true); trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); @@ -683,15 +628,63 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, return low_pfn; } +/** + * isolate_migratepages_range() - isolate migrate-able pages in a PFN range + * @cc: Compaction control structure. + * @start_pfn: The first PFN to start isolating. + * @end_pfn: The one-past-last PFN. + * + * Returns zero if isolation fails fatally due to e.g. pending signal. + * Otherwise, function returns one-past-the-last PFN of isolated page + * (which may be greater than end_pfn if end fell in a middle of a THP page). + */ +unsigned long +isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, + unsigned long end_pfn) +{ + unsigned long pfn, block_end_pfn; + + /* Scan block by block. First and last block may be incomplete */ + pfn = start_pfn; + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); + + for (; pfn < end_pfn; pfn = block_end_pfn, + block_end_pfn += pageblock_nr_pages) { + + block_end_pfn = min(block_end_pfn, end_pfn); + + /* Skip whole pageblock in case of a memory hole */ + if (!pfn_valid(pfn)) + continue; + + pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, + ISOLATE_UNEVICTABLE); + + /* + * In case of fatal failure, release everything that might + * have been isolated in the previous iteration, and signal + * the failure back to caller. + */ + if (!pfn) { + putback_movable_pages(&cc->migratepages); + cc->nr_migratepages = 0; + break; + } + } + acct_isolated(cc->zone, cc); + + return pfn; +} + #endif /* CONFIG_COMPACTION || CONFIG_CMA */ #ifdef CONFIG_COMPACTION /* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. */ -static void isolate_freepages(struct zone *zone, - struct compact_control *cc) +static void isolate_freepages(struct compact_control *cc) { + struct zone *zone = cc->zone; struct page *page; unsigned long block_start_pfn; /* start of current pageblock */ unsigned long block_end_pfn; /* end of current pageblock */ @@ -809,7 +802,7 @@ static struct page *compaction_alloc(struct page *migratepage, */ if (list_empty(&cc->freepages)) { if (!cc->contended) - isolate_freepages(cc->zone, cc); + isolate_freepages(cc); if (list_empty(&cc->freepages)) return NULL; @@ -843,34 +836,82 @@ typedef enum { } isolate_migrate_t; /* - * Isolate all pages that can be migrated from the block pointed to by - * the migrate scanner within compact_control. + * Isolate all pages that can be migrated from the first suitable block, + * starting at the block pointed to by the migrate scanner pfn within + * compact_control. */ static isolate_migrate_t isolate_migratepages(struct zone *zone, struct compact_control *cc) { unsigned long low_pfn, end_pfn; + struct page *page; + const isolate_mode_t isolate_mode = + (cc->mode == MIGRATE_ASYNC ? 
ISOLATE_ASYNC_MIGRATE : 0); - /* Do not scan outside zone boundaries */ - low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); + /* + * Start at where we last stopped, or beginning of the zone as + * initialized by compact_zone() + */ + low_pfn = cc->migrate_pfn; /* Only scan within a pageblock boundary */ end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); - /* Do not cross the free scanner or scan within a memory hole */ - if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { - cc->migrate_pfn = end_pfn; - return ISOLATE_NONE; + /* + * Iterate over whole pageblocks until we find the first suitable. + * Do not cross the free scanner. + */ + for (; end_pfn <= cc->free_pfn; + low_pfn = end_pfn, end_pfn += pageblock_nr_pages) { + + /* + * This can potentially iterate a massively long zone with + * many pageblocks unsuitable, so periodically check if we + * need to schedule, or even abort async compaction. + */ + if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) + && compact_should_abort(cc)) + break; + + /* Skip whole pageblock in case of a memory hole */ + if (!pfn_valid(low_pfn)) + continue; + + page = pfn_to_page(low_pfn); + + /* If isolation recently failed, do not retry */ + if (!isolation_suitable(cc, page)) + continue; + + /* + * For async compaction, also only scan in MOVABLE blocks. + * Async compaction is optimistic to see if the minimum amount + * of work satisfies the allocation. + */ + if (cc->mode == MIGRATE_ASYNC && + !migrate_async_suitable(get_pageblock_migratetype(page))) + continue; + + /* Perform the isolation */ + low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, + isolate_mode); + + if (!low_pfn || cc->contended) + return ISOLATE_ABORT; + + /* + * Either we isolated something and proceed with migration. Or + * we failed and compact_zone should decide if we should + * continue or not. + */ + break; } - /* Perform the isolation */ - low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false); - if (!low_pfn || cc->contended) - return ISOLATE_ABORT; - + acct_isolated(zone, cc); + /* Record where migration scanner will be restarted */ cc->migrate_pfn = low_pfn; - return ISOLATE_SUCCESS; + return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; } static int compact_finished(struct zone *zone, @@ -1043,9 +1084,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) ; } - if (!cc->nr_migratepages) - continue; - err = migrate_pages(&cc->migratepages, compaction_alloc, compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION); diff --git a/mm/internal.h b/mm/internal.h index a1b651b11c5f..5a0738fa649c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -154,8 +154,8 @@ unsigned long isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn); unsigned long -isolate_migratepages_range(struct zone *zone, struct compact_control *cc, - unsigned long low_pfn, unsigned long end_pfn, bool unevictable); +isolate_migratepages_range(struct compact_control *cc, + unsigned long low_pfn, unsigned long end_pfn); #endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 822babd808fe..dfbf54b51649 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6288,8 +6288,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, if (list_empty(&cc->migratepages)) { cc->nr_migratepages = 0; - pfn = isolate_migratepages_range(cc->zone, cc, - pfn, end, true); + pfn = isolate_migratepages_range(cc, pfn, end); if (!pfn) { ret = -EINTR; break; From 7d49d8868336bbf4f68714d8282ca5fd65e387ed Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:11 -0700 Subject: [PATCH 075/164] mm, compaction: reduce zone checking frequency in the migration scanner The unification of the migrate and free scanner families of function has highlighted a difference in how the scanners ensure they only isolate pages of the intended zone. This is important for taking zone lock or lru lock of the correct zone. Due to nodes overlapping, it is however possible to encounter a different zone within the range of the zone being compacted. The free scanner, since its inception by commit 748446bb6b5a ("mm: compaction: memory compaction core"), has been checking the zone of the first valid page in a pageblock, and skipping the whole pageblock if the zone does not match. This checking was completely missing from the migration scanner at first, and later added by commit dc9086004b3d ("mm: compaction: check for overlapping nodes during isolation for migration") in a reaction to a bug report. But the zone comparison in migration scanner is done once per a single scanned page, which is more defensive and thus more costly than a check per pageblock. This patch unifies the checking done in both scanners to once per pageblock, through a new pageblock_pfn_to_page() function, which also includes pfn_valid() checks. It is more defensive than the current free scanner checks, as it checks both the first and last page of the pageblock, but less defensive by the migration scanner per-page checks. It assumes that node overlapping may result (on some architecture) in a boundary between two nodes falling into the middle of a pageblock, but that there cannot be a node0 node1 node0 interleaving within a single pageblock. The result is more code being shared and a bit less per-page CPU cost in the migration scanner. 
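For readability, the per-pageblock check described above is reproduced here in condensed form; this is only an illustrative restatement of the pageblock_pfn_to_page() helper that the diff below introduces, not a substitute for the actual hunks.

        static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
                                        unsigned long end_pfn, struct zone *zone)
        {
                struct page *start_page, *end_page;

                /* end_pfn is one past the range we are checking */
                end_pfn--;

                if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
                        return NULL;

                start_page = pfn_to_page(start_pfn);
                if (page_zone(start_page) != zone)
                        return NULL;

                /* Comparing zone ids avoids deriving page_zone(end_page) */
                end_page = pfn_to_page(end_pfn);
                if (page_zone_id(start_page) != page_zone_id(end_page))
                        return NULL;

                return start_page;
        }

Both scanners call this once per pageblock and then rely only on pfn_valid_within() for individual pages on architectures that allow holes within pageblocks.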
Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 91 +++++++++++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 34 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 8058e3f98f08..1067c07cb33d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -67,6 +67,49 @@ static inline bool migrate_async_suitable(int migratetype) return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; } +/* + * Check that the whole (or subset of) a pageblock given by the interval of + * [start_pfn, end_pfn) is valid and within the same zone, before scanning it + * with the migration of free compaction scanner. The scanners then need to + * use only pfn_valid_within() check for arches that allow holes within + * pageblocks. + * + * Return struct page pointer of start_pfn, or NULL if checks were not passed. + * + * It's possible on some configurations to have a setup like node0 node1 node0 + * i.e. it's possible that all pages within a zones range of pages do not + * belong to a single zone. We assume that a border between node0 and node1 + * can occur within a single pageblock, but not a node0 node1 node0 + * interleaving within a single pageblock. It is therefore sufficient to check + * the first and last page of a pageblock and avoid checking each individual + * page in a pageblock. + */ +static struct page *pageblock_pfn_to_page(unsigned long start_pfn, + unsigned long end_pfn, struct zone *zone) +{ + struct page *start_page; + struct page *end_page; + + /* end_pfn is one past the range we are checking */ + end_pfn--; + + if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) + return NULL; + + start_page = pfn_to_page(start_pfn); + + if (page_zone(start_page) != zone) + return NULL; + + end_page = pfn_to_page(end_pfn); + + /* This gives a shorter code than deriving page_zone(end_page) */ + if (page_zone_id(start_page) != page_zone_id(end_page)) + return NULL; + + return start_page; +} + #ifdef CONFIG_COMPACTION /* Returns true if the pageblock should be scanned for pages to isolate. */ static inline bool isolation_suitable(struct compact_control *cc, @@ -371,17 +414,17 @@ isolate_freepages_range(struct compact_control *cc, unsigned long isolated, pfn, block_end_pfn; LIST_HEAD(freelist); - for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) { - if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn))) - break; + pfn = start_pfn; + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); + + for (; pfn < end_pfn; pfn += isolated, + block_end_pfn += pageblock_nr_pages) { - /* - * On subsequent iterations ALIGN() is actually not needed, - * but we keep it that we not to complicate the code. - */ - block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); block_end_pfn = min(block_end_pfn, end_pfn); + if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) + break; + isolated = isolate_freepages_block(cc, pfn, block_end_pfn, &freelist, true); @@ -507,15 +550,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, continue; nr_scanned++; - /* - * Get the page and ensure the page is within the same zone. - * See the comment in isolate_freepages about overlapping - * nodes. It is deliberate that the new zone lock is not taken - * as memory compaction should not move pages between nodes. 
- */ page = pfn_to_page(low_pfn); - if (page_zone(page) != zone) - continue; if (!valid_page) valid_page = page; @@ -653,8 +688,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, block_end_pfn = min(block_end_pfn, end_pfn); - /* Skip whole pageblock in case of a memory hole */ - if (!pfn_valid(pfn)) + if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) continue; pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, @@ -727,18 +761,9 @@ static void isolate_freepages(struct compact_control *cc) && compact_should_abort(cc)) break; - if (!pfn_valid(block_start_pfn)) - continue; - - /* - * Check for overlapping nodes/zones. It's possible on some - * configurations to have a setup like - * node0 node1 node0 - * i.e. it's possible that all pages within a zones range of - * pages do not belong to a single zone. - */ - page = pfn_to_page(block_start_pfn); - if (page_zone(page) != zone) + page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, + zone); + if (!page) continue; /* Check the block is suitable for migration */ @@ -873,12 +898,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, && compact_should_abort(cc)) break; - /* Skip whole pageblock in case of a memory hole */ - if (!pfn_valid(low_pfn)) + page = pageblock_pfn_to_page(low_pfn, end_pfn, zone); + if (!page) continue; - page = pfn_to_page(low_pfn); - /* If isolation recently failed, do not retry */ if (!isolation_suitable(cc, page)) continue; From 1f9efdef4f3f1d2a073e524113fd0038af636f2b Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:14 -0700 Subject: [PATCH 076/164] mm, compaction: khugepaged should not give up due to need_resched() Async compaction aborts when it detects zone lock contention or need_resched() is true. David Rientjes has reported that in practice, most direct async compactions for THP allocation abort due to need_resched(). This means that a second direct compaction is never attempted, which might be OK for a page fault, but khugepaged is intended to attempt a sync compaction in such case and in these cases it won't. This patch replaces "bool contended" in compact_control with an int that distinguishes between aborting due to need_resched() and aborting due to lock contention. This allows propagating the abort through all compaction functions as before, but passing the abort reason up to __alloc_pages_slowpath() which decides when to continue with direct reclaim and another compaction attempt. Another problem is that try_to_compact_pages() did not act upon the reported contention (both need_resched() or lock contention) immediately and would proceed with another zone from the zonelist. When need_resched() is true, that means initializing another zone compaction, only to check again need_resched() in isolate_migratepages() and aborting. For zone lock contention, the unintended consequence is that the lock contended status reported back to the allocator is detrmined from the last zone where compaction was attempted, which is rather arbitrary. This patch fixes the problem in the following way: - async compaction of a zone aborting due to need_resched() or fatal signal pending means that further zones should not be tried. We report COMPACT_CONTENDED_SCHED to the allocator. - aborting zone compaction due to lock contention means we can still try another zone, since it has different set of locks. We report back COMPACT_CONTENDED_LOCK only if *all* zones where compaction was attempted, it was aborted due to lock contention. 
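In condensed form (deferred-compaction and watermark handling omitted), the reporting scheme in try_to_compact_pages() described by these two points looks like the sketch below; the complete logic is in the diff that follows.

        int all_zones_contended = COMPACT_CONTENDED_LOCK;      /* identity value for &= */

        *contended = COMPACT_CONTENDED_NONE;

        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) {
                status = compact_zone_order(zone, order, gfp_mask, mode,
                                                &zone_contended);
                rc = max(status, rc);

                /* a single zone that was not lock contended clears the flag */
                all_zones_contended &= zone_contended;

                /* need_resched() or fatal signal: do not try further zones */
                if (zone_contended == COMPACT_CONTENDED_SCHED ||
                    fatal_signal_pending(current)) {
                        *contended = COMPACT_CONTENDED_SCHED;
                        all_zones_contended = 0;        /* not all zones were tried */
                        break;
                }
        }

        /* lock contention is reported only if every attempted zone saw it */
        if (rc > COMPACT_SKIPPED && all_zones_contended)
                *contended = COMPACT_CONTENDED_LOCK;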
As a result of these fixes, khugepaged will proceed with second sync compaction as intended, when the preceding async compaction aborted due to need_resched(). Page fault compactions aborting due to need_resched() will spare some cycles previously wasted by initializing another zone compaction only to abort again. Lock contention will be reported only when compaction in all zones aborted due to lock contention, and therefore it's not a good idea to try again after reclaim. In stress-highalloc from mmtests configured to use __GFP_NO_KSWAPD, this has improved number of THP collapse allocations by 10%, which shows positive effect on khugepaged. The benchmark's success rates are unchanged as it is not recognized as khugepaged. Numbers of compact_stall and compact_fail events have however decreased by 20%, with compact_success still a bit improved, which is good. With benchmark configured not to use __GFP_NO_KSWAPD, there is 6% improvement in THP collapse allocations, and only slight improvement in stalls and failures. [akpm@linux-foundation.org: fix warnings] Reported-by: David Rientjes Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 12 +++++- mm/compaction.c | 87 +++++++++++++++++++++++++++++++++----- mm/internal.h | 4 +- mm/page_alloc.c | 45 ++++++++++++++------ 4 files changed, 121 insertions(+), 27 deletions(-) diff --git a/include/linux/compaction.h b/include/linux/compaction.h index b2e4c92d0445..60bdf8dc02a3 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -13,6 +13,14 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 4 +/* Used to signal whether compaction detected need_sched() or lock contention */ +/* No contention detected */ +#define COMPACT_CONTENDED_NONE 0 +/* Either need_sched() was true or fatal signal pending */ +#define COMPACT_CONTENDED_SCHED 1 +/* Zone lock or lru_lock was contended in async compaction */ +#define COMPACT_CONTENDED_LOCK 2 + #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, @@ -24,7 +32,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - enum migrate_mode mode, bool *contended, + enum migrate_mode mode, int *contended, struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); @@ -94,7 +102,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended, + enum migrate_mode mode, int *contended, struct zone **candidate_zone) { return COMPACT_CONTINUE; diff --git a/mm/compaction.c b/mm/compaction.c index 1067c07cb33d..26bb20ef853d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -223,9 +223,21 @@ static void update_pageblock_skip(struct compact_control *cc, } #endif /* CONFIG_COMPACTION */ -static inline bool should_release_lock(spinlock_t *lock) +static int should_release_lock(spinlock_t *lock) { - return need_resched() || spin_is_contended(lock); + /* + * 
Sched contention has higher priority here as we may potentially + * have to abort whole compaction ASAP. Returning with lock contention + * means we will try another zone, and further decisions are + * influenced only when all zones are lock contended. That means + * potentially missing a lock contention is less critical. + */ + if (need_resched()) + return COMPACT_CONTENDED_SCHED; + else if (spin_is_contended(lock)) + return COMPACT_CONTENDED_LOCK; + + return COMPACT_CONTENDED_NONE; } /* @@ -240,7 +252,9 @@ static inline bool should_release_lock(spinlock_t *lock) static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, bool locked, struct compact_control *cc) { - if (should_release_lock(lock)) { + int contended = should_release_lock(lock); + + if (contended) { if (locked) { spin_unlock_irqrestore(lock, *flags); locked = false; @@ -248,7 +262,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, /* async aborts if taking too long or contended */ if (cc->mode == MIGRATE_ASYNC) { - cc->contended = true; + cc->contended = contended; return false; } @@ -274,7 +288,7 @@ static inline bool compact_should_abort(struct compact_control *cc) /* async compaction aborts if contended */ if (need_resched()) { if (cc->mode == MIGRATE_ASYNC) { - cc->contended = true; + cc->contended = COMPACT_CONTENDED_SCHED; return true; } @@ -1140,7 +1154,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) } static unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, enum migrate_mode mode, bool *contended) + gfp_t gfp_mask, enum migrate_mode mode, int *contended) { unsigned long ret; struct compact_control cc = { @@ -1172,14 +1186,15 @@ int sysctl_extfrag_threshold = 500; * @gfp_mask: The GFP mask of the current allocation * @nodemask: The allowed nodes to allocate from * @mode: The migration mode for async, sync light, or sync migration - * @contended: Return value that is true if compaction was aborted due to lock contention + * @contended: Return value that determines if compaction was aborted due to + * need_resched() or lock contention * @candidate_zone: Return the zone where we think allocation should succeed * * This is the main entry point for direct page compaction. */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended, + enum migrate_mode mode, int *contended, struct zone **candidate_zone) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); @@ -1189,6 +1204,9 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zone *zone; int rc = COMPACT_DEFERRED; int alloc_flags = 0; + int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ + + *contended = COMPACT_CONTENDED_NONE; /* Check if the GFP flags allow compaction */ if (!order || !may_enter_fs || !may_perform_io) @@ -1202,13 +1220,19 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { int status; + int zone_contended; if (compaction_deferred(zone, order)) continue; status = compact_zone_order(zone, order, gfp_mask, mode, - contended); + &zone_contended); rc = max(status, rc); + /* + * It takes at least one zone that wasn't lock contended + * to clear all_zones_contended. 
+ */ + all_zones_contended &= zone_contended; /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, @@ -1221,8 +1245,21 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, * succeeds in this zone. */ compaction_defer_reset(zone, order, false); - break; - } else if (mode != MIGRATE_ASYNC) { + /* + * It is possible that async compaction aborted due to + * need_resched() and the watermarks were ok thanks to + * somebody else freeing memory. The allocation can + * however still fail so we better signal the + * need_resched() contention anyway (this will not + * prevent the allocation attempt). + */ + if (zone_contended == COMPACT_CONTENDED_SCHED) + *contended = COMPACT_CONTENDED_SCHED; + + goto break_loop; + } + + if (mode != MIGRATE_ASYNC) { /* * We think that allocation won't succeed in this zone * so we defer compaction there. If it ends up @@ -1230,8 +1267,36 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, */ defer_compaction(zone, order); } + + /* + * We might have stopped compacting due to need_resched() in + * async compaction, or due to a fatal signal detected. In that + * case do not try further zones and signal need_resched() + * contention. + */ + if ((zone_contended == COMPACT_CONTENDED_SCHED) + || fatal_signal_pending(current)) { + *contended = COMPACT_CONTENDED_SCHED; + goto break_loop; + } + + continue; +break_loop: + /* + * We might not have tried all the zones, so be conservative + * and assume they are not all lock contended. + */ + all_zones_contended = 0; + break; } + /* + * If at least one zone wasn't deferred or skipped, we report if all + * zones that were tried were lock contended. + */ + if (rc > COMPACT_SKIPPED && all_zones_contended) + *contended = COMPACT_CONTENDED_LOCK; + return rc; } diff --git a/mm/internal.h b/mm/internal.h index 5a0738fa649c..4c1d604c396c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -144,8 +144,8 @@ struct compact_control { int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; - bool contended; /* True if a lock was contended, or - * need_resched() true during async + int contended; /* Signal need_sched() or lock + * contention detected during * compaction */ }; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dfbf54b51649..313338d74095 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2297,7 +2297,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int classzone_idx, int migratetype, enum migrate_mode mode, - bool *contended_compaction, bool *deferred_compaction) + int *contended_compaction, bool *deferred_compaction) { struct zone *last_compact_zone = NULL; unsigned long compact_result; @@ -2371,7 +2371,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int classzone_idx, int migratetype, enum migrate_mode mode, - bool *contended_compaction, bool *deferred_compaction) + int *contended_compaction, bool *deferred_compaction) { return NULL; } @@ -2547,7 +2547,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, unsigned long did_some_progress; enum migrate_mode migration_mode = MIGRATE_ASYNC; bool deferred_compaction = false; - bool contended_compaction = false; + int 
contended_compaction = COMPACT_CONTENDED_NONE; /* * In the slowpath, we sanity check order to avoid ever trying to @@ -2651,15 +2651,36 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (page) goto got_pg; - /* - * If compaction is deferred for high-order allocations, it is because - * sync compaction recently failed. In this is the case and the caller - * requested a movable allocation that does not heavily disrupt the - * system then fail the allocation instead of entering direct reclaim. - */ - if ((deferred_compaction || contended_compaction) && - (gfp_mask & __GFP_NO_KSWAPD)) - goto nopage; + /* Checks for THP-specific high-order allocations */ + if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) { + /* + * If compaction is deferred for high-order allocations, it is + * because sync compaction recently failed. If this is the case + * and the caller requested a THP allocation, we do not want + * to heavily disrupt the system, so we fail the allocation + * instead of entering direct reclaim. + */ + if (deferred_compaction) + goto nopage; + + /* + * In all zones where compaction was attempted (and not + * deferred or skipped), lock contention has been detected. + * For THP allocation we do not want to disrupt the others + * so we fallback to base pages instead. + */ + if (contended_compaction == COMPACT_CONTENDED_LOCK) + goto nopage; + + /* + * If compaction was aborted due to need_resched(), we do not + * want to further increase allocation latency, unless it is + * khugepaged trying to collapse. + */ + if (contended_compaction == COMPACT_CONTENDED_SCHED + && !(current->flags & PF_KTHREAD)) + goto nopage; + } /* * It can become very expensive to allocate transparent hugepages at From 8b44d2791f912566a7ef58c71a7f9cbd16c3eeae Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:16 -0700 Subject: [PATCH 077/164] mm, compaction: periodically drop lock and restore IRQs in scanners Compaction scanners regularly check for lock contention and need_resched() through the compact_checklock_irqsave() function. However, if there is no contention, the lock can be held and IRQ disabled for potentially long time. This has been addressed by commit b2eef8c0d091 ("mm: compaction: minimise the time IRQs are disabled while isolating pages for migration") for the migration scanner. However, the refactoring done by commit 2a1402aa044b ("mm: compaction: acquire the zone->lru_lock as late as possible") has changed the conditions so that the lock is dropped only when there's contention on the lock or need_resched() is true. Also, need_resched() is checked only when the lock is already held. The comment "give a chance to irqs before checking need_resched" is therefore misleading, as IRQs remain disabled when the check is done. This patch restores the behavior intended by commit b2eef8c0d091 and also tries to better balance and make more deterministic the time spent by checking for contention vs the time the scanners might run between the checks. It also avoids situations where checking has not been done often enough before. The result should be avoiding both too frequent and too infrequent contention checking, and especially the potentially long-running scans with IRQs disabled and no checking of need_resched() or for fatal signal pending, which can happen when many consecutive pages or pageblocks fail the preliminary tests and do not reach the later call site to compact_checklock_irqsave(), as explained below. 
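For reference, the periodic check that this patch places at the top of each scanning loop is condensed here from the migration-scanner hunk in the diff below; the free scanner gets the same check against zone->lock, keyed on blockpfn.

        for (; low_pfn < end_pfn; low_pfn++) {
                /*
                 * Periodically drop the lock (if held) regardless of its
                 * contention, to give a chance to IRQs.  Abort async compaction
                 * if rescheduling is needed, and any compaction on a fatal signal.
                 */
                if (!(low_pfn % SWAP_CLUSTER_MAX)
                    && compact_unlock_should_abort(&zone->lru_lock, flags,
                                                   &locked, cc))
                        break;

                /* ... per-page isolation work continues here ... */
        }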
Before the patch: In the migration scanner, compact_checklock_irqsave() was called each loop, if reached. If not reached, some lower-frequency checking could still be done if the lock was already held, but this would not result in aborting contended async compaction until reaching compact_checklock_irqsave() or end of pageblock. In the free scanner, it was similar but completely without the periodical checking, so lock can be potentially held until reaching the end of pageblock. After the patch, in both scanners: The periodical check is done as the first thing in the loop on each SWAP_CLUSTER_MAX aligned pfn, using the new compact_unlock_should_abort() function, which always unlocks the lock (if locked) and aborts async compaction if scheduling is needed. It also aborts any type of compaction when a fatal signal is pending. The compact_checklock_irqsave() function is replaced with a slightly different compact_trylock_irqsave(). The biggest difference is that the function is not called at all if the lock is already held. The periodical need_resched() checking is left solely to compact_unlock_should_abort(). The lock contention avoidance for async compaction is achieved by the periodical unlock by compact_unlock_should_abort() and by using trylock in compact_trylock_irqsave() and aborting when trylock fails. Sync compaction does not use trylock. Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 119 +++++++++++++++++++++++++++++------------------- 1 file changed, 72 insertions(+), 47 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 26bb20ef853d..74770e40cfe5 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -223,61 +223,72 @@ static void update_pageblock_skip(struct compact_control *cc, } #endif /* CONFIG_COMPACTION */ -static int should_release_lock(spinlock_t *lock) +/* + * Compaction requires the taking of some coarse locks that are potentially + * very heavily contended. For async compaction, back out if the lock cannot + * be taken immediately. For sync compaction, spin on the lock if needed. + * + * Returns true if the lock is held + * Returns false if the lock is not held and compaction should abort + */ +static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags, + struct compact_control *cc) { - /* - * Sched contention has higher priority here as we may potentially - * have to abort whole compaction ASAP. Returning with lock contention - * means we will try another zone, and further decisions are - * influenced only when all zones are lock contended. That means - * potentially missing a lock contention is less critical. - */ - if (need_resched()) - return COMPACT_CONTENDED_SCHED; - else if (spin_is_contended(lock)) - return COMPACT_CONTENDED_LOCK; + if (cc->mode == MIGRATE_ASYNC) { + if (!spin_trylock_irqsave(lock, *flags)) { + cc->contended = COMPACT_CONTENDED_LOCK; + return false; + } + } else { + spin_lock_irqsave(lock, *flags); + } - return COMPACT_CONTENDED_NONE; + return true; } /* * Compaction requires the taking of some coarse locks that are potentially - * very heavily contended. Check if the process needs to be scheduled or - * if the lock is contended. For async compaction, back out in the event - * if contention is severe. For sync compaction, schedule. + * very heavily contended. 
The lock should be periodically unlocked to avoid + * having disabled IRQs for a long time, even when there is nobody waiting on + * the lock. It might also be that allowing the IRQs will result in + * need_resched() becoming true. If scheduling is needed, async compaction + * aborts. Sync compaction schedules. + * Either compaction type will also abort if a fatal signal is pending. + * In either case if the lock was locked, it is dropped and not regained. * - * Returns true if the lock is held. - * Returns false if the lock is released and compaction should abort + * Returns true if compaction should abort due to fatal signal pending, or + * async compaction due to need_resched() + * Returns false when compaction can continue (sync compaction might have + * scheduled) */ -static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, - bool locked, struct compact_control *cc) +static bool compact_unlock_should_abort(spinlock_t *lock, + unsigned long flags, bool *locked, struct compact_control *cc) { - int contended = should_release_lock(lock); + if (*locked) { + spin_unlock_irqrestore(lock, flags); + *locked = false; + } - if (contended) { - if (locked) { - spin_unlock_irqrestore(lock, *flags); - locked = false; - } + if (fatal_signal_pending(current)) { + cc->contended = COMPACT_CONTENDED_SCHED; + return true; + } - /* async aborts if taking too long or contended */ + if (need_resched()) { if (cc->mode == MIGRATE_ASYNC) { - cc->contended = contended; - return false; + cc->contended = COMPACT_CONTENDED_SCHED; + return true; } - cond_resched(); } - if (!locked) - spin_lock_irqsave(lock, *flags); - return true; + return false; } /* * Aside from avoiding lock contention, compaction also periodically checks * need_resched() and either schedules in sync compaction or aborts async - * compaction. This is similar to what compact_checklock_irqsave() does, but + * compaction. This is similar to what compact_unlock_should_abort() does, but * is used where no lock is concerned. * * Returns false when no scheduling was needed, or sync compaction scheduled. @@ -336,6 +347,16 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, int isolated, i; struct page *page = cursor; + /* + * Periodically drop the lock (if held) regardless of its + * contention, to give chance to IRQs. Abort if fatal signal + * pending or async compaction detects need_resched() + */ + if (!(blockpfn % SWAP_CLUSTER_MAX) + && compact_unlock_should_abort(&cc->zone->lock, flags, + &locked, cc)) + break; + nr_scanned++; if (!pfn_valid_within(blockpfn)) goto isolate_fail; @@ -353,8 +374,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, * spin on the lock and we acquire the lock as late as * possible. */ - locked = compact_checklock_irqsave(&cc->zone->lock, &flags, - locked, cc); + if (!locked) + locked = compact_trylock_irqsave(&cc->zone->lock, + &flags, cc); if (!locked) break; @@ -552,13 +574,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { - /* give a chance to irqs before checking need_resched() */ - if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) { - if (should_release_lock(&zone->lru_lock)) { - spin_unlock_irqrestore(&zone->lru_lock, flags); - locked = false; - } - } + /* + * Periodically drop the lock (if held) regardless of its + * contention, to give chance to IRQs. Abort async compaction + * if contended. 
+ */ + if (!(low_pfn % SWAP_CLUSTER_MAX) + && compact_unlock_should_abort(&zone->lru_lock, flags, + &locked, cc)) + break; if (!pfn_valid_within(low_pfn)) continue; @@ -620,10 +644,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, page_count(page) > page_mapcount(page)) continue; - /* Check if it is ok to still hold the lock */ - locked = compact_checklock_irqsave(&zone->lru_lock, &flags, - locked, cc); - if (!locked || fatal_signal_pending(current)) + /* If the lock is not held, try to take it */ + if (!locked) + locked = compact_trylock_irqsave(&zone->lru_lock, + &flags, cc); + if (!locked) break; /* Recheck PageLRU and PageTransHuge under lock */ From 69b7189f12e0064237630e8c6bb64cad710bb268 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:18 -0700 Subject: [PATCH 078/164] mm, compaction: skip rechecks when lock was already held Compaction scanners try to lock zone locks as late as possible by checking many page or pageblock properties opportunistically without lock and skipping them if not unsuitable. For pages that pass the initial checks, some properties have to be checked again safely under lock. However, if the lock was already held from a previous iteration in the initial checks, the rechecks are unnecessary. This patch therefore skips the rechecks when the lock was already held. This is now possible to do, since we don't (potentially) drop and reacquire the lock between the initial checks and the safe rechecks anymore. Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Reviewed-by: Naoya Horiguchi Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 53 +++++++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 74770e40cfe5..5039c964f5c8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -367,22 +367,30 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, goto isolate_fail; /* - * The zone lock must be held to isolate freepages. - * Unfortunately this is a very coarse lock and can be - * heavily contended if there are parallel allocations - * or parallel compactions. For async compaction do not - * spin on the lock and we acquire the lock as late as - * possible. + * If we already hold the lock, we can skip some rechecking. + * Note that if we hold the lock now, checked_pageblock was + * already set in some previous iteration (or strict is true), + * so it is correct to skip the suitable migration target + * recheck as well. */ - if (!locked) + if (!locked) { + /* + * The zone lock must be held to isolate freepages. + * Unfortunately this is a very coarse lock and can be + * heavily contended if there are parallel allocations + * or parallel compactions. For async compaction do not + * spin on the lock and we acquire the lock as late as + * possible. 
+ */ locked = compact_trylock_irqsave(&cc->zone->lock, &flags, cc); - if (!locked) - break; + if (!locked) + break; - /* Recheck this is a buddy page under lock */ - if (!PageBuddy(page)) - goto isolate_fail; + /* Recheck this is a buddy page under lock */ + if (!PageBuddy(page)) + goto isolate_fail; + } /* Found a free page, break it into order-0 pages */ isolated = split_free_page(page); @@ -644,19 +652,20 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, page_count(page) > page_mapcount(page)) continue; - /* If the lock is not held, try to take it */ - if (!locked) + /* If we already hold the lock, we can skip some rechecking */ + if (!locked) { locked = compact_trylock_irqsave(&zone->lru_lock, &flags, cc); - if (!locked) - break; + if (!locked) + break; - /* Recheck PageLRU and PageTransHuge under lock */ - if (!PageLRU(page)) - continue; - if (PageTransHuge(page)) { - low_pfn += (1 << compound_order(page)) - 1; - continue; + /* Recheck PageLRU and PageTransHuge under lock */ + if (!PageLRU(page)) + continue; + if (PageTransHuge(page)) { + low_pfn += (1 << compound_order(page)) - 1; + continue; + } } lruvec = mem_cgroup_page_lruvec(page, zone); From e14c720efdd73c6d69cd8d07fa894bcd11fe1973 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:20 -0700 Subject: [PATCH 079/164] mm, compaction: remember position within pageblock in free pages scanner Unlike the migration scanner, the free scanner remembers the beginning of the last scanned pageblock in cc->free_pfn. It might be therefore rescanning pages uselessly when called several times during single compaction. This might have been useful when pages were returned to the buddy allocator after a failed migration, but this is no longer the case. This patch changes the meaning of cc->free_pfn so that if it points to a middle of a pageblock, that pageblock is scanned only from cc->free_pfn to the end. isolate_freepages_block() will record the pfn of the last page it looked at, which is then used to update cc->free_pfn. In the mmtests stress-highalloc benchmark, this has resulted in lowering the ratio between pages scanned by both scanners, from 2.5 free pages per migrate page, to 2.25 free pages per migrate page, without affecting success rates. With __GFP_NO_KSWAPD allocations, this appears to result in a worse ratio (2.1 instead of 1.8), but page migration successes increased by 10%, so this could mean that more useful work can be done until need_resched() aborts this kind of compaction. Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Reviewed-by: Naoya Horiguchi Acked-by: David Rientjes Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 5039c964f5c8..b69b7dac0361 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -330,7 +330,7 @@ static bool suitable_migration_target(struct page *page) * (even though it may still end up isolating some pages). 
*/ static unsigned long isolate_freepages_block(struct compact_control *cc, - unsigned long blockpfn, + unsigned long *start_pfn, unsigned long end_pfn, struct list_head *freelist, bool strict) @@ -339,6 +339,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, struct page *cursor, *valid_page = NULL; unsigned long flags; bool locked = false; + unsigned long blockpfn = *start_pfn; cursor = pfn_to_page(blockpfn); @@ -415,6 +416,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, } + /* Record how far we have got within the block */ + *start_pfn = blockpfn; + trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); /* @@ -463,14 +467,16 @@ isolate_freepages_range(struct compact_control *cc, for (; pfn < end_pfn; pfn += isolated, block_end_pfn += pageblock_nr_pages) { + /* Protect pfn from changing by isolate_freepages_block */ + unsigned long isolate_start_pfn = pfn; block_end_pfn = min(block_end_pfn, end_pfn); if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) break; - isolated = isolate_freepages_block(cc, pfn, block_end_pfn, - &freelist, true); + isolated = isolate_freepages_block(cc, &isolate_start_pfn, + block_end_pfn, &freelist, true); /* * In strict mode, isolate_freepages_block() returns 0 if @@ -769,6 +775,7 @@ static void isolate_freepages(struct compact_control *cc) struct zone *zone = cc->zone; struct page *page; unsigned long block_start_pfn; /* start of current pageblock */ + unsigned long isolate_start_pfn; /* exact pfn we start at */ unsigned long block_end_pfn; /* end of current pageblock */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */ int nr_freepages = cc->nr_freepages; @@ -777,14 +784,15 @@ static void isolate_freepages(struct compact_control *cc) /* * Initialise the free scanner. The starting point is where we last * successfully isolated from, zone-cached value, or the end of the - * zone when isolating for the first time. We need this aligned to - * the pageblock boundary, because we do + * zone when isolating for the first time. For looping we also need + * this pfn aligned down to the pageblock boundary, because we do * block_start_pfn -= pageblock_nr_pages in the for loop. * For ending point, take care when isolating in last pageblock of a * a zone which ends in the middle of a pageblock. * The low boundary is the end of the pageblock the migration scanner * is using. */ + isolate_start_pfn = cc->free_pfn; block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); block_end_pfn = min(block_start_pfn + pageblock_nr_pages, zone_end_pfn(zone)); @@ -797,7 +805,8 @@ static void isolate_freepages(struct compact_control *cc) */ for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages; block_end_pfn = block_start_pfn, - block_start_pfn -= pageblock_nr_pages) { + block_start_pfn -= pageblock_nr_pages, + isolate_start_pfn = block_start_pfn) { unsigned long isolated; /* @@ -822,12 +831,24 @@ static void isolate_freepages(struct compact_control *cc) if (!isolation_suitable(cc, page)) continue; - /* Found a block suitable for isolating free pages from */ - cc->free_pfn = block_start_pfn; - isolated = isolate_freepages_block(cc, block_start_pfn, + /* Found a block suitable for isolating free pages from. */ + isolated = isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, freelist, false); nr_freepages += isolated; + /* + * Remember where the free scanner should restart next time, + * which is where isolate_freepages_block() left off. 
+ * But if it scanned the whole pageblock, isolate_start_pfn + * now points at block_end_pfn, which is the start of the next + * pageblock. + * In that case we will however want to restart at the start + * of the previous pageblock. + */ + cc->free_pfn = (isolate_start_pfn < block_end_pfn) ? + isolate_start_pfn : + block_start_pfn - pageblock_nr_pages; + /* * Set a flag that we successfully isolated in this pageblock. * In the next loop iteration, zone->compact_cached_free_pfn From 99c0fd5e51c447917264154cb01a967804ace745 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:23 -0700 Subject: [PATCH 080/164] mm, compaction: skip buddy pages by their order in the migrate scanner The migration scanner skips PageBuddy pages, but does not consider their order as checking page_order() is generally unsafe without holding the zone->lock, and acquiring the lock just for the check wouldn't be a good tradeoff. Still, this could avoid some iterations over the rest of the buddy page, and if we are careful, the race window between PageBuddy() check and page_order() is small, and the worst thing that can happen is that we skip too much and miss some isolation candidates. This is not that bad, as compaction can already fail for many other reasons like parallel allocations, and those have much larger race window. This patch therefore makes the migration scanner obtain the buddy page order and use it to skip the whole buddy page, if the order appears to be in the valid range. It's important that the page_order() is read only once, so that the value used in the checks and in the pfn calculation is the same. But in theory the compiler can replace the local variable by multiple inlines of page_order(). Therefore, the patch introduces page_order_unsafe() that uses ACCESS_ONCE to prevent this. Testing with stress-highalloc from mmtests shows a 15% reduction in number of pages scanned by migration scanner. The reduction is >60% with __GFP_NO_KSWAPD allocations, along with success rates better by few percent. Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 36 +++++++++++++++++++++++++++++++----- mm/internal.h | 16 +++++++++++++++- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index b69b7dac0361..b9cf751cc00e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -313,8 +313,15 @@ static inline bool compact_should_abort(struct compact_control *cc) static bool suitable_migration_target(struct page *page) { /* If the page is a large free page, then disallow migration */ - if (PageBuddy(page) && page_order(page) >= pageblock_order) - return false; + if (PageBuddy(page)) { + /* + * We are checking page_order without zone->lock taken. But + * the only small danger is that we skip a potentially suitable + * pageblock, so it's not worth to check order for valid range. + */ + if (page_order_unsafe(page) >= pageblock_order) + return false; + } /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ if (migrate_async_suitable(get_pageblock_migratetype(page))) @@ -608,11 +615,23 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, valid_page = page; /* - * Skip if free. 
page_order cannot be used without zone->lock - * as nothing prevents parallel allocations or buddy merging. + * Skip if free. We read page order here without zone lock + * which is generally unsafe, but the race window is small and + * the worst thing that can happen is that we skip some + * potential isolation targets. */ - if (PageBuddy(page)) + if (PageBuddy(page)) { + unsigned long freepage_order = page_order_unsafe(page); + + /* + * Without lock, we cannot be sure that what we got is + * a valid page order. Consider only values in the + * valid order range to prevent low_pfn overflow. + */ + if (freepage_order > 0 && freepage_order < MAX_ORDER) + low_pfn += (1UL << freepage_order) - 1; continue; + } /* * Check may be lockless but that's ok as we recheck later. @@ -698,6 +717,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } } + /* + * The PageBuddy() check could have potentially brought us outside + * the range to be scanned. + */ + if (unlikely(low_pfn > end_pfn)) + low_pfn = end_pfn; + if (locked) spin_unlock_irqrestore(&zone->lru_lock, flags); diff --git a/mm/internal.h b/mm/internal.h index 4c1d604c396c..86ae964a25b0 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -164,7 +164,8 @@ isolate_migratepages_range(struct compact_control *cc, * general, page_zone(page)->lock must be held by the caller to prevent the * page from being allocated in parallel and returning garbage as the order. * If a caller does not hold page_zone(page)->lock, it must guarantee that the - * page cannot be allocated or merged in parallel. + * page cannot be allocated or merged in parallel. Alternatively, it must + * handle invalid values gracefully, and use page_order_unsafe() below. */ static inline unsigned long page_order(struct page *page) { @@ -172,6 +173,19 @@ static inline unsigned long page_order(struct page *page) return page_private(page); } +/* + * Like page_order(), but for callers who cannot afford to hold the zone lock. + * PageBuddy() should be checked first by the caller to minimize race window, + * and invalid values must be handled gracefully. + * + * ACCESS_ONCE is used so that if the caller assigns the result into a local + * variable and e.g. tests it for valid range before using, the compiler cannot + * decide to remove the variable and inline the page_private(page) multiple + * times, potentially observing different values in the tests and the actual + * use of the result. + */ +#define page_order_unsafe(page) ACCESS_ONCE(page_private(page)) + static inline bool is_cow_mapping(vm_flags_t flags) { return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; From 43e7a34d265e884b7cf34f9b05e6f2e0c05bf120 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 9 Oct 2014 15:27:25 -0700 Subject: [PATCH 081/164] mm: rename allocflags_to_migratetype for clarity The page allocator has gfp flags (like __GFP_WAIT) and alloc flags (like ALLOC_CPUSET) that have separate semantics. The function allocflags_to_migratetype() actually takes gfp flags, not alloc flags, and returns a migratetype. Rename it to gfpflags_to_migratetype(). 
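The point of the rename is that the helper consumes a gfp mask, never the allocator-internal ALLOC_* flags. Typical call sites, condensed from the hunks below:

        /* __alloc_pages_nodemask(): derive the migratetype from the gfp mask */
        int migratetype = gfpflags_to_migratetype(gfp_mask);

        /* gfp_to_alloc_flags() / try_to_compact_pages() */
        #ifdef CONFIG_CMA
                if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
                        alloc_flags |= ALLOC_CMA;
        #endif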
Signed-off-by: David Rientjes Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Reviewed-by: Naoya Horiguchi Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Christoph Lameter Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 +- mm/compaction.c | 4 ++-- mm/page_alloc.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 5e7219dc0fae..41b30fd4d041 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -156,7 +156,7 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 /* Convert GFP flags to their corresponding migrate type */ -static inline int allocflags_to_migratetype(gfp_t gfp_flags) +static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); diff --git a/mm/compaction.c b/mm/compaction.c index b9cf751cc00e..7c687c0eef6e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1242,7 +1242,7 @@ static unsigned long compact_zone_order(struct zone *zone, int order, .nr_freepages = 0, .nr_migratepages = 0, .order = order, - .migratetype = allocflags_to_migratetype(gfp_mask), + .migratetype = gfpflags_to_migratetype(gfp_mask), .zone = zone, .mode = mode, }; @@ -1294,7 +1294,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, return COMPACT_SKIPPED; #ifdef CONFIG_CMA - if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif /* Compact each zone in the list */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 313338d74095..f07588b11d59 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2523,7 +2523,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask) alloc_flags |= ALLOC_NO_WATERMARKS; } #ifdef CONFIG_CMA - if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif return alloc_flags; @@ -2786,7 +2786,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zone *preferred_zone; struct zoneref *preferred_zoneref; struct page *page = NULL; - int migratetype = allocflags_to_migratetype(gfp_mask); + int migratetype = gfpflags_to_migratetype(gfp_mask); unsigned int cpuset_mems_cookie; int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; int classzone_idx; From 6d7ce55940b6ecd463ca044ad241f0122d913293 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 9 Oct 2014 15:27:27 -0700 Subject: [PATCH 082/164] mm, compaction: pass gfp mask to compact_control struct compact_control currently converts the gfp mask to a migratetype, but we need the entire gfp mask in a follow-up patch. Pass the entire gfp mask as part of struct compact_control. Signed-off-by: David Rientjes Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 12 +++++++----- mm/internal.h | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 7c687c0eef6e..15163b4b35ab 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1032,8 +1032,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; } -static int compact_finished(struct zone *zone, - struct compact_control *cc) +static int compact_finished(struct zone *zone, struct compact_control *cc, + const int migratetype) { unsigned int order; unsigned long watermark; @@ -1079,7 +1079,7 @@ static int compact_finished(struct zone *zone, struct free_area *area = &zone->free_area[order]; /* Job done if page is free of the right migratetype */ - if (!list_empty(&area->free_list[cc->migratetype])) + if (!list_empty(&area->free_list[migratetype])) return COMPACT_PARTIAL; /* Job done if allocation would set block type */ @@ -1145,6 +1145,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) int ret; unsigned long start_pfn = zone->zone_start_pfn; unsigned long end_pfn = zone_end_pfn(zone); + const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); const bool sync = cc->mode != MIGRATE_ASYNC; ret = compaction_suitable(zone, cc->order); @@ -1187,7 +1188,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) migrate_prep_local(); - while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { + while ((ret = compact_finished(zone, cc, migratetype)) == + COMPACT_CONTINUE) { int err; switch (isolate_migratepages(zone, cc)) { @@ -1242,7 +1244,7 @@ static unsigned long compact_zone_order(struct zone *zone, int order, .nr_freepages = 0, .nr_migratepages = 0, .order = order, - .migratetype = gfpflags_to_migratetype(gfp_mask), + .gfp_mask = gfp_mask, .zone = zone, .mode = mode, }; diff --git a/mm/internal.h b/mm/internal.h index 86ae964a25b0..829304090b90 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -142,7 +142,7 @@ struct compact_control { bool finished_update_migrate; int order; /* order a direct compactor needs */ - int migratetype; /* MOVABLE, RECLAIMABLE etc */ + const gfp_t gfp_mask; /* gfp mask of a direct compactor */ struct zone *zone; int contended; /* Signal need_sched() or lock * contention detected during From 9c5990240e076ae564cccbd921868cd08f6daaa5 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 9 Oct 2014 15:27:29 -0700 Subject: [PATCH 083/164] mm: introduce check_data_rlimit helper To eliminate code duplication lets introduce check_data_rlimit helper which we will use in brk() and prctl() syscalls. Signed-off-by: Cyrill Gorcunov Cc: Kees Cook Cc: Tejun Heo Cc: Andrew Vagin Cc: Eric W. Biederman Cc: H. 
Peter Anvin Acked-by: Serge Hallyn Cc: Pavel Emelyanov Cc: Vasiliy Kulikov Cc: KAMEZAWA Hiroyuki Cc: Michael Kerrisk Cc: Julien Tinnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 28df70774b81..4d814aa97785 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -18,6 +18,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -1780,6 +1781,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +static inline int check_data_rlimit(unsigned long rlim, + unsigned long new, + unsigned long start, + unsigned long end_data, + unsigned long start_data) +{ + if (rlim < RLIM_INFINITY) { + if (((new - start) + (end_data - start_data)) > rlim) + return -ENOSPC; + } + + return 0; +} + extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); From 8764b338b37524ab1a78aee527318ebee9762487 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 9 Oct 2014 15:27:32 -0700 Subject: [PATCH 084/164] mm: use may_adjust_brk helper Signed-off-by: Cyrill Gorcunov Cc: Kees Cook Cc: Tejun Heo Cc: Andrew Vagin Cc: Eric W. Biederman Cc: H. Peter Anvin Acked-by: Serge Hallyn Cc: Pavel Emelyanov Cc: Vasiliy Kulikov Cc: KAMEZAWA Hiroyuki Cc: Michael Kerrisk Cc: Julien Tinnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 11 ++++------- mm/mmap.c | 7 +++---- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/kernel/sys.c b/kernel/sys.c index ce8129192a26..7879729bd3bd 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1693,7 +1693,6 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) static int prctl_set_mm(int opt, unsigned long addr, unsigned long arg4, unsigned long arg5) { - unsigned long rlim = rlimit(RLIMIT_DATA); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int error; @@ -1733,9 +1732,8 @@ static int prctl_set_mm(int opt, unsigned long addr, if (addr <= mm->end_data) goto out; - if (rlim < RLIM_INFINITY && - (mm->brk - addr) + - (mm->end_data - mm->start_data) > rlim) + if (check_data_rlimit(rlimit(RLIMIT_DATA), mm->brk, addr, + mm->end_data, mm->start_data)) goto out; mm->start_brk = addr; @@ -1745,9 +1743,8 @@ static int prctl_set_mm(int opt, unsigned long addr, if (addr <= mm->end_data) goto out; - if (rlim < RLIM_INFINITY && - (addr - mm->start_brk) + - (mm->end_data - mm->start_data) > rlim) + if (check_data_rlimit(rlimit(RLIMIT_DATA), addr, mm->start_brk, + mm->end_data, mm->start_data)) goto out; mm->brk = addr; diff --git a/mm/mmap.c b/mm/mmap.c index 2814189f501e..7ff38f1a66ec 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -268,7 +268,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len); SYSCALL_DEFINE1(brk, unsigned long, brk) { - unsigned long rlim, retval; + unsigned long retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; unsigned long min_brk; @@ -298,9 +298,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ - rlim = rlimit(RLIMIT_DATA); - if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + - (mm->end_data - mm->start_data) > rlim) + if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, + mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); From 
71fe97e185040c5dac3216cd54e186dfa534efa0 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 9 Oct 2014 15:27:34 -0700 Subject: [PATCH 085/164] prctl: PR_SET_MM -- factor out mmap_sem when updating mm::exe_file Instead of taking mm->mmap_sem inside prctl_set_mm_exe_file(), move it out and rename the helper to prctl_set_mm_exe_file_locked(). This will allow us to reuse this function in a later patch. Signed-off-by: Cyrill Gorcunov Cc: Kees Cook Cc: Tejun Heo Cc: Andrew Vagin Cc: Eric W. Biederman Cc: H. Peter Anvin Acked-by: Serge Hallyn Cc: Pavel Emelyanov Cc: Vasiliy Kulikov Cc: KAMEZAWA Hiroyuki Cc: Michael Kerrisk Cc: Julien Tinnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/kernel/sys.c b/kernel/sys.c index 7879729bd3bd..14222a1699c0 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1628,12 +1628,14 @@ SYSCALL_DEFINE1(umask, int, mask) return mask; } -static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) +static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) { struct fd exe; struct inode *inode; int err; + VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); + exe = fdget(fd); if (!exe.file) return -EBADF; @@ -1654,8 +1656,6 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) if (err) goto exit; - down_write(&mm->mmap_sem); - /* * Forbid mm->exe_file change if old file still mapped. */ @@ -1667,7 +1667,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) if (vma->vm_file && path_equal(&vma->vm_file->f_path, &mm->exe_file->f_path)) - goto exit_unlock; + goto exit; } /* @@ -1678,13 +1678,10 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) */ err = -EPERM; if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) - goto exit_unlock; + goto exit; err = 0; set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */ -exit_unlock: - up_write(&mm->mmap_sem); - exit: fdput(exe); return err; @@ -1703,8 +1700,12 @@ static int prctl_set_mm(int opt, unsigned long addr, if (!capable(CAP_SYS_RESOURCE)) return -EPERM; - if (opt == PR_SET_MM_EXE_FILE) - return prctl_set_mm_exe_file(mm, (unsigned int)addr); + if (opt == PR_SET_MM_EXE_FILE) { + down_write(&mm->mmap_sem); + error = prctl_set_mm_exe_file_locked(mm, (unsigned int)addr); + up_write(&mm->mmap_sem); + return error; + } if (addr >= TASK_SIZE || addr < mmap_min_addr) return -EINVAL; From f606b77f1a9e362451aca8f81d8f36a3a112139e Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 9 Oct 2014 15:27:37 -0700 Subject: [PATCH 086/164] prctl: PR_SET_MM -- introduce PR_SET_MM_MAP operation During development of c/r we noticed that if we need to support user namespaces we face a problem with capabilities in the prctl(PR_SET_MM, ...) call: in particular, once a new user namespace is created, capable(CAP_SYS_RESOURCE) no longer passes. One approach is to eliminate the CAP_SYS_RESOURCE check but pass all new values in one bundle, which would allow the kernel to perform a more thorough sanity check of the values and at the same time allow us to support checkpoint/restore of user namespaces. Thus a new command, PR_SET_MM_MAP, is introduced. It takes a pointer to a prctl_mm_map structure which carries all the members to be updated.
prctl(PR_SET_MM, PR_SET_MM_MAP, struct prctl_mm_map *, size) struct prctl_mm_map { __u64 start_code; __u64 end_code; __u64 start_data; __u64 end_data; __u64 start_brk; __u64 brk; __u64 start_stack; __u64 arg_start; __u64 arg_end; __u64 env_start; __u64 env_end; __u64 *auxv; __u32 auxv_size; __u32 exe_fd; }; All members except @exe_fd correspond to members of struct mm_struct. To clarify which values these members may take, here are their meanings: - start_code, end_code: represent the bounds of the executable code area - start_data, end_data: represent the bounds of the data area - start_brk, brk: used to calculate the bounds for the brk() syscall - start_stack: used when accounting the space needed for command line arguments, environment and the shmat() syscall - arg_start, arg_end, env_start, env_end: represent the memory area supplied for command line arguments and environment variables - auxv, auxv_size: carry the auxiliary vector, an ELF format specific - exe_fd: file descriptor number for the executable link (/proc/self/exe) Thus we apply the following requirements to the values: 1) Any member except @auxv, @auxv_size and @exe_fd is an address in user space and thus must lie inside the [mmap_min_addr, mmap_max_addr) interval. 2) While @[start|end]_code and @[start|end]_data may point to nonexistent VMAs (say a program maps its own new .text and .data segments during execution), the rest of the members should belong to a VMA which must exist. 3) Addresses must be ordered, i.e. a @start_ member must not be greater than or equal to the corresponding @end_ member. 4) As in the regular ELF loading procedure, we require that @start_brk and @brk be greater than @end_data. 5) If the RLIMIT_DATA rlimit is set to non-infinity, the new values should not exceed the existing limit. The same applies to RLIMIT_STACK. 6) The auxiliary vector size must not exceed the existing one (which is predefined as AT_VECTOR_SIZE and depends on the architecture). 7) The file descriptor passed in @exe_fd should point to an executable file (because we use the existing prctl_set_mm_exe_file_locked helper, it ensures that the file we are going to use as the exe link has all required permissions granted). Now about where these members are involved inside the kernel code: - @start_code and @end_code are used in /proc/$pid/[stat|statm] output; - @start_data and @end_data are used in /proc/$pid/[stat|statm] output, and they are also considered when checking whether there is enough space for the brk() syscall result if RLIMIT_DATA is set; - @start_brk is shown in /proc/$pid/stat output and accounted in the brk() syscall if RLIMIT_DATA is set; this member is also tested to find a symbolic name of the mmap event for the perf system (we decide whether an event is generated for the "heap" area); one more application is selinux -- we test whether a process has the PROCESS__EXECHEAP permission when trying to make the heap area executable with the mprotect() syscall; - @brk is the current value for the brk() syscall, which lies inside the heap area; it is shown in /proc/$pid/stat. When the brk() syscall successfully provides a new memory area to user space, mm::brk is updated upon completion to carry the new value. Both @start_brk and @brk are actively used in /proc/$pid/maps and /proc/$pid/smaps output to find the symbolic name "heap" for the VMA being scanned; - @start_stack is printed out in /proc/$pid/stat and used to find the symbolic name "stack" for the task and its threads in /proc/$pid/maps and /proc/$pid/smaps output, and, as with @start_brk, the perf system uses it for event naming.
The kernel also treats this member as the start address of where to map vDSO pages and uses it to check whether there is enough space for the shmat() syscall; - @arg_start, @arg_end, @env_start and @env_end are printed out in /proc/$pid/stat. Another way to access the data these members represent is to read /proc/$pid/environ or /proc/$pid/cmdline. The kernel checks any attempt to read these areas with the access_process_vm helper, so a user must have sufficient rights for this action; - @auxv and @auxv_size may be read from /proc/$pid/auxv. Strictly speaking, the kernel doesn't care much about exactly which data is sitting there because it is solely for userspace; - @exe_fd is referenced from /proc/$pid/exe and when generating a coredump. We use the prctl_set_mm_exe_file_locked helper to update this member, so exe-file link modification remains a one-shot action. Note that updating the exe-file link no longer requires the sys-resource capability; after all, there is not much profit in preventing a process from setting up its own file link (there are a number of ways to execute one's own code -- ptrace, ld-preload -- so the only reliable way to find out exactly which code is executed is to inspect the running program's memory). Still, we require the caller to be at least the user-namespace root user. I believe the old interface should be deprecated and ripped out in a couple of kernel releases if no one is against it. To test whether the new interface is implemented in the kernel, one can pass the PR_SET_MM_MAP_SIZE opcode and the kernel returns the size of the currently supported struct prctl_mm_map. [akpm@linux-foundation.org: fix 80-col wordwrap in macro definitions] Signed-off-by: Cyrill Gorcunov Cc: Kees Cook Cc: Tejun Heo Acked-by: Andrew Vagin Tested-by: Andrew Vagin Cc: Eric W. Biederman Cc: H. Peter Anvin Acked-by: Serge Hallyn Cc: Pavel Emelyanov Cc: Vasiliy Kulikov Cc: KAMEZAWA Hiroyuki Cc: Michael Kerrisk Cc: Julien Tinnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/uapi/linux/prctl.h | 27 ++++++ kernel/sys.c | 190 ++++++++++++++++++++++++++++++++++++- 2 files changed, 216 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 58afc04c107e..513df75d0fc9 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PRCTL_H #define _LINUX_PRCTL_H +#include + /* Values to pass as first argument to prctl() */ #define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ @@ -119,6 +121,31 @@ # define PR_SET_MM_ENV_END 11 # define PR_SET_MM_AUXV 12 # define PR_SET_MM_EXE_FILE 13 +# define PR_SET_MM_MAP 14 +# define PR_SET_MM_MAP_SIZE 15 + +/* + * This structure provides new memory descriptor + * map which mostly modifies /proc/pid/stat[m] + * output for a task. This mostly done in a + * sake of checkpoint/restore functionality. + */ +struct prctl_mm_map { + __u64 start_code; /* code section bounds */ + __u64 end_code; + __u64 start_data; /* data section bounds */ + __u64 end_data; + __u64 start_brk; /* heap for brk() syscall */ + __u64 brk; + __u64 start_stack; /* stack starts at */ + __u64 arg_start; /* command line arguments bounds */ + __u64 arg_end; + __u64 env_start; /* environment variables bounds */ + __u64 env_end; + __u64 *auxv; /* auxiliary vector */ + __u32 auxv_size; /* vector size */ + __u32 exe_fd; /* /proc/$pid/exe link file */ +}; /* * Set specific pid that is allowed to ptrace the current task.
diff --git a/kernel/sys.c b/kernel/sys.c index 14222a1699c0..f7030b060018 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1687,6 +1687,187 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) return err; } +#ifdef CONFIG_CHECKPOINT_RESTORE +/* + * WARNING: we don't require any capability here so be very careful + * in what is allowed for modification from userspace. + */ +static int validate_prctl_map(struct prctl_mm_map *prctl_map) +{ + unsigned long mmap_max_addr = TASK_SIZE; + struct mm_struct *mm = current->mm; + int error = -EINVAL, i; + + static const unsigned char offsets[] = { + offsetof(struct prctl_mm_map, start_code), + offsetof(struct prctl_mm_map, end_code), + offsetof(struct prctl_mm_map, start_data), + offsetof(struct prctl_mm_map, end_data), + offsetof(struct prctl_mm_map, start_brk), + offsetof(struct prctl_mm_map, brk), + offsetof(struct prctl_mm_map, start_stack), + offsetof(struct prctl_mm_map, arg_start), + offsetof(struct prctl_mm_map, arg_end), + offsetof(struct prctl_mm_map, env_start), + offsetof(struct prctl_mm_map, env_end), + }; + + /* + * Make sure the members are not somewhere outside + * of allowed address space. + */ + for (i = 0; i < ARRAY_SIZE(offsets); i++) { + u64 val = *(u64 *)((char *)prctl_map + offsets[i]); + + if ((unsigned long)val >= mmap_max_addr || + (unsigned long)val < mmap_min_addr) + goto out; + } + + /* + * Make sure the pairs are ordered. + */ +#define __prctl_check_order(__m1, __op, __m2) \ + ((unsigned long)prctl_map->__m1 __op \ + (unsigned long)prctl_map->__m2) ? 0 : -EINVAL + error = __prctl_check_order(start_code, <, end_code); + error |= __prctl_check_order(start_data, <, end_data); + error |= __prctl_check_order(start_brk, <=, brk); + error |= __prctl_check_order(arg_start, <=, arg_end); + error |= __prctl_check_order(env_start, <=, env_end); + if (error) + goto out; +#undef __prctl_check_order + + error = -EINVAL; + + /* + * @brk should be after @end_data in traditional maps. + */ + if (prctl_map->start_brk <= prctl_map->end_data || + prctl_map->brk <= prctl_map->end_data) + goto out; + + /* + * Neither we should allow to override limits if they set. + */ + if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk, + prctl_map->start_brk, prctl_map->end_data, + prctl_map->start_data)) + goto out; + + /* + * Someone is trying to cheat the auxv vector. + */ + if (prctl_map->auxv_size) { + if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv)) + goto out; + } + + /* + * Finally, make sure the caller has the rights to + * change /proc/pid/exe link: only local root should + * be allowed to. 
+ */ + if (prctl_map->exe_fd != (u32)-1) { + struct user_namespace *ns = current_user_ns(); + const struct cred *cred = current_cred(); + + if (!uid_eq(cred->uid, make_kuid(ns, 0)) || + !gid_eq(cred->gid, make_kgid(ns, 0))) + goto out; + } + + error = 0; +out: + return error; +} + +static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size) +{ + struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, }; + unsigned long user_auxv[AT_VECTOR_SIZE]; + struct mm_struct *mm = current->mm; + int error; + + BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); + BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256); + + if (opt == PR_SET_MM_MAP_SIZE) + return put_user((unsigned int)sizeof(prctl_map), + (unsigned int __user *)addr); + + if (data_size != sizeof(prctl_map)) + return -EINVAL; + + if (copy_from_user(&prctl_map, addr, sizeof(prctl_map))) + return -EFAULT; + + error = validate_prctl_map(&prctl_map); + if (error) + return error; + + if (prctl_map.auxv_size) { + memset(user_auxv, 0, sizeof(user_auxv)); + if (copy_from_user(user_auxv, + (const void __user *)prctl_map.auxv, + prctl_map.auxv_size)) + return -EFAULT; + + /* Last entry must be AT_NULL as specification requires */ + user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL; + user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; + } + + down_write(&mm->mmap_sem); + if (prctl_map.exe_fd != (u32)-1) + error = prctl_set_mm_exe_file_locked(mm, prctl_map.exe_fd); + downgrade_write(&mm->mmap_sem); + if (error) + goto out; + + /* + * We don't validate if these members are pointing to + * real present VMAs because application may have correspond + * VMAs already unmapped and kernel uses these members for statistics + * output in procfs mostly, except + * + * - @start_brk/@brk which are used in do_brk but kernel lookups + * for VMAs when updating these memvers so anything wrong written + * here cause kernel to swear at userspace program but won't lead + * to any problem in kernel itself + */ + + mm->start_code = prctl_map.start_code; + mm->end_code = prctl_map.end_code; + mm->start_data = prctl_map.start_data; + mm->end_data = prctl_map.end_data; + mm->start_brk = prctl_map.start_brk; + mm->brk = prctl_map.brk; + mm->start_stack = prctl_map.start_stack; + mm->arg_start = prctl_map.arg_start; + mm->arg_end = prctl_map.arg_end; + mm->env_start = prctl_map.env_start; + mm->env_end = prctl_map.env_end; + + /* + * Note this update of @saved_auxv is lockless thus + * if someone reads this member in procfs while we're + * updating -- it may get partly updated results. It's + * known and acceptable trade off: we leave it as is to + * not introduce additional locks here making the kernel + * more complex. 
+ */ + if (prctl_map.auxv_size) + memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv)); + + error = 0; +out: + up_read(&mm->mmap_sem); + return error; +} +#endif /* CONFIG_CHECKPOINT_RESTORE */ + static int prctl_set_mm(int opt, unsigned long addr, unsigned long arg4, unsigned long arg5) { @@ -1694,9 +1875,16 @@ static int prctl_set_mm(int opt, unsigned long addr, struct vm_area_struct *vma; int error; - if (arg5 || (arg4 && opt != PR_SET_MM_AUXV)) + if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV && + opt != PR_SET_MM_MAP && + opt != PR_SET_MM_MAP_SIZE))) return -EINVAL; +#ifdef CONFIG_CHECKPOINT_RESTORE + if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE) + return prctl_set_mm_map(opt, (const void __user *)addr, arg4); +#endif + if (!capable(CAP_SYS_RESOURCE)) return -EPERM; From 1f13ae399c58af5a05b5cee61da864e1f4071de4 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:27:39 -0700 Subject: [PATCH 087/164] mm: remove noisy remainder of the scan_unevictable interface The deprecation warnings for the scan_unevictable interface are triggered by scripts doing `sysctl -a | grep something else'. This is annoying and not helpful. The interface has been defunct since 264e56d8247e ("mm: disable user interface to manually rescue unevictable pages"), which was in 2011, and there haven't been any reports of use cases for it, only reports that the deprecation warnings are annoying. It's unlikely that anybody is using this interface specifically at this point, so remove it. Signed-off-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/stable/sysfs-devices-node | 8 --- drivers/base/node.c | 3 - include/linux/swap.h | 16 ------ kernel/sysctl.c | 7 --- mm/vmscan.c | 63 --------------------- 5 files changed, 97 deletions(-) diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node index ce259c13c36a..5b2d0f08867c 100644 --- a/Documentation/ABI/stable/sysfs-devices-node +++ b/Documentation/ABI/stable/sysfs-devices-node @@ -85,14 +85,6 @@ Description: will be compacted. When it completes, memory will be freed into blocks which have as many contiguous pages as possible -What: /sys/devices/system/node/nodeX/scan_unevictable_pages -Date: October 2008 -Contact: Lee Schermerhorn -Description: - When set, it triggers scanning the node's unevictable lists - and move any pages that have become evictable onto the respective - zone's inactive list.
See mm/vmscan.c - What: /sys/devices/system/node/nodeX/hugepages/hugepages-/ Date: December 2009 Contact: Lee Schermerhorn diff --git a/drivers/base/node.c b/drivers/base/node.c index d51c49c9bafa..472168cd0c97 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -289,8 +289,6 @@ static int register_node(struct node *node, int num, struct node *parent) device_create_file(&node->dev, &dev_attr_distance); device_create_file(&node->dev, &dev_attr_vmstat); - scan_unevictable_register_node(node); - hugetlb_register_node(node); compaction_register_node(node); @@ -314,7 +312,6 @@ void unregister_node(struct node *node) device_remove_file(&node->dev, &dev_attr_distance); device_remove_file(&node->dev, &dev_attr_vmstat); - scan_unevictable_unregister_node(node); hugetlb_unregister_node(node); /* no-op, if memoryless node */ device_unregister(&node->dev); diff --git a/include/linux/swap.h b/include/linux/swap.h index 1b72060f093a..ea4f926e6b9b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -354,22 +354,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) extern int page_evictable(struct page *page); extern void check_move_unevictable_pages(struct page **, int nr_pages); -extern unsigned long scan_unevictable_pages; -extern int scan_unevictable_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -#ifdef CONFIG_NUMA -extern int scan_unevictable_register_node(struct node *node); -extern void scan_unevictable_unregister_node(struct node *node); -#else -static inline int scan_unevictable_register_node(struct node *node) -{ - return 0; -} -static inline void scan_unevictable_unregister_node(struct node *node) -{ -} -#endif - extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_MEMCG diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 75875a741b5e..91180987e40e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1460,13 +1460,6 @@ static struct ctl_table vm_table[] = { .extra2 = &one, }, #endif - { - .procname = "scan_unevictable_pages", - .data = &scan_unevictable_pages, - .maxlen = sizeof(scan_unevictable_pages), - .mode = 0644, - .proc_handler = scan_unevictable_handler, - }, #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", diff --git a/mm/vmscan.c b/mm/vmscan.c index 1a71b8b1ea34..af72fe8e8d74 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3797,66 +3797,3 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages) } } #endif /* CONFIG_SHMEM */ - -static void warn_scan_unevictable_pages(void) -{ - printk_once(KERN_WARNING - "%s: The scan_unevictable_pages sysctl/node-interface has been " - "disabled for lack of a legitimate use case. If you have " - "one, please send an email to linux-mm@kvack.org.\n", - current->comm); -} - -/* - * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of - * all nodes' unevictable lists for evictable pages - */ -unsigned long scan_unevictable_pages; - -int scan_unevictable_handler(struct ctl_table *table, int write, - void __user *buffer, - size_t *length, loff_t *ppos) -{ - warn_scan_unevictable_pages(); - proc_doulongvec_minmax(table, write, buffer, length, ppos); - scan_unevictable_pages = 0; - return 0; -} - -#ifdef CONFIG_NUMA -/* - * per node 'scan_unevictable_pages' attribute. On demand re-scan of - * a specified node's per zone unevictable lists for evictable pages. 
- */ - -static ssize_t read_scan_unevictable_node(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - warn_scan_unevictable_pages(); - return sprintf(buf, "0\n"); /* always zero; should fit... */ -} - -static ssize_t write_scan_unevictable_node(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - warn_scan_unevictable_pages(); - return 1; -} - - -static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, - read_scan_unevictable_node, - write_scan_unevictable_node); - -int scan_unevictable_register_node(struct node *node) -{ - return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); -} - -void scan_unevictable_unregister_node(struct node *node) -{ - device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); -} -#endif From 2386740d1add7bb5048c731dd1127a4e9911a3ed Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:41 -0700 Subject: [PATCH 088/164] mempolicy: change alloc_pages_vma() to use mpol_cond_put() Trivial cleanup. alloc_pages_vma() can use mpol_cond_put(). Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 8f5330d74f47..c0c5d388046f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2046,8 +2046,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, page = __alloc_pages_nodemask(gfp, order, policy_zonelist(gfp, pol, node), policy_nodemask(gfp, pol)); - if (unlikely(mpol_needs_cond_ref(pol))) - __mpol_put(pol); + mpol_cond_put(pol); if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return page; From f15ca78e33b0bb5acc0c5d9a5d5be3c55c4f0bb7 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:43 -0700 Subject: [PATCH 089/164] mempolicy: change get_task_policy() to return default_policy rather than NULL Every caller of get_task_policy() falls back to default_policy if it returns NULL. Change get_task_policy() to do this. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. 
Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index c0c5d388046f..656db97584f0 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -126,22 +126,20 @@ static struct mempolicy preferred_node_policy[MAX_NUMNODES]; static struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; + int node; - if (!pol) { - int node = numa_node_id(); + if (pol) + return pol; - if (node != NUMA_NO_NODE) { - pol = &preferred_node_policy[node]; - /* - * preferred_node_policy is not initialised early in - * boot - */ - if (!pol->mode) - pol = NULL; - } + node = numa_node_id(); + if (node != NUMA_NO_NODE) { + pol = &preferred_node_policy[node]; + /* preferred_node_policy is not initialised early in boot */ + if (pol->mode) + return pol; } - return pol; + return &default_policy; } static const struct mempolicy_operations { @@ -1644,14 +1642,14 @@ struct mempolicy *get_vma_policy(struct task_struct *task, mpol_get(pol); } } - if (!pol) - pol = &default_policy; + return pol; } bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma) { struct mempolicy *pol = get_task_policy(task); + if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { bool ret = false; @@ -1667,9 +1665,6 @@ bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma) } } - if (!pol) - return default_policy.flags & MPOL_F_MOF; - return pol->flags & MPOL_F_MOF; } @@ -2077,7 +2072,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) struct page *page; unsigned int cpuset_mems_cookie; - if (!pol || in_interrupt() || (gfp & __GFP_THISNODE)) + if (in_interrupt() || (gfp & __GFP_THISNODE)) pol = &default_policy; retry_cpuset: From 8d90274b3b118c9babeefb1302947f33a1364fb5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:45 -0700 Subject: [PATCH 090/164] mempolicy: sanitize the usage of get_task_policy() Cleanup + preparation. Every user of get_task_policy() calls it unconditionally, even if it is not going to use the result. get_task_policy() is cheap but still this does not look clean, plus the code looks simpler if get_task_policy() is called only when this is really needed. Note: I hope this is correct, but it is not clear why vma_policy_mof() doesn't fall back to get_task_policy() if ->get_policy() returns NULL. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. 
Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 656db97584f0..b86b08e77b8d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1621,14 +1621,11 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, struct mempolicy *get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = get_task_policy(task); + struct mempolicy *pol = NULL; if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { - struct mempolicy *vpol = vma->vm_ops->get_policy(vma, - addr); - if (vpol) - pol = vpol; + pol = vma->vm_ops->get_policy(vma, addr); } else if (vma->vm_policy) { pol = vma->vm_policy; @@ -1643,12 +1640,15 @@ struct mempolicy *get_vma_policy(struct task_struct *task, } } + if (!pol) + pol = get_task_policy(task); + return pol; } bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma) { - struct mempolicy *pol = get_task_policy(task); + struct mempolicy *pol = NULL; if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { @@ -1660,11 +1660,14 @@ bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma) mpol_cond_put(pol); return ret; - } else if (vma->vm_policy) { - pol = vma->vm_policy; } + + pol = vma->vm_policy; } + if (!pol) + pol = get_task_policy(task); + return pol->flags & MPOL_F_MOF; } @@ -2068,12 +2071,12 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, */ struct page *alloc_pages_current(gfp_t gfp, unsigned order) { - struct mempolicy *pol = get_task_policy(current); + struct mempolicy *pol = &default_policy; struct page *page; unsigned int cpuset_mems_cookie; - if (in_interrupt() || (gfp & __GFP_THISNODE)) - pol = &default_policy; + if (!in_interrupt() && !(gfp & __GFP_THISNODE)) + pol = get_task_policy(current); retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); From 6b6482bbf64ef6f6dbc8b52f7a7cf88a0498bd51 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:48 -0700 Subject: [PATCH 091/164] mempolicy: remove the "task" arg of vma_policy_mof() and simplify it 1. vma_policy_mof(task) is simply not safe unless task == current, it can race with do_exit()->mpol_put(). Remove this arg and update its single caller. 2. vma can not be NULL, remove this check and simplify the code. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. 
Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 +- kernel/sched/fair.c | 2 +- mm/mempolicy.c | 25 +++++++++++-------------- 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index f230a978e6ba..5e4bfcedd2ce 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -136,7 +136,7 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_vma_policy(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long addr); -bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma); +bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); extern void numa_policy_init(void); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bfa3c86d0d68..82088b29704e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1946,7 +1946,7 @@ void task_numa_work(struct callback_head *work) vma = mm->mmap; } for (; vma; vma = vma->vm_next) { - if (!vma_migratable(vma) || !vma_policy_mof(p, vma)) + if (!vma_migratable(vma) || !vma_policy_mof(vma)) continue; /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b86b08e77b8d..ad27bbc757bf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1646,27 +1646,24 @@ struct mempolicy *get_vma_policy(struct task_struct *task, return pol; } -bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma) +bool vma_policy_mof(struct vm_area_struct *vma) { - struct mempolicy *pol = NULL; + struct mempolicy *pol; - if (vma) { - if (vma->vm_ops && vma->vm_ops->get_policy) { - bool ret = false; + if (vma->vm_ops && vma->vm_ops->get_policy) { + bool ret = false; - pol = vma->vm_ops->get_policy(vma, vma->vm_start); - if (pol && (pol->flags & MPOL_F_MOF)) - ret = true; - mpol_cond_put(pol); + pol = vma->vm_ops->get_policy(vma, vma->vm_start); + if (pol && (pol->flags & MPOL_F_MOF)) + ret = true; + mpol_cond_put(pol); - return ret; - } - - pol = vma->vm_policy; + return ret; } + pol = vma->vm_policy; if (!pol) - pol = get_task_policy(task); + pol = get_task_policy(current); return pol->flags & MPOL_F_MOF; } From 74d2c3a05cc6c1eef2d7236a9919036ed85ddaaf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:50 -0700 Subject: [PATCH 092/164] mempolicy: introduce __get_vma_policy(), export get_task_policy() Extract the code which looks for vma's policy from get_vma_policy() into the new helper, __get_vma_policy(). Export get_task_policy(). Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. 
Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 3 +++ mm/mempolicy.c | 44 +++++++++++++++++++++++---------------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5e4bfcedd2ce..e1abe249892a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -134,6 +134,9 @@ void mpol_free_shared_policy(struct shared_policy *p); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx); +struct mempolicy *get_task_policy(struct task_struct *p); +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); struct mempolicy *get_vma_policy(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ad27bbc757bf..4378c334e89b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -123,7 +123,7 @@ static struct mempolicy default_policy = { static struct mempolicy preferred_node_policy[MAX_NUMNODES]; -static struct mempolicy *get_task_policy(struct task_struct *p) +struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; int node; @@ -1603,23 +1603,8 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, #endif -/* - * get_vma_policy(@task, @vma, @addr) - * @task: task for fallback if vma policy == default - * @vma: virtual memory area whose policy is sought - * @addr: address in @vma for shared policy lookup - * - * Returns effective policy for a VMA at specified address. - * Falls back to @task or system default policy, as necessary. - * Current or other task's task mempolicy and non-shared vma policies must be - * protected by task_lock(task) by the caller. - * Shared policies [those marked as MPOL_F_SHARED] require an extra reference - * count--added by the get_policy() vm_op, as appropriate--to protect against - * freeing by another task. It is the caller's responsibility to free the - * extra reference for shared policies. - */ -struct mempolicy *get_vma_policy(struct task_struct *task, - struct vm_area_struct *vma, unsigned long addr) +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol = NULL; @@ -1640,6 +1625,29 @@ struct mempolicy *get_vma_policy(struct task_struct *task, } } + return pol; +} + +/* + * get_vma_policy(@task, @vma, @addr) + * @task: task for fallback if vma policy == default + * @vma: virtual memory area whose policy is sought + * @addr: address in @vma for shared policy lookup + * + * Returns effective policy for a VMA at specified address. + * Falls back to @task or system default policy, as necessary. + * Current or other task's task mempolicy and non-shared vma policies must be + * protected by task_lock(task) by the caller. + * Shared policies [those marked as MPOL_F_SHARED] require an extra reference + * count--added by the get_policy() vm_op, as appropriate--to protect against + * freeing by another task. It is the caller's responsibility to free the + * extra reference for shared policies. 
+ */ +struct mempolicy *get_vma_policy(struct task_struct *task, + struct vm_area_struct *vma, unsigned long addr) +{ + struct mempolicy *pol = __get_vma_policy(vma, addr); + if (!pol) pol = get_task_policy(task); From 498f237178a3d3151f7ebe329af9a4734e41f6ed Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:52 -0700 Subject: [PATCH 093/164] mempolicy: fix show_numa_map() vs exec() + do_set_mempolicy() race 9e7814404b77 "hold task->mempolicy while numa_maps scans." fixed the race with the exiting task but this is not enough. The current code assumes that get_vma_policy(task) should either see task->mempolicy == NULL or it should be equal to ->task_mempolicy saved by hold_task_mempolicy(), so we can never race with __mpol_put(). But this can only work if we can't race with do_set_mempolicy(), and thus we can't race with another do_set_mempolicy() or do_exit() after that. However, do_set_mempolicy()->down_write(mmap_sem) can not prevent this race. This task can exec, change it's ->mm, and call do_set_mempolicy() after that; in this case they take 2 different locks. Change hold_task_mempolicy() to use get_task_policy(), it never returns NULL, and change show_numa_map() to use __get_vma_policy() or fall back to proc_priv->task_mempolicy. Note: this is the minimal fix, we will cleanup this code later. I think hold_task_mempolicy() and release_task_mempolicy() should die, we can move this logic into show_numa_map(). Or we can move get_task_policy() outside of ->mmap_sem and !CONFIG_NUMA code at least. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index adddf697c4ea..1acec26a3758 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -87,32 +87,14 @@ unsigned long task_statm(struct mm_struct *mm, #ifdef CONFIG_NUMA /* - * These functions are for numa_maps but called in generic **maps seq_file - * ->start(), ->stop() ops. - * - * numa_maps scans all vmas under mmap_sem and checks their mempolicy. - * Each mempolicy object is controlled by reference counting. The problem here - * is how to avoid accessing dead mempolicy object. - * - * Because we're holding mmap_sem while reading seq_file, it's safe to access - * each vma's mempolicy, no vma objects will never drop refs to mempolicy. - * - * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy - * is set and replaced under mmap_sem but unrefed and cleared under task_lock(). - * So, without task_lock(), we cannot trust get_vma_policy() because we cannot - * gurantee the task never exits under us. But taking task_lock() around - * get_vma_plicy() causes lock order problem. - * - * To access task->mempolicy without lock, we hold a reference count of an - * object pointed by task->mempolicy and remember it. This will guarantee - * that task->mempolicy points to an alive object or NULL in numa_maps accesses. + * Save get_task_policy() for show_numa_map(). 
*/ static void hold_task_mempolicy(struct proc_maps_private *priv) { struct task_struct *task = priv->task; task_lock(task); - priv->task_mempolicy = task->mempolicy; + priv->task_mempolicy = get_task_policy(task); mpol_get(priv->task_mempolicy); task_unlock(task); } @@ -1431,7 +1413,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) struct vm_area_struct *vma = v; struct numa_maps *md = &numa_priv->md; struct file *file = vma->vm_file; - struct task_struct *task = proc_priv->task; struct mm_struct *mm = vma->vm_mm; struct mm_walk walk = {}; struct mempolicy *pol; @@ -1451,9 +1432,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) walk.private = md; walk.mm = mm; - pol = get_vma_policy(task, vma, vma->vm_start); - mpol_to_str(buffer, sizeof(buffer), pol); - mpol_cond_put(pol); + pol = __get_vma_policy(vma, vma->vm_start); + if (pol) { + mpol_to_str(buffer, sizeof(buffer), pol); + mpol_cond_put(pol); + } else { + mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); + } seq_printf(m, "%08lx %s", vma->vm_start, buffer); From 2c7c3a7d08b28278112f2aaa0b7cf53140101e2a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:55 -0700 Subject: [PATCH 094/164] mempolicy: kill do_set_mempolicy()->down_write(&mm->mmap_sem) Remove down_write(&mm->mmap_sem) in do_set_mempolicy(). This logic was never correct and it is no longer needed, see the previous patch. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 4378c334e89b..9695a9a3ab90 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -802,7 +802,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; - struct mm_struct *mm = current->mm; NODEMASK_SCRATCH(scratch); int ret; @@ -814,20 +813,11 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, ret = PTR_ERR(new); goto out; } - /* - * prevent changing our mempolicy while show_numa_maps() - * is using it. - * Note: do_set_mempolicy() can be called at init time - * with no 'mm'. - */ - if (mm) - down_write(&mm->mmap_sem); + task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); - if (mm) - up_write(&mm->mmap_sem); mpol_put(new); goto out; } @@ -837,9 +827,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodes_weight(new->v.nodes)) current->il_next = first_node(new->v.nodes); task_unlock(current); - if (mm) - up_write(&mm->mmap_sem); - mpol_put(old); ret = 0; out: From dd6eecb917938c1b7e505a83df307b3476e7c8bd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:57 -0700 Subject: [PATCH 095/164] mempolicy: unexport get_vma_policy() and remove its "task" arg - get_vma_policy(task) is not safe if task != current, remove this argument. - get_vma_policy() no longer has callers outside of mempolicy.c, make it static. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. 
Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 -- mm/mempolicy.c | 19 ++++++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index e1abe249892a..3d385c81c153 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -137,8 +137,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr); -struct mempolicy *get_vma_policy(struct task_struct *tsk, - struct vm_area_struct *vma, unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9695a9a3ab90..008fb32936eb 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1616,27 +1616,24 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, } /* - * get_vma_policy(@task, @vma, @addr) - * @task: task for fallback if vma policy == default + * get_vma_policy(@vma, @addr) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. - * Falls back to @task or system default policy, as necessary. - * Current or other task's task mempolicy and non-shared vma policies must be - * protected by task_lock(task) by the caller. + * Falls back to current->mempolicy or system default policy, as necessary. * Shared policies [those marked as MPOL_F_SHARED] require an extra reference * count--added by the get_policy() vm_op, as appropriate--to protect against * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. */ -struct mempolicy *get_vma_policy(struct task_struct *task, - struct vm_area_struct *vma, unsigned long addr) +static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); if (!pol) - pol = get_task_policy(task); + pol = get_task_policy(current); return pol; } @@ -1864,7 +1861,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, { struct zonelist *zl; - *mpol = get_vma_policy(current, vma, addr); + *mpol = get_vma_policy(vma, addr); *nodemask = NULL; /* assume !MPOL_BIND */ if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { @@ -2019,7 +2016,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned int cpuset_mems_cookie; retry_cpuset: - pol = get_vma_policy(current, vma, addr); + pol = get_vma_policy(vma, addr); cpuset_mems_cookie = read_mems_allowed_begin(); if (unlikely(pol->mode == MPOL_INTERLEAVE)) { @@ -2285,7 +2282,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long BUG_ON(!vma); - pol = get_vma_policy(current, vma, addr); + pol = get_vma_policy(vma, addr); if (!(pol->flags & MPOL_F_MOF)) goto out; From 1c93923cc264105418e6ead149c76bd88302eff4 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:27:59 -0700 Subject: [PATCH 096/164] include/linux/migrate.h: remove migrate_page #define This is designed to avoid a few ifdefs in .c files but it's obnoxious because it can cause unsuspecting "migrate_page" symbols to get turned into "NULL". Just nuke it and use the ifdefs. 
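As a minimal sketch of the resulting idiom (example_aops is a hypothetical name; only migrate_page and CONFIG_MIGRATION come from this patch), callers now guard the reference explicitly, exactly as the shmem and swap_state hunks below do:

	#include <linux/fs.h>
	#include <linux/migrate.h>

	static const struct address_space_operations example_aops = {
	#ifdef CONFIG_MIGRATION
		.migratepage = migrate_page,	/* reference the symbol only when it exists */
	#endif
	};
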
Cc: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 3 --- mm/shmem.c | 2 ++ mm/swap_state.c | 2 ++ 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2901c414664..b66fd10f4b93 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -82,9 +82,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, return -ENOSYS; } -/* Possible settings for the migrate_page() method in address_operations */ -#define migrate_page NULL - #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING diff --git a/mm/shmem.c b/mm/shmem.c index 469f90d56051..4fad61bb41e5 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3077,7 +3077,9 @@ static const struct address_space_operations shmem_aops = { .write_begin = shmem_write_begin, .write_end = shmem_write_end, #endif +#ifdef CONFIG_MIGRATION .migratepage = migrate_page, +#endif .error_remove_page = generic_error_remove_page, }; diff --git a/mm/swap_state.c b/mm/swap_state.c index 3e0ec83d000c..ef1f39139b71 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -28,7 +28,9 @@ static const struct address_space_operations swap_aops = { .writepage = swap_writepage, .set_page_dirty = swap_set_page_dirty, +#ifdef CONFIG_MIGRATION .migratepage = migrate_page, +#endif }; static struct backing_dev_info swap_backing_dev_info = { From 703394c1005caeccaaf64945c1b6d6cc3af0cd1d Mon Sep 17 00:00:00 2001 From: Rob Jones Date: Thu, 9 Oct 2014 15:28:01 -0700 Subject: [PATCH 097/164] mm/vmalloc.c: use seq_open_private() instead of seq_open() Using seq_open_private() removes boilerplate code from vmalloc_open(). The resultant code is shorter and easier to follow. However, please note that seq_open_private() call kzalloc() rather than kmalloc() which may affect timing due to the memory initialisation overhead. Signed-off-by: Rob Jones Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2b0aa5486092..90520af7f186 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2646,21 +2646,11 @@ static const struct seq_operations vmalloc_op = { static int vmalloc_open(struct inode *inode, struct file *file) { - unsigned int *ptr = NULL; - int ret; - - if (IS_ENABLED(CONFIG_NUMA)) { - ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); - if (ptr == NULL) - return -ENOMEM; - } - ret = seq_open(file, &vmalloc_op); - if (!ret) { - struct seq_file *m = file->private_data; - m->private = ptr; - } else - kfree(ptr); - return ret; + if (IS_ENABLED(CONFIG_NUMA)) + return seq_open_private(file, &vmalloc_op, + nr_node_ids * sizeof(unsigned int)); + else + return seq_open(file, &vmalloc_op); } static const struct file_operations proc_vmalloc_operations = { From b208ce32927ac2c4bf14edebfb3197acd7673165 Mon Sep 17 00:00:00 2001 From: Rob Jones Date: Thu, 9 Oct 2014 15:28:03 -0700 Subject: [PATCH 098/164] mm/slab.c: use __seq_open_private() instead of seq_open() Using __seq_open_private() removes boilerplate code from slabstats_open() The resultant code is shorter and easier to follow. This patch does not change any functionality. 
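For illustration only (example_open and example_seq_ops are assumed names, not part of this patch; the pattern mirrors the new slabstats_open() below), the helper folds the old kzalloc + seq_open + m->private dance into a single call:

	#include <linux/seq_file.h>

	/* __seq_open_private() allocates a zeroed buffer of the given size,
	 * calls seq_open() and stores the buffer in the seq_file's ->private;
	 * it returns the buffer, or NULL on failure. */
	static int example_open(struct inode *inode, struct file *file)
	{
		unsigned long *priv;

		priv = __seq_open_private(file, &example_seq_ops, sizeof(*priv));
		if (!priv)
			return -ENOMEM;

		*priv = PAGE_SIZE / (2 * sizeof(unsigned long));
		return 0;
	}
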
Signed-off-by: Rob Jones Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index 655d65c3f010..154aac8411c5 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4178,19 +4178,15 @@ static const struct seq_operations slabstats_op = { static int slabstats_open(struct inode *inode, struct file *file) { - unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); - int ret = -ENOMEM; - if (n) { - ret = seq_open(file, &slabstats_op); - if (!ret) { - struct seq_file *m = file->private_data; - *n = PAGE_SIZE / (2 * sizeof(unsigned long)); - m->private = n; - n = NULL; - } - kfree(n); - } - return ret; + unsigned long *n; + + n = __seq_open_private(file, &slabstats_op, PAGE_SIZE); + if (!n) + return -ENOMEM; + + *n = PAGE_SIZE / (2 * sizeof(unsigned long)); + + return 0; } static const struct file_operations proc_slabstats_operations = { From 0bf55139782db1fa96af66e37cc84afde18443ef Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:06 -0700 Subject: [PATCH 099/164] mm: introduce dump_vma Introduce a helper to dump information about a VMA, this also makes dump_page_flags more generic and re-uses that so the output looks very similar to dump_page: [ 61.903437] vma ffff88070f88be00 start 00007fff25970000 end 00007fff25992000 [ 61.903437] next ffff88070facd600 prev ffff88070face400 mm ffff88070fade000 [ 61.903437] prot 8000000000000025 anon_vma ffff88070fa1e200 vm_ops (null) [ 61.903437] pgoff 7ffffffdd file (null) private_data (null) [ 61.909129] flags: 0x100173(read|write|mayread|maywrite|mayexec|growsdown|account) [akpm@linux-foundation.org: make dump_vma() require CONFIG_DEBUG_VM] [swarren@nvidia.com: fix dump_vma() compilation] Signed-off-by: Sasha Levin Reviewed-by: Naoya Horiguchi Cc: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Hugh Dickins Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Minchan Kim Signed-off-by: Stephen Warren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 2 + mm/page_alloc.c | 82 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 9 deletions(-) diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2f348d02f640..dfb93333fc62 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -4,10 +4,12 @@ #include struct page; +struct vm_area_struct; extern void dump_page(struct page *page, const char *reason); extern void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags); +void dump_vma(const struct vm_area_struct *vma); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f07588b11d59..3a950144f80b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6626,27 +6626,26 @@ static const struct trace_print_flags pageflag_names[] = { #endif }; -static void dump_page_flags(unsigned long flags) +static void dump_flags(unsigned long flags, + const struct trace_print_flags *names, int count) { const char *delim = ""; unsigned long mask; int i; - BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); - - printk(KERN_ALERT "page flags: %#lx(", flags); + printk(KERN_ALERT "flags: %#lx(", flags); /* remove zone id */ flags &= (1UL << NR_PAGEFLAGS) - 1; - for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) { + for (i = 0; i < count && flags; i++) { - mask = pageflag_names[i].mask; + mask = names[i].mask; if ((flags & mask) != mask) continue; flags &= ~mask; - printk("%s%s", delim, pageflag_names[i].name); + printk("%s%s", delim, names[i].name); delim = "|"; } @@ -6664,12 +6663,14 @@ void dump_page_badflags(struct page *page, const char *reason, "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", page, atomic_read(&page->_count), page_mapcount(page), page->mapping, page->index); - dump_page_flags(page->flags); + BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); + dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names)); if (reason) pr_alert("page dumped because: %s\n", reason); if (page->flags & badflags) { pr_alert("bad because of flags:\n"); - dump_page_flags(page->flags & badflags); + dump_flags(page->flags & badflags, + pageflag_names, ARRAY_SIZE(pageflag_names)); } mem_cgroup_print_bad_page(page); } @@ -6679,3 +6680,66 @@ void dump_page(struct page *page, const char *reason) dump_page_badflags(page, reason, 0); } EXPORT_SYMBOL(dump_page); + +#ifdef CONFIG_DEBUG_VM + +static const struct trace_print_flags vmaflags_names[] = { + {VM_READ, "read" }, + {VM_WRITE, "write" }, + {VM_EXEC, "exec" }, + {VM_SHARED, "shared" }, + {VM_MAYREAD, "mayread" }, + {VM_MAYWRITE, "maywrite" }, + {VM_MAYEXEC, "mayexec" }, + {VM_MAYSHARE, "mayshare" }, + {VM_GROWSDOWN, "growsdown" }, + {VM_PFNMAP, "pfnmap" }, + {VM_DENYWRITE, "denywrite" }, + {VM_LOCKED, "locked" }, + {VM_IO, "io" }, + {VM_SEQ_READ, "seqread" }, + {VM_RAND_READ, "randread" }, + {VM_DONTCOPY, "dontcopy" }, + {VM_DONTEXPAND, "dontexpand" }, + {VM_ACCOUNT, "account" }, + {VM_NORESERVE, "noreserve" }, + {VM_HUGETLB, "hugetlb" }, + {VM_NONLINEAR, "nonlinear" }, +#if defined(CONFIG_X86) + {VM_PAT, "pat" }, +#elif defined(CONFIG_PPC) + {VM_SAO, "sao" }, +#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) + {VM_GROWSUP, "growsup" }, +#elif 
!defined(CONFIG_MMU) + {VM_MAPPED_COPY, "mappedcopy" }, +#else + {VM_ARCH_1, "arch_1" }, +#endif + {VM_DONTDUMP, "dontdump" }, +#ifdef CONFIG_MEM_SOFT_DIRTY + {VM_SOFTDIRTY, "softdirty" }, +#endif + {VM_MIXEDMAP, "mixedmap" }, + {VM_HUGEPAGE, "hugepage" }, + {VM_NOHUGEPAGE, "nohugepage" }, + {VM_MERGEABLE, "mergeable" }, +}; + +void dump_vma(const struct vm_area_struct *vma) +{ + printk(KERN_ALERT + "vma %p start %p end %p\n" + "next %p prev %p mm %p\n" + "prot %lx anon_vma %p vm_ops %p\n" + "pgoff %lx file %p private_data %p\n", + vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, + vma->vm_prev, vma->vm_mm, + (unsigned long)pgprot_val(vma->vm_page_prot), + vma->anon_vma, vma->vm_ops, vma->vm_pgoff, + vma->vm_file, vma->vm_private_data); + dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); +} +EXPORT_SYMBOL(dump_vma); + +#endif /* CONFIG_DEBUG_VM */ From fa3759ccd5651c4235f572302d58c8ec9ddf1c4b Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:08 -0700 Subject: [PATCH 100/164] mm: introduce VM_BUG_ON_VMA Very similar to VM_BUG_ON_PAGE but dumps VMA information instead. Signed-off-by: Sasha Levin Reviewed-by: Naoya Horiguchi Cc: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Hugh Dickins Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index dfb93333fc62..569e4c8d0ebb 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -20,12 +20,20 @@ void dump_vma(const struct vm_area_struct *vma); BUG(); \ } \ } while (0) +#define VM_BUG_ON_VMA(cond, vma) \ + do { \ + if (unlikely(cond)) { \ + dump_vma(vma); \ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) From 81d1b09c6be66afac7d41ee52279d9bccbce56d8 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:10 -0700 Subject: [PATCH 101/164] mm: convert a few VM_BUG_ON callers to VM_BUG_ON_VMA Trivially convert a few VM_BUG_ON calls to VM_BUG_ON_VMA to extract more information when they trigger. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Sasha Levin Reviewed-by: Naoya Horiguchi Cc: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Hugh Dickins Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 +- include/linux/rmap.h | 2 +- mm/huge_memory.c | 6 +++--- mm/hugetlb.c | 14 +++++++------- mm/interval_tree.c | 2 +- mm/mlock.c | 4 ++-- mm/mmap.c | 6 +++--- mm/mremap.c | 3 ++- mm/rmap.c | 8 ++++---- 9 files changed, 24 insertions(+), 23 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 63579cb8d3dc..ad9051bab267 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) { - VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); if (pmd_trans_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma, ptl); else diff --git a/include/linux/rmap.h b/include/linux/rmap.h index be574506e6a9..c0c2bce6b0b7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { - VM_BUG_ON(vma->anon_vma != next->anon_vma); + VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); unlink_anon_vmas(next); } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 55ab569c31b4..c13148cc745f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1096,7 +1096,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long mmun_end; /* For mmu_notifiers */ ptl = pmd_lockptr(mm, pmd); - VM_BUG_ON(!vma->anon_vma); + VM_BUG_ON_VMA(!vma->anon_vma, vma); haddr = address & HPAGE_PMD_MASK; if (is_huge_zero_pmd(orig_pmd)) goto alloc; @@ -2083,7 +2083,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) if (vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; - VM_BUG_ON(vma->vm_flags & VM_NO_THP); + VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) @@ -2406,7 +2406,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) return false; if (is_vma_temporary_stack(vma)) return false; - VM_BUG_ON(vma->vm_flags & VM_NO_THP); + VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); return true; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eeceeeb09019..9fd722769927 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -434,7 +434,7 @@ static inline struct resv_map *inode_resv_map(struct inode *inode) static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (vma->vm_flags & VM_MAYSHARE) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; @@ -449,8 +449,8 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma) static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); - VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); + VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); @@ -458,15 +458,15 @@ static void 
set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); - VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); + VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); return (get_vma_private_data(vma) & flag) != 0; } @@ -474,7 +474,7 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (!(vma->vm_flags & VM_MAYSHARE)) vma->vm_private_data = (void *)0; } diff --git a/mm/interval_tree.c b/mm/interval_tree.c index 4a5822a586e6..8da581fa9060 100644 --- a/mm/interval_tree.c +++ b/mm/interval_tree.c @@ -34,7 +34,7 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *parent; unsigned long last = vma_last_pgoff(node); - VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev)); + VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); if (!prev->shared.linear.rb.rb_right) { parent = prev; diff --git a/mm/mlock.c b/mm/mlock.c index ce84cb0b83ef..d5d09d0786ec 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -233,8 +233,8 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); - VM_BUG_ON(start < vma->vm_start); - VM_BUG_ON(end > vma->vm_end); + VM_BUG_ON_VMA(start < vma->vm_start, vma); + VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); gup_flags = FOLL_TOUCH | FOLL_MLOCK; diff --git a/mm/mmap.c b/mm/mmap.c index 7ff38f1a66ec..69d4c5199fd8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -786,8 +786,8 @@ again: remove_next = 1 + (end > next->vm_end); if (!anon_vma && adjust_next) anon_vma = next->anon_vma; if (anon_vma) { - VM_BUG_ON(adjust_next && next->anon_vma && - anon_vma != next->anon_vma); + VM_BUG_ON_VMA(adjust_next && next->anon_vma && + anon_vma != next->anon_vma, next); anon_vma_lock_write(anon_vma); anon_vma_interval_tree_pre_update_vma(vma); if (adjust_next) @@ -2848,7 +2848,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. 
*/ - VM_BUG_ON(faulted_in_anon_vma); + VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); diff --git a/mm/mremap.c b/mm/mremap.c index 05f1180e9f21..89e45d8a983a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -195,7 +195,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (pmd_trans_huge(*old_pmd)) { int err = 0; if (extent == HPAGE_PMD_SIZE) { - VM_BUG_ON(vma->vm_file || !vma->anon_vma); + VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, + vma); /* See comment in move_ptes() */ if (need_rmap_locks) anon_vma_lock_write(vma->anon_vma); diff --git a/mm/rmap.c b/mm/rmap.c index bc74e0012809..116a5053415b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -527,7 +527,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) unsigned long address = __vma_address(page, vma); /* page should be within @vma mapping range */ - VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); return address; } @@ -897,7 +897,7 @@ void page_move_anon_rmap(struct page *page, struct anon_vma *anon_vma = vma->anon_vma; VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON(!anon_vma); + VM_BUG_ON_VMA(!anon_vma, vma); VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; @@ -1024,7 +1024,7 @@ void do_page_add_anon_rmap(struct page *page, void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { - VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ if (PageTransHuge(page)) @@ -1670,7 +1670,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_mutex. */ - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); if (!mapping) return ret; From 7ade3c997208566c5bf50ece8fc319a8caf0d41a Mon Sep 17 00:00:00 2001 From: Weijie Yang Date: Thu, 9 Oct 2014 15:28:12 -0700 Subject: [PATCH 102/164] mm: page_alloc: avoid wakeup kswapd on the unintended node When entering the page_alloc slowpath, we wakeup kswapd on every pgdat according to the zonelist and high_zoneidx. However, this doesn't take nodemask into account, and could prematurely wakeup kswapd on some unintended nodes. This patch uses for_each_zone_zonelist_nodemask() instead of for_each_zone_zonelist() in wake_all_kswapds() to avoid the above situation. 
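As a minimal sketch of the nodemask-aware wakeup loop described above (illustrative only; it assumes the standard <linux/mmzone.h> zonelist iterator and the wakeup_kswapd() helper, and the authoritative change is the wake_all_kswapds() hunk in the diff that follows):

	#include <linux/mmzone.h>
	#include <linux/nodemask.h>
	#include <linux/swap.h>		/* wakeup_kswapd() */

	/* Sketch only: visit just the zones on nodes permitted by @nodemask. */
	static void wake_kswapds_sketch(unsigned int order, struct zonelist *zonelist,
					enum zone_type high_zoneidx,
					struct zone *preferred_zone,
					nodemask_t *nodemask)
	{
		struct zoneref *z;
		struct zone *zone;

		for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask)
			wakeup_kswapd(zone, order, zone_idx(preferred_zone));
	}

A NULL nodemask leaves the iteration unfiltered, i.e. it degenerates to the old for_each_zone_zonelist() behaviour, so callers without a nodemask are unaffected.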
Signed-off-by: Weijie Yang Acked-by: Mel Gorman Acked-by: Johannes Weiner Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3a950144f80b..ae2f8474273c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2471,12 +2471,14 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, static void wake_all_kswapds(unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, - struct zone *preferred_zone) + struct zone *preferred_zone, + nodemask_t *nodemask) { struct zoneref *z; struct zone *zone; - for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) + for_each_zone_zonelist_nodemask(zone, z, zonelist, + high_zoneidx, nodemask) wakeup_kswapd(zone, order, zone_idx(preferred_zone)); } @@ -2574,7 +2576,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, restart: if (!(gfp_mask & __GFP_NO_KSWAPD)) - wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone); + wake_all_kswapds(order, zonelist, high_zoneidx, + preferred_zone, nodemask); /* * OK, we're below the kswapd watermark and have kicked background From 7c809968ffa92d41baaa9054e897436480179b20 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Thu, 9 Oct 2014 15:28:15 -0700 Subject: [PATCH 103/164] mm/page-writeback.c: use min3/max3 macros to avoid shadow warnings Nested calls to min/max functions result in shadow warnings in W=2 builds. Avoid the warning by using the min3 and max3 macros to get the min/max of 3 values instead of nested calls. Signed-off-by: Mark Rustad Signed-off-by: Jeff Kirsher Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 91d73ef1744d..35ca7102d421 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1075,13 +1075,13 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, } if (dirty < setpoint) { - x = min(bdi->balanced_dirty_ratelimit, - min(balanced_dirty_ratelimit, task_ratelimit)); + x = min3(bdi->balanced_dirty_ratelimit, + balanced_dirty_ratelimit, task_ratelimit); if (dirty_ratelimit < x) step = x - dirty_ratelimit; } else { - x = max(bdi->balanced_dirty_ratelimit, - max(balanced_dirty_ratelimit, task_ratelimit)); + x = max3(bdi->balanced_dirty_ratelimit, + balanced_dirty_ratelimit, task_ratelimit); if (dirty_ratelimit > x) step = dirty_ratelimit - x; } From 5705465174686d007473e017b76c4b64b44aa690 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:28:17 -0700 Subject: [PATCH 104/164] mm: clean up zone flags Page reclaim tests zone_is_reclaim_dirty(), but the site that actually sets this state does zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY), sending the reader through layers indirection just to track down a simple bit. Remove all zone flag wrappers and just use bitops against zone->flags directly. It's just as readable and the lines are barely any longer. Also rename ZONE_TAIL_LRU_DIRTY to ZONE_DIRTY to match ZONE_WRITEBACK, and remove the zone_flags_t typedef. 
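For illustration, the call-site pattern after the conversion looks like the sketch below (not part of the diff itself): the wrapper is gone, the bit is manipulated with plain bitops on zone->flags, and the flag carries the shorter ZONE_DIRTY name.

	#include <linux/bitops.h>
	#include <linux/mmzone.h>

	/* Sketch only: open-coded bitops replace the old zone_*_flag() wrappers. */
	static bool mark_zone_dirty_sketch(struct zone *zone)
	{
		/* was: zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY); */
		set_bit(ZONE_DIRTY, &zone->flags);

		/* was: return zone_is_reclaim_dirty(zone); */
		return test_bit(ZONE_DIRTY, &zone->flags);
	}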
Signed-off-by: Johannes Weiner Acked-by: David Rientjes Acked-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 51 +++--------------------------------------- mm/backing-dev.c | 2 +- mm/oom_kill.c | 6 ++--- mm/page_alloc.c | 8 +++---- mm/vmscan.c | 28 +++++++++++------------ 5 files changed, 25 insertions(+), 70 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 318df7051850..48bf12ef6620 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -521,13 +521,13 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; } ____cacheline_internodealigned_in_smp; -typedef enum { +enum zone_flags { ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ ZONE_CONGESTED, /* zone has many dirty pages backed by * a congested BDI */ - ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found + ZONE_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. */ @@ -535,52 +535,7 @@ typedef enum { * many pages under writeback */ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ -} zone_flags_t; - -static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) -{ - set_bit(flag, &zone->flags); -} - -static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) -{ - return test_and_set_bit(flag, &zone->flags); -} - -static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) -{ - clear_bit(flag, &zone->flags); -} - -static inline int zone_is_reclaim_congested(const struct zone *zone) -{ - return test_bit(ZONE_CONGESTED, &zone->flags); -} - -static inline int zone_is_reclaim_dirty(const struct zone *zone) -{ - return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags); -} - -static inline int zone_is_reclaim_writeback(const struct zone *zone) -{ - return test_bit(ZONE_WRITEBACK, &zone->flags); -} - -static inline int zone_is_reclaim_locked(const struct zone *zone) -{ - return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); -} - -static inline int zone_is_fair_depleted(const struct zone *zone) -{ - return test_bit(ZONE_FAIR_DEPLETED, &zone->flags); -} - -static inline int zone_is_oom_locked(const struct zone *zone) -{ - return test_bit(ZONE_OOM_LOCKED, &zone->flags); -} +}; static inline unsigned long zone_end_pfn(const struct zone *zone) { diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1706cbbdf5f0..b27714f1b40f 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -631,7 +631,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout) * of sleeping on the congestion queue */ if (atomic_read(&nr_bdi_congested[sync]) == 0 || - !zone_is_reclaim_congested(zone)) { + !test_bit(ZONE_CONGESTED, &zone->flags)) { cond_resched(); /* In case we scheduled, work out time remaining */ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1e11df8fa7ec..bbf405a3a18f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -565,7 +565,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask) spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) - if (zone_is_oom_locked(zone)) { + if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) { ret = false; goto out; } @@ -575,7 +575,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask) * call to oom_zonelist_trylock() doesn't succeed when it shouldn't. 
*/ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) - zone_set_flag(zone, ZONE_OOM_LOCKED); + set_bit(ZONE_OOM_LOCKED, &zone->flags); out: spin_unlock(&zone_scan_lock); @@ -594,7 +594,7 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask) spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) - zone_clear_flag(zone, ZONE_OOM_LOCKED); + clear_bit(ZONE_OOM_LOCKED, &zone->flags); spin_unlock(&zone_scan_lock); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ae2f8474273c..f3769f0fce3c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1614,8 +1614,8 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && - !zone_is_fair_depleted(zone)) - zone_set_flag(zone, ZONE_FAIR_DEPLETED); + !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) + set_bit(ZONE_FAIR_DEPLETED, &zone->flags); __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); @@ -1935,7 +1935,7 @@ static void reset_alloc_batches(struct zone *preferred_zone) mod_zone_page_state(zone, NR_ALLOC_BATCH, high_wmark_pages(zone) - low_wmark_pages(zone) - atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); - zone_clear_flag(zone, ZONE_FAIR_DEPLETED); + clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); } while (zone++ != preferred_zone); } @@ -1986,7 +1986,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, if (alloc_flags & ALLOC_FAIR) { if (!zone_local(preferred_zone, zone)) break; - if (zone_is_fair_depleted(zone)) { + if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { nr_fair_skipped++; continue; } diff --git a/mm/vmscan.c b/mm/vmscan.c index af72fe8e8d74..06123f20a326 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Case 1 above */ if (current_is_kswapd() && PageReclaim(page) && - zone_is_reclaim_writeback(zone)) { + test_bit(ZONE_WRITEBACK, &zone->flags)) { nr_immediate++; goto keep_locked; @@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, */ if (page_is_file_cache(page) && (!current_is_kswapd() || - !zone_is_reclaim_dirty(zone))) { + !test_bit(ZONE_DIRTY, &zone->flags))) { /* * Immediately reclaim when written back. * Similar in principal to deactivate_page() @@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * are encountered in the nr_immediate check below. */ if (nr_writeback && nr_writeback == nr_taken) - zone_set_flag(zone, ZONE_WRITEBACK); + set_bit(ZONE_WRITEBACK, &zone->flags); /* * memcg will stall in page writeback so only consider forcibly @@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * backed by a congested BDI and wait_iff_congested will stall. */ if (nr_dirty && nr_dirty == nr_congested) - zone_set_flag(zone, ZONE_CONGESTED); + set_bit(ZONE_CONGESTED, &zone->flags); /* * If dirty pages are scanned that are not queued for IO, it * implies that flushers are not keeping up. In this case, flag - * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing - * pages from reclaim context. + * the zone ZONE_DIRTY and kswapd will start writing pages from + * reclaim context. 
*/ if (nr_unqueued_dirty == nr_taken) - zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY); + set_bit(ZONE_DIRTY, &zone->flags); /* * If kswapd scans pages marked marked for immediate @@ -2984,7 +2984,7 @@ static bool kswapd_shrink_zone(struct zone *zone, /* Account for the number of pages attempted to reclaim */ *nr_attempted += sc->nr_to_reclaim; - zone_clear_flag(zone, ZONE_WRITEBACK); + clear_bit(ZONE_WRITEBACK, &zone->flags); /* * If a zone reaches its high watermark, consider it to be no longer @@ -2994,8 +2994,8 @@ static bool kswapd_shrink_zone(struct zone *zone, */ if (zone_reclaimable(zone) && zone_balanced(zone, testorder, 0, classzone_idx)) { - zone_clear_flag(zone, ZONE_CONGESTED); - zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY); + clear_bit(ZONE_CONGESTED, &zone->flags); + clear_bit(ZONE_DIRTY, &zone->flags); } return sc->nr_scanned >= sc->nr_to_reclaim; @@ -3086,8 +3086,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, * If balanced, clear the dirty and congested * flags */ - zone_clear_flag(zone, ZONE_CONGESTED); - zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY); + clear_bit(ZONE_CONGESTED, &zone->flags); + clear_bit(ZONE_DIRTY, &zone->flags); } } @@ -3714,11 +3714,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; - if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) + if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) return ZONE_RECLAIM_NOSCAN; ret = __zone_reclaim(zone, gfp_mask, order); - zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); + clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); From ff26f70f4323ffe332ab6a5b2550f687bbd15326 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:28:19 -0700 Subject: [PATCH 105/164] mm/mmap.c: clean up CONFIG_DEBUG_VM_RB checks - be consistent in printing the test which failed - one message was actually wrong (aa) - don't print second bogus warning if browse_rb() failed Cc: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 69d4c5199fd8..c9bc285df255 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -368,16 +368,18 @@ static int browse_rb(struct rb_root *root) struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); if (vma->vm_start < prev) { - pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev); + pr_emerg("vm_start %lx < prev %lx\n", + vma->vm_start, prev); bug = 1; } if (vma->vm_start < pend) { - pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend); + pr_emerg("vm_start %lx < pend %lx\n", + vma->vm_start, pend); bug = 1; } if (vma->vm_start > vma->vm_end) { - pr_emerg("vm_end %lx < vm_start %lx\n", - vma->vm_end, vma->vm_start); + pr_emerg("vm_start %lx > vm_end %lx\n", + vma->vm_start, vma->vm_end); bug = 1; } if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { @@ -419,8 +421,10 @@ static void validate_mm(struct mm_struct *mm) int i = 0; unsigned long highest_address = 0; struct vm_area_struct *vma = mm->mmap; + while (vma) { struct anon_vma_chain *avc; + vma_lock_anon_vma(vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); @@ -435,12 +439,13 @@ static void validate_mm(struct mm_struct *mm) } if (highest_address != mm->highest_vm_end) { pr_emerg("mm->highest_vm_end %lx, found %lx\n", - mm->highest_vm_end, highest_address); 
+ mm->highest_vm_end, highest_address); bug = 1; } i = browse_rb(&mm->mm_rb); if (i != mm->map_count) { - pr_emerg("map_count %d rb %d\n", mm->map_count, i); + if (i != -1) + pr_emerg("map_count %d rb %d\n", mm->map_count, i); bug = 1; } BUG_ON(bug); From b8b2d8253236331c3b26189f34e73f2af89ca982 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Thu, 9 Oct 2014 15:28:21 -0700 Subject: [PATCH 106/164] mm/compaction.c: fix warning of 'flags' may be used uninitialized C mm/compaction.o mm/compaction.c: In function isolate_freepages_block: mm/compaction.c:364:37: warning: flags may be used uninitialized in this function [-Wmaybe-uninitialized] && compact_unlock_should_abort(&cc->zone->lock, flags, ^ Signed-off-by: Xiubo Li Cc: Vlastimil Babka Cc: Mel Gorman Cc: David Rientjes Cc: Minchan Kim Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 15163b4b35ab..b9972c0fd917 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -344,7 +344,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, { int nr_scanned = 0, total_isolated = 0; struct page *cursor, *valid_page = NULL; - unsigned long flags; + unsigned long flags = 0; bool locked = false; unsigned long blockpfn = *start_pfn; @@ -570,7 +570,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, unsigned long nr_scanned = 0, nr_isolated = 0; struct list_head *migratelist = &cc->migratepages; struct lruvec *lruvec; - unsigned long flags; + unsigned long flags = 0; bool locked = false; struct page *page = NULL, *valid_page = NULL; From 934f3072c17cc8886f4c043b47eeeb1b12f8de33 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Thu, 9 Oct 2014 15:28:23 -0700 Subject: [PATCH 107/164] mm: clear __GFP_FS when PF_MEMALLOC_NOIO is set commit 21caf2fc1931 ("mm: teach mm by current context info to not do I/O during memory allocation") introduces PF_MEMALLOC_NOIO flag to avoid doing I/O inside memory allocation, __GFP_IO is cleared when this flag is set, but __GFP_FS implies __GFP_IO, it should also be cleared. Or it may still run into I/O, like in superblock shrinker. And this will make the kernel run into the deadlock case described in that commit. See Dave Chinner's comment about io in superblock shrinker: Filesystem shrinkers do indeed perform IO from the superblock shrinker and have for years. Even clean inodes can require IO before they can be freed - e.g. on an orphan list, need truncation of post-eof blocks, need to wait for ordered operations to complete before it can be freed, etc. IOWs, Ext4, btrfs and XFS all can issue and/or block on arbitrary amounts of IO in the superblock shrinker context. XFS, in particular, has been doing transactions and IO from the VFS inode cache shrinker since it was first introduced.... Fix this by clearing __GFP_FS in memalloc_noio_flags(), this function has masked all the gfp_mask that will be passed into fs for the processes setting PF_MEMALLOC_NOIO in the direct reclaim path. 
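A sketch of the caller-side pattern this change interacts with (illustrative only; memalloc_noio_save()/memalloc_noio_restore() are the existing <linux/sched.h> helpers): once the task is marked PF_MEMALLOC_NOIO, any direct reclaim triggered by its allocations is now masked to drop both __GFP_IO and __GFP_FS, so it cannot re-enter the filesystem.

	#include <linux/sched.h>
	#include <linux/slab.h>

	/* Sketch only: a path that must not recurse into FS/IO via reclaim. */
	static void *alloc_no_fs_recursion(size_t size)
	{
		unsigned int noio_flag = memalloc_noio_save();
		/* reclaim entered from here behaves as if GFP_NOIO was used */
		void *p = kmalloc(size, GFP_KERNEL);

		memalloc_noio_restore(noio_flag);
		return p;
	}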
v1 thread at: https://lkml.org/lkml/2014/9/3/32 Signed-off-by: Junxiao Bi Cc: Dave Chinner Cc: joyce.xue Cc: Ming Lei Cc: Trond Myklebust Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c6353d9e63a..5e63ba59258c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1935,11 +1935,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */ +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. + */ static inline gfp_t memalloc_noio_flags(gfp_t flags) { if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~__GFP_IO; + flags &= ~(__GFP_IO | __GFP_FS); return flags; } From b246d3d11e9c04f76a4fd6aae9c61da82bba0afb Mon Sep 17 00:00:00 2001 From: Xue jiufei Date: Thu, 9 Oct 2014 15:28:26 -0700 Subject: [PATCH 108/164] ocfs2: fix a deadlock while o2net_wq doing direct memory reclaim Fix a deadlock problem caused by direct memory reclaim in o2net_wq. The situation is as follows: 1) Receive a connect message from another node, node queues a work_struct o2net_listen_work. 2) o2net_wq processes this work and call the following functions: o2net_wq -> o2net_accept_one -> sock_create_lite -> sock_alloc() -> kmem_cache_alloc with GFP_KERNEL -> ____cache_alloc_node ->__alloc_pages_nodemask -> do_try_to_free_pages -> shrink_slab -> evict -> ocfs2_evict_inode -> ocfs2_drop_lock -> dlmunlock -> o2net_send_message_vec then o2net_wq wait for the unlock reply from master. 3) tcp layer received the reply, call o2net_data_ready() and queue sc_rx_work, waiting o2net_wq to process this work. 4) o2net_wq is a single thread workqueue, it process the work one by one. Right now it is still doing o2net_listen_work and cannot handle sc_rx_work. so we deadlock. Junxiao Bi's patch "mm: clear __GFP_FS when PF_MEMALLOC_NOIO is set" (http://ozlabs.org/~akpm/mmots/broken-out/mm-clear-__gfp_fs-when-pf_memalloc_noio-is-set.patch) clears __GFP_FS in memalloc_noio_flags() besides __GFP_IO. We use memalloc_noio_save() to set process flag PF_MEMALLOC_NOIO so that all allocations done by this process are done as if GFP_NOIO was specified. We are not reentering filesystem while doing memory reclaim. Signed-off-by: joyce.xue Cc: Junxiao Bi Cc: Joel Becker Cc: Mark Fasheh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/cluster/tcp.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 509e6d5415e2..97de0fbd9f78 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -1601,7 +1601,15 @@ static void o2net_start_connect(struct work_struct *work) struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; int ret = 0, stop; unsigned int timeout; + unsigned int noio_flag; + /* + * sock_create allocates the sock with GFP_KERNEL. We must set + * per-process flag PF_MEMALLOC_NOIO so that all allocations done + * by this process are done as if GFP_NOIO was specified. So we + * are not reentering filesystem while doing memory reclaim. 
+ */ + noio_flag = memalloc_noio_save(); /* if we're greater we initiate tx, otherwise we accept */ if (o2nm_this_node() <= o2net_num_from_nn(nn)) goto out; @@ -1710,6 +1718,7 @@ static void o2net_start_connect(struct work_struct *work) if (mynode) o2nm_node_put(mynode); + memalloc_noio_restore(noio_flag); return; } @@ -1836,6 +1845,15 @@ static int o2net_accept_one(struct socket *sock, int *more) struct o2nm_node *local_node = NULL; struct o2net_sock_container *sc = NULL; struct o2net_node *nn; + unsigned int noio_flag; + + /* + * sock_create_lite allocates the sock with GFP_KERNEL. We must set + * per-process flag PF_MEMALLOC_NOIO so that all allocations done + * by this process are done as if GFP_NOIO was specified. So we + * are not reentering filesystem while doing memory reclaim. + */ + noio_flag = memalloc_noio_save(); BUG_ON(sock == NULL); *more = 0; @@ -1952,6 +1970,8 @@ static int o2net_accept_one(struct socket *sock, int *more) o2nm_node_put(local_node); if (sc) sc_put(sc); + + memalloc_noio_restore(noio_flag); return ret; } From 97ee4ba7cbd30f1858f0d16911e042737c53f2ef Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 9 Oct 2014 15:28:28 -0700 Subject: [PATCH 109/164] mm: page_alloc: Make paranoid check in move_freepages a VM_BUG_ON Since 2.6.24 there has been a paranoid check in move_freepages that looks up the zone of two pages. This is a very slow path and the only time I've seen this bug trigger recently is when memory initialisation was broken during patch development. Despite the fact it's a slow path, this patch converts the check to a VM_BUG_ON anyway as it has served its purpose by now. Signed-off-by: Mel Gorman Acked-by: David Rientjes Acked-by: Rik van Riel Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f3769f0fce3c..eac31a6059c0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1015,7 +1015,7 @@ int move_freepages(struct zone *zone, * Remove at a later date when no bug reports exist related to * grouping pages by mobility */ - BUG_ON(page_zone(start_page) != page_zone(end_page)); + VM_BUG_ON(page_zone(start_page) != page_zone(end_page)); #endif for (page = start_page; page <= end_page;) { From 3193913ce62c63056bc67a6ae378beaf494afa66 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 9 Oct 2014 15:28:30 -0700 Subject: [PATCH 110/164] mm: page_alloc: default node-ordering on 64-bit NUMA, zone-ordering on 32-bit Zones are allocated by the page allocator in either node or zone order. Node ordering is preferred in terms of locality and is applied automatically in one of three cases: 1. If a node has only low memory 2. If DMA/DMA32 is a high percentage of memory 3. If low memory on a single node is greater than 70% of the node size Otherwise zone ordering is used to preserve low memory for devices that require it. Unfortunately a consequence of this is that applications running on a machine with balanced NUMA nodes will experience different performance characteristics depending on which node they happen to start from. The point of zone ordering is to protect lower zones for devices that require DMA/DMA32 memory. When NUMA was first introduced, this was critical as 32-bit NUMA machines existed and exhausting low memory triggered OOMs easily as so many allocations required low memory. 
On 64-bit machines the primary concern is devices that are 32-bit only which is less severe than the low memory exhaustion problem on 32-bit NUMA. It seems there are really few devices that depends on it. AGP -- I assume this is getting more rare but even then I think the allocations happen early in boot time where lowmem pressure is less of a problem DRM -- If the device is 32-bit only then there may be low pressure. I didn't evaluate these in detail but it looks like some of these are mobile graphics card. Not many NUMA laptops out there. DRM folk should know better though. Some TV cards -- Much demand for 32-bit capable TV cards on NUMA machines? B43 wireless card -- again not really a NUMA thing. I cannot find a good reason to incur a performance penalty on all 64-bit NUMA machines in case someone throws a brain damanged TV or graphics card in there. This patch defaults to node-ordering on 64-bit NUMA machines. I was tempted to make it default everywhere but I understand that some embedded arches may be using 32-bit NUMA where I cannot predict the consequences. The performance impact depends on the workload and the characteristics of the machine and the machine I tested on had a large Normal zone on node 0 so the impact is within the noise for the majority of tests. The allocation stats show more allocation requests were from DMA32 and local node. Running SpecJBB with multiple JVMs and automatic NUMA balancing disabled the results were specjbb 3.17.0-rc2 3.17.0-rc2 vanilla nodeorder-v1r1 Min 1 29534.00 ( 0.00%) 30020.00 ( 1.65%) Min 10 115717.00 ( 0.00%) 134038.00 ( 15.83%) Min 19 109718.00 ( 0.00%) 114186.00 ( 4.07%) Min 28 104459.00 ( 0.00%) 103639.00 ( -0.78%) Min 37 98245.00 ( 0.00%) 103756.00 ( 5.61%) Min 46 97198.00 ( 0.00%) 96197.00 ( -1.03%) Mean 1 30953.25 ( 0.00%) 31917.75 ( 3.12%) Mean 10 124432.50 ( 0.00%) 140904.00 ( 13.24%) Mean 19 116033.50 ( 0.00%) 119294.75 ( 2.81%) Mean 28 108365.25 ( 0.00%) 106879.50 ( -1.37%) Mean 37 102984.75 ( 0.00%) 106924.25 ( 3.83%) Mean 46 100783.25 ( 0.00%) 105368.50 ( 4.55%) Stddev 1 1260.38 ( 0.00%) 1109.66 ( 11.96%) Stddev 10 7434.03 ( 0.00%) 5171.91 ( 30.43%) Stddev 19 8453.84 ( 0.00%) 5309.59 ( 37.19%) Stddev 28 4184.55 ( 0.00%) 2906.63 ( 30.54%) Stddev 37 5409.49 ( 0.00%) 3192.12 ( 40.99%) Stddev 46 4521.95 ( 0.00%) 7392.52 (-63.48%) Max 1 32738.00 ( 0.00%) 32719.00 ( -0.06%) Max 10 136039.00 ( 0.00%) 148614.00 ( 9.24%) Max 19 130566.00 ( 0.00%) 127418.00 ( -2.41%) Max 28 115404.00 ( 0.00%) 111254.00 ( -3.60%) Max 37 112118.00 ( 0.00%) 111732.00 ( -0.34%) Max 46 108541.00 ( 0.00%) 116849.00 ( 7.65%) TPut 1 123813.00 ( 0.00%) 127671.00 ( 3.12%) TPut 10 497730.00 ( 0.00%) 563616.00 ( 13.24%) TPut 19 464134.00 ( 0.00%) 477179.00 ( 2.81%) TPut 28 433461.00 ( 0.00%) 427518.00 ( -1.37%) TPut 37 411939.00 ( 0.00%) 427697.00 ( 3.83%) TPut 46 403133.00 ( 0.00%) 421474.00 ( 4.55%) 3.17.0-rc2 3.17.0-rc2 vanillanodeorder-v1r1 DMA allocs 0 0 DMA32 allocs 57 1491992 Normal allocs 32543566 30026383 Movable allocs 0 0 Direct pages scanned 0 0 Kswapd pages scanned 0 0 Kswapd pages reclaimed 0 0 Direct pages reclaimed 0 0 Kswapd efficiency 100% 100% Kswapd velocity 0.000 0.000 Direct efficiency 100% 100% Direct velocity 0.000 0.000 Percentage direct scans 0% 0% Zone normal velocity 0.000 0.000 Zone dma32 velocity 0.000 0.000 Zone dma velocity 0.000 0.000 THP fault alloc 55164 52987 THP collapse alloc 139 147 THP splits 26 21 NUMA alloc hit 4169066 4250692 NUMA alloc miss 0 0 Note that there were more DMA32 allocations with the patch applied. 
In this particular case there was no difference in numa_hit and numa_miss. The expectation is that DMA32 was being used at the low watermark instead of falling into the slow path. kswapd was not woken but it's not worken for THP allocations. On 32-bit, this patch defaults to zone-ordering as low memory depletion can be a serious problem on 32-bit large memory machines. If the default ordering was node then processes on node 0 will deplete the Normal zone due to normal activity. The problem is worse if CONFIG_HIGHPTE is not set. If combined with large amounts of dirty/writeback pages in Normal zone then there is also a high risk of OOM. The heuristics are removed as it's not clear they were ever important on 32-bit. They were only relevant for setting node-ordering on 64-bit. Signed-off-by: Mel Gorman Acked-by: Johannes Weiner Cc: Rik van Riel Cc: David Rientjes Cc: KAMEZAWA Hiroyuki Cc: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 78 +++++++++++++------------------------------------ 1 file changed, 20 insertions(+), 58 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eac31a6059c0..bfb73e025e02 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3614,68 +3614,30 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) zonelist->_zonerefs[pos].zone_idx = 0; } +#if defined(CONFIG_64BIT) +/* + * Devices that require DMA32/DMA are relatively rare and do not justify a + * penalty to every machine in case the specialised case applies. Default + * to Node-ordering on 64-bit NUMA machines + */ +static int default_zonelist_order(void) +{ + return ZONELIST_ORDER_NODE; +} +#else +/* + * On 32-bit, the Normal zone needs to be preserved for allocations accessible + * by the kernel. If processes running on node 0 deplete the low memory zone + * then reclaim will occur more frequency increasing stalls and potentially + * be easier to OOM if a large percentage of the zone is under writeback or + * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set. + * Hence, default to zone ordering on 32-bit. + */ static int default_zonelist_order(void) { - int nid, zone_type; - unsigned long low_kmem_size, total_size; - struct zone *z; - int average_size; - /* - * ZONE_DMA and ZONE_DMA32 can be very small area in the system. - * If they are really small and used heavily, the system can fall - * into OOM very easily. - * This function detect ZONE_DMA/DMA32 size and configures zone order. - */ - /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */ - low_kmem_size = 0; - total_size = 0; - for_each_online_node(nid) { - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { - z = &NODE_DATA(nid)->node_zones[zone_type]; - if (populated_zone(z)) { - if (zone_type < ZONE_NORMAL) - low_kmem_size += z->managed_pages; - total_size += z->managed_pages; - } else if (zone_type == ZONE_NORMAL) { - /* - * If any node has only lowmem, then node order - * is preferred to allow kernel allocations - * locally; otherwise, they can easily infringe - * on other nodes when there is an abundance of - * lowmem available to allocate from. - */ - return ZONELIST_ORDER_NODE; - } - } - } - if (!low_kmem_size || /* there are no DMA area. */ - low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ - return ZONELIST_ORDER_NODE; - /* - * look into each node's config. - * If there is a node whose DMA/DMA32 memory is very big area on - * local memory, NODE_ORDER may be suitable. 
- */ - average_size = total_size / - (nodes_weight(node_states[N_MEMORY]) + 1); - for_each_online_node(nid) { - low_kmem_size = 0; - total_size = 0; - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { - z = &NODE_DATA(nid)->node_zones[zone_type]; - if (populated_zone(z)) { - if (zone_type < ZONE_NORMAL) - low_kmem_size += z->present_pages; - total_size += z->present_pages; - } - } - if (low_kmem_size && - total_size > average_size && /* ignore small node */ - low_kmem_size > total_size * 70/100) - return ZONELIST_ORDER_NODE; - } return ZONELIST_ORDER_ZONE; } +#endif /* CONFIG_64BIT */ static void set_zonelist_order(void) { From 81d0fa623c5b8dbd5279d9713094b0f9b0a00fb4 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Thu, 9 Oct 2014 15:28:32 -0700 Subject: [PATCH 111/164] mm: softdirty: unmapped addresses between VMAs are clean If a /proc/pid/pagemap read spans a [VMA, an unmapped region, then a VM_SOFTDIRTY VMA], the virtual pages in the unmapped region are reported as softdirty. Here's a program to demonstrate the bug: int main() { const uint64_t PAGEMAP_SOFTDIRTY = 1ul << 55; uint64_t pme[3]; int fd = open("/proc/self/pagemap", O_RDONLY);; char *m = mmap(NULL, 3 * getpagesize(), PROT_READ, MAP_ANONYMOUS | MAP_SHARED, -1, 0); munmap(m + getpagesize(), getpagesize()); pread(fd, pme, 24, (unsigned long) m / getpagesize() * 8); assert(pme[0] & PAGEMAP_SOFTDIRTY); /* passes */ assert(!(pme[1] & PAGEMAP_SOFTDIRTY)); /* fails */ assert(pme[2] & PAGEMAP_SOFTDIRTY); /* passes */ return 0; } (Note that all pages in new VMAs are softdirty until cleared). Tested: Used the program given above. I'm going to include this code in a selftest in the future. [n-horiguchi@ah.jp.nec.com: prevent pagemap_pte_range() from overrunning] Signed-off-by: Peter Feiner Cc: "Kirill A. Shutemov" Cc: Cyrill Gorcunov Cc: Pavel Emelyanov Cc: Jamie Liu Cc: Hugh Dickins Cc: Naoya Horiguchi Signed-off-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 59 ++++++++++++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 20 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 1acec26a3758..b7a7dc963a35 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1027,7 +1027,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, spinlock_t *ptl; pte_t *pte; int err = 0; - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); /* find the first VMA at or above 'addr' */ vma = find_vma(walk->mm, addr); @@ -1041,6 +1040,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, for (; addr != end; addr += PAGE_SIZE) { unsigned long offset; + pagemap_entry_t pme; offset = (addr & ~PAGEMAP_WALK_MASK) >> PAGE_SHIFT; @@ -1055,32 +1055,51 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (pmd_trans_unstable(pmd)) return 0; - for (; addr != end; addr += PAGE_SIZE) { - int flags2; - /* check to see if we've left 'vma' behind - * and need a new, higher one */ - if (vma && (addr >= vma->vm_end)) { - vma = find_vma(walk->mm, addr); - if (vma && (vma->vm_flags & VM_SOFTDIRTY)) - flags2 = __PM_SOFT_DIRTY; - else - flags2 = 0; - pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2)); + while (1) { + /* End of address space hole, which we mark as non-present. 
*/ + unsigned long hole_end; + + if (vma) + hole_end = min(end, vma->vm_start); + else + hole_end = end; + + for (; addr < hole_end; addr += PAGE_SIZE) { + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); + + err = add_to_pagemap(addr, &pme, pm); + if (err) + return err; } - /* check that 'vma' actually covers this address, - * and that it isn't a huge page vma */ - if (vma && (vma->vm_start <= addr) && - !is_vm_hugetlb_page(vma)) { + if (!vma || vma->vm_start >= end) + break; + /* + * We can't possibly be in a hugetlb VMA. In general, + * for a mm_walk with a pmd_entry and a hugetlb_entry, + * the pmd_entry can only be called on addresses in a + * hugetlb if the walk starts in a non-hugetlb VMA and + * spans a hugepage VMA. Since pagemap_read walks are + * PMD-sized and PMD-aligned, this will never be true. + */ + BUG_ON(is_vm_hugetlb_page(vma)); + + /* Addresses in the VMA. */ + for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { + pagemap_entry_t pme; pte = pte_offset_map(pmd, addr); pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); - /* unmap before userspace copy */ pte_unmap(pte); + err = add_to_pagemap(addr, &pme, pm); + if (err) + return err; } - err = add_to_pagemap(addr, &pme, pm); - if (err) - return err; + + if (addr == end) + break; + + vma = find_vma(walk->mm, addr); } cond_resched(); From 82742a3a5152195edd69528c0c9a1a6fb9caa293 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:34 -0700 Subject: [PATCH 112/164] mm: move debug code out of page_alloc.c dump_page() and dump_vma() are not specific to page_alloc.c, move them out so page_alloc.c won't turn into the unofficial debug repository. Signed-off-by: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/Makefile | 2 +- mm/debug.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 160 ----------------------------------------------- 3 files changed, 163 insertions(+), 161 deletions(-) create mode 100644 mm/debug.c diff --git a/mm/Makefile b/mm/Makefile index fe7a053c0f45..f8ed7ab417b1 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -18,7 +18,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o balloon_compaction.o vmacache.o \ interval_tree.o list_lru.o workingset.o \ - iov_iter.o $(mmu-y) + iov_iter.o debug.o $(mmu-y) obj-y += init-mm.o diff --git a/mm/debug.c b/mm/debug.c new file mode 100644 index 000000000000..697df9050193 --- /dev/null +++ b/mm/debug.c @@ -0,0 +1,162 @@ +#include +#include +#include +#include + +static const struct trace_print_flags pageflag_names[] = { + {1UL << PG_locked, "locked" }, + {1UL << PG_error, "error" }, + {1UL << PG_referenced, "referenced" }, + {1UL << PG_uptodate, "uptodate" }, + {1UL << PG_dirty, "dirty" }, + {1UL << PG_lru, "lru" }, + {1UL << PG_active, "active" }, + {1UL << PG_slab, "slab" }, + {1UL << PG_owner_priv_1, "owner_priv_1" }, + {1UL << PG_arch_1, "arch_1" }, + {1UL << PG_reserved, "reserved" }, + {1UL << PG_private, "private" }, + {1UL << PG_private_2, "private_2" }, + {1UL << PG_writeback, "writeback" }, +#ifdef CONFIG_PAGEFLAGS_EXTENDED + {1UL << PG_head, "head" }, + {1UL << PG_tail, "tail" }, +#else + {1UL << PG_compound, "compound" }, +#endif + {1UL << PG_swapcache, "swapcache" }, + {1UL << PG_mappedtodisk, "mappedtodisk" }, + {1UL << PG_reclaim, "reclaim" }, + {1UL << PG_swapbacked, "swapbacked" }, + {1UL << PG_unevictable, "unevictable" }, +#ifdef CONFIG_MMU + {1UL << PG_mlocked, "mlocked" }, +#endif +#ifdef CONFIG_ARCH_USES_PG_UNCACHED + 
{1UL << PG_uncached, "uncached" }, +#endif +#ifdef CONFIG_MEMORY_FAILURE + {1UL << PG_hwpoison, "hwpoison" }, +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + {1UL << PG_compound_lock, "compound_lock" }, +#endif +}; + +static void dump_flags(unsigned long flags, + const struct trace_print_flags *names, int count) +{ + const char *delim = ""; + unsigned long mask; + int i; + + printk(KERN_ALERT "flags: %#lx(", flags); + + /* remove zone id */ + flags &= (1UL << NR_PAGEFLAGS) - 1; + + for (i = 0; i < count && flags; i++) { + + mask = names[i].mask; + if ((flags & mask) != mask) + continue; + + flags &= ~mask; + printk("%s%s", delim, names[i].name); + delim = "|"; + } + + /* check for left over flags */ + if (flags) + printk("%s%#lx", delim, flags); + + printk(")\n"); +} + +void dump_page_badflags(struct page *page, const char *reason, + unsigned long badflags) +{ + printk(KERN_ALERT + "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", + page, atomic_read(&page->_count), page_mapcount(page), + page->mapping, page->index); + BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); + dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names)); + if (reason) + pr_alert("page dumped because: %s\n", reason); + if (page->flags & badflags) { + pr_alert("bad because of flags:\n"); + dump_flags(page->flags & badflags, + pageflag_names, ARRAY_SIZE(pageflag_names)); + } + mem_cgroup_print_bad_page(page); +} + +void dump_page(struct page *page, const char *reason) +{ + dump_page_badflags(page, reason, 0); +} +EXPORT_SYMBOL(dump_page); + +#ifdef CONFIG_DEBUG_VM + +static const struct trace_print_flags vmaflags_names[] = { + {VM_READ, "read" }, + {VM_WRITE, "write" }, + {VM_EXEC, "exec" }, + {VM_SHARED, "shared" }, + {VM_MAYREAD, "mayread" }, + {VM_MAYWRITE, "maywrite" }, + {VM_MAYEXEC, "mayexec" }, + {VM_MAYSHARE, "mayshare" }, + {VM_GROWSDOWN, "growsdown" }, + {VM_PFNMAP, "pfnmap" }, + {VM_DENYWRITE, "denywrite" }, + {VM_LOCKED, "locked" }, + {VM_IO, "io" }, + {VM_SEQ_READ, "seqread" }, + {VM_RAND_READ, "randread" }, + {VM_DONTCOPY, "dontcopy" }, + {VM_DONTEXPAND, "dontexpand" }, + {VM_ACCOUNT, "account" }, + {VM_NORESERVE, "noreserve" }, + {VM_HUGETLB, "hugetlb" }, + {VM_NONLINEAR, "nonlinear" }, +#if defined(CONFIG_X86) + {VM_PAT, "pat" }, +#elif defined(CONFIG_PPC) + {VM_SAO, "sao" }, +#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) + {VM_GROWSUP, "growsup" }, +#elif !defined(CONFIG_MMU) + {VM_MAPPED_COPY, "mappedcopy" }, +#else + {VM_ARCH_1, "arch_1" }, +#endif + {VM_DONTDUMP, "dontdump" }, +#ifdef CONFIG_MEM_SOFT_DIRTY + {VM_SOFTDIRTY, "softdirty" }, +#endif + {VM_MIXEDMAP, "mixedmap" }, + {VM_HUGEPAGE, "hugepage" }, + {VM_NOHUGEPAGE, "nohugepage" }, + {VM_MERGEABLE, "mergeable" }, +}; + +void dump_vma(const struct vm_area_struct *vma) +{ + printk(KERN_ALERT + "vma %p start %p end %p\n" + "next %p prev %p mm %p\n" + "prot %lx anon_vma %p vm_ops %p\n" + "pgoff %lx file %p private_data %p\n", + vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, + vma->vm_prev, vma->vm_mm, + (unsigned long)pgprot_val(vma->vm_page_prot), + vma->anon_vma, vma->vm_ops, vma->vm_pgoff, + vma->vm_file, vma->vm_private_data); + dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); +} +EXPORT_SYMBOL(dump_vma); + +#endif /* CONFIG_DEBUG_VM */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bfb73e025e02..c9710c9bbee2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -53,8 +53,6 @@ #include #include #include -#include -#include #include #include 
#include @@ -6550,161 +6548,3 @@ bool is_free_buddy_page(struct page *page) return order < MAX_ORDER; } #endif - -static const struct trace_print_flags pageflag_names[] = { - {1UL << PG_locked, "locked" }, - {1UL << PG_error, "error" }, - {1UL << PG_referenced, "referenced" }, - {1UL << PG_uptodate, "uptodate" }, - {1UL << PG_dirty, "dirty" }, - {1UL << PG_lru, "lru" }, - {1UL << PG_active, "active" }, - {1UL << PG_slab, "slab" }, - {1UL << PG_owner_priv_1, "owner_priv_1" }, - {1UL << PG_arch_1, "arch_1" }, - {1UL << PG_reserved, "reserved" }, - {1UL << PG_private, "private" }, - {1UL << PG_private_2, "private_2" }, - {1UL << PG_writeback, "writeback" }, -#ifdef CONFIG_PAGEFLAGS_EXTENDED - {1UL << PG_head, "head" }, - {1UL << PG_tail, "tail" }, -#else - {1UL << PG_compound, "compound" }, -#endif - {1UL << PG_swapcache, "swapcache" }, - {1UL << PG_mappedtodisk, "mappedtodisk" }, - {1UL << PG_reclaim, "reclaim" }, - {1UL << PG_swapbacked, "swapbacked" }, - {1UL << PG_unevictable, "unevictable" }, -#ifdef CONFIG_MMU - {1UL << PG_mlocked, "mlocked" }, -#endif -#ifdef CONFIG_ARCH_USES_PG_UNCACHED - {1UL << PG_uncached, "uncached" }, -#endif -#ifdef CONFIG_MEMORY_FAILURE - {1UL << PG_hwpoison, "hwpoison" }, -#endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - {1UL << PG_compound_lock, "compound_lock" }, -#endif -}; - -static void dump_flags(unsigned long flags, - const struct trace_print_flags *names, int count) -{ - const char *delim = ""; - unsigned long mask; - int i; - - printk(KERN_ALERT "flags: %#lx(", flags); - - /* remove zone id */ - flags &= (1UL << NR_PAGEFLAGS) - 1; - - for (i = 0; i < count && flags; i++) { - - mask = names[i].mask; - if ((flags & mask) != mask) - continue; - - flags &= ~mask; - printk("%s%s", delim, names[i].name); - delim = "|"; - } - - /* check for left over flags */ - if (flags) - printk("%s%#lx", delim, flags); - - printk(")\n"); -} - -void dump_page_badflags(struct page *page, const char *reason, - unsigned long badflags) -{ - printk(KERN_ALERT - "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", - page, atomic_read(&page->_count), page_mapcount(page), - page->mapping, page->index); - BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); - dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names)); - if (reason) - pr_alert("page dumped because: %s\n", reason); - if (page->flags & badflags) { - pr_alert("bad because of flags:\n"); - dump_flags(page->flags & badflags, - pageflag_names, ARRAY_SIZE(pageflag_names)); - } - mem_cgroup_print_bad_page(page); -} - -void dump_page(struct page *page, const char *reason) -{ - dump_page_badflags(page, reason, 0); -} -EXPORT_SYMBOL(dump_page); - -#ifdef CONFIG_DEBUG_VM - -static const struct trace_print_flags vmaflags_names[] = { - {VM_READ, "read" }, - {VM_WRITE, "write" }, - {VM_EXEC, "exec" }, - {VM_SHARED, "shared" }, - {VM_MAYREAD, "mayread" }, - {VM_MAYWRITE, "maywrite" }, - {VM_MAYEXEC, "mayexec" }, - {VM_MAYSHARE, "mayshare" }, - {VM_GROWSDOWN, "growsdown" }, - {VM_PFNMAP, "pfnmap" }, - {VM_DENYWRITE, "denywrite" }, - {VM_LOCKED, "locked" }, - {VM_IO, "io" }, - {VM_SEQ_READ, "seqread" }, - {VM_RAND_READ, "randread" }, - {VM_DONTCOPY, "dontcopy" }, - {VM_DONTEXPAND, "dontexpand" }, - {VM_ACCOUNT, "account" }, - {VM_NORESERVE, "noreserve" }, - {VM_HUGETLB, "hugetlb" }, - {VM_NONLINEAR, "nonlinear" }, -#if defined(CONFIG_X86) - {VM_PAT, "pat" }, -#elif defined(CONFIG_PPC) - {VM_SAO, "sao" }, -#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) - {VM_GROWSUP, "growsup" }, -#elif 
!defined(CONFIG_MMU) - {VM_MAPPED_COPY, "mappedcopy" }, -#else - {VM_ARCH_1, "arch_1" }, -#endif - {VM_DONTDUMP, "dontdump" }, -#ifdef CONFIG_MEM_SOFT_DIRTY - {VM_SOFTDIRTY, "softdirty" }, -#endif - {VM_MIXEDMAP, "mixedmap" }, - {VM_HUGEPAGE, "hugepage" }, - {VM_NOHUGEPAGE, "nohugepage" }, - {VM_MERGEABLE, "mergeable" }, -}; - -void dump_vma(const struct vm_area_struct *vma) -{ - printk(KERN_ALERT - "vma %p start %p end %p\n" - "next %p prev %p mm %p\n" - "prot %lx anon_vma %p vm_ops %p\n" - "pgoff %lx file %p private_data %p\n", - vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, - vma->vm_prev, vma->vm_mm, - (unsigned long)pgprot_val(vma->vm_page_prot), - vma->anon_vma, vma->vm_ops, vma->vm_pgoff, - vma->vm_file, vma->vm_private_data); - dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); -} -EXPORT_SYMBOL(dump_vma); - -#endif /* CONFIG_DEBUG_VM */ From 31c9afa6db122a5c7a7843278aaf77dd08ea6e98 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:37 -0700 Subject: [PATCH 113/164] mm: introduce VM_BUG_ON_MM Very similar to VM_BUG_ON_PAGE and VM_BUG_ON_VMA, dump struct_mm when the bug is hit. [akpm@linux-foundation.org: coding-style fixes] [mhocko@suse.cz: fix build] [mhocko@suse.cz: fix build some more] [akpm@linux-foundation.org: do strange things to avoid doing strange things for the comma separators] Signed-off-by: Sasha Levin Cc: Dave Jones Signed-off-by: Michal Hocko Cc: Valdis Kletnieks Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 10 ++++++ mm/debug.c | 78 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 569e4c8d0ebb..877ef226f90f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -5,11 +5,13 @@ struct page; struct vm_area_struct; +struct mm_struct; extern void dump_page(struct page *page, const char *reason); extern void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags); void dump_vma(const struct vm_area_struct *vma); +void dump_mm(const struct mm_struct *mm); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) @@ -27,6 +29,13 @@ void dump_vma(const struct vm_area_struct *vma); BUG(); \ } \ } while (0) +#define VM_BUG_ON_MM(cond, mm) \ + do { \ + if (unlikely(cond)) { \ + dump_mm(mm); \ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) @@ -34,6 +43,7 @@ void dump_vma(const struct vm_area_struct *vma); #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) +#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) diff --git a/mm/debug.c b/mm/debug.c index 697df9050193..5a1b6194089c 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -1,3 +1,10 @@ +/* + * mm/debug.c + * + * mm/ specific debug routines. 
+ * + */ + #include #include #include @@ -159,4 +166,75 @@ void dump_vma(const struct vm_area_struct *vma) } EXPORT_SYMBOL(dump_vma); +void dump_mm(const struct mm_struct *mm) +{ + printk(KERN_ALERT + "mm %p mmap %p seqnum %d task_size %lu\n" +#ifdef CONFIG_MMU + "get_unmapped_area %p\n" +#endif + "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" + "pgd %p mm_users %d mm_count %d nr_ptes %lu map_count %d\n" + "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" + "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n" + "start_code %lx end_code %lx start_data %lx end_data %lx\n" + "start_brk %lx brk %lx start_stack %lx\n" + "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" + "binfmt %p flags %lx core_state %p\n" +#ifdef CONFIG_AIO + "ioctx_table %p\n" +#endif +#ifdef CONFIG_MEMCG + "owner %p " +#endif + "exe_file %p\n" +#ifdef CONFIG_MMU_NOTIFIER + "mmu_notifier_mm %p\n" +#endif +#ifdef CONFIG_NUMA_BALANCING + "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + "tlb_flush_pending %d\n" +#endif + "%s", /* This is here to hold the comma */ + + mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, +#ifdef CONFIG_MMU + mm->get_unmapped_area, +#endif + mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, + mm->pgd, atomic_read(&mm->mm_users), + atomic_read(&mm->mm_count), + atomic_long_read((atomic_long_t *)&mm->nr_ptes), + mm->map_count, + mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, + mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm, + mm->start_code, mm->end_code, mm->start_data, mm->end_data, + mm->start_brk, mm->brk, mm->start_stack, + mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, + mm->binfmt, mm->flags, mm->core_state, +#ifdef CONFIG_AIO + mm->ioctx_table, +#endif +#ifdef CONFIG_MEMCG + mm->owner, +#endif + mm->exe_file, +#ifdef CONFIG_MMU_NOTIFIER + mm->mmu_notifier_mm, +#endif +#ifdef CONFIG_NUMA_BALANCING + mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + mm->tlb_flush_pending, +#endif + "" /* This is here to not have a comma! */ + ); + + dump_flags(mm->def_flags, vmaflags_names, + ARRAY_SIZE(vmaflags_names)); +} + #endif /* CONFIG_DEBUG_VM */ From 96dad67ff244e797c4bc3e4f7f0fdaa0cfdf0a7d Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:39 -0700 Subject: [PATCH 114/164] mm: use VM_BUG_ON_MM where possible Dump the contents of the relevant struct_mm when we hit the bug condition. 
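A quick way to see what VM_BUG_ON_MM buys over a bare BUG_ON is a standalone model of the pattern: when the assertion fires, dump the structure it is about before dying, so the crash log carries the relevant state. The sketch below is plain userspace C, not the kernel macro; mm_model, dump_mm_model and BUG_ON_MM_MODEL are made-up names standing in for mm_struct, dump_mm() and VM_BUG_ON_MM().

    #include <stdio.h>
    #include <stdlib.h>

    struct mm_model {
    	unsigned long map_count;
    	unsigned long total_vm;
    };

    static void dump_mm_model(const struct mm_model *mm)
    {
    	/* the real dump_mm() prints far more fields */
    	fprintf(stderr, "mm %p map_count %lu total_vm %lu\n",
    		(const void *)mm, mm->map_count, mm->total_vm);
    }

    #define BUG_ON_MM_MODEL(cond, mm)		\
    	do {					\
    		if (cond) {			\
    			dump_mm_model(mm);	\
    			abort();		\
    		}				\
    	} while (0)

    int main(void)
    {
    	struct mm_model mm = { .map_count = 3, .total_vm = 42 };

    	/* condition is false, so nothing is printed and we keep going */
    	BUG_ON_MM_MODEL(mm.map_count > 65535, &mm);
    	return 0;
    }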
Signed-off-by: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 3 +-- kernel/sys.c | 2 +- mm/huge_memory.c | 2 +- mm/mlock.c | 2 +- mm/mmap.c | 7 ++++--- mm/pagewalk.c | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index a91e47d86de2..8c162d102740 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -601,9 +601,8 @@ static void check_mm(struct mm_struct *mm) printk(KERN_ALERT "BUG: Bad rss-counter state " "mm:%p idx:%d val:%ld\n", mm, i, x); } - #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS - VM_BUG_ON(mm->pmd_huge_pte); + VM_BUG_ON_MM(mm->pmd_huge_pte, mm); #endif } diff --git a/kernel/sys.c b/kernel/sys.c index f7030b060018..df692fbf1e79 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1634,7 +1634,7 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd) struct inode *inode; int err; - VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); exe = fdget(fd); if (!exe.file) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c13148cc745f..74c78aa8bc2f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2048,7 +2048,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON(khugepaged_test_exit(mm)); + VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; diff --git a/mm/mlock.c b/mm/mlock.c index d5d09d0786ec..03aa8512723b 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -235,7 +235,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); - VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_MLOCK; /* diff --git a/mm/mmap.c b/mm/mmap.c index c9bc285df255..16d19b48e2ad 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -410,8 +410,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct vm_area_struct *vma; vma = rb_entry(nd, struct vm_area_struct, vm_rb); - BUG_ON(vma != ignore && - vma->rb_subtree_gap != vma_compute_subtree_gap(vma)); + VM_BUG_ON_VMA(vma != ignore && + vma->rb_subtree_gap != vma_compute_subtree_gap(vma), + vma); } } @@ -448,7 +449,7 @@ static void validate_mm(struct mm_struct *mm) pr_emerg("map_count %d rb %d\n", mm->map_count, i); bug = 1; } - BUG_ON(bug); + VM_BUG_ON_MM(bug, mm); } #else #define validate_mm_rb(root, ignore) do { } while (0) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 2beeabf502c5..ad83195521f2 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -177,7 +177,7 @@ int walk_page_range(unsigned long addr, unsigned long end, if (!walk->mm) return -EINVAL; - VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); + VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm); pgd = pgd_offset(walk->mm, addr); do { From 7a82ca0d6437261d0727ce472ae4f3a05a9ce5f7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:28:41 -0700 Subject: [PATCH 115/164] mm/debug.c: use pr_emerg() - s/KERN_ALERT/pr_emerg/: we're going BUG so let's maximize the changes of getting the message out. 
- convert debug.c to pr_foo() Cc: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/debug.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/mm/debug.c b/mm/debug.c index 5a1b6194089c..5ce45c9a29b5 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -57,7 +57,7 @@ static void dump_flags(unsigned long flags, unsigned long mask; int i; - printk(KERN_ALERT "flags: %#lx(", flags); + pr_emerg("flags: %#lx(", flags); /* remove zone id */ flags &= (1UL << NR_PAGEFLAGS) - 1; @@ -69,24 +69,23 @@ static void dump_flags(unsigned long flags, continue; flags &= ~mask; - printk("%s%s", delim, names[i].name); + pr_cont("%s%s", delim, names[i].name); delim = "|"; } /* check for left over flags */ if (flags) - printk("%s%#lx", delim, flags); + pr_cont("%s%#lx", delim, flags); - printk(")\n"); + pr_cont(")\n"); } void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags) { - printk(KERN_ALERT - "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", - page, atomic_read(&page->_count), page_mapcount(page), - page->mapping, page->index); + pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", + page, atomic_read(&page->_count), page_mapcount(page), + page->mapping, page->index); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names)); if (reason) @@ -152,8 +151,7 @@ static const struct trace_print_flags vmaflags_names[] = { void dump_vma(const struct vm_area_struct *vma) { - printk(KERN_ALERT - "vma %p start %p end %p\n" + pr_emerg("vma %p start %p end %p\n" "next %p prev %p mm %p\n" "prot %lx anon_vma %p vm_ops %p\n" "pgoff %lx file %p private_data %p\n", @@ -168,8 +166,7 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - printk(KERN_ALERT - "mm %p mmap %p seqnum %d task_size %lu\n" + pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %p\n" #endif From 33a690c45b202e4c6483bfd1d93ad8d0f51df2ca Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 9 Oct 2014 15:28:43 -0700 Subject: [PATCH 116/164] memcg: move memcg_{alloc,free}_cache_params to slab_common.c The only reason why they live in memcontrol.c is that we get/put css reference to the owner memory cgroup in them. However, we can do that in memcg_{un,}register_cache. OTOH, there are several reasons to move them to slab_common.c. First, I think that the less public interface functions we have in memcontrol.h the better. Since the functions I move don't depend on memcontrol, I think it's worth making them private to slab, especially taking into account that the arrays are defined on the slab's side too. Second, the way how per-memcg arrays are updated looks rather awkward: it proceeds from memcontrol.c (__memcg_activate_kmem) to slab_common.c (memcg_update_all_caches) and back to memcontrol.c again (memcg_update_array_size). In the following patches I move the function relocating the arrays (memcg_update_array_size) to slab_common.c and therefore get rid this circular call path. I think we should have the cache allocation stuff in the same place where we have relocation, because it's easier to follow the code then. So I move arrays alloc/free functions to slab_common.c too. The third point isn't obvious. I'm going to make the list_lru structure per-memcg to allow targeted kmem reclaim. That means we will have per-memcg arrays in list_lrus too. 
It turns out that it's much easier to update these arrays in list_lru.c rather than in memcontrol.c, because all the stuff we need is defined there. This patch makes memcg caches arrays allocation path conform that of the upcoming list_lru. So let's move these functions to slab_common.c and make them static. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Christoph Lameter Cc: Glauber Costa Cc: Joonsoo Kim Cc: David Rientjes Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 14 ------------ mm/memcontrol.c | 41 ++++------------------------------- mm/slab_common.c | 44 +++++++++++++++++++++++++++++++++++++- 3 files changed, 47 insertions(+), 52 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e0752d204d9e..4d17242eeff7 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -440,10 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache); -void memcg_free_cache_params(struct kmem_cache *s); - int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); @@ -574,16 +570,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, - struct kmem_cache *s, struct kmem_cache *root_cache) -{ - return 0; -} - -static inline void memcg_free_cache_params(struct kmem_cache *s) -{ -} - static inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 28928ce9b07f..865e87c014d6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2984,43 +2984,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) return 0; } -int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache) -{ - size_t size; - - if (!memcg_kmem_enabled()) - return 0; - - if (!memcg) { - size = offsetof(struct memcg_cache_params, memcg_caches); - size += memcg_limited_groups_array_size * sizeof(void *); - } else - size = sizeof(struct memcg_cache_params); - - s->memcg_params = kzalloc(size, GFP_KERNEL); - if (!s->memcg_params) - return -ENOMEM; - - if (memcg) { - s->memcg_params->memcg = memcg; - s->memcg_params->root_cache = root_cache; - css_get(&memcg->css); - } else - s->memcg_params->is_root_cache = true; - - return 0; -} - -void memcg_free_cache_params(struct kmem_cache *s) -{ - if (!s->memcg_params) - return; - if (!s->memcg_params->is_root_cache) - css_put(&s->memcg_params->memcg->css); - kfree(s->memcg_params); -} - static void memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { @@ -3051,6 +3014,7 @@ static void memcg_register_cache(struct mem_cgroup *memcg, if (!cachep) return; + css_get(&memcg->css); list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); /* @@ -3084,6 +3048,9 @@ static void memcg_unregister_cache(struct kmem_cache *cachep) list_del(&cachep->memcg_params->list); kmem_cache_destroy(cachep); + + /* drop the reference taken in memcg_register_cache */ + css_put(&memcg->css); } /* diff --git a/mm/slab_common.c b/mm/slab_common.c index f206cb10a544..c2a8661f8b81 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -116,6 +116,38 @@ static inline int 
kmem_cache_sanity_check(const char *name, size_t size) #endif #ifdef CONFIG_MEMCG_KMEM +static int memcg_alloc_cache_params(struct mem_cgroup *memcg, + struct kmem_cache *s, struct kmem_cache *root_cache) +{ + size_t size; + + if (!memcg_kmem_enabled()) + return 0; + + if (!memcg) { + size = offsetof(struct memcg_cache_params, memcg_caches); + size += memcg_limited_groups_array_size * sizeof(void *); + } else + size = sizeof(struct memcg_cache_params); + + s->memcg_params = kzalloc(size, GFP_KERNEL); + if (!s->memcg_params) + return -ENOMEM; + + if (memcg) { + s->memcg_params->memcg = memcg; + s->memcg_params->root_cache = root_cache; + } else + s->memcg_params->is_root_cache = true; + + return 0; +} + +static void memcg_free_cache_params(struct kmem_cache *s) +{ + kfree(s->memcg_params); +} + int memcg_update_all_caches(int num_memcgs) { struct kmem_cache *s; @@ -141,7 +173,17 @@ int memcg_update_all_caches(int num_memcgs) mutex_unlock(&slab_mutex); return ret; } -#endif +#else +static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, + struct kmem_cache *s, struct kmem_cache *root_cache) +{ + return 0; +} + +static inline void memcg_free_cache_params(struct kmem_cache *s) +{ +} +#endif /* CONFIG_MEMCG_KMEM */ /* * Find a mergeable slab cache From f3bb3043a092368a255bca5d1c6f4352c96a3b2d Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 9 Oct 2014 15:28:45 -0700 Subject: [PATCH 117/164] memcg: don't call memcg_update_all_caches if new cache id fits memcg_update_all_caches grows arrays of per-memcg caches, so we only need to call it when memcg_limited_groups_array_size is increased. However, currently we invoke it each time a new kmem-active memory cgroup is created. Then it just iterates over all slab_caches and does nothing (memcg_update_cache_size returns immediately). This patch fixes this insanity. In the meantime it moves the code dealing with id allocations to separate functions, memcg_alloc_cache_id and memcg_free_cache_id. Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Acked-by: Michal Hocko Cc: Christoph Lameter Cc: Glauber Costa Cc: Joonsoo Kim Cc: David Rientjes Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 136 +++++++++++++++++++++++++----------------------- 1 file changed, 72 insertions(+), 64 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 865e87c014d6..ef4fbc5e4ca3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -649,11 +649,13 @@ int memcg_limited_groups_array_size; struct static_key memcg_kmem_enabled_key; EXPORT_SYMBOL(memcg_kmem_enabled_key); +static void memcg_free_cache_id(int id); + static void disarm_kmem_keys(struct mem_cgroup *memcg) { if (memcg_kmem_is_active(memcg)) { static_key_slow_dec(&memcg_kmem_enabled_key); - ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id); + memcg_free_cache_id(memcg->kmemcg_id); } /* * This check can't live in kmem destruction function, @@ -2906,19 +2908,44 @@ int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -static size_t memcg_caches_array_size(int num_groups) +static int memcg_alloc_cache_id(void) { - ssize_t size; - if (num_groups <= 0) - return 0; + int id, size; + int err; - size = 2 * num_groups; + id = ida_simple_get(&kmem_limited_groups, + 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); + if (id < 0) + return id; + + if (id < memcg_limited_groups_array_size) + return id; + + /* + * There's no space for the new id in memcg_caches arrays, + * so we have to grow them. 
+ */ + + size = 2 * (id + 1); if (size < MEMCG_CACHES_MIN_SIZE) size = MEMCG_CACHES_MIN_SIZE; else if (size > MEMCG_CACHES_MAX_SIZE) size = MEMCG_CACHES_MAX_SIZE; - return size; + mutex_lock(&memcg_slab_mutex); + err = memcg_update_all_caches(size); + mutex_unlock(&memcg_slab_mutex); + + if (err) { + ida_simple_remove(&kmem_limited_groups, id); + return err; + } + return id; +} + +static void memcg_free_cache_id(int id) +{ + ida_simple_remove(&kmem_limited_groups, id); } /* @@ -2928,59 +2955,55 @@ static size_t memcg_caches_array_size(int num_groups) */ void memcg_update_array_size(int num) { - if (num > memcg_limited_groups_array_size) - memcg_limited_groups_array_size = memcg_caches_array_size(num); + memcg_limited_groups_array_size = num; } int memcg_update_cache_size(struct kmem_cache *s, int num_groups) { struct memcg_cache_params *cur_params = s->memcg_params; + struct memcg_cache_params *new_params; + size_t size; + int i; VM_BUG_ON(!is_root_cache(s)); - if (num_groups > memcg_limited_groups_array_size) { - int i; - struct memcg_cache_params *new_params; - ssize_t size = memcg_caches_array_size(num_groups); + size = num_groups * sizeof(void *); + size += offsetof(struct memcg_cache_params, memcg_caches); - size *= sizeof(void *); - size += offsetof(struct memcg_cache_params, memcg_caches); + new_params = kzalloc(size, GFP_KERNEL); + if (!new_params) + return -ENOMEM; - new_params = kzalloc(size, GFP_KERNEL); - if (!new_params) - return -ENOMEM; + new_params->is_root_cache = true; - new_params->is_root_cache = true; - - /* - * There is the chance it will be bigger than - * memcg_limited_groups_array_size, if we failed an allocation - * in a cache, in which case all caches updated before it, will - * have a bigger array. - * - * But if that is the case, the data after - * memcg_limited_groups_array_size is certainly unused - */ - for (i = 0; i < memcg_limited_groups_array_size; i++) { - if (!cur_params->memcg_caches[i]) - continue; - new_params->memcg_caches[i] = - cur_params->memcg_caches[i]; - } - - /* - * Ideally, we would wait until all caches succeed, and only - * then free the old one. But this is not worth the extra - * pointer per-cache we'd have to have for this. - * - * It is not a big deal if some caches are left with a size - * bigger than the others. And all updates will reset this - * anyway. - */ - rcu_assign_pointer(s->memcg_params, new_params); - if (cur_params) - kfree_rcu(cur_params, rcu_head); + /* + * There is the chance it will be bigger than + * memcg_limited_groups_array_size, if we failed an allocation + * in a cache, in which case all caches updated before it, will + * have a bigger array. + * + * But if that is the case, the data after + * memcg_limited_groups_array_size is certainly unused + */ + for (i = 0; i < memcg_limited_groups_array_size; i++) { + if (!cur_params->memcg_caches[i]) + continue; + new_params->memcg_caches[i] = + cur_params->memcg_caches[i]; } + + /* + * Ideally, we would wait until all caches succeed, and only + * then free the old one. But this is not worth the extra + * pointer per-cache we'd have to have for this. + * + * It is not a big deal if some caches are left with a size + * bigger than the others. And all updates will reset this + * anyway. 
+ */ + rcu_assign_pointer(s->memcg_params, new_params); + if (cur_params) + kfree_rcu(cur_params, rcu_head); return 0; } @@ -4181,23 +4204,12 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg, if (err) goto out; - memcg_id = ida_simple_get(&kmem_limited_groups, - 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); + memcg_id = memcg_alloc_cache_id(); if (memcg_id < 0) { err = memcg_id; goto out; } - /* - * Make sure we have enough space for this cgroup in each root cache's - * memcg_params. - */ - mutex_lock(&memcg_slab_mutex); - err = memcg_update_all_caches(memcg_id + 1); - mutex_unlock(&memcg_slab_mutex); - if (err) - goto out_rmid; - memcg->kmemcg_id = memcg_id; INIT_LIST_HEAD(&memcg->memcg_slab_caches); @@ -4218,10 +4230,6 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg, out: memcg_resume_kmem_account(); return err; - -out_rmid: - ida_simple_remove(&kmem_limited_groups, memcg_id); - goto out; } static int memcg_activate_kmem(struct mem_cgroup *memcg, From 6f817f4cda68b09621312ec5ba84217bc5e37b3d Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 9 Oct 2014 15:28:47 -0700 Subject: [PATCH 118/164] memcg: move memcg_update_cache_size() to slab_common.c `While growing per memcg caches arrays, we jump between memcontrol.c and slab_common.c in a weird way: memcg_alloc_cache_id - memcontrol.c memcg_update_all_caches - slab_common.c memcg_update_cache_size - memcontrol.c There's absolutely no reason why memcg_update_cache_size can't live on the slab's side though. So let's move it there and settle it comfortably amid per-memcg cache allocation functions. Besides, this patch cleans this function up a bit, removing all the useless comments from it, and renames it to memcg_update_cache_params to conform to memcg_alloc/free_cache_params, which we already have in slab_common.c. 
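At its core the relocated helper is the usual grow-and-copy of a flexible-array block, followed by publishing the new block through a single pointer update. Below is a rough, single-threaded userspace sketch of that shape only; cache_params and grow_params are illustrative names, and plain assignment plus free() stand in for rcu_assign_pointer() and kfree_rcu(), which the real code needs because readers may still be walking the old array.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cache_params {
    	int is_root_cache;
    	void *caches[];			/* one slot per memcg cache id */
    };

    static struct cache_params *params;	/* currently published block */
    static int nr_ids;			/* its capacity */

    static int grow_params(int new_nr)
    {
    	struct cache_params *new_params, *old_params;

    	new_params = calloc(1, sizeof(*new_params) + new_nr * sizeof(void *));
    	if (!new_params)
    		return -1;
    	new_params->is_root_cache = 1;
    	if (params)
    		memcpy(new_params->caches, params->caches,
    		       nr_ids * sizeof(void *));

    	old_params = params;
    	params = new_params;	/* kernel: rcu_assign_pointer() */
    	nr_ids = new_nr;
    	free(old_params);	/* kernel: kfree_rcu(), deferred until readers are done */
    	return 0;
    }

    int main(void)
    {
    	grow_params(4);
    	params->caches[2] = (void *)0x1;
    	grow_params(8);		/* existing entries survive the resize */
    	printf("slot 2 after resize: %p\n", params->caches[2]);
    	free(params);
    	return 0;
    }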
Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Christoph Lameter Cc: Glauber Costa Cc: Joonsoo Kim Cc: David Rientjes Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 1 - mm/memcontrol.c | 49 -------------------------------------- mm/slab_common.c | 30 +++++++++++++++++++++-- 3 files changed, 28 insertions(+), 52 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 4d17242eeff7..19df5d857411 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -440,7 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); struct kmem_cache * diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ef4fbc5e4ca3..fff511e25bb2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2958,55 +2958,6 @@ void memcg_update_array_size(int num) memcg_limited_groups_array_size = num; } -int memcg_update_cache_size(struct kmem_cache *s, int num_groups) -{ - struct memcg_cache_params *cur_params = s->memcg_params; - struct memcg_cache_params *new_params; - size_t size; - int i; - - VM_BUG_ON(!is_root_cache(s)); - - size = num_groups * sizeof(void *); - size += offsetof(struct memcg_cache_params, memcg_caches); - - new_params = kzalloc(size, GFP_KERNEL); - if (!new_params) - return -ENOMEM; - - new_params->is_root_cache = true; - - /* - * There is the chance it will be bigger than - * memcg_limited_groups_array_size, if we failed an allocation - * in a cache, in which case all caches updated before it, will - * have a bigger array. - * - * But if that is the case, the data after - * memcg_limited_groups_array_size is certainly unused - */ - for (i = 0; i < memcg_limited_groups_array_size; i++) { - if (!cur_params->memcg_caches[i]) - continue; - new_params->memcg_caches[i] = - cur_params->memcg_caches[i]; - } - - /* - * Ideally, we would wait until all caches succeed, and only - * then free the old one. But this is not worth the extra - * pointer per-cache we'd have to have for this. - * - * It is not a big deal if some caches are left with a size - * bigger than the others. And all updates will reset this - * anyway. 
- */ - rcu_assign_pointer(s->memcg_params, new_params); - if (cur_params) - kfree_rcu(cur_params, rcu_head); - return 0; -} - static void memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { diff --git a/mm/slab_common.c b/mm/slab_common.c index c2a8661f8b81..3a6e0cfdf03a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -148,6 +148,33 @@ static void memcg_free_cache_params(struct kmem_cache *s) kfree(s->memcg_params); } +static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs) +{ + int size; + struct memcg_cache_params *new_params, *cur_params; + + BUG_ON(!is_root_cache(s)); + + size = offsetof(struct memcg_cache_params, memcg_caches); + size += num_memcgs * sizeof(void *); + + new_params = kzalloc(size, GFP_KERNEL); + if (!new_params) + return -ENOMEM; + + cur_params = s->memcg_params; + memcpy(new_params->memcg_caches, cur_params->memcg_caches, + memcg_limited_groups_array_size * sizeof(void *)); + + new_params->is_root_cache = true; + + rcu_assign_pointer(s->memcg_params, new_params); + if (cur_params) + kfree_rcu(cur_params, rcu_head); + + return 0; +} + int memcg_update_all_caches(int num_memcgs) { struct kmem_cache *s; @@ -158,9 +185,8 @@ int memcg_update_all_caches(int num_memcgs) if (!is_root_cache(s)) continue; - ret = memcg_update_cache_size(s, num_memcgs); + ret = memcg_update_cache_params(s, num_memcgs); /* - * See comment in memcontrol.c, memcg_update_cache_size: * Instead of freeing the memory, we'll just leave the caches * up to this point in an updated state. */ From 01c2965f0723a25209d5cf4cac630ed0f6d0edf4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 9 Oct 2014 15:28:50 -0700 Subject: [PATCH 119/164] mm: dmapool: add/remove sysfs file outside of the pool lock lock cat /sys/.../pools followed by removal the device leads to: |====================================================== |[ INFO: possible circular locking dependency detected ] |3.17.0-rc4+ #1498 Not tainted |------------------------------------------------------- |rmmod/2505 is trying to acquire lock: | (s_active#28){++++.+}, at: [] kernfs_remove_by_name_ns+0x3c/0x88 | |but task is already holding lock: | (pools_lock){+.+.+.}, at: [] dma_pool_destroy+0x18/0x17c | |which lock already depends on the new lock. |the existing dependency chain (in reverse order) is: | |-> #1 (pools_lock){+.+.+.}: | [] show_pools+0x30/0xf8 | [] dev_attr_show+0x1c/0x48 | [] sysfs_kf_seq_show+0x88/0x10c | [] kernfs_seq_show+0x24/0x28 | [] seq_read+0x1b8/0x480 | [] vfs_read+0x8c/0x148 | [] SyS_read+0x40/0x8c | [] ret_fast_syscall+0x0/0x48 | |-> #0 (s_active#28){++++.+}: | [] __kernfs_remove+0x258/0x2ec | [] kernfs_remove_by_name_ns+0x3c/0x88 | [] dma_pool_destroy+0x148/0x17c | [] hcd_buffer_destroy+0x20/0x34 | [] usb_remove_hcd+0x110/0x1a4 The problem is the lock order of pools_lock and kernfs_mutex in dma_pool_destroy() vs show_pools() call path. This patch breaks out the creation of the sysfs file outside of the pools_lock mutex. The newly added pools_reg_lock ensures that there is no race of create vs destroy code path in terms whether or not the sysfs file has to be deleted (and was it deleted before we try to create a new one) and what to do if device_create_file() failed. 
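The resulting shape is a common one: take the decision ("is this the first/last pool for the device?") under the list lock, but perform the sysfs create/remove outside it, with an outer registration lock serializing the whole sequence so two such sequences cannot interleave. A compilable userspace model of that shape follows; pthread mutexes and a plain counter stand in for pools_lock, pools_reg_lock and the per-device pool list, and all names are illustrative rather than the kernel API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t pools_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t pools_reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_pools;

    static void pool_create(void)
    {
    	bool first = false;

    	pthread_mutex_lock(&pools_reg_lock);
    	pthread_mutex_lock(&pools_lock);
    	if (nr_pools++ == 0)
    		first = true;		/* decide under the list lock ... */
    	pthread_mutex_unlock(&pools_lock);

    	if (first)			/* ... act on it outside that lock */
    		printf("register sysfs file\n");
    	pthread_mutex_unlock(&pools_reg_lock);
    }

    static void pool_destroy(void)
    {
    	bool last = false;

    	pthread_mutex_lock(&pools_reg_lock);
    	pthread_mutex_lock(&pools_lock);
    	if (--nr_pools == 0)
    		last = true;
    	pthread_mutex_unlock(&pools_lock);

    	if (last)
    		printf("remove sysfs file\n");
    	pthread_mutex_unlock(&pools_reg_lock);
    }

    int main(void)
    {
    	pool_create();
    	pool_create();
    	pool_destroy();
    	pool_destroy();
    	return 0;
    }

Only the outer registration lock is ever held across the sysfs operation, so it cannot invert against locks taken from the sysfs show path.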
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/dmapool.c | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index ba8019b063e1..2372ed5a33d3 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -62,6 +62,7 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ }; static DEFINE_MUTEX(pools_lock); +static DEFINE_MUTEX(pools_reg_lock); static ssize_t show_pools(struct device *dev, struct device_attribute *attr, char *buf) @@ -132,6 +133,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, { struct dma_pool *retval; size_t allocation; + bool empty = false; if (align == 0) { align = 1; @@ -172,15 +174,34 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, INIT_LIST_HEAD(&retval->pools); + /* + * pools_lock ensures that the ->dma_pools list does not get corrupted. + * pools_reg_lock ensures that there is not a race between + * dma_pool_create() and dma_pool_destroy() or within dma_pool_create() + * when the first invocation of dma_pool_create() failed on + * device_create_file() and the second assumes that it has been done (I + * know it is a short window). + */ + mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); - if (list_empty(&dev->dma_pools) && - device_create_file(dev, &dev_attr_pools)) { - kfree(retval); - retval = NULL; - } else - list_add(&retval->pools, &dev->dma_pools); + if (list_empty(&dev->dma_pools)) + empty = true; + list_add(&retval->pools, &dev->dma_pools); mutex_unlock(&pools_lock); + if (empty) { + int err; + err = device_create_file(dev, &dev_attr_pools); + if (err) { + mutex_lock(&pools_lock); + list_del(&retval->pools); + mutex_unlock(&pools_lock); + mutex_unlock(&pools_reg_lock); + kfree(retval); + return NULL; + } + } + mutex_unlock(&pools_reg_lock); return retval; } EXPORT_SYMBOL(dma_pool_create); @@ -251,11 +272,17 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page) */ void dma_pool_destroy(struct dma_pool *pool) { + bool empty = false; + + mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); list_del(&pool->pools); if (pool->dev && list_empty(&pool->dev->dma_pools)) - device_remove_file(pool->dev, &dev_attr_pools); + empty = true; mutex_unlock(&pools_lock); + if (empty) + device_remove_file(pool->dev, &dev_attr_pools); + mutex_unlock(&pools_reg_lock); while (!list_empty(&pool->page_list)) { struct dma_page *page; From aabfb57296e3dd9761e47736ec69305c95461d7d Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 9 Oct 2014 15:28:52 -0700 Subject: [PATCH 120/164] mm: memcontrol: do not kill uncharge batching in free_pages_and_swap_cache free_pages_and_swap_cache limits release_pages to PAGEVEC_SIZE chunks. This is not a big deal for the normal release path but it completely kills memcg uncharge batching which reduces res_counter spin_lock contention. 
Dave has noticed this with his page fault scalability test case on a large machine when the lock was basically dominating on all CPUs: 80.18% 80.18% [kernel] [k] _raw_spin_lock | --- _raw_spin_lock | |--66.59%-- res_counter_uncharge_until | res_counter_uncharge | uncharge_batch | uncharge_list | mem_cgroup_uncharge_list | release_pages | free_pages_and_swap_cache | tlb_flush_mmu_free | | | |--90.12%-- unmap_single_vma | | unmap_vmas | | unmap_region | | do_munmap | | vm_munmap | | sys_munmap | | system_call_fastpath | | __GI___munmap | | | --9.88%-- tlb_flush_mmu | tlb_finish_mmu | unmap_region | do_munmap | vm_munmap | sys_munmap | system_call_fastpath | __GI___munmap In his case the load was running in the root memcg and that part has been handled by reverting 05b843012335 ("mm: memcontrol: use root_mem_cgroup res_counter") because this is a clear regression, but the problem remains inside dedicated memcgs. There is no reason to limit release_pages to PAGEVEC_SIZE batches other than lru_lock held times. This logic, however, can be moved inside the function. mem_cgroup_uncharge_list and free_hot_cold_page_list do not hold any lock for the whole pages_to_free list so it is safe to call them in a single run. The release_pages() code was previously breaking the lru_lock each PAGEVEC_SIZE pages (ie, 14 pages). However this code has no usage of pagevecs so switch to breaking the lock at least every SWAP_CLUSTER_MAX (32) pages. This means that the lock acquisition frequency is approximately halved and the max hold times are approximately doubled. The now unneeded batching is removed from free_pages_and_swap_cache(). Also update the grossly out-of-date release_pages documentation. Signed-off-by: Michal Hocko Signed-off-by: Johannes Weiner Reported-by: Dave Hansen Cc: Vladimir Davydov Cc: Greg Thelen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap.c | 30 +++++++++++++++++++----------- mm/swap_state.c | 14 ++++---------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/mm/swap.c b/mm/swap.c index 6b2dc3897cd5..8a12b33936b4 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -887,18 +887,14 @@ void lru_add_drain_all(void) mutex_unlock(&lock); } -/* - * Batched page_cache_release(). Decrement the reference count on all the - * passed pages. If it fell to zero then remove the page from the LRU and - * free it. +/** + * release_pages - batched page_cache_release() + * @pages: array of pages to release + * @nr: number of pages + * @cold: whether the pages are cache cold * - * Avoid taking zone->lru_lock if possible, but if it is taken, retain it - * for the remainder of the operation. - * - * The locking in this function is against shrink_inactive_list(): we recheck - * the page count inside the lock to see whether shrink_inactive_list() - * grabbed the page via the LRU. If it did, give up: shrink_inactive_list() - * will free it. + * Decrement the reference count on all the pages in @pages. If it + * fell to zero, remove the page from the LRU and free it. 
*/ void release_pages(struct page **pages, int nr, bool cold) { @@ -907,6 +903,7 @@ void release_pages(struct page **pages, int nr, bool cold) struct zone *zone = NULL; struct lruvec *lruvec; unsigned long uninitialized_var(flags); + unsigned int uninitialized_var(lock_batch); for (i = 0; i < nr; i++) { struct page *page = pages[i]; @@ -920,6 +917,16 @@ void release_pages(struct page **pages, int nr, bool cold) continue; } + /* + * Make sure the IRQ-safe lock-holding time does not get + * excessive with a continuous string of pages from the + * same zone. The lock is held only if zone != NULL. + */ + if (zone && ++lock_batch == SWAP_CLUSTER_MAX) { + spin_unlock_irqrestore(&zone->lru_lock, flags); + zone = NULL; + } + if (!put_page_testzero(page)) continue; @@ -930,6 +937,7 @@ void release_pages(struct page **pages, int nr, bool cold) if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); + lock_batch = 0; zone = pagezone; spin_lock_irqsave(&zone->lru_lock, flags); } diff --git a/mm/swap_state.c b/mm/swap_state.c index ef1f39139b71..154444918685 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -265,18 +265,12 @@ void free_page_and_swap_cache(struct page *page) void free_pages_and_swap_cache(struct page **pages, int nr) { struct page **pagep = pages; + int i; lru_add_drain(); - while (nr) { - int todo = min(nr, PAGEVEC_SIZE); - int i; - - for (i = 0; i < todo; i++) - free_swap_cache(pagep[i]); - release_pages(pagep, todo, false); - pagep += todo; - nr -= todo; - } + for (i = 0; i < nr; i++) + free_swap_cache(pagep[i]); + release_pages(pagep, nr, false); } /* From 3fbe724424fb104aaca9973389b4a9df428c3e2a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:28:54 -0700 Subject: [PATCH 121/164] mm: memcontrol: simplify detecting when the memory+swap limit is hit When attempting to charge pages, we first charge the memory counter and then the memory+swap counter. If one of the counters is at its limit, we enter reclaim, but if it's the memory+swap counter, reclaim shouldn't swap because that wouldn't change the situation. However, if the counters have the same limits, we never get to the memory+swap limit. To know whether reclaim should swap or not, there is a state flag that indicates whether the limits are equal and whether hitting the memory limit implies hitting the memory+swap limit. Just try the memory+swap counter first. 
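Concretely, the charge order becomes: try the memory+swap counter first, and only if that succeeds try the memory counter, unwinding memsw when the latter fails; whichever counter rejected the charge then tells reclaim whether swapping can help. A minimal standalone model of that ordering is sketched below, with plain structs instead of res_counter; counter_charge and try_charge here are illustrative, not the kernel functions.

    #include <stdbool.h>
    #include <stdio.h>

    struct counter { long usage, limit; };

    static bool counter_charge(struct counter *c, long n)
    {
    	if (c->usage + n > c->limit)
    		return false;
    	c->usage += n;
    	return true;
    }

    static bool try_charge(struct counter *mem, struct counter *memsw,
    		       long n, bool *may_swap)
    {
    	*may_swap = true;
    	if (!counter_charge(memsw, n)) {
    		*may_swap = false;	/* memsw limit hit: swap cannot help */
    		return false;
    	}
    	if (!counter_charge(mem, n)) {
    		memsw->usage -= n;	/* unwind the memsw charge */
    		return false;		/* mem limit hit: swap may help */
    	}
    	return true;
    }

    int main(void)
    {
    	/* equal limits: the memsw counter is the one that fails first */
    	struct counter mem = { 0, 100 }, memsw = { 0, 100 };
    	bool may_swap;

    	printf("charge 80 -> %d\n", try_charge(&mem, &memsw, 80, &may_swap));
    	printf("charge 40 -> %d, may_swap %d\n",
    	       try_charge(&mem, &memsw, 40, &may_swap), may_swap);
    	return 0;
    }

With equal limits the memsw counter necessarily fails first, so reclaim skips swap without needing any extra state flag.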
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: Dave Hansen Cc: Greg Thelen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 47 +++++++++++++---------------------------------- 1 file changed, 13 insertions(+), 34 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fff511e25bb2..9cda99dfac4f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -318,9 +318,6 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; - /* set when res.limit == memsw.limit */ - bool memsw_is_minimum; - /* protect arrays of thresholds */ struct mutex thresholds_lock; @@ -1818,8 +1815,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, if (flags & MEM_CGROUP_RECLAIM_NOSWAP) noswap = true; - if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum) - noswap = true; for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) { if (loop) @@ -2557,16 +2552,17 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, goto done; size = batch * PAGE_SIZE; - if (!res_counter_charge(&memcg->res, size, &fail_res)) { - if (!do_swap_account) + if (!do_swap_account || + !res_counter_charge(&memcg->memsw, size, &fail_res)) { + if (!res_counter_charge(&memcg->res, size, &fail_res)) goto done_restock; - if (!res_counter_charge(&memcg->memsw, size, &fail_res)) - goto done_restock; - res_counter_uncharge(&memcg->res, size); + if (do_swap_account) + res_counter_uncharge(&memcg->memsw, size); + mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); + } else { mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); flags |= MEM_CGROUP_RECLAIM_NOSWAP; - } else - mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); + } if (batch > nr_pages) { batch = nr_pages; @@ -3629,7 +3625,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val) { int retry_count; - u64 memswlimit, memlimit; int ret = 0; int children = mem_cgroup_count_children(memcg); u64 curusage, oldusage; @@ -3656,24 +3651,16 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, * We have to guarantee memcg->res.limit <= memcg->memsw.limit. */ mutex_lock(&set_limit_mutex); - memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - if (memswlimit < val) { + if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) { ret = -EINVAL; mutex_unlock(&set_limit_mutex); break; } - memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); - if (memlimit < val) + if (res_counter_read_u64(&memcg->res, RES_LIMIT) < val) enlarge = 1; ret = res_counter_set_limit(&memcg->res, val); - if (!ret) { - if (memswlimit == val) - memcg->memsw_is_minimum = true; - else - memcg->memsw_is_minimum = false; - } mutex_unlock(&set_limit_mutex); if (!ret) @@ -3698,7 +3685,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, unsigned long long val) { int retry_count; - u64 memlimit, memswlimit, oldusage, curusage; + u64 oldusage, curusage; int children = mem_cgroup_count_children(memcg); int ret = -EBUSY; int enlarge = 0; @@ -3717,22 +3704,14 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 
*/ mutex_lock(&set_limit_mutex); - memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); - if (memlimit > val) { + if (res_counter_read_u64(&memcg->res, RES_LIMIT) > val) { ret = -EINVAL; mutex_unlock(&set_limit_mutex); break; } - memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - if (memswlimit < val) + if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) enlarge = 1; ret = res_counter_set_limit(&memcg->memsw, val); - if (!ret) { - if (memlimit == val) - memcg->memsw_is_minimum = true; - else - memcg->memsw_is_minimum = false; - } mutex_unlock(&set_limit_mutex); if (!ret) From b70a2a21dc9d4ad455931b53131a0cb4fc01fafe Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:28:56 -0700 Subject: [PATCH 122/164] mm: memcontrol: fix transparent huge page allocations under pressure In a memcg with even just moderate cache pressure, success rates for transparent huge page allocations drop to zero, wasting a lot of effort that the allocator puts into assembling these pages. The reason for this is that the memcg reclaim code was never designed for higher-order charges. It reclaims in small batches until there is room for at least one page. Huge page charges only succeed when these batches add up over a series of huge faults, which is unlikely under any significant load involving order-0 allocations in the group. Remove that loop on the memcg side in favor of passing the actual reclaim goal to direct reclaim, which is already set up and optimized to meet higher-order goals efficiently. This brings memcg's THP policy in line with the system policy: if the allocator painstakingly assembles a hugepage, memcg will at least make an honest effort to charge it. As a result, transparent hugepage allocation rates amid cache activity are drastically improved: vanilla patched pgalloc 4717530.80 ( +0.00%) 4451376.40 ( -5.64%) pgfault 491370.60 ( +0.00%) 225477.40 ( -54.11%) pgmajfault 2.00 ( +0.00%) 1.80 ( -6.67%) thp_fault_alloc 0.00 ( +0.00%) 531.60 (+100.00%) thp_fault_fallback 749.00 ( +0.00%) 217.40 ( -70.88%) [ Note: this may in turn increase memory consumption from internal fragmentation, which is an inherent risk of transparent hugepages. Some setups may have to adjust the memcg limits accordingly to accomodate this - or, if the machine is already packed to capacity, disable the transparent huge page feature. 
] Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Cc: Michal Hocko Cc: Dave Hansen Cc: Greg Thelen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 6 ++-- mm/memcontrol.c | 69 +++++++++++--------------------------------- mm/vmscan.c | 7 +++-- 3 files changed, 25 insertions(+), 57 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index ea4f926e6b9b..37a585beef5c 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -327,8 +327,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + bool may_swap); extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, struct zone *zone, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9cda99dfac4f..c86cc442ada4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -480,14 +480,6 @@ enum res_type { /* Used for OOM nofiier */ #define OOM_CONTROL (0) -/* - * Reclaim flags for mem_cgroup_hierarchical_reclaim - */ -#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 -#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) -#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 -#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) - /* * The memcg_create_mutex will be held whenever a new cgroup is created. * As a consequence, any change that needs to protect against new child cgroups @@ -1805,40 +1797,6 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, NULL, "Memory cgroup out of memory"); } -static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, - gfp_t gfp_mask, - unsigned long flags) -{ - unsigned long total = 0; - bool noswap = false; - int loop; - - if (flags & MEM_CGROUP_RECLAIM_NOSWAP) - noswap = true; - - for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) { - if (loop) - drain_all_stock_async(memcg); - total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap); - /* - * Allow limit shrinkers, which are triggered directly - * by userspace, to catch signals and stop reclaim - * after minimal progress, regardless of the margin. - */ - if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK)) - break; - if (mem_cgroup_margin(memcg)) - break; - /* - * If nothing was reclaimed after two attempts, there - * may be no reclaimable pages in this hierarchy. 
- */ - if (loop && !total) - break; - } - return total; -} - /** * test_mem_cgroup_node_reclaimable * @memcg: the target memcg @@ -2541,8 +2499,9 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, struct mem_cgroup *mem_over_limit; struct res_counter *fail_res; unsigned long nr_reclaimed; - unsigned long flags = 0; unsigned long long size; + bool may_swap = true; + bool drained = false; int ret = 0; if (mem_cgroup_is_root(memcg)) @@ -2561,7 +2520,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); } else { mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); - flags |= MEM_CGROUP_RECLAIM_NOSWAP; + may_swap = false; } if (batch > nr_pages) { @@ -2586,11 +2545,18 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, if (!(gfp_mask & __GFP_WAIT)) goto nomem; - nr_reclaimed = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); + nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, + gfp_mask, may_swap); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) goto retry; + if (!drained) { + drain_all_stock_async(mem_over_limit); + drained = true; + goto retry; + } + if (gfp_mask & __GFP_NORETRY) goto nomem; /* @@ -3666,8 +3632,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, if (!ret) break; - mem_cgroup_reclaim(memcg, GFP_KERNEL, - MEM_CGROUP_RECLAIM_SHRINK); + try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); + curusage = res_counter_read_u64(&memcg->res, RES_USAGE); /* Usage is reduced ? */ if (curusage >= oldusage) @@ -3717,9 +3683,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, if (!ret) break; - mem_cgroup_reclaim(memcg, GFP_KERNEL, - MEM_CGROUP_RECLAIM_NOSWAP | - MEM_CGROUP_RECLAIM_SHRINK); + try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); + curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); /* Usage is reduced ? 
*/ if (curusage >= oldusage) @@ -3968,8 +3933,8 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg) if (signal_pending(current)) return -EINTR; - progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, - false); + progress = try_to_free_mem_cgroup_pages(memcg, 1, + GFP_KERNEL, true); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 06123f20a326..dcb47074ae03 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2759,21 +2759,22 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, } unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, gfp_t gfp_mask, - bool noswap) + bool may_swap) { struct zonelist *zonelist; unsigned long nr_reclaimed; int nid; struct scan_control sc = { - .nr_to_reclaim = SWAP_CLUSTER_MAX, + .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), .target_mem_cgroup = memcg, .priority = DEF_PRIORITY, .may_writepage = !laptop_mode, .may_unmap = 1, - .may_swap = !noswap, + .may_swap = may_swap, }; /* From cf2b8fbf1d2f7ba07999e97685563c94483d33d6 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 9 Oct 2014 15:28:59 -0700 Subject: [PATCH 123/164] memcg: zap memcg_can_account_kmem memcg_can_account_kmem() returns true iff !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && memcg_kmem_is_active(memcg); To begin with the !mem_cgroup_is_root(memcg) check is useless, because one can't enable kmem accounting for the root cgroup (mem_cgroup_write() returns EINVAL on an attempt to set the limit on the root cgroup). Furthermore, the !mem_cgroup_disabled() check also seems to be redundant. The point is memcg_can_account_kmem() is called from three places: mem_cgroup_salbinfo_read(), __memcg_kmem_get_cache(), and __memcg_kmem_newpage_charge(). The latter two functions are only invoked if memcg_kmem_enabled() returns true, which implies that the memory cgroup subsystem is enabled. And mem_cgroup_slabinfo_read() shows the output of memory.kmem.slabinfo, which won't exist if the memory cgroup is completely disabled. So let's substitute all the calls to memcg_can_account_kmem() with plain memcg_kmem_is_active(), and kill the former. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c86cc442ada4..23976fd885fd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2762,12 +2762,6 @@ static DEFINE_MUTEX(memcg_slab_mutex); static DEFINE_MUTEX(activate_kmem_mutex); -static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) -{ - return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && - memcg_kmem_is_active(memcg); -} - /* * This is a bit cumbersome, but it is rarely used and avoids a backpointer * in the memcg_cache_params struct. 
@@ -2787,7 +2781,7 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); struct memcg_cache_params *params; - if (!memcg_can_account_kmem(memcg)) + if (!memcg_kmem_is_active(memcg)) return -EIO; print_slabinfo_header(m); @@ -3164,7 +3158,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner)); - if (!memcg_can_account_kmem(memcg)) + if (!memcg_kmem_is_active(memcg)) goto out; memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg)); @@ -3249,7 +3243,7 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) memcg = get_mem_cgroup_from_mm(current->mm); - if (!memcg_can_account_kmem(memcg)) { + if (!memcg_kmem_is_active(memcg)) { css_put(&memcg->css); return true; } From 2581d20237f02984c16c7b23262150e6bd6b8c57 Mon Sep 17 00:00:00 2001 From: Paul McQuade Date: Thu, 9 Oct 2014 15:29:01 -0700 Subject: [PATCH 124/164] mm/mremap.c: use linux headers "WARNING: Use #include instead of " Signed-off-by: Paul McQuade Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mremap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/mremap.c b/mm/mremap.c index 89e45d8a983a..b147f66f4c40 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -21,8 +21,8 @@ #include #include #include +#include -#include #include #include From 99dadfdde04b72ce98aa2fbebdb49526f494e4cf Mon Sep 17 00:00:00 2001 From: Paul McQuade Date: Thu, 9 Oct 2014 15:29:03 -0700 Subject: [PATCH 125/164] mm/filemap.c: remove trailing whitespace Signed-off-by: Paul McQuade Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 0ab0a3ea5721..14b4642279f1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1753,7 +1753,7 @@ EXPORT_SYMBOL(generic_file_read_iter); static int page_cache_read(struct file *file, pgoff_t offset) { struct address_space *mapping = file->f_mapping; - struct page *page; + struct page *page; int ret; do { @@ -1770,7 +1770,7 @@ static int page_cache_read(struct file *file, pgoff_t offset) page_cache_release(page); } while (ret == AOP_TRUNCATED_PAGE); - + return ret; } From d85fbee89f6e67e37ed722adaf085f49b1ce6c50 Mon Sep 17 00:00:00 2001 From: Paul McQuade Date: Thu, 9 Oct 2014 15:29:05 -0700 Subject: [PATCH 126/164] mm/bootmem.c: use include/linux/ headers Replace asm. headers with linux/headers: Signed-off-by: Paul McQuade Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/bootmem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/bootmem.c b/mm/bootmem.c index 90bd3507b413..8a000cebb0d7 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -16,9 +16,9 @@ #include #include #include +#include +#include -#include -#include #include #include "internal.h" From 22880ebe76be421a572b6f004604467c63f281f5 Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Thu, 9 Oct 2014 15:29:07 -0700 Subject: [PATCH 127/164] drivers/firmware/memmap.c: don't create memmap sysfs of same firmware_map_entry By the following commits, we prevented from allocating firmware_map_entry of same memory range: f0093ede: drivers/firmware/memmap.c: don't allocate firmware_map_entry of same memory range 49c8b24d: drivers/firmware/memmap.c: pass the correct argument to firmware_map_find_entry_bootmem() But it's not enough. 
When PNP0C80 device is added by acpi_scan_init(), memmap sysfses of same firmware_map_entry are created twice as follows: # cat /sys/firmware/memmap/*/start 0x40000000000 0x60000000000 0x4a837000 0x4a83a000 0x4a8b5000 ... 0x40000000000 0x60000000000 ... The flows of the issues are as follows: 1. e820_reserve_resources() allocates firmware_map_entrys of all memory ranges defined in e820. And, these firmware_map_entrys are linked with map_entries list. map_entries -> entry 1 -> ... -> entry N 2. When PNP0C80 device is limited by mem= boot option, acpi_scan_init() added the memory device. In this case, firmware_map_add_hotplug() allocates firmware_map_entry and creates memmap sysfs. map_entries -> entry 1 -> ... -> entry N -> entry N+1 | memmap 1 3. firmware_memmap_init() creates memmap sysfses of firmware_map_entrys linked with map_entries. map_entries -> entry 1 -> ... -> entry N -> entry N+1 | | | memmap 2 memmap N+1 memmap 1 memmap N+2 So while hot removing the PNP0C80 device, kernel panic occurs as follows: BUG: unable to handle kernel paging request at 00000001003e000b IP: sysfs_open_file+0x46/0x2b0 PGD 203a89fe067 PUD 0 Oops: 0000 [#1] SMP ... Call Trace: do_dentry_open+0x1ef/0x2a0 finish_open+0x31/0x40 do_last+0x57c/0x1220 path_openat+0xc2/0x4c0 do_filp_open+0x4b/0xb0 do_sys_open+0xf3/0x1f0 SyS_open+0x1e/0x20 system_call_fastpath+0x16/0x1b The patch adds a check of confirming whether memmap sysfs of firmware_map_entry has been created, and does not create memmap sysfs of same firmware_map_entry. Signed-off-by: Yasuaki Ishimatsu Cc: Santosh Shilimkar Cc: Toshi Kani Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/firmware/memmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 79f18e6d9c4f..cc016c615c19 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -184,6 +184,9 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry) static int map_entries_nr; static struct kset *mmap_kset; + if (entry->kobj.state_in_sysfs) + return -EEXIST; + if (!mmap_kset) { mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj); if (!mmap_kset) From 25acde317354997bb945892189f32ffb31b7379b Mon Sep 17 00:00:00 2001 From: Paul McQuade Date: Thu, 9 Oct 2014 15:29:09 -0700 Subject: [PATCH 128/164] mm: ksm use pr_err instead of printk WARNING: Prefer: pr_err(... to printk(KERN_ERR ... 
[akpm@linux-foundation.org: remove KERN_ERR] Signed-off-by: Paul McQuade Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/ksm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index fb7590222706..6b2e337bc03c 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2310,7 +2310,7 @@ static int __init ksm_init(void) ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); if (IS_ERR(ksm_thread)) { - printk(KERN_ERR "ksm: creating kthread failed\n"); + pr_err("ksm: creating kthread failed\n"); err = PTR_ERR(ksm_thread); goto out_free; } @@ -2318,7 +2318,7 @@ static int __init ksm_init(void) #ifdef CONFIG_SYSFS err = sysfs_create_group(mm_kobj, &ksm_attr_group); if (err) { - printk(KERN_ERR "ksm: register sysfs failed\n"); + pr_err("ksm: register sysfs failed\n"); kthread_stop(ksm_thread); goto out_free; } From baa2ef83981c71ceb00f68fbdac323253c2c3e42 Mon Sep 17 00:00:00 2001 From: Paul McQuade Date: Thu, 9 Oct 2014 15:29:11 -0700 Subject: [PATCH 129/164] mm/dmapool.c: fixed a brace coding style issue Remove 3 brace coding style for any arm of this statement Signed-off-by: Paul McQuade Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/dmapool.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 2372ed5a33d3..fd5fe4342e93 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -135,28 +135,25 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t allocation; bool empty = false; - if (align == 0) { + if (align == 0) align = 1; - } else if (align & (align - 1)) { + else if (align & (align - 1)) return NULL; - } - if (size == 0) { + if (size == 0) return NULL; - } else if (size < 4) { + else if (size < 4) size = 4; - } if ((size % align) != 0) size = ALIGN(size, align); allocation = max_t(size_t, size, PAGE_SIZE); - if (!boundary) { + if (!boundary) boundary = allocation; - } else if ((boundary < size) || (boundary & (boundary - 1))) { + else if ((boundary < size) || (boundary & (boundary - 1))) return NULL; - } retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev)); if (!retval) From 2667f50e8b81457fcb4a3dbe6aff3e81ea009e13 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 9 Oct 2014 15:29:14 -0700 Subject: [PATCH 130/164] mm: introduce a general RCU get_user_pages_fast() This series implements general forms of get_user_pages_fast and __get_user_pages_fast in core code and activates them for arm and arm64. These are required for Transparent HugePages to function correctly, as a futex on a THP tail will otherwise result in an infinite loop (due to the core implementation of __get_user_pages_fast always returning 0). Unfortunately, a futex on THP tail can be quite common for certain workloads; thus THP is unreliable without a __get_user_pages_fast implementation. This series may also be beneficial for direct-IO heavy workloads and certain KVM workloads. This patch (of 6): get_user_pages_fast() attempts to pin user pages by walking the page tables directly and avoids taking locks. Thus the walker needs to be protected from page table pages being freed from under it, and needs to block any THP splits. One way to achieve this is to have the walker disable interrupts, and rely on IPIs from the TLB flushing code blocking before the page table pages are freed. 
On some platforms we have hardware broadcast of TLB invalidations, thus the TLB flushing code doesn't necessarily need to broadcast IPIs; and spuriously broadcasting IPIs can hurt system performance if done too often. This problem has been solved on PowerPC and Sparc by batching up page table pages belonging to more than one mm_user, then scheduling an rcu_sched callback to free the pages. This RCU page table free logic has been promoted to core code and is activated when one enables HAVE_RCU_TABLE_FREE. Unfortunately, these architectures implement their own get_user_pages_fast routines. The RCU page table free logic coupled with an IPI broadcast on THP split (which is a rare event), allows one to protect a page table walker by merely disabling the interrupts during the walk. This patch provides a general RCU implementation of get_user_pages_fast that can be used by architectures that perform hardware broadcast of TLB invalidations. It is based heavily on the PowerPC implementation by Nick Piggin. [akpm@linux-foundation.org: various comment fixes] Signed-off-by: Steve Capper Tested-by: Dann Frazier Reviewed-by: Catalin Marinas Acked-by: Hugh Dickins Cc: Russell King Cc: Mark Rutland Cc: Mel Gorman Cc: Will Deacon Cc: Christoffer Dall Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/Kconfig | 3 + mm/gup.c | 354 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 357 insertions(+) diff --git a/mm/Kconfig b/mm/Kconfig index 886db2158538..0ceb8a567dab 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -137,6 +137,9 @@ config HAVE_MEMBLOCK_NODE_MAP config HAVE_MEMBLOCK_PHYS_MAP boolean +config HAVE_GENERIC_RCU_GUP + boolean + config ARCH_DISCARD_MEMBLOCK boolean diff --git a/mm/gup.c b/mm/gup.c index af7ea3e0826b..cd62c8c90d4a 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -10,6 +10,10 @@ #include #include +#include +#include +#include + #include "internal.h" static struct page *no_page_table(struct vm_area_struct *vma, @@ -676,3 +680,353 @@ struct page *get_dump_page(unsigned long addr) return page; } #endif /* CONFIG_ELF_CORE */ + +/* + * Generic RCU Fast GUP + * + * get_user_pages_fast attempts to pin user pages by walking the page + * tables directly and avoids taking locks. Thus the walker needs to be + * protected from page table pages being freed from under it, and should + * block any THP splits. + * + * One way to achieve this is to have the walker disable interrupts, and + * rely on IPIs from the TLB flushing code blocking before the page table + * pages are freed. This is unsuitable for architectures that do not need + * to broadcast an IPI when invalidating TLBs. + * + * Another way to achieve this is to batch up page table containing pages + * belonging to more than one mm_user, then rcu_sched a callback to free those + * pages. Disabling interrupts will allow the fast_gup walker to both block + * the rcu_sched callback, and an IPI that we broadcast for splitting THPs + * (which is a relatively rare event). The code below adopts this strategy. + * + * Before activating this code, please be aware that the following assumptions + * are currently made: + * + * *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free + * pages containing page tables. + * + * *) THP splits will broadcast an IPI, this can be achieved by overriding + * pmdp_splitting_flush. + * + * *) ptes can be read atomically by the architecture. + * + * *) access_ok is sufficient to validate userspace address ranges. 
+ * + * The last two assumptions can be relaxed by the addition of helper functions. + * + * This code is based heavily on the PowerPC implementation by Nick Piggin. + */ +#ifdef CONFIG_HAVE_GENERIC_RCU_GUP + +#ifdef __HAVE_ARCH_PTE_SPECIAL +static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + pte_t *ptep, *ptem; + int ret = 0; + + ptem = ptep = pte_offset_map(&pmd, addr); + do { + /* + * In the line below we are assuming that the pte can be read + * atomically. If this is not the case for your architecture, + * please wrap this in a helper function! + * + * for an example see gup_get_pte in arch/x86/mm/gup.c + */ + pte_t pte = ACCESS_ONCE(*ptep); + struct page *page; + + /* + * Similar to the PMD case below, NUMA hinting must take slow + * path + */ + if (!pte_present(pte) || pte_special(pte) || + pte_numa(pte) || (write && !pte_write(pte))) + goto pte_unmap; + + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + page = pte_page(pte); + + if (!page_cache_get_speculative(page)) + goto pte_unmap; + + if (unlikely(pte_val(pte) != pte_val(*ptep))) { + put_page(page); + goto pte_unmap; + } + + pages[*nr] = page; + (*nr)++; + + } while (ptep++, addr += PAGE_SIZE, addr != end); + + ret = 1; + +pte_unmap: + pte_unmap(ptem); + return ret; +} +#else + +/* + * If we can't determine whether or not a pte is special, then fail immediately + * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not + * to be special. + * + * For a futex to be placed on a THP tail page, get_futex_key requires a + * __get_user_pages_fast implementation that can pin pages. Thus it's still + * useful to have gup_huge_pmd even if we can't operate on ptes. + */ +static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + return 0; +} +#endif /* __HAVE_ARCH_PTE_SPECIAL */ + +static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + struct page *head, *page, *tail; + int refs; + + if (write && !pmd_write(orig)) + return 0; + + refs = 0; + head = pmd_page(orig); + page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + tail = page; + do { + VM_BUG_ON_PAGE(compound_head(page) != head, page); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; + } + + if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { + *nr -= refs; + while (refs--) + put_page(head); + return 0; + } + + /* + * Any tail pages need their mapcount reference taken before we + * return. (This allows the THP code to bump their ref count when + * they are split into base pages). 
+ */ + while (refs--) { + if (PageTail(tail)) + get_huge_page_tail(tail); + tail++; + } + + return 1; +} + +static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + struct page *head, *page, *tail; + int refs; + + if (write && !pud_write(orig)) + return 0; + + refs = 0; + head = pud_page(orig); + page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + tail = page; + do { + VM_BUG_ON_PAGE(compound_head(page) != head, page); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; + } + + if (unlikely(pud_val(orig) != pud_val(*pudp))) { + *nr -= refs; + while (refs--) + put_page(head); + return 0; + } + + while (refs--) { + if (PageTail(tail)) + get_huge_page_tail(tail); + tail++; + } + + return 1; +} + +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pmd_t *pmdp; + + pmdp = pmd_offset(&pud, addr); + do { + pmd_t pmd = ACCESS_ONCE(*pmdp); + + next = pmd_addr_end(addr, end); + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) + return 0; + + if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they + * can be serialised against THP migration. + */ + if (pmd_numa(pmd)) + return 0; + + if (!gup_huge_pmd(pmd, pmdp, addr, next, write, + pages, nr)) + return 0; + + } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) + return 0; + } while (pmdp++, addr = next, addr != end); + + return 1; +} + +static int gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pud_t *pudp; + + pudp = pud_offset(pgdp, addr); + do { + pud_t pud = ACCESS_ONCE(*pudp); + + next = pud_addr_end(addr, end); + if (pud_none(pud)) + return 0; + if (pud_huge(pud)) { + if (!gup_huge_pud(pud, pudp, addr, next, write, + pages, nr)) + return 0; + } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + return 0; + } while (pudp++, addr = next, addr != end); + + return 1; +} + +/* + * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to + * the regular GUP. It will only return non-negative values. + */ +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + unsigned long addr, len, end; + unsigned long next, flags; + pgd_t *pgdp; + int nr = 0; + + start &= PAGE_MASK; + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; + + if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, + start, len))) + return 0; + + /* + * Disable interrupts. We use the nested form as we can already have + * interrupts disabled by get_futex_key. + * + * With interrupts disabled, we block page table pages from being + * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h + * for more details. + * + * We do not adopt an rcu_read_lock(.) here as we also want to + * block IPIs that come from THPs splitting. 
+ */ + + local_irq_save(flags); + pgdp = pgd_offset(mm, addr); + do { + next = pgd_addr_end(addr, end); + if (pgd_none(*pgdp)) + break; + else if (!gup_pud_range(pgdp, addr, next, write, pages, &nr)) + break; + } while (pgdp++, addr = next, addr != end); + local_irq_restore(flags); + + return nr; +} + +/** + * get_user_pages_fast() - pin user pages in memory + * @start: starting user address + * @nr_pages: number of pages from start to pin + * @write: whether pages will be written to + * @pages: array that receives pointers to the pages pinned. + * Should be at least nr_pages long. + * + * Attempt to pin user pages in memory without taking mm->mmap_sem. + * If not successful, it will fall back to taking the lock and + * calling get_user_pages(). + * + * Returns number of pages pinned. This may be fewer than the number + * requested. If nr_pages is 0 or negative, returns 0. If no pages + * were pinned, returns -errno. + */ +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + int nr, ret; + + start &= PAGE_MASK; + nr = __get_user_pages_fast(start, nr_pages, write, pages); + ret = nr; + + if (nr < nr_pages) { + /* Try to get the remaining pages with get_user_pages */ + start += nr << PAGE_SHIFT; + pages += nr; + + down_read(&mm->mmap_sem); + ret = get_user_pages(current, mm, start, + nr_pages - nr, write, 0, pages, NULL); + up_read(&mm->mmap_sem); + + /* Have to be a bit careful with return values */ + if (nr > 0) { + if (ret < 0) + ret = nr; + else + ret += nr; + } + } + + return ret; +} + +#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */ From bd951303be5b4df578c7f30ef78839f1a9d6658c Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 9 Oct 2014 15:29:16 -0700 Subject: [PATCH 131/164] arm: mm: introduce special ptes for LPAE We need a mechanism to tag ptes as being special, this indicates that no attempt should be made to access the underlying struct page * associated with the pte. This is used by the fast_gup when operating on ptes as it has no means to access VMAs (that also contain this information) locklessly. The L_PTE_SPECIAL bit is already allocated for LPAE, this patch modifies pte_special and pte_mkspecial to make use of it, and defines __HAVE_ARCH_PTE_SPECIAL. This patch also excludes special ptes from the icache/dcache sync logic. 
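For background (not part of this patch): special ptes are the ones that may have no usable struct page behind them, for example those installed for VM_PFNMAP/VM_MIXEDMAP mappings. Simplified from insert_pfn() in mm/memory.c:

    entry = pte_mkspecial(pfn_pte(pfn, prot));
    set_pte_at(mm, addr, ptep, entry);

This is exactly what the lockless fast_gup walker must be able to detect, so it can punt such mappings to the slow GUP path instead of trying to pin a page that may not exist.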
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Cc: Dann Frazier Cc: Hugh Dickins Cc: Russell King Cc: Mark Rutland Cc: Mel Gorman Cc: Will Deacon Cc: Christoffer Dall Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/pgtable-2level.h | 2 ++ arch/arm/include/asm/pgtable-3level.h | 7 +++++++ arch/arm/include/asm/pgtable.h | 6 ++---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 219ac88a9542..f0279411847d 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -182,6 +182,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define pmd_addr_end(addr,end) (end) #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +#define pte_special(pte) (0) +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * We don't have huge page support for short descriptors, for the moment diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 06e0bc0f8b00..16122d4d7081 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -213,6 +213,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) +#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= L_PTE_SPECIAL; + return pte; +} +#define __HAVE_ARCH_PTE_SPECIAL #define __HAVE_ARCH_PMD_WRITE #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY)) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 01baef07cd0c..90aa4583b308 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -226,7 +226,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY)) #define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG)) #define pte_exec(pte) (pte_isclear((pte), L_PTE_XN)) -#define pte_special(pte) (0) #define pte_valid_user(pte) \ (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte)) @@ -245,7 +244,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, unsigned long ext = 0; if (addr < TASK_SIZE && pte_valid_user(pteval)) { - __sync_icache_dcache(pteval); + if (!pte_special(pteval)) + __sync_icache_dcache(pteval); ext |= PTE_EXT_NG; } @@ -264,8 +264,6 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN); PTE_BIT_FUNC(mknexec, |= L_PTE_XN); -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | From a0ad5496b2b3accf09ab9485ad0170e3b4b1cb27 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 9 Oct 2014 15:29:18 -0700 Subject: [PATCH 132/164] arm: mm: enable HAVE_RCU_TABLE_FREE logic In order to implement fast_get_user_pages we need to ensure that the page table walker is protected from page table pages being freed from under it. This patch enables HAVE_RCU_TABLE_FREE, any page table pages belonging to address spaces with multiple users will be call_rcu_sched freed. Meaning that disabling interrupts will block the free and protect the fast gup page walker. 
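The ordering this buys the fast walker can be sketched as follows (conceptual only, not code from this patch):

    /*
     * walker CPU:   local_irq_save(flags);
     *               ... read pgd/pud/pmd/pte entries ...
     *               local_irq_restore(flags);
     *
     * unmapper:     __pte_free_tlb()/__pmd_free_tlb()
     *                 -> tlb_remove_table()
     *                 -> call_rcu_sched() to free the table page
     *
     * The rcu_sched callback cannot run until every CPU has passed
     * through a quiescent state, and a CPU with interrupts disabled
     * cannot do so; hence the table pages stay valid for the whole walk.
     */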
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Cc: Dann Frazier Cc: Hugh Dickins Cc: Russell King Cc: Mark Rutland Cc: Mel Gorman Cc: Will Deacon Cc: Christoffer Dall Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/Kconfig | 1 + arch/arm/include/asm/tlb.h | 38 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 36d47987a9e0..eafe6aea64ff 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -62,6 +62,7 @@ config ARM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f1a0dace3efe..3cadb726ec88 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -35,12 +35,39 @@ #define MMU_GATHER_BUNDLE 8 +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +static inline void __tlb_remove_table(void *_table) +{ + free_page_and_swap_cache((struct page *)_table); +} + +struct mmu_table_batch { + struct rcu_head rcu; + unsigned int nr; + void *tables[0]; +}; + +#define MAX_TABLE_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) + +extern void tlb_table_flush(struct mmu_gather *tlb); +extern void tlb_remove_table(struct mmu_gather *tlb, void *table); + +#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry) +#else +#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ + /* * TLB handling. This allows us to remove pages from the page * tables, and efficiently handle the TLB issues. */ struct mmu_gather { struct mm_struct *mm; +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + struct mmu_table_batch *batch; + unsigned int need_flush; +#endif unsigned int fullmm; struct vm_area_struct *vma; unsigned long start, end; @@ -101,6 +128,9 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { tlb_flush(tlb); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb_table_flush(tlb); +#endif } static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) @@ -129,6 +159,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start tlb->pages = tlb->local; tlb->nr = 0; __tlb_alloc_page(tlb); + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb->batch = NULL; +#endif } static inline void @@ -205,7 +239,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, tlb_add_flush(tlb, addr + SZ_1M); #endif - tlb_remove_page(tlb, pte); + tlb_remove_entry(tlb, pte); } static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, @@ -213,7 +247,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, { #ifdef CONFIG_ARM_LPAE tlb_add_flush(tlb, addr); - tlb_remove_page(tlb, virt_to_page(pmdp)); + tlb_remove_entry(tlb, virt_to_page(pmdp)); #endif } From b8cd51afe05a98ef907e61c603d5c5b7ad6242d8 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 9 Oct 2014 15:29:20 -0700 Subject: [PATCH 133/164] arm: mm: enable RCU fast_gup Activate the RCU fast_gup for ARM. We also need to force THP splits to broadcast an IPI s.t. we block in the fast_gup page walker. As THP splits are comparatively rare, this should not lead to a noticeable performance degradation. Some pre-requisite functions pud_write and pud_page are also added. 
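The split/walker serialisation then works roughly as follows (an illustrative sketch: the splitter side mirrors the pmdp_splitting_flush() hunk in this patch, the walker side mirrors gup_pmd_range() added earlier in the series):

    /* splitter: mark the pmd, then wait for every CPU */
    set_pmd_at(vma->vm_mm, address, pmdp, pmd_mksplitting(*pmdp));
    kick_all_cpus_sync();   /* dummy IPI; completes only once all CPUs
                             * have interrupts enabled, i.e. once any
                             * in-flight fast_gup walk has finished */

    /* walker (mm/gup.c): bail out if the mark is already visible */
    pmd_t pmd = ACCESS_ONCE(*pmdp);
    if (pmd_none(pmd) || pmd_trans_splitting(pmd))
            return 0;       /* fall back to the slow GUP path */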
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Cc: Dann Frazier Cc: Hugh Dickins Cc: Russell King Cc: Mark Rutland Cc: Mel Gorman Cc: Will Deacon Cc: Christoffer Dall Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/Kconfig | 4 ++++ arch/arm/include/asm/pgtable-3level.h | 8 ++++++++ arch/arm/mm/flush.c | 15 +++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index eafe6aea64ff..18f392f8b744 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1661,6 +1661,10 @@ config ARCH_SELECT_MEMORY_MODEL config HAVE_ARCH_PFN_VALID def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM +config HAVE_GENERIC_RCU_GUP + def_bool y + depends on ARM_LPAE + config HIGHMEM bool "High Memory Support" depends on MMU diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 16122d4d7081..a31ecdad4b59 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -224,6 +224,8 @@ static inline pte_t pte_mkspecial(pte_t pte) #define __HAVE_ARCH_PMD_WRITE #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY)) #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY)) +#define pud_page(pud) pmd_page(__pmd(pud_val(pud))) +#define pud_write(pud) pmd_write(__pmd(pud_val(pud))) #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd)) #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) @@ -231,6 +233,12 @@ static inline pte_t pte_mkspecial(pte_t pte) #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd)) #define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING)) + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif #endif #define PMD_BIT_FUNC(fn,op) \ diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 43d54f5b26b9..265b836b3bd1 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -400,3 +400,18 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l */ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = pmd_mksplitting(*pmdp); + VM_BUG_ON(address & ~PMD_MASK); + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + + /* dummy IPI to serialise against fast_gup */ + kick_all_cpus_sync(); +} +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ From 5e5f6dc10546f5c03bc572e3ba3089af30c66e2d Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 9 Oct 2014 15:29:23 -0700 Subject: [PATCH 134/164] arm64: mm: enable HAVE_RCU_TABLE_FREE logic In order to implement fast_get_user_pages we need to ensure that the page table walker is protected from page table pages being freed from under it. This patch enables HAVE_RCU_TABLE_FREE, any page table pages belonging to address spaces with multiple users will be call_rcu_sched freed. Meaning that disabling interrupts will block the free and protect the fast gup page walker. 
Signed-off-by: Steve Capper Tested-by: Dann Frazier Acked-by: Catalin Marinas Cc: Hugh Dickins Cc: Russell King Cc: Mark Rutland Cc: Mel Gorman Cc: Will Deacon Cc: Christoffer Dall Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/tlb.h | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index e96cbe84d5ae..1ffd9a05206b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -57,6 +57,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 62731ef9749a..a82c0c5c8b52 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -23,6 +23,20 @@ #include +#include +#include + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + +#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry) +static inline void __tlb_remove_table(void *_table) +{ + free_page_and_swap_cache((struct page *)_table); +} +#else +#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ + /* * There's three ways the TLB shootdown code is used: * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). @@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { pgtable_page_dtor(pte); tlb_add_flush(tlb, addr); - tlb_remove_page(tlb, pte); + tlb_remove_entry(tlb, pte); } #if CONFIG_ARM64_PGTABLE_LEVELS > 2 @@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { tlb_add_flush(tlb, addr); - tlb_remove_page(tlb, virt_to_page(pmdp)); + tlb_remove_entry(tlb, virt_to_page(pmdp)); } #endif @@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, unsigned long addr) { tlb_add_flush(tlb, addr); - tlb_remove_page(tlb, virt_to_page(pudp)); + tlb_remove_entry(tlb, virt_to_page(pudp)); } #endif From 29e5694054149acd25b0d5538c95fb6d64478315 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 9 Oct 2014 15:29:25 -0700 Subject: [PATCH 135/164] arm64: mm: enable RCU fast_gup Activate the RCU fast_gup for ARM64. We also need to force THP splits to broadcast an IPI s.t. we block in the fast_gup page walker. As THP splits are comparatively rare, this should not lead to a noticeable performance degradation. Some pre-requisite functions pud_write and pud_page are also added. 
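With arm and arm64 both opted in, callers can now pin user pages without taking mmap_sem on the fast path. A minimal usage sketch (not from this patch; uaddr is a hypothetical user address):

    struct page *page;
    int ret;

    ret = get_user_pages_fast(uaddr, 1, 1 /* write */, &page);
    if (ret == 1) {
            /* page is pinned; the fast path took no mmap_sem */
            /* ... access the page contents ... */
            put_page(page);
    }

get_user_pages_fast() returns the number of pages actually pinned and transparently falls back to the locked get_user_pages() path when the lockless walk cannot make progress.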
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Steve Capper Tested-by: Dann Frazier Acked-by: Catalin Marinas Cc: Hugh Dickins Cc: Russell King Cc: Mark Rutland Cc: Mel Gorman Cc: Will Deacon Cc: Christoffer Dall Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 3 +++ arch/arm64/include/asm/pgtable.h | 21 ++++++++++++++++++++- arch/arm64/mm/flush.c | 16 ++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1ffd9a05206b..73bfb477ca0d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -111,6 +111,9 @@ config GENERIC_CALIBRATE_DELAY config ZONE_DMA def_bool y +config HAVE_GENERIC_RCU_GUP + def_bool y + config ARCH_DMA_ADDR_T_64BIT def_bool y diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index d58e40cde88e..464c5cecdd15 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -244,6 +244,16 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_PTE_SPECIAL +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + +static inline pmd_t pud_pmd(pud_t pud) +{ + return __pmd(pud_val(pud)); +} + static inline pte_t pmd_pte(pmd_t pmd) { return __pte(pmd_val(pmd)); @@ -261,7 +271,13 @@ static inline pmd_t pte_pmd(pte_t pte) #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) #define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd)) -#endif +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +struct vm_area_struct; +void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) @@ -282,6 +298,7 @@ static inline pmd_t pte_pmd(pte_t pte) #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) +#define pud_write(pud) pte_write(pud_pte(pud)) #define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT) #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)) @@ -381,6 +398,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); } +#define pud_page(pud) pmd_page(pud_pmd(pud)) + #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ #if CONFIG_ARM64_PGTABLE_LEVELS > 3 diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 0d64089d28b5..b6f14e8d2121 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -104,3 +104,19 @@ EXPORT_SYMBOL(flush_dcache_page); */ EXPORT_SYMBOL(flush_cache_all); EXPORT_SYMBOL(flush_icache_range); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = pmd_mksplitting(*pmdp); + + VM_BUG_ON(address & ~PMD_MASK); + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + + /* dummy IPI to serialise against fast_gup */ + kick_all_cpus_sync(); +} +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ From d6d86c0a7f8ddc5b38cf089222cb1d9540762dc2 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:27 -0700 Subject: [PATCH 136/164] 
mm/balloon_compaction: redesign ballooned pages management Sasha Levin reported KASAN splash inside isolate_migratepages_range(). Problem is in the function __is_movable_balloon_page() which tests AS_BALLOON_MAP in page->mapping->flags. This function has no protection against anonymous pages. As result it tried to check address space flags inside struct anon_vma. Further investigation shows more problems in current implementation: * Special branch in __unmap_and_move() never works: balloon_page_movable() checks page flags and page_count. In __unmap_and_move() page is locked, reference counter is elevated, thus balloon_page_movable() always fails. As a result execution goes to the normal migration path. virtballoon_migratepage() returns MIGRATEPAGE_BALLOON_SUCCESS instead of MIGRATEPAGE_SUCCESS, move_to_new_page() thinks this is an error code and assigns newpage->mapping to NULL. Newly migrated page lose connectivity with balloon an all ability for further migration. * lru_lock erroneously required in isolate_migratepages_range() for isolation ballooned page. This function releases lru_lock periodically, this makes migration mostly impossible for some pages. * balloon_page_dequeue have a tight race with balloon_page_isolate: balloon_page_isolate could be executed in parallel with dequeue between picking page from list and locking page_lock. Race is rare because they use trylock_page() for locking. This patch fixes all of them. Instead of fake mapping with special flag this patch uses special state of page->_mapcount: PAGE_BALLOON_MAPCOUNT_VALUE = -256. Buddy allocator uses PAGE_BUDDY_MAPCOUNT_VALUE = -128 for similar purpose. Storing mark directly in struct page makes everything safer and easier. PagePrivate is used to mark pages present in page list (i.e. not isolated, like PageLRU for normal pages). It replaces special rules for reference counter and makes balloon migration similar to migration of normal pages. This flag is protected by page_lock together with link to the balloon device. Signed-off-by: Konstantin Khlebnikov Reported-by: Sasha Levin Link: http://lkml.kernel.org/p/53E6CEAA.9020105@oracle.com Cc: Rafael Aquini Cc: Andrey Ryabinin Cc: [3.8+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/virtio_balloon.c | 15 +++-- include/linux/balloon_compaction.h | 97 ++++++++---------------------- include/linux/migrate.h | 11 +--- include/linux/mm.h | 19 ++++++ mm/balloon_compaction.c | 26 ++++---- mm/compaction.c | 2 +- mm/migrate.c | 16 ++--- 7 files changed, 68 insertions(+), 118 deletions(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 25ebe8eecdb7..c3eb93fc9261 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -163,8 +163,8 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) /* Find pfns pointing at start of each page, get pages and free them. 
*/ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { struct page *page = balloon_pfn_to_page(pfns[i]); - balloon_page_free(page); adjust_managed_page_count(page, 1); + put_page(page); /* balloon reference */ } } @@ -395,6 +395,8 @@ static int virtballoon_migratepage(struct address_space *mapping, if (!mutex_trylock(&vb->balloon_lock)) return -EAGAIN; + get_page(newpage); /* balloon reference */ + /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(newpage, mapping, &vb_dev_info->pages); @@ -404,12 +406,7 @@ static int virtballoon_migratepage(struct address_space *mapping, set_page_pfns(vb->pfns, newpage); tell_host(vb, vb->inflate_vq); - /* - * balloon's page migration 2nd step -- deflate "page" - * - * It's safe to delete page->lru here because this page is at - * an isolated migration list, and this step is expected to happen here - */ + /* balloon's page migration 2nd step -- deflate "page" */ balloon_page_delete(page); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb->pfns, page); @@ -417,7 +414,9 @@ static int virtballoon_migratepage(struct address_space *mapping, mutex_unlock(&vb->balloon_lock); - return MIGRATEPAGE_BALLOON_SUCCESS; + put_page(page); /* balloon reference */ + + return MIGRATEPAGE_SUCCESS; } /* define the balloon_mapping->a_ops callback to allow balloon page migration */ diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 089743ade734..38aa07d5b81c 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -27,10 +27,13 @@ * counter raised only while it is under our special handling; * * iii. after the lockless scan step have selected a potential balloon page for - * isolation, re-test the page->mapping flags and the page ref counter + * isolation, re-test the PageBalloon mark and the PagePrivate flag * under the proper page lock, to ensure isolating a valid balloon page * (not yet isolated, nor under release procedure) * + * iv. isolation or dequeueing procedure must clear PagePrivate flag under + * page lock together with removing page from balloon device page list. + * * The functions provided by this interface are placed to help on coping with * the aforementioned balloon page corner case, as well as to ensure the simple * set of exposed rules are satisfied while we are dealing with balloon pages @@ -71,28 +74,6 @@ static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) kfree(b_dev_info); } -/* - * balloon_page_free - release a balloon page back to the page free lists - * @page: ballooned page to be set free - * - * This function must be used to properly set free an isolated/dequeued balloon - * page at the end of a sucessful page migration, or at the balloon driver's - * page release procedure. - */ -static inline void balloon_page_free(struct page *page) -{ - /* - * Balloon pages always get an extra refcount before being isolated - * and before being dequeued to help on sorting out fortuite colisions - * between a thread attempting to isolate and another thread attempting - * to release the very same balloon page. - * - * Before we handle the page back to Buddy, lets drop its extra refcnt. 
- */ - put_page(page); - __free_page(page); -} - #ifdef CONFIG_BALLOON_COMPACTION extern bool balloon_page_isolate(struct page *page); extern void balloon_page_putback(struct page *page); @@ -108,74 +89,33 @@ static inline void balloon_mapping_free(struct address_space *balloon_mapping) } /* - * page_flags_cleared - helper to perform balloon @page ->flags tests. - * - * As balloon pages are obtained from buddy and we do not play with page->flags - * at driver level (exception made when we get the page lock for compaction), - * we can safely identify a ballooned page by checking if the - * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also - * helps us skip ballooned pages that are locked for compaction or release, thus - * mitigating their racy check at balloon_page_movable() - */ -static inline bool page_flags_cleared(struct page *page) -{ - return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP); -} - -/* - * __is_movable_balloon_page - helper to perform @page mapping->flags tests + * __is_movable_balloon_page - helper to perform @page PageBalloon tests */ static inline bool __is_movable_balloon_page(struct page *page) { - struct address_space *mapping = page->mapping; - return mapping_balloon(mapping); + return PageBalloon(page); } /* - * balloon_page_movable - test page->mapping->flags to identify balloon pages - * that can be moved by compaction/migration. - * - * This function is used at core compaction's page isolation scheme, therefore - * most pages exposed to it are not enlisted as balloon pages and so, to avoid - * undesired side effects like racing against __free_pages(), we cannot afford - * holding the page locked while testing page->mapping->flags here. + * balloon_page_movable - test PageBalloon to identify balloon pages + * and PagePrivate to check that the page is not + * isolated and can be moved by compaction/migration. * * As we might return false positives in the case of a balloon page being just - * released under us, the page->mapping->flags need to be re-tested later, - * under the proper page lock, at the functions that will be coping with the - * balloon page case. + * released under us, this need to be re-tested later, under the page lock. */ static inline bool balloon_page_movable(struct page *page) { - /* - * Before dereferencing and testing mapping->flags, let's make sure - * this is not a page that uses ->mapping in a different way - */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) == 1) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page) && PagePrivate(page); } /* * isolated_balloon_page - identify an isolated balloon page on private * compaction/migration page lists. - * - * After a compaction thread isolates a balloon page for migration, it raises - * the page refcount to prevent concurrent compaction threads from re-isolating - * the same page. For that reason putback_movable_pages(), or other routines - * that need to identify isolated balloon pages on private pagelists, cannot - * rely on balloon_page_movable() to accomplish the task. 
*/ static inline bool isolated_balloon_page(struct page *page) { - /* Already isolated balloon pages, by default, have a raised refcount */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) >= 2) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page); } /* @@ -192,6 +132,8 @@ static inline void balloon_page_insert(struct page *page, struct address_space *mapping, struct list_head *head) { + __SetPageBalloon(page); + SetPagePrivate(page); page->mapping = mapping; list_add(&page->lru, head); } @@ -206,8 +148,12 @@ static inline void balloon_page_insert(struct page *page, */ static inline void balloon_page_delete(struct page *page) { + __ClearPageBalloon(page); page->mapping = NULL; - list_del(&page->lru); + if (PagePrivate(page)) { + ClearPagePrivate(page); + list_del(&page->lru); + } } /* @@ -258,6 +204,11 @@ static inline void balloon_page_delete(struct page *page) list_del(&page->lru); } +static inline bool __is_movable_balloon_page(struct page *page) +{ + return false; +} + static inline bool balloon_page_movable(struct page *page) { return false; diff --git a/include/linux/migrate.h b/include/linux/migrate.h index b66fd10f4b93..01aad3ed89ec 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private); * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; * - zero on page migration success; - * - * The balloon page migration introduces this special case where a 'distinct' - * return code is used to flag a successful page migration to unmap_and_move(). - * This approach is necessary because page migration can race against balloon - * deflation procedure, and for such case we could introduce a nasty page leak - * if a successfully migrated balloon page gets released concurrently with - * migration's unmap_and_move() wrap-up steps. */ #define MIGRATEPAGE_SUCCESS 0 -#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page - * sucessful migration case. - */ + enum migrate_reason { MR_COMPACTION, MR_MEMORY_FAILURE, diff --git a/include/linux/mm.h b/include/linux/mm.h index 4d814aa97785..fa0d74e06428 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -554,6 +554,25 @@ static inline void __ClearPageBuddy(struct page *page) atomic_set(&page->_mapcount, -1); } +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) + +static inline int PageBalloon(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; +} + +static inline void __SetPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); + atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(!PageBalloon(page), page); + atomic_set(&page->_mapcount, -1); +} + void put_page(struct page *page); void put_pages_list(struct list_head *pages); diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 6e45a5074bf0..52abeeb3cb9d 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -93,17 +93,12 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) * to be released by the balloon driver. 
*/ if (trylock_page(page)) { + if (!PagePrivate(page)) { + /* raced with isolation */ + unlock_page(page); + continue; + } spin_lock_irqsave(&b_dev_info->pages_lock, flags); - /* - * Raise the page refcount here to prevent any wrong - * attempt to isolate this page, in case of coliding - * with balloon_page_isolate() just after we release - * the page lock. - * - * balloon_page_free() will take care of dropping - * this extra refcount later. - */ - get_page(page); balloon_page_delete(page); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); @@ -187,7 +182,9 @@ static inline void __isolate_balloon_page(struct page *page) { struct balloon_dev_info *b_dev_info = page->mapping->private_data; unsigned long flags; + spin_lock_irqsave(&b_dev_info->pages_lock, flags); + ClearPagePrivate(page); list_del(&page->lru); b_dev_info->isolated_pages++; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); @@ -197,7 +194,9 @@ static inline void __putback_balloon_page(struct page *page) { struct balloon_dev_info *b_dev_info = page->mapping->private_data; unsigned long flags; + spin_lock_irqsave(&b_dev_info->pages_lock, flags); + SetPagePrivate(page); list_add(&page->lru, &b_dev_info->pages); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); @@ -235,12 +234,11 @@ bool balloon_page_isolate(struct page *page) */ if (likely(trylock_page(page))) { /* - * A ballooned page, by default, has just one refcount. + * A ballooned page, by default, has PagePrivate set. * Prevent concurrent compaction threads from isolating - * an already isolated balloon page by refcount check. + * an already isolated balloon page by clearing it. */ - if (__is_movable_balloon_page(page) && - page_count(page) == 2) { + if (balloon_page_movable(page)) { __isolate_balloon_page(page); unlock_page(page); return true; diff --git a/mm/compaction.c b/mm/compaction.c index b9972c0fd917..edba18aed173 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -640,7 +640,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, */ if (!PageLRU(page)) { if (unlikely(balloon_page_movable(page))) { - if (locked && balloon_page_isolate(page)) { + if (balloon_page_isolate(page)) { /* Successfully isolated */ goto isolate_success; } diff --git a/mm/migrate.c b/mm/migrate.c index 2740360cd216..01439953abf5 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -876,7 +876,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, } } - if (unlikely(balloon_page_movable(page))) { + if (unlikely(isolated_balloon_page(page))) { /* * A ballooned page does not need any special attention from * physical to virtual reverse mapping procedures. @@ -955,17 +955,6 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, rc = __unmap_and_move(page, newpage, force, mode); - if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { - /* - * A ballooned page has been migrated already. - * Now, it's the time to wrap-up counters, - * handle the page back to Buddy and return. 
- */ - dec_zone_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); - balloon_page_free(page); - return MIGRATEPAGE_SUCCESS; - } out: if (rc != -EAGAIN) { /* @@ -988,6 +977,9 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, if (rc != MIGRATEPAGE_SUCCESS && put_new_page) { ClearPageSwapBacked(newpage); put_new_page(newpage, private); + } else if (unlikely(__is_movable_balloon_page(newpage))) { + /* drop our reference, page already in the balloon */ + put_page(newpage); } else putback_lru_page(newpage); From 9d1ba8056474a208ed9efb7e58cd014795d9f818 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:29 -0700 Subject: [PATCH 137/164] mm/balloon_compaction: remove balloon mapping and flag AS_BALLOON_MAP Now ballooned pages are detected using PageBalloon(). Fake mapping is no longer required. This patch links ballooned pages to balloon device using field page->private instead of page->mapping. Also this patch embeds balloon_dev_info directly into struct virtio_balloon. Signed-off-by: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/virtio_balloon.c | 60 ++++--------------- include/linux/balloon_compaction.h | 72 ++++++---------------- include/linux/pagemap.h | 18 +----- mm/balloon_compaction.c | 95 ++---------------------------- 4 files changed, 39 insertions(+), 206 deletions(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c3eb93fc9261..2bad7f9dd2ac 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -59,7 +59,7 @@ struct virtio_balloon * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE * to num_pages above. */ - struct balloon_dev_info *vb_dev_info; + struct balloon_dev_info vb_dev_info; /* Synchronize access/update to this struct virtio_balloon elements */ struct mutex balloon_lock; @@ -127,7 +127,7 @@ static void set_page_pfns(u32 pfns[], struct page *page) static void fill_balloon(struct virtio_balloon *vb, size_t num) { - struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; + struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; /* We can only do one array worth at a time. */ num = min(num, ARRAY_SIZE(vb->pfns)); @@ -171,7 +171,7 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) static void leak_balloon(struct virtio_balloon *vb, size_t num) { struct page *page; - struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; + struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; /* We can only do one array worth at a time. */ num = min(num, ARRAY_SIZE(vb->pfns)); @@ -353,12 +353,11 @@ static int init_vqs(struct virtio_balloon *vb) return 0; } -static const struct address_space_operations virtio_balloon_aops; #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of * a compation thread. (called under page lock) - * @mapping: the page->mapping which will be assigned to the new migrated page. + * @vb_dev_info: the balloon device * @newpage: page that will replace the isolated page after migration finishes. * @page : the isolated (old) page that is about to be migrated to newpage. * @mode : compaction mode -- not used for balloon page migration. @@ -373,17 +372,13 @@ static const struct address_space_operations virtio_balloon_aops; * This function preforms the balloon page migration task. 
* Called through balloon_mapping->a_ops->migratepage */ -static int virtballoon_migratepage(struct address_space *mapping, +static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, struct page *newpage, struct page *page, enum migrate_mode mode) { - struct balloon_dev_info *vb_dev_info = balloon_page_device(page); - struct virtio_balloon *vb; + struct virtio_balloon *vb = container_of(vb_dev_info, + struct virtio_balloon, vb_dev_info); unsigned long flags; - BUG_ON(!vb_dev_info); - - vb = vb_dev_info->balloon_device; - /* * In order to avoid lock contention while migrating pages concurrently * to leak_balloon() or fill_balloon() we just give up the balloon_lock @@ -399,7 +394,7 @@ static int virtballoon_migratepage(struct address_space *mapping, /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); - balloon_page_insert(newpage, mapping, &vb_dev_info->pages); + balloon_page_insert(vb_dev_info, newpage); vb_dev_info->isolated_pages--; spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; @@ -418,18 +413,11 @@ static int virtballoon_migratepage(struct address_space *mapping, return MIGRATEPAGE_SUCCESS; } - -/* define the balloon_mapping->a_ops callback to allow balloon page migration */ -static const struct address_space_operations virtio_balloon_aops = { - .migratepage = virtballoon_migratepage, -}; #endif /* CONFIG_BALLOON_COMPACTION */ static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; - struct address_space *vb_mapping; - struct balloon_dev_info *vb_devinfo; int err; vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); @@ -445,30 +433,14 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; vb->need_stats_update = 0; - vb_devinfo = balloon_devinfo_alloc(vb); - if (IS_ERR(vb_devinfo)) { - err = PTR_ERR(vb_devinfo); - goto out_free_vb; - } - - vb_mapping = balloon_mapping_alloc(vb_devinfo, - (balloon_compaction_check()) ? - &virtio_balloon_aops : NULL); - if (IS_ERR(vb_mapping)) { - /* - * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP - * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off. - */ - err = PTR_ERR(vb_mapping); - if (err != -EOPNOTSUPP) - goto out_free_vb_devinfo; - } - - vb->vb_dev_info = vb_devinfo; + balloon_devinfo_init(&vb->vb_dev_info); +#ifdef CONFIG_BALLOON_COMPACTION + vb->vb_dev_info.migratepage = virtballoon_migratepage; +#endif err = init_vqs(vb); if (err) - goto out_free_vb_mapping; + goto out_free_vb; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { @@ -480,10 +452,6 @@ static int virtballoon_probe(struct virtio_device *vdev) out_del_vqs: vdev->config->del_vqs(vdev); -out_free_vb_mapping: - balloon_mapping_free(vb_mapping); -out_free_vb_devinfo: - balloon_devinfo_free(vb_devinfo); out_free_vb: kfree(vb); out: @@ -509,8 +477,6 @@ static void virtballoon_remove(struct virtio_device *vdev) kthread_stop(vb->thread); remove_common(vb); - balloon_mapping_free(vb->vb_dev_info->mapping); - balloon_devinfo_free(vb->vb_dev_info); kfree(vb); } diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 38aa07d5b81c..bc3d2985cc9a 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -57,21 +57,22 @@ * balloon driver as a page book-keeper for its registered balloon devices. 
*/ struct balloon_dev_info { - void *balloon_device; /* balloon device descriptor */ - struct address_space *mapping; /* balloon special page->mapping */ unsigned long isolated_pages; /* # of isolated pages for migration */ spinlock_t pages_lock; /* Protection to pages list */ struct list_head pages; /* Pages enqueued & handled to Host */ + int (*migratepage)(struct balloon_dev_info *, struct page *newpage, + struct page *page, enum migrate_mode mode); }; extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); -extern struct balloon_dev_info *balloon_devinfo_alloc( - void *balloon_dev_descriptor); -static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) +static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { - kfree(b_dev_info); + balloon->isolated_pages = 0; + spin_lock_init(&balloon->pages_lock); + INIT_LIST_HEAD(&balloon->pages); + balloon->migratepage = NULL; } #ifdef CONFIG_BALLOON_COMPACTION @@ -79,14 +80,6 @@ extern bool balloon_page_isolate(struct page *page); extern void balloon_page_putback(struct page *page); extern int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode); -extern struct address_space -*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info, - const struct address_space_operations *a_ops); - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) -{ - kfree(balloon_mapping); -} /* * __is_movable_balloon_page - helper to perform @page PageBalloon tests @@ -120,27 +113,25 @@ static inline bool isolated_balloon_page(struct page *page) /* * balloon_page_insert - insert a page into the balloon's page list and make - * the page->mapping assignment accordingly. + * the page->private assignment accordingly. + * @balloon : pointer to balloon device * @page : page to be assigned as a 'balloon page' - * @mapping : allocated special 'balloon_mapping' - * @head : balloon's device page list head * * Caller must ensure the page is locked and the spin_lock protecting balloon * pages list is held before inserting a page into the balloon device. */ -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { __SetPageBalloon(page); SetPagePrivate(page); - page->mapping = mapping; - list_add(&page->lru, head); + set_page_private(page, (unsigned long)balloon); + list_add(&page->lru, &balloon->pages); } /* * balloon_page_delete - delete a page from balloon's page list and clear - * the page->mapping assignement accordingly. + * the page->private assignement accordingly. 
* @page : page to be released from balloon's page list * * Caller must ensure the page is locked and the spin_lock protecting balloon @@ -149,7 +140,7 @@ static inline void balloon_page_insert(struct page *page, static inline void balloon_page_delete(struct page *page) { __ClearPageBalloon(page); - page->mapping = NULL; + set_page_private(page, 0); if (PagePrivate(page)) { ClearPagePrivate(page); list_del(&page->lru); @@ -162,11 +153,7 @@ static inline void balloon_page_delete(struct page *page) */ static inline struct balloon_dev_info *balloon_page_device(struct page *page) { - struct address_space *mapping = page->mapping; - if (likely(mapping)) - return mapping->private_data; - - return NULL; + return (struct balloon_dev_info *)page_private(page); } static inline gfp_t balloon_mapping_gfp_mask(void) @@ -174,29 +161,12 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER_MOVABLE; } -static inline bool balloon_compaction_check(void) -{ - return true; -} - #else /* !CONFIG_BALLOON_COMPACTION */ -static inline void *balloon_mapping_alloc(void *balloon_device, - const struct address_space_operations *a_ops) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) -{ - return; -} - -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) -{ - list_add(&page->lru, head); + list_add(&page->lru, &balloon->pages); } static inline void balloon_page_delete(struct page *page) @@ -240,9 +210,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER; } -static inline bool balloon_compaction_check(void) -{ - return false; -} #endif /* CONFIG_BALLOON_COMPACTION */ #endif /* _LINUX_BALLOON_COMPACTION_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 19191d39c4f3..7ea069cd3257 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -24,8 +24,7 @@ enum mapping_flags { AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ - AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ - AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */ + AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */ }; static inline void mapping_set_error(struct address_space *mapping, int error) @@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping) return !!mapping; } -static inline void mapping_set_balloon(struct address_space *mapping) -{ - set_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline void mapping_clear_balloon(struct address_space *mapping) -{ - clear_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline int mapping_balloon(struct address_space *mapping) -{ - return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags); -} - static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 52abeeb3cb9d..3afdabdbc0a4 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -10,32 +10,6 @@ #include #include -/* - * balloon_devinfo_alloc - allocates a balloon device information descriptor. 
- * @balloon_dev_descriptor: pointer to reference the balloon device which - * this struct balloon_dev_info will be servicing. - * - * Driver must call it to properly allocate and initialize an instance of - * struct balloon_dev_info which will be used to reference a balloon device - * as well as to keep track of the balloon device page list. - */ -struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor) -{ - struct balloon_dev_info *b_dev_info; - b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL); - if (!b_dev_info) - return ERR_PTR(-ENOMEM); - - b_dev_info->balloon_device = balloon_dev_descriptor; - b_dev_info->mapping = NULL; - b_dev_info->isolated_pages = 0; - spin_lock_init(&b_dev_info->pages_lock); - INIT_LIST_HEAD(&b_dev_info->pages); - - return b_dev_info; -} -EXPORT_SYMBOL_GPL(balloon_devinfo_alloc); - /* * balloon_page_enqueue - allocates a new page and inserts it into the balloon * page list. @@ -61,7 +35,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) */ BUG_ON(!trylock_page(page)); spin_lock_irqsave(&b_dev_info->pages_lock, flags); - balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages); + balloon_page_insert(b_dev_info, page); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); return page; @@ -127,60 +101,10 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) EXPORT_SYMBOL_GPL(balloon_page_dequeue); #ifdef CONFIG_BALLOON_COMPACTION -/* - * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages. - * @b_dev_info: holds the balloon device information descriptor. - * @a_ops: balloon_mapping address_space_operations descriptor. - * - * Driver must call it to properly allocate and initialize an instance of - * struct address_space which will be used as the special page->mapping for - * balloon device enlisted page instances. - */ -struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info, - const struct address_space_operations *a_ops) -{ - struct address_space *mapping; - - mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); - if (!mapping) - return ERR_PTR(-ENOMEM); - - /* - * Give a clean 'zeroed' status to all elements of this special - * balloon page->mapping struct address_space instance. - */ - address_space_init_once(mapping); - - /* - * Set mapping->flags appropriately, to allow balloon pages - * ->mapping identification. - */ - mapping_set_balloon(mapping); - mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask()); - - /* balloon's page->mapping->a_ops callback descriptor */ - mapping->a_ops = a_ops; - - /* - * Establish a pointer reference back to the balloon device descriptor - * this particular page->mapping will be servicing. - * This is used by compaction / migration procedures to identify and - * access the balloon device pageset while isolating / migrating pages. - * - * As some balloon drivers can register multiple balloon devices - * for a single guest, this also helps compaction / migration to - * properly deal with multiple balloon pagesets, when required. 
- */ - mapping->private_data = b_dev_info; - b_dev_info->mapping = mapping; - - return mapping; -} -EXPORT_SYMBOL_GPL(balloon_mapping_alloc); static inline void __isolate_balloon_page(struct page *page) { - struct balloon_dev_info *b_dev_info = page->mapping->private_data; + struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); @@ -192,7 +116,7 @@ static inline void __isolate_balloon_page(struct page *page) static inline void __putback_balloon_page(struct page *page) { - struct balloon_dev_info *b_dev_info = page->mapping->private_data; + struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); @@ -202,12 +126,6 @@ static inline void __putback_balloon_page(struct page *page) spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); } -static inline int __migrate_balloon_page(struct address_space *mapping, - struct page *newpage, struct page *page, enum migrate_mode mode) -{ - return page->mapping->a_ops->migratepage(mapping, newpage, page, mode); -} - /* __isolate_lru_page() counterpart for a ballooned page */ bool balloon_page_isolate(struct page *page) { @@ -274,7 +192,7 @@ void balloon_page_putback(struct page *page) int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode) { - struct address_space *mapping; + struct balloon_dev_info *balloon = balloon_page_device(page); int rc = -EAGAIN; /* @@ -290,9 +208,8 @@ int balloon_page_migrate(struct page *newpage, return rc; } - mapping = page->mapping; - if (mapping) - rc = __migrate_balloon_page(mapping, newpage, page, mode); + if (balloon && balloon->migratepage) + rc = balloon->migratepage(balloon, newpage, page, mode); unlock_page(newpage); return rc; From 09316c09dde33aae14f34489d9e3d243ec0d5938 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:32 -0700 Subject: [PATCH 138/164] mm/balloon_compaction: add vmstat counters and kpageflags bit Always mark pages with PageBalloon even if balloon compaction is disabled and expose this mark in /proc/kpageflags as KPF_BALLOON. Also this patch adds three counters into /proc/vmstat: "balloon_inflate", "balloon_deflate" and "balloon_migrate". They accumulate balloon activity. Current size of balloon is (balloon_inflate - balloon_deflate) pages. All generic balloon code now gathered under option CONFIG_MEMORY_BALLOON. It should be selected by ballooning driver which wants use this feature. Currently virtio-balloon is the only user. Signed-off-by: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/Kconfig | 1 + drivers/virtio/virtio_balloon.c | 1 + fs/proc/page.c | 3 +++ include/linux/balloon_compaction.h | 2 ++ include/linux/vm_event_item.h | 7 +++++++ include/uapi/linux/kernel-page-flags.h | 1 + mm/Kconfig | 7 ++++++- mm/Makefile | 3 ++- mm/balloon_compaction.c | 2 ++ mm/vmstat.c | 12 +++++++++++- tools/vm/page-types.c | 1 + 11 files changed, 37 insertions(+), 3 deletions(-) diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index c6683f2e396c..00b228638274 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -25,6 +25,7 @@ config VIRTIO_PCI config VIRTIO_BALLOON tristate "Virtio balloon driver" depends on VIRTIO + select MEMORY_BALLOON ---help--- This driver supports increasing and decreasing the amount of memory within a KVM guest. 
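The changelog above notes that the current balloon size can be computed as (balloon_inflate - balloon_deflate) pages. As a rough illustration of how the new /proc/vmstat counters might be consumed, here is a minimal userspace sketch; it is not part of this patch, and it simply reports zero when the counters are absent (e.g. CONFIG_MEMORY_BALLOON=n):

/* illustrative reader for the counters added by this patch */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long val, inflate = 0, deflate = 0, migrate = 0;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, "balloon_inflate"))
			inflate = val;
		else if (!strcmp(name, "balloon_deflate"))
			deflate = val;
		else if (!strcmp(name, "balloon_migrate"))
			migrate = val;
	}
	fclose(f);
	/* current balloon size = balloon_inflate - balloon_deflate */
	printf("balloon size: %lu pages (%lu migrated)\n",
	       inflate - deflate, migrate);
	return 0;
}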
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 2bad7f9dd2ac..f893148a107b 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -396,6 +396,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(vb_dev_info, newpage); vb_dev_info->isolated_pages--; + __count_vm_event(BALLOON_MIGRATE); spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb->pfns, newpage); diff --git a/fs/proc/page.c b/fs/proc/page.c index e647c55275d9..1e3187da1fed 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -133,6 +133,9 @@ u64 stable_page_flags(struct page *page) if (PageBuddy(page)) u |= 1 << KPF_BUDDY; + if (PageBalloon(page)) + u |= 1 << KPF_BALLOON; + u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); u |= kpf_copy_bit(k, KPF_SLAB, PG_slab); diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index bc3d2985cc9a..9b0a15d06a4f 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -166,11 +166,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void) static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { + __SetPageBalloon(page); list_add(&page->lru, &balloon->pages); } static inline void balloon_page_delete(struct page *page) { + __ClearPageBalloon(page); list_del(&page->lru); } diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ced92345c963..730334cdf037 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif +#ifdef CONFIG_MEMORY_BALLOON + BALLOON_INFLATE, + BALLOON_DEFLATE, +#ifdef CONFIG_BALLOON_COMPACTION + BALLOON_MIGRATE, +#endif +#endif #ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h index 5116a0e48172..2f96d233c980 100644 --- a/include/uapi/linux/kernel-page-flags.h +++ b/include/uapi/linux/kernel-page-flags.h @@ -31,6 +31,7 @@ #define KPF_KSM 21 #define KPF_THP 22 +#define KPF_BALLOON 23 #endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/mm/Kconfig b/mm/Kconfig index 0ceb8a567dab..1d1ae6b078fd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -230,12 +230,17 @@ config SPLIT_PTLOCK_CPUS config ARCH_ENABLE_SPLIT_PMD_PTLOCK boolean +# +# support for memory balloon +config MEMORY_BALLOON + boolean + # # support for memory balloon compaction config BALLOON_COMPACTION bool "Allow for balloon memory compaction/migration" def_bool y - depends on COMPACTION && VIRTIO_BALLOON + depends on COMPACTION && MEMORY_BALLOON help Memory fragmentation introduced by ballooning might reduce significantly the number of 2MB contiguous memory blocks that can be diff --git a/mm/Makefile b/mm/Makefile index f8ed7ab417b1..1f534a7f0a71 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -16,7 +16,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ - compaction.o balloon_compaction.o vmacache.o \ + compaction.o vmacache.o \ interval_tree.o list_lru.o workingset.o \ iov_iter.o debug.o $(mmu-y) @@ -67,3 +67,4 @@ obj-$(CONFIG_ZBUD) += zbud.o 
obj-$(CONFIG_ZSMALLOC) += zsmalloc.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o +obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 3afdabdbc0a4..b3cbe19f71b5 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -36,6 +36,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) BUG_ON(!trylock_page(page)); spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, page); + __count_vm_event(BALLOON_INFLATE); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); return page; @@ -74,6 +75,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) } spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_delete(page); + __count_vm_event(BALLOON_DEFLATE); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); dequeued_page = true; diff --git a/mm/vmstat.c b/mm/vmstat.c index e9ab104b956f..cce7c766da7a 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -735,7 +735,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, TEXT_FOR_HIGHMEM(xx) xx "_movable", const char * const vmstat_text[] = { - /* Zoned VM counters */ + /* enum zone_stat_item countes */ "nr_free_pages", "nr_alloc_batch", "nr_inactive_anon", @@ -778,10 +778,13 @@ const char * const vmstat_text[] = { "workingset_nodereclaim", "nr_anon_transparent_hugepages", "nr_free_cma", + + /* enum writeback_stat_item counters */ "nr_dirty_threshold", "nr_dirty_background_threshold", #ifdef CONFIG_VM_EVENT_COUNTERS + /* enum vm_event_item counters */ "pgpgin", "pgpgout", "pswpin", @@ -860,6 +863,13 @@ const char * const vmstat_text[] = { "thp_zero_page_alloc", "thp_zero_page_alloc_failed", #endif +#ifdef CONFIG_MEMORY_BALLOON + "balloon_inflate", + "balloon_deflate", +#ifdef CONFIG_BALLOON_COMPACTION + "balloon_migrate", +#endif +#endif /* CONFIG_MEMORY_BALLOON */ #ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP "nr_tlb_remote_flush", diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index c4d6d2e20e0d..264fbc297e0b 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -132,6 +132,7 @@ static const char * const page_flag_names[] = { [KPF_NOPAGE] = "n:nopage", [KPF_KSM] = "x:ksm", [KPF_THP] = "t:thp", + [KPF_BALLOON] = "o:balloon", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] = "m:mlocked", From 0085d61fe05ec5a3739afb8ffb8a88130402633e Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:34 -0700 Subject: [PATCH 139/164] selftests/vm/transhuge-stress: stress test for memory compaction This tool induces memory fragmentation via sequential allocation of transparent huge pages and splitting off everything except their last sub-pages. It easily generates pressure to the memory compaction code. 
$ perf stat -e 'compaction:*' -e 'migrate:*' ./transhuge-stress transhuge-stress: allocate 7858 transhuge pages, using 15716 MiB virtual memory and 61 MiB of ram transhuge-stress: 1.653 s/loop, 0.210 ms/page, 9504.828 MiB/s 7858 succeed, 0 failed, 2439 different pages transhuge-stress: 1.537 s/loop, 0.196 ms/page, 10226.227 MiB/s 7858 succeed, 0 failed, 2364 different pages transhuge-stress: 1.658 s/loop, 0.211 ms/page, 9479.215 MiB/s 7858 succeed, 0 failed, 2179 different pages transhuge-stress: 1.617 s/loop, 0.206 ms/page, 9716.992 MiB/s 7858 succeed, 0 failed, 2421 different pages ^C./transhuge-stress: Interrupt Performance counter stats for './transhuge-stress': 1.744.051 compaction:mm_compaction_isolate_migratepages 1.014 compaction:mm_compaction_isolate_freepages 1.744.051 compaction:mm_compaction_migratepages 1.647 compaction:mm_compaction_begin 1.647 compaction:mm_compaction_end 1.744.051 migrate:mm_migrate_pages 0 migrate:mm_numa_migrate_ratelimit 7,964696835 seconds time elapsed Signed-off-by: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/selftests/vm/Makefile | 1 + tools/testing/selftests/vm/transhuge-stress.c | 144 ++++++++++++++++++ 2 files changed, 145 insertions(+) create mode 100644 tools/testing/selftests/vm/transhuge-stress.c diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 3f94e1afd6cf..4c4b1f631ecf 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -3,6 +3,7 @@ CC = $(CROSS_COMPILE)gcc CFLAGS = -Wall BINARIES = hugepage-mmap hugepage-shm map_hugetlb thuge-gen hugetlbfstest +BINARIES += transhuge-stress all: $(BINARIES) %: %.c diff --git a/tools/testing/selftests/vm/transhuge-stress.c b/tools/testing/selftests/vm/transhuge-stress.c new file mode 100644 index 000000000000..fd7f1b4a96f9 --- /dev/null +++ b/tools/testing/selftests/vm/transhuge-stress.c @@ -0,0 +1,144 @@ +/* + * Stress test for transparent huge pages, memory compaction and migration. + * + * Authors: Konstantin Khlebnikov + * + * This is free and unencumbered software released into the public domain. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PAGE_SHIFT 12 +#define HPAGE_SHIFT 21 + +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define HPAGE_SIZE (1 << HPAGE_SHIFT) + +#define PAGEMAP_PRESENT(ent) (((ent) & (1ull << 63)) != 0) +#define PAGEMAP_PFN(ent) ((ent) & ((1ull << 55) - 1)) + +int pagemap_fd; + +int64_t allocate_transhuge(void *ptr) +{ + uint64_t ent[2]; + + /* drop pmd */ + if (mmap(ptr, HPAGE_SIZE, PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_ANONYMOUS | + MAP_NORESERVE | MAP_PRIVATE, -1, 0) != ptr) + errx(2, "mmap transhuge"); + + if (madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE)) + err(2, "MADV_HUGEPAGE"); + + /* allocate transparent huge page */ + *(volatile void **)ptr = ptr; + + if (pread(pagemap_fd, ent, sizeof(ent), + (uintptr_t)ptr >> (PAGE_SHIFT - 3)) != sizeof(ent)) + err(2, "read pagemap"); + + if (PAGEMAP_PRESENT(ent[0]) && PAGEMAP_PRESENT(ent[1]) && + PAGEMAP_PFN(ent[0]) + 1 == PAGEMAP_PFN(ent[1]) && + !(PAGEMAP_PFN(ent[0]) & ((1 << (HPAGE_SHIFT - PAGE_SHIFT)) - 1))) + return PAGEMAP_PFN(ent[0]); + + return -1; +} + +int main(int argc, char **argv) +{ + size_t ram, len; + void *ptr, *p; + struct timespec a, b; + double s; + uint8_t *map; + size_t map_len; + + ram = sysconf(_SC_PHYS_PAGES); + if (ram > SIZE_MAX / sysconf(_SC_PAGESIZE) / 4) + ram = SIZE_MAX / 4; + else + ram *= sysconf(_SC_PAGESIZE); + + if (argc == 1) + len = ram; + else if (!strcmp(argv[1], "-h")) + errx(1, "usage: %s [size in MiB]", argv[0]); + else + len = atoll(argv[1]) << 20; + + warnx("allocate %zd transhuge pages, using %zd MiB virtual memory" + " and %zd MiB of ram", len >> HPAGE_SHIFT, len >> 20, + len >> (20 + HPAGE_SHIFT - PAGE_SHIFT - 1)); + + pagemap_fd = open("/proc/self/pagemap", O_RDONLY); + if (pagemap_fd < 0) + err(2, "open pagemap"); + + len -= len % HPAGE_SIZE; + ptr = mmap(NULL, len + HPAGE_SIZE, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE, -1, 0); + if (ptr == MAP_FAILED) + err(2, "initial mmap"); + ptr += HPAGE_SIZE - (uintptr_t)ptr % HPAGE_SIZE; + + if (madvise(ptr, len, MADV_HUGEPAGE)) + err(2, "MADV_HUGEPAGE"); + + map_len = ram >> (HPAGE_SHIFT - 1); + map = malloc(map_len); + if (!map) + errx(2, "map malloc"); + + while (1) { + int nr_succeed = 0, nr_failed = 0, nr_pages = 0; + + memset(map, 0, map_len); + + clock_gettime(CLOCK_MONOTONIC, &a); + for (p = ptr; p < ptr + len; p += HPAGE_SIZE) { + int64_t pfn; + + pfn = allocate_transhuge(p); + + if (pfn < 0) { + nr_failed++; + } else { + size_t idx = pfn >> (HPAGE_SHIFT - PAGE_SHIFT); + + nr_succeed++; + if (idx >= map_len) { + map = realloc(map, idx + 1); + if (!map) + errx(2, "map realloc"); + memset(map + map_len, 0, idx + 1 - map_len); + map_len = idx + 1; + } + if (!map[idx]) + nr_pages++; + map[idx] = 1; + } + + /* split transhuge page, keep last page */ + if (madvise(p, HPAGE_SIZE - PAGE_SIZE, MADV_DONTNEED)) + err(2, "MADV_DONTNEED"); + } + clock_gettime(CLOCK_MONOTONIC, &b); + s = b.tv_sec - a.tv_sec + (b.tv_nsec - a.tv_nsec) / 1000000000.; + + warnx("%.3f s/loop, %.3f ms/page, %10.3f MiB/s\t" + "%4d succeed, %4d failed, %4d different pages", + s, s * 1000 / (len >> HPAGE_SHIFT), len / s / (1 << 20), + nr_succeed, nr_failed, nr_pages); + } +} From 2c0346a36cc8ac6cb85ab585964590974c84bdf0 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 9 Oct 2014 15:29:36 -0700 Subject: [PATCH 140/164] mm: mempolicy: skip inaccessible VMAs when setting MPOL_MF_LAZY PROT_NUMA VMAs are skipped to avoid problems distinguishing between present, prot_none and 
special entries. MPOL_MF_LAZY is not visible from userspace since commit a720094ded8c ("mm: mempolicy: Hide MPOL_NOOP and MPOL_MF_LAZY from userspace for now") but it should still skip VMAs the same way task_numa_work does. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Acked-by: Hugh Dickins Acked-by: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 008fb32936eb..e58725aff7e9 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -681,7 +681,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, } if (flags & MPOL_MF_LAZY) { - change_prot_numa(vma, start, endvma); + /* Similar to task_numa_work, skip inaccessible VMAs */ + if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) + change_prot_numa(vma, start, endvma); goto next; } From 86cf78d73de8c6bfa89804b91ee0ace71a459961 Mon Sep 17 00:00:00 2001 From: Sebastien Buisson Date: Thu, 9 Oct 2014 15:29:38 -0700 Subject: [PATCH 141/164] fs/buffer.c: increase the buffer-head per-CPU LRU size Increase the buffer-head per-CPU LRU size to allow efficient filesystem operations that access many blocks for each transaction. For example, creating a file in a large ext4 directory with quota enabled will access multiple buffer heads and will overflow the LRU at the default 8-block LRU size: * parent directory inode table block (ctime, nlinks for subdirs) * new inode bitmap * inode table block * 2 quota blocks * directory leaf block (not reused, but pollutes one cache entry) * 2 levels htree blocks (only one is reused, other pollutes cache) * 2 levels indirect/index blocks (only one is reused) The buffer-head per-CPU LRU size is raised to 16, as it shows in metadata performance benchmarks up to 10% gain for create, 4% for lookup and 7% for destroy. Signed-off-by: Liang Zhen Signed-off-by: Andreas Dilger Signed-off-by: Sebastien Buisson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/buffer.c b/fs/buffer.c index 7bd5c4685e98..44c14a87750e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1253,7 +1253,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh) * a local interrupt disable for that. */ -#define BH_LRU_SIZE 8 +#define BH_LRU_SIZE 16 struct bh_lru { struct buffer_head *bhs[BH_LRU_SIZE]; From f0d6d1f6ff6f8525cfa396ec1969b8f402391445 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 9 Oct 2014 15:29:41 -0700 Subject: [PATCH 142/164] CMA: document cma=0 It isn't obvious that CMA can be disabled on the kernel's command line, so document it. Signed-off-by: Jean Delvare Cc: Joonsoo Kim Cc: Greg Kroah-Hartman Cc: Akinobu Mita Cc: Chuck Ebbert Cc: Marek Szyprowski Cc: Konrad Rzeszutek Wilk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kernel-parameters.txt | 3 ++- drivers/base/Kconfig | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a126a31dde02..809e880bc787 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -656,7 +656,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Sets the size of kernel global memory area for contiguous memory allocations and optionally the placement constraint by the physical address range of - memory allocations. 
For more information, see + memory allocations. A value of 0 disables CMA + altogether. For more information, see include/linux/dma-contiguous.h cmo_free_hint= [PPC] Format: { yes | no } diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 134f763d90fd..61a33f4ba608 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -252,6 +252,9 @@ config DMA_CMA to allocate big physically-contiguous blocks of memory for use with hardware components that do not support I/O map nor scatter-gather. + You can disable CMA by specifying "cma=0" on the kernel's command + line. + For more information see . If unsure, say "n". From 7cc36bbddde5cd0c98f0c06e3304ab833d662565 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 9 Oct 2014 15:29:43 -0700 Subject: [PATCH 143/164] vmstat: on-demand vmstat workers V8 vmstat workers are used for folding counter differentials into the zone, per node and global counters at certain time intervals. They currently run at defined intervals on all processors which will cause some holdoff for processors that need minimal intrusion by the OS. The current vmstat_update mechanism depends on a deferrable timer firing every other second by default which registers a work queue item that runs on the local CPU, with the result that we have 1 interrupt and one additional schedulable task on each CPU every 2 seconds If a workload indeed causes VM activity or multiple tasks are running on a CPU, then there are probably bigger issues to deal with. However, some workloads dedicate a CPU for a single CPU bound task. This is done in high performance computing, in high frequency financial applications, in networking (Intel DPDK, EZchip NPS) and with the advent of systems with more and more CPUs over time, this may become more and more common to do since when one has enough CPUs one cares less about efficiently sharing a CPU with other tasks and more about efficiently monopolizing a CPU per task. The difference of having this timer firing and workqueue kernel thread scheduled per second can be enormous. An artificial test measuring the worst case time to do a simple "i++" in an endless loop on a bare metal system and under Linux on an isolated CPU with dynticks and with and without this patch, have Linux match the bare metal performance (~700 cycles) with this patch and loose by couple of orders of magnitude (~200k cycles) without it[*]. The loss occurs for something that just calculates statistics. For networking applications, for example, this could be the difference between dropping packets or sustaining line rate. Statistics are important and useful, but it would be great if there would be a way to not cause statistics gathering produce a huge performance difference. This patche does just that. This patch creates a vmstat shepherd worker that monitors the per cpu differentials on all processors. If there are differentials on a processor then a vmstat worker local to the processors with the differentials is created. That worker will then start folding the diffs in regular intervals. Should the worker find that there is no work to be done then it will make the shepherd worker monitor the differentials again. With this patch it is possible then to have periods longer than 2 seconds without any OS event on a "cpu" (hardware thread). The patch shows a very minor increased in system performance. 
hackbench -s 512 -l 2000 -g 15 -f 25 -P Results before the patch: Running in process mode with 15 groups using 50 file descriptors each (== 750 tasks) Each sender will pass 2000 messages of 512 bytes Time: 4.992 Running in process mode with 15 groups using 50 file descriptors each (== 750 tasks) Each sender will pass 2000 messages of 512 bytes Time: 4.971 Running in process mode with 15 groups using 50 file descriptors each (== 750 tasks) Each sender will pass 2000 messages of 512 bytes Time: 5.063 Hackbench after the patch: Running in process mode with 15 groups using 50 file descriptors each (== 750 tasks) Each sender will pass 2000 messages of 512 bytes Time: 4.973 Running in process mode with 15 groups using 50 file descriptors each (== 750 tasks) Each sender will pass 2000 messages of 512 bytes Time: 4.990 Running in process mode with 15 groups using 50 file descriptors each (== 750 tasks) Each sender will pass 2000 messages of 512 bytes Time: 4.993 [fengguang.wu@intel.com: cpu_stat_off can be static] Signed-off-by: Christoph Lameter Reviewed-by: Gilad Ben-Yossef Cc: Frederic Weisbecker Cc: Thomas Gleixner Cc: Tejun Heo Cc: John Stultz Cc: Mike Frysinger Cc: Minchan Kim Cc: Hakan Akkan Cc: Max Krasnyansky Cc: "Paul E. McKenney" Cc: Hugh Dickins Cc: Viresh Kumar Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Peter Zijlstra Signed-off-by: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmstat.c | 143 ++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 121 insertions(+), 22 deletions(-) diff --git a/mm/vmstat.c b/mm/vmstat.c index cce7c766da7a..1b12d390dc68 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -7,6 +7,7 @@ * zoned VM statistics * Copyright (C) 2006 Silicon Graphics, Inc., * Christoph Lameter + * Copyright (C) 2008-2014 Christoph Lameter */ #include #include @@ -14,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -419,13 +421,22 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item) EXPORT_SYMBOL(dec_zone_page_state); #endif -static inline void fold_diff(int *diff) + +/* + * Fold a differential into the global counters. + * Returns the number of counters updated. + */ +static int fold_diff(int *diff) { int i; + int changes = 0; for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) - if (diff[i]) + if (diff[i]) { atomic_long_add(diff[i], &vm_stat[i]); + changes++; + } + return changes; } /* @@ -441,12 +452,15 @@ static inline void fold_diff(int *diff) * statistics in the remote zone struct as well as the global cachelines * with the global counters. These could cause remote node cache line * bouncing and will have to be only done when necessary. + * + * The function returns the number of global counters updated. 
*/ -static void refresh_cpu_vm_stats(void) +static int refresh_cpu_vm_stats(void) { struct zone *zone; int i; int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; + int changes = 0; for_each_populated_zone(zone) { struct per_cpu_pageset __percpu *p = zone->pageset; @@ -486,15 +500,17 @@ static void refresh_cpu_vm_stats(void) continue; } - if (__this_cpu_dec_return(p->expire)) continue; - if (__this_cpu_read(p->pcp.count)) + if (__this_cpu_read(p->pcp.count)) { drain_zone_pages(zone, this_cpu_ptr(&p->pcp)); + changes++; + } #endif } - fold_diff(global_diff); + changes += fold_diff(global_diff); + return changes; } /* @@ -1239,20 +1255,108 @@ static const struct file_operations proc_vmstat_file_operations = { #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct delayed_work, vmstat_work); int sysctl_stat_interval __read_mostly = HZ; +static cpumask_var_t cpu_stat_off; static void vmstat_update(struct work_struct *w) { - refresh_cpu_vm_stats(); - schedule_delayed_work(this_cpu_ptr(&vmstat_work), - round_jiffies_relative(sysctl_stat_interval)); + if (refresh_cpu_vm_stats()) + /* + * Counters were updated so we expect more updates + * to occur in the future. Keep on running the + * update worker thread. + */ + schedule_delayed_work(this_cpu_ptr(&vmstat_work), + round_jiffies_relative(sysctl_stat_interval)); + else { + /* + * We did not update any counters so the app may be in + * a mode where it does not cause counter updates. + * We may be uselessly running vmstat_update. + * Defer the checking for differentials to the + * shepherd thread on a different processor. + */ + int r; + /* + * Shepherd work thread does not race since it never + * changes the bit if its zero but the cpu + * online / off line code may race if + * worker threads are still allowed during + * shutdown / startup. + */ + r = cpumask_test_and_set_cpu(smp_processor_id(), + cpu_stat_off); + VM_BUG_ON(r); + } } -static void start_cpu_timer(int cpu) +/* + * Check if the diffs for a certain cpu indicate that + * an update is needed. + */ +static bool need_update(int cpu) { - struct delayed_work *work = &per_cpu(vmstat_work, cpu); + struct zone *zone; - INIT_DEFERRABLE_WORK(work, vmstat_update); - schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); + for_each_populated_zone(zone) { + struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu); + + BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1); + /* + * The fast way of checking if there are any vmstat diffs. + * This works because the diffs are byte sized items. + */ + if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS)) + return true; + + } + return false; +} + + +/* + * Shepherd worker thread that checks the + * differentials of processors that have their worker + * threads for vm statistics updates disabled because of + * inactivity. 
+ */ +static void vmstat_shepherd(struct work_struct *w); + +static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd); + +static void vmstat_shepherd(struct work_struct *w) +{ + int cpu; + + get_online_cpus(); + /* Check processors whose vmstat worker threads have been disabled */ + for_each_cpu(cpu, cpu_stat_off) + if (need_update(cpu) && + cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) + + schedule_delayed_work_on(cpu, &per_cpu(vmstat_work, cpu), + __round_jiffies_relative(sysctl_stat_interval, cpu)); + + put_online_cpus(); + + schedule_delayed_work(&shepherd, + round_jiffies_relative(sysctl_stat_interval)); + +} + +static void __init start_shepherd_timer(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), + vmstat_update); + + if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL)) + BUG(); + cpumask_copy(cpu_stat_off, cpu_online_mask); + + schedule_delayed_work(&shepherd, + round_jiffies_relative(sysctl_stat_interval)); } static void vmstat_cpu_dead(int node) @@ -1283,17 +1387,17 @@ static int vmstat_cpuup_callback(struct notifier_block *nfb, case CPU_ONLINE: case CPU_ONLINE_FROZEN: refresh_zone_stat_thresholds(); - start_cpu_timer(cpu); node_set_state(cpu_to_node(cpu), N_CPU); + cpumask_set_cpu(cpu, cpu_stat_off); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu)); - per_cpu(vmstat_work, cpu).work.func = NULL; + cpumask_clear_cpu(cpu, cpu_stat_off); break; case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: - start_cpu_timer(cpu); + cpumask_set_cpu(cpu, cpu_stat_off); break; case CPU_DEAD: case CPU_DEAD_FROZEN: @@ -1313,15 +1417,10 @@ static struct notifier_block vmstat_notifier = static int __init setup_vmstat(void) { #ifdef CONFIG_SMP - int cpu; - cpu_notifier_register_begin(); __register_cpu_notifier(&vmstat_notifier); - for_each_online_cpu(cpu) { - start_cpu_timer(cpu); - node_set_state(cpu_to_node(cpu), N_CPU); - } + start_shepherd_timer(); cpu_notifier_register_done(); #endif #ifdef CONFIG_PROC_FS From cd2567b6850b1648236a4aab0513a04ebaea6aa8 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Thu, 9 Oct 2014 15:29:45 -0700 Subject: [PATCH 144/164] m68k: call find_vma with the mmap_sem held in sys_cacheflush() Performing vma lookups without taking the mm->mmap_sem is asking for trouble. While doing the search, the vma in question can be modified or even removed before returning to the caller. Take the lock (shared) in order to avoid races while iterating through the vmacache and/or rbtree. In addition, this guarantees that the address space will remain intact during the CPU flushing. Signed-off-by: Davidlohr Bueso Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m68k/kernel/sys_m68k.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 3a480b3df0d6..9aa01adb407f 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -376,7 +376,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len) asmlinkage int sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) { - struct vm_area_struct *vma; int ret = -EINVAL; if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL || @@ -389,17 +388,21 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) if (!capable(CAP_SYS_ADMIN)) goto out; } else { + struct vm_area_struct *vma; + + /* Check for overflow. 
*/ + if (addr + len < addr) + goto out; + /* * Verify that the specified address region actually belongs * to this process. */ - vma = find_vma (current->mm, addr); ret = -EINVAL; - /* Check for overflow. */ - if (addr + len < addr) - goto out; - if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) - goto out; + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, addr); + if (!vma || addr < vma->vm_start || addr + len > vma->vm_end) + goto out_unlock; } if (CPU_IS_020_OR_030) { @@ -429,7 +432,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr)); } ret = 0; - goto out; + goto out_unlock; } else { /* * 040 or 060: don't blindly trust 'scope', someone could @@ -446,6 +449,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) ret = cache_flush_060 (addr, scope, cache, len); } } +out_unlock: + up_read(¤t->mm->mmap_sem); out: return ret; } From 13de8933c96b4557f667c337676f05274e017f83 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 9 Oct 2014 15:29:48 -0700 Subject: [PATCH 145/164] zsmalloc: move pages_allocated to zs_pool Currently, zram has no feature to limit memory so theoretically zram can deplete system memory. Users have asked for a limit several times as even without exhaustion zram makes it hard to control memory usage of the platform. This patchset adds the feature. Patch 1 makes zs_get_total_size_bytes faster because it would be used frequently in later patches for the new feature. Patch 2 changes zs_get_total_size_bytes's return unit from bytes to page so that zsmalloc doesn't need unnecessary operation(ie, << PAGE_SHIFT). Patch 3 adds new feature. I added the feature into zram layer, not zsmalloc because limiation is zram's requirement, not zsmalloc so any other user using zsmalloc(ie, zpool) shouldn't affected by unnecessary branch of zsmalloc. In future, if every users of zsmalloc want the feature, then, we could move the feature from client side to zsmalloc easily but vice versa would be painful. Patch 4 adds news facility to report maximum memory usage of zram so that this avoids user polling frequently via /sys/block/zram0/ mem_used_total and ensures transient max are not missed. This patch (of 4): pages_allocated has counted in size_class structure and when user of zsmalloc want to see total_size_bytes, it should gather all of count from each size_class to report the sum. It's not bad if user don't see the value often but if user start to see the value frequently, it would be not a good deal for performance pov. This patch moves the count from size_class to zs_pool so it could reduce memory footprint (from [255 * 8byte] to [sizeof(atomic_long_t)]). 
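To make the accounting move concrete, here is a small userspace model of the pattern described above: a single pool-wide counter is adjusted by pages_per_zspage when a zspage is created or becomes empty, so reading the total is one atomic load instead of a walk over every size class. This is an illustrative sketch only; C11 atomics stand in for the kernel's atomic_long_t and every name here is invented:

#include <stdatomic.h>
#include <stdio.h>

struct toy_pool {
	atomic_long pages_allocated;	/* pool-wide, as in zs_pool */
};

static void toy_zspage_created(struct toy_pool *pool, long pages_per_zspage)
{
	atomic_fetch_add(&pool->pages_allocated, pages_per_zspage);
}

static void toy_zspage_emptied(struct toy_pool *pool, long pages_per_zspage)
{
	atomic_fetch_sub(&pool->pages_allocated, pages_per_zspage);
}

static long toy_total_pages(struct toy_pool *pool)
{
	/* O(1): no summation across size classes */
	return atomic_load(&pool->pages_allocated);
}

int main(void)
{
	struct toy_pool pool;

	atomic_init(&pool.pages_allocated, 0);
	toy_zspage_created(&pool, 4);
	toy_zspage_created(&pool, 2);
	toy_zspage_emptied(&pool, 2);
	printf("%ld pages allocated\n", toy_total_pages(&pool));	/* 4 */
	return 0;
}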
Signed-off-by: Minchan Kim Reviewed-by: Dan Streetman Cc: Sergey Senozhatsky Cc: Jerome Marchand Cc: Cc: Cc: Luigi Semenzato Cc: Nitin Gupta Cc: Seth Jennings Reviewed-by: David Horner Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 94f38fac5e81..2a4acf400846 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -199,9 +199,6 @@ struct size_class { spinlock_t lock; - /* stats */ - u64 pages_allocated; - struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; }; @@ -220,6 +217,7 @@ struct zs_pool { struct size_class size_class[ZS_SIZE_CLASSES]; gfp_t flags; /* allocation flags used when growing pool */ + atomic_long_t pages_allocated; }; /* @@ -1028,8 +1026,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size) return 0; set_zspage_mapping(first_page, class->index, ZS_EMPTY); + atomic_long_add(class->pages_per_zspage, + &pool->pages_allocated); spin_lock(&class->lock); - class->pages_allocated += class->pages_per_zspage; } obj = (unsigned long)first_page->freelist; @@ -1082,14 +1081,13 @@ void zs_free(struct zs_pool *pool, unsigned long obj) first_page->inuse--; fullness = fix_fullness_group(pool, first_page); - - if (fullness == ZS_EMPTY) - class->pages_allocated -= class->pages_per_zspage; - spin_unlock(&class->lock); - if (fullness == ZS_EMPTY) + if (fullness == ZS_EMPTY) { + atomic_long_sub(class->pages_per_zspage, + &pool->pages_allocated); free_zspage(first_page); + } } EXPORT_SYMBOL_GPL(zs_free); @@ -1185,12 +1183,7 @@ EXPORT_SYMBOL_GPL(zs_unmap_object); u64 zs_get_total_size_bytes(struct zs_pool *pool) { - int i; - u64 npages = 0; - - for (i = 0; i < ZS_SIZE_CLASSES; i++) - npages += pool->size_class[i].pages_allocated; - + u64 npages = atomic_long_read(&pool->pages_allocated); return npages << PAGE_SHIFT; } EXPORT_SYMBOL_GPL(zs_get_total_size_bytes); From 722cdc17232f0f684011407f7cf3c40d39457971 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 9 Oct 2014 15:29:50 -0700 Subject: [PATCH 146/164] zsmalloc: change return value unit of zs_get_total_size_bytes zs_get_total_size_bytes returns a amount of memory zsmalloc consumed with *byte unit* but zsmalloc operates *page unit* rather than byte unit so let's change the API so benefit we could get is that reduce unnecessary overhead (ie, change page unit with byte unit) in zsmalloc. Since return type is pages, "zs_get_total_pages" is better than "zs_get_total_size_bytes". 
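A short sketch of the caller-side rationale, using invented names: internal comparisons can stay in pages with no shifting at all, and the conversion to bytes happens once, at the reporting boundary (the zram hunk below does exactly that with val << PAGE_SHIFT):

#include <stdio.h>

#define TOY_PAGE_SHIFT	12

/* stand-in for zs_get_total_pages() */
static unsigned long total_pages = 300;

int main(void)
{
	unsigned long limit_pages = 256;

	/* compare in pages, no unit conversion needed */
	if (limit_pages && total_pages > limit_pages)
		printf("over limit by %lu pages\n", total_pages - limit_pages);

	/* shift once, only where bytes are actually reported */
	printf("used: %llu bytes\n",
	       (unsigned long long)total_pages << TOY_PAGE_SHIFT);
	return 0;
}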
Signed-off-by: Minchan Kim Reviewed-by: Dan Streetman Cc: Sergey Senozhatsky Cc: Jerome Marchand Cc: Cc: Cc: Luigi Semenzato Cc: Nitin Gupta Cc: Seth Jennings Cc: David Horner Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zram_drv.c | 4 ++-- include/linux/zsmalloc.h | 2 +- mm/zsmalloc.c | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index d00831c3d731..f0b8b30a7128 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -103,10 +103,10 @@ static ssize_t mem_used_total_show(struct device *dev, down_read(&zram->init_lock); if (init_done(zram)) - val = zs_get_total_size_bytes(meta->mem_pool); + val = zs_get_total_pages(meta->mem_pool); up_read(&zram->init_lock); - return scnprintf(buf, PAGE_SIZE, "%llu\n", val); + return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); } static ssize_t max_comp_streams_show(struct device *dev, diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index e44d634e7fb7..05c214760977 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); -u64 zs_get_total_size_bytes(struct zs_pool *pool); +unsigned long zs_get_total_pages(struct zs_pool *pool); #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 2a4acf400846..c4a91578dc96 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -297,7 +297,7 @@ static void zs_zpool_unmap(void *pool, unsigned long handle) static u64 zs_zpool_total_size(void *pool) { - return zs_get_total_size_bytes(pool); + return zs_get_total_pages(pool) << PAGE_SHIFT; } static struct zpool_driver zs_zpool_driver = { @@ -1181,12 +1181,11 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) } EXPORT_SYMBOL_GPL(zs_unmap_object); -u64 zs_get_total_size_bytes(struct zs_pool *pool) +unsigned long zs_get_total_pages(struct zs_pool *pool) { - u64 npages = atomic_long_read(&pool->pages_allocated); - return npages << PAGE_SHIFT; + return atomic_long_read(&pool->pages_allocated); } -EXPORT_SYMBOL_GPL(zs_get_total_size_bytes); +EXPORT_SYMBOL_GPL(zs_get_total_pages); module_init(zs_init); module_exit(zs_exit); From 9ada9da9573f3460b156b7755c093e30b258eacb Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 9 Oct 2014 15:29:53 -0700 Subject: [PATCH 147/164] zram: zram memory size limitation Since zram has no control feature to limit memory usage, it makes hard to manage system memrory. This patch adds new knob "mem_limit" via sysfs to set up the a limit so that zram could fail allocation once it reaches the limit. In addition, user could change the limit in runtime so that he could manage the memory more dynamically. Initial state is no limit so it doesn't break old behavior. 
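As a rough model of what happens when a value such as "512M" is written to the new knob, the sketch below mimics the store path described above: parse an optional size suffix (the handler below uses the kernel's memparse() for this) and round the result up to whole pages, as PAGE_ALIGN(limit) >> PAGE_SHIFT does, with 0 meaning no limit. Userspace C with invented names, for illustration only:

#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE	4096ULL

static unsigned long parse_limit_pages(const char *buf)
{
	char *end;
	unsigned long long bytes = strtoull(buf, &end, 10);

	switch (*end) {
	case 'G': case 'g':
		bytes <<= 10;	/* fall through */
	case 'M': case 'm':
		bytes <<= 10;	/* fall through */
	case 'K': case 'k':
		bytes <<= 10;
		break;
	default:
		break;
	}
	/* round up to whole pages, like PAGE_ALIGN(limit) >> PAGE_SHIFT */
	return (bytes + TOY_PAGE_SIZE - 1) / TOY_PAGE_SIZE;
}

int main(void)
{
	printf("512M -> %lu pages\n", parse_limit_pages("512M"));
	printf("0    -> %lu pages (no limit)\n", parse_limit_pages("0"));
	return 0;
}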
[akpm@linux-foundation.org: fix typo, per Sergey] Signed-off-by: Minchan Kim Cc: Dan Streetman Cc: Sergey Senozhatsky Cc: Jerome Marchand Cc: Cc: Cc: Luigi Semenzato Cc: Nitin Gupta Cc: Seth Jennings Cc: David Horner Cc: Joonsoo Kim Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-block-zram | 9 +++++ Documentation/blockdev/zram.txt | 24 ++++++++++-- drivers/block/zram/zram_drv.c | 45 ++++++++++++++++++++++ drivers/block/zram/zram_drv.h | 5 +++ 4 files changed, 79 insertions(+), 4 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 70ec992514d0..ea67fa3f3cff 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -119,3 +119,12 @@ Description: efficiency can be calculated using compr_data_size and this statistic. Unit: bytes + +What: /sys/block/zram/mem_limit +Date: August 2014 +Contact: Minchan Kim +Description: + The mem_limit file is read/write and specifies the maximum + amount of memory ZRAM can use to store the compressed data. The + limit could be changed in run time and "0" means disable the + limit. No limit is the initial state. Unit: bytes diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 0595c3f56ccf..82c6a41116db 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -74,14 +74,30 @@ There is little point creating a zram of greater than twice the size of memory since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the size of the disk when not in use so a huge zram is wasteful. -5) Activate: +5) Set memory limit: Optional + Set memory limit by writing the value to sysfs node 'mem_limit'. + The value can be either in bytes or you can use mem suffixes. + In addition, you could change the value in runtime. + Examples: + # limit /dev/zram0 with 50MB memory + echo $((50*1024*1024)) > /sys/block/zram0/mem_limit + + # Using mem suffixes + echo 256K > /sys/block/zram0/mem_limit + echo 512M > /sys/block/zram0/mem_limit + echo 1G > /sys/block/zram0/mem_limit + + # To disable memory limit + echo 0 > /sys/block/zram0/mem_limit + +6) Activate: mkswap /dev/zram0 swapon /dev/zram0 mkfs.ext4 /dev/zram1 mount /dev/zram1 /tmp -6) Stats: +7) Stats: Per-device statistics are exported as various nodes under /sys/block/zram/ disksize @@ -96,11 +112,11 @@ size of the disk when not in use so a huge zram is wasteful. 
compr_data_size mem_used_total -7) Deactivate: +8) Deactivate: swapoff /dev/zram0 umount /dev/zram1 -8) Reset: +9) Reset: Write any positive value to 'reset' sysfs node echo 1 > /sys/block/zram0/reset echo 1 > /sys/block/zram1/reset diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f0b8b30a7128..64b27cf9a583 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -122,6 +122,37 @@ static ssize_t max_comp_streams_show(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%d\n", val); } +static ssize_t mem_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u64 val; + struct zram *zram = dev_to_zram(dev); + + down_read(&zram->init_lock); + val = zram->limit_pages; + up_read(&zram->init_lock); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); +} + +static ssize_t mem_limit_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + u64 limit; + char *tmp; + struct zram *zram = dev_to_zram(dev); + + limit = memparse(buf, &tmp); + if (buf == tmp) /* no chars parsed, invalid input */ + return -EINVAL; + + down_write(&zram->init_lock); + zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; + up_write(&zram->init_lock); + + return len; +} + static ssize_t max_comp_streams_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -513,6 +544,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, ret = -ENOMEM; goto out; } + + if (zram->limit_pages && + zs_get_total_pages(meta->mem_pool) > zram->limit_pages) { + zs_free(meta->mem_pool, handle); + ret = -ENOMEM; + goto out; + } + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO); if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { @@ -617,6 +656,9 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity) struct zram_meta *meta; down_write(&zram->init_lock); + + zram->limit_pages = 0; + if (!init_done(zram)) { up_write(&zram->init_lock); return; @@ -857,6 +899,8 @@ static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL); static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store); static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL); static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL); +static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show, + mem_limit_store); static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR, max_comp_streams_show, max_comp_streams_store); static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR, @@ -885,6 +929,7 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_orig_data_size.attr, &dev_attr_compr_data_size.attr, &dev_attr_mem_used_total.attr, + &dev_attr_mem_limit.attr, &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, NULL, diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index e0f725c87cc6..b7aa9c21553f 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -112,6 +112,11 @@ struct zram { u64 disksize; /* bytes */ int max_comp_streams; struct zram_stats stats; + /* + * the number of pages zram can consume for storing compressed data + */ + unsigned long limit_pages; + char compressor[10]; }; #endif From 461a8eee6af3b55745be64bea403ed0b743563cf Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 9 Oct 2014 15:29:55 -0700 Subject: [PATCH 148/164] zram: report maximum used memory Normally, zram user could get maximum memory usage zram consumed via polling mem_used_total with sysfs in userspace. 
But it has a critical problem because user can miss peak memory usage during update inverval of polling. For avoiding that, user should poll it with shorter interval(ie, 0.0000000001s) with mlocking to avoid page fault delay when memory pressure is heavy. It would be troublesome. This patch adds new knob "mem_used_max" so user could see the maximum memory usage easily via reading the knob and reset it via "echo 0 > /sys/block/zram0/mem_used_max". Signed-off-by: Minchan Kim Reviewed-by: Dan Streetman Cc: Sergey Senozhatsky Cc: Jerome Marchand Cc: Cc: Cc: Luigi Semenzato Cc: Nitin Gupta Cc: Seth Jennings Reviewed-by: David Horner Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-block-zram | 10 ++++ Documentation/blockdev/zram.txt | 1 + drivers/block/zram/zram_drv.c | 60 +++++++++++++++++++++- drivers/block/zram/zram_drv.h | 1 + 4 files changed, 70 insertions(+), 2 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index ea67fa3f3cff..b13dc993291f 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -120,6 +120,16 @@ Description: statistic. Unit: bytes +What: /sys/block/zram/mem_used_max +Date: August 2014 +Contact: Minchan Kim +Description: + The mem_used_max file is read/write and specifies the amount + of maximum memory zram have consumed to store compressed data. + For resetting the value, you should write "0". Otherwise, + you could see -EINVAL. + Unit: bytes + What: /sys/block/zram/mem_limit Date: August 2014 Contact: Minchan Kim diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 82c6a41116db..7fcf9c6592ec 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -111,6 +111,7 @@ size of the disk when not in use so a huge zram is wasteful. 
orig_data_size compr_data_size mem_used_total + mem_used_max 8) Deactivate: swapoff /dev/zram0 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 64b27cf9a583..d78b245bae06 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -153,6 +153,41 @@ static ssize_t mem_limit_store(struct device *dev, return len; } +static ssize_t mem_used_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u64 val = 0; + struct zram *zram = dev_to_zram(dev); + + down_read(&zram->init_lock); + if (init_done(zram)) + val = atomic_long_read(&zram->stats.max_used_pages); + up_read(&zram->init_lock); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); +} + +static ssize_t mem_used_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + int err; + unsigned long val; + struct zram *zram = dev_to_zram(dev); + struct zram_meta *meta = zram->meta; + + err = kstrtoul(buf, 10, &val); + if (err || val != 0) + return -EINVAL; + + down_read(&zram->init_lock); + if (init_done(zram)) + atomic_long_set(&zram->stats.max_used_pages, + zs_get_total_pages(meta->mem_pool)); + up_read(&zram->init_lock); + + return len; +} + static ssize_t max_comp_streams_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -465,6 +500,21 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, return ret; } +static inline void update_used_max(struct zram *zram, + const unsigned long pages) +{ + int old_max, cur_max; + + old_max = atomic_long_read(&zram->stats.max_used_pages); + + do { + cur_max = old_max; + if (pages > cur_max) + old_max = atomic_long_cmpxchg( + &zram->stats.max_used_pages, cur_max, pages); + } while (old_max != cur_max); +} + static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, int offset) { @@ -476,6 +526,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, struct zram_meta *meta = zram->meta; struct zcomp_strm *zstrm; bool locked = false; + unsigned long alloced_pages; page = bvec->bv_page; if (is_partial_io(bvec)) { @@ -545,13 +596,15 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, goto out; } - if (zram->limit_pages && - zs_get_total_pages(meta->mem_pool) > zram->limit_pages) { + alloced_pages = zs_get_total_pages(meta->mem_pool); + if (zram->limit_pages && alloced_pages > zram->limit_pages) { zs_free(meta->mem_pool, handle); ret = -ENOMEM; goto out; } + update_used_max(zram, alloced_pages); + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO); if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { @@ -901,6 +954,8 @@ static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL); static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL); static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show, mem_limit_store); +static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show, + mem_used_max_store); static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR, max_comp_streams_show, max_comp_streams_store); static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR, @@ -930,6 +985,7 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_compr_data_size.attr, &dev_attr_mem_used_total.attr, &dev_attr_mem_limit.attr, + &dev_attr_mem_used_max.attr, &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, NULL, diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index b7aa9c21553f..c6ee271317f5 100644 
--- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -90,6 +90,7 @@ struct zram_stats { atomic64_t notify_free; /* no. of swap slot free notifications */ atomic64_t zero_pages; /* no. of zero filled pages */ atomic64_t pages_stored; /* no. of pages currently stored */ + atomic_long_t max_used_pages; /* no. of maximum pages stored */ }; struct zram_meta { From 015254daf1753003c19c46b90ee85a963260d270 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 9 Oct 2014 15:29:57 -0700 Subject: [PATCH 149/164] zram: use notify_free to account all free notifications `notify_free' device attribute accounts the number of slot free notifications and internally represents the number of zram_free_page() calls. Slot free notifications are sent only when device is used as a swap device, hence `notify_free' is used only for swap devices. Since f4659d8e620d08 (zram: support REQ_DISCARD) ZRAM handles yet another one free notification (also via zram_free_page() call) -- REQ_DISCARD requests, which are sent by a filesystem, whenever some data blocks are discarded. However, there is no way to know the number of notifications in the latter case. Use `notify_free' to account the number of pages freed by zram_bio_discard() and zram_slot_free_notify(). Depending on usage scenario `notify_free' represents: a) the number of pages freed because of slot free notifications, which is equal to the number of swap_slot_free_notify() calls, so there is no behaviour change b) the number of pages freed because of REQ_DISCARD notifications Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Acked-by: Jerome Marchand Cc: Nitin Gupta Cc: Chao Yu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-block-zram | 13 ++++++++----- drivers/block/zram/zram_drv.c | 1 + 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index b13dc993291f..a6148eaf91e5 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -77,11 +77,14 @@ What: /sys/block/zram/notify_free Date: August 2010 Contact: Nitin Gupta Description: - The notify_free file is read-only and specifies the number of - swap slot free notifications received by this device. These - notifications are sent to a swap block device when a swap slot - is freed. This statistic is applicable only when this disk is - being used as a swap disk. + The notify_free file is read-only. Depending on device usage + scenario it may account a) the number of pages freed because + of swap slot free notifications or b) the number of pages freed + because of REQ_DISCARD requests sent by bio. The former ones + are sent to a swap block device when a swap slot is freed, which + implies that this disk is being used as a swap disk. The latter + ones are sent by filesystem mounted with discard option, + whenever some data blocks are getting discarded. 
What: /sys/block/zram/zero_pages Date: August 2010 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index d78b245bae06..3b850164c65c 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -698,6 +698,7 @@ static void zram_bio_discard(struct zram *zram, u32 index, bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + atomic64_inc(&zram->stats.notify_free); index++; n -= PAGE_SIZE; } From 6dd9737e31504f9377a8a19810ea4922e88516c1 Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Thu, 9 Oct 2014 15:29:59 -0700 Subject: [PATCH 150/164] mm/zsmalloc.c: correct comment for fullness group computation The letter 'f' in "n <= N/f" stands for fullness_threshold_frac, not 1/fullness_threshold_frac. Signed-off-by: Wang Sheng-Hui Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c4a91578dc96..c81f63e73c5f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -175,7 +175,7 @@ enum fullness_group { * n <= N / f, where * n = number of allocated objects * N = total number of objects zspage can store - * f = 1/fullness_threshold_frac + * f = fullness_threshold_frac * * Similarly, we assign zspage to: * ZS_ALMOST_FULL when n > N / f From 5538c562377580947916b3366898f1eb5f53768e Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 9 Oct 2014 15:30:01 -0700 Subject: [PATCH 151/164] zsmalloc: simplify init_zspage free obj linking Change zsmalloc init_zspage() logic to iterate through each object on each of its pages, checking the offset to verify the object is on the current page before linking it into the zspage. The current zsmalloc init_zspage free object linking code has logic that relies on there only being one page per zspage when PAGE_SIZE is a multiple of class->size. It calculates the number of objects for the current page, and iterates through all of them plus one, to account for the assumed partial object at the end of the page. While this currently works, the logic can be simplified to just link the object at each successive offset until the offset is larger than PAGE_SIZE, which does not rely on PAGE_SIZE being a multiple of class->size. 
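To make the equivalence concrete, the two loops can be modelled in a small userspace program (an illustrative sketch only, not kernel code; the page size, class sizes and starting offsets below are made-up example values standing in for what the size_class would provide):

/* build with: gcc -Wall -o init_zspage_model init_zspage_model.c */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL

/* Old scheme: precompute objs_on_page, then link that many times. */
static unsigned int links_old(unsigned long class_size, unsigned long off)
{
	unsigned int i, links = 0;
	unsigned long objs_on_page = (MODEL_PAGE_SIZE - off) / class_size;

	for (i = 1; i <= objs_on_page; i++) {
		off += class_size;
		if (off < MODEL_PAGE_SIZE)
			links++;
	}
	return links;
}

/* New scheme: advance by class_size until the offset runs past the page. */
static unsigned int links_new(unsigned long class_size, unsigned long off)
{
	unsigned int links = 0;

	while ((off += class_size) < MODEL_PAGE_SIZE)
		links++;
	return links;
}

int main(void)
{
	/* 256 divides the page size evenly; 208 and 2448 do not. */
	unsigned long sizes[] = { 208, 256, 2448 };
	unsigned long offs[] = { 0, 96 };
	unsigned int i, j;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		for (j = 0; j < sizeof(offs) / sizeof(offs[0]); j++)
			printf("size %4lu off %3lu: old %u, new %u in-page links\n",
			       sizes[i], offs[j],
			       links_old(sizes[i], offs[j]),
			       links_new(sizes[i], offs[j]));
	return 0;
}

Both loops report the same number of in-page links for every combination, whether or not the class size divides the page size; only the new form gets there without the objs_on_page precalculation.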
Signed-off-by: Dan Streetman Acked-by: Minchan Kim Cc: Sergey Senozhatsky Cc: Nitin Gupta Cc: Seth Jennings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c81f63e73c5f..839a48c3ca27 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -628,7 +628,7 @@ static void init_zspage(struct page *first_page, struct size_class *class) while (page) { struct page *next_page; struct link_free *link; - unsigned int i, objs_on_page; + unsigned int i = 1; /* * page->index stores offset of first object starting @@ -641,14 +641,10 @@ static void init_zspage(struct page *first_page, struct size_class *class) link = (struct link_free *)kmap_atomic(page) + off / sizeof(*link); - objs_on_page = (PAGE_SIZE - off) / class->size; - for (i = 1; i <= objs_on_page; i++) { - off += class->size; - if (off < PAGE_SIZE) { - link->next = obj_location_to_handle(page, i); - link += class->size / sizeof(*link); - } + while ((off += class->size) < PAGE_SIZE) { + link->next = obj_location_to_handle(page, i++); + link += class->size / sizeof(*link); } /* @@ -660,7 +656,7 @@ static void init_zspage(struct page *first_page, struct size_class *class) link->next = obj_location_to_handle(next_page, 0); kunmap_atomic(link); page = next_page; - off = (off + class->size) % PAGE_SIZE; + off %= PAGE_SIZE; } } From f203c3b33f0891da98ae3dcf829851c48473ed60 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 9 Oct 2014 15:30:04 -0700 Subject: [PATCH 152/164] zbud: avoid accessing last unused freelist For now, there are NCHUNKS of 64 freelists in zbud_pool, the last unbuddied[63] freelist linked with all zbud pages which have free chunks of 63. Calculating according to context of num_free_chunks(), our max chunk number of unbuddied zbud page is 62, so none of zbud pages will be added/removed in last freelist, but still we will try to find an unbuddied zbud page in the last unused freelist, it is unneeded. This patch redefines NCHUNKS to 63 as free chunk number in one zbud page, hence we can decrease size of zpool and avoid accessing the last unused freelist whenever failing to allocate zbud from freelist in zbud_alloc. Signed-off-by: Chao Yu Cc: Seth Jennings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zbud.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mm/zbud.c b/mm/zbud.c index f26e7fcc7fa2..ecf1dbef6983 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -60,15 +60,17 @@ * NCHUNKS_ORDER determines the internal allocation granularity, effectively * adjusting internal fragmentation. It also determines the number of * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64, and there - * will be 64 freelists per pool. + * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk + * in allocated page is occupied by zbud header, NCHUNKS will be calculated to + * 63 which shows the max number of free chunks in zbud page, also there will be + * 63 freelists per pool. 
*/ #define NCHUNKS_ORDER 6 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) #define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT) #define ZHDR_SIZE_ALIGNED CHUNK_SIZE +#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) /** * struct zbud_pool - stores metadata for each zbud pool @@ -268,10 +270,9 @@ static int num_free_chunks(struct zbud_header *zhdr) { /* * Rather than branch for different situations, just use the fact that - * free buddies have a length of zero to simplify everything. -1 at the - * end for the zbud header. + * free buddies have a length of zero to simplify everything. */ - return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks - 1; + return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; } /***************** From 21f456607a7acc73947580a84eb36d180d2562b7 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:30:06 -0700 Subject: [PATCH 153/164] frv: remove unused cpuinfo_frv and friends to fix future build error Frv has a macro named cpu_data, interfering with variables and struct members with the same name: include/linux/pm_domain.h:75:24: error: expected identifier or '(' before '&' token struct gpd_cpu_data *cpu_data; As struct cpuinfo_frv, boot_cpu_data, cpu_data, and current_cpu_data are not used, removed them to fix this. Signed-off-by: Geert Uytterhoeven Reported-by: kbuild test robot Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/include/asm/processor.h | 16 ---------------- arch/frv/kernel/setup.c | 2 -- 2 files changed, 18 deletions(-) diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h index 6554e78893f2..ae8d423e79d9 100644 --- a/arch/frv/include/asm/processor.h +++ b/arch/frv/include/asm/processor.h @@ -34,22 +34,6 @@ /* Forward declaration, a strange C thing */ struct task_struct; -/* - * CPU type and hardware bug flags. Kept separately for each CPU. - */ -struct cpuinfo_frv { -#ifdef CONFIG_MMU - unsigned long *pgd_quick; - unsigned long *pte_quick; - unsigned long pgtable_cache_sz; -#endif -} __cacheline_aligned; - -extern struct cpuinfo_frv __nongprelbss boot_cpu_data; - -#define cpu_data (&boot_cpu_data) -#define current_cpu_data boot_cpu_data - /* * Bus types */ diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c index 9f3a7a62d787..9f4a9a607dbe 100644 --- a/arch/frv/kernel/setup.c +++ b/arch/frv/kernel/setup.c @@ -104,8 +104,6 @@ unsigned long __nongprelbss dma_coherent_mem_end; unsigned long __initdata __sdram_old_base; unsigned long __initdata num_mappedpages; -struct cpuinfo_frv __nongprelbss boot_cpu_data; - char __initdata command_line[COMMAND_LINE_SIZE]; char __initdata redboot_command_line[COMMAND_LINE_SIZE]; From 08e4cf4be2153b0eac503679004889fd7e28c819 Mon Sep 17 00:00:00 2001 From: Michael Opdenacker Date: Thu, 9 Oct 2014 15:30:08 -0700 Subject: [PATCH 154/164] frv: remove deprecated IRQF_DISABLED Remove the IRQF_DISABLED flag from FRV architecture code. It's a NOOP since 2.6.35 and it will be removed one day. 
Signed-off-by: Michael Opdenacker Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/kernel/irq-mb93091.c | 8 ++++---- arch/frv/kernel/irq-mb93093.c | 1 - arch/frv/kernel/irq-mb93493.c | 4 ++-- arch/frv/kernel/time.c | 1 - 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c index 2cc327a1ca44..091b2839be90 100644 --- a/arch/frv/kernel/irq-mb93091.c +++ b/arch/frv/kernel/irq-mb93091.c @@ -107,25 +107,25 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask) static struct irqaction fpga_irq[4] = { [0] = { .handler = fpga_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "fpga.0", .dev_id = (void *) 0x0028UL, }, [1] = { .handler = fpga_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "fpga.1", .dev_id = (void *) 0x0050UL, }, [2] = { .handler = fpga_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "fpga.2", .dev_id = (void *) 0x1c00UL, }, [3] = { .handler = fpga_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "fpga.3", .dev_id = (void *) 0x6386UL, } diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c index 95e4eb4f1f38..1f3015cf80f5 100644 --- a/arch/frv/kernel/irq-mb93093.c +++ b/arch/frv/kernel/irq-mb93093.c @@ -105,7 +105,6 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask) static struct irqaction fpga_irq[1] = { [0] = { .handler = fpga_interrupt, - .flags = IRQF_DISABLED, .name = "fpga.0", .dev_id = (void *) 0x0700UL, } diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c index ba648da0932d..8ca5aa4ff595 100644 --- a/arch/frv/kernel/irq-mb93493.c +++ b/arch/frv/kernel/irq-mb93493.c @@ -118,13 +118,13 @@ static irqreturn_t mb93493_interrupt(int irq, void *_piqsr) static struct irqaction mb93493_irq[2] = { [0] = { .handler = mb93493_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "mb93493.0", .dev_id = (void *) __addr_MB93493_IQSR(0), }, [1] = { .handler = mb93493_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "mb93493.1", .dev_id = (void *) __addr_MB93493_IQSR(1), } diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c index b457de496b70..332e00bf9d06 100644 --- a/arch/frv/kernel/time.c +++ b/arch/frv/kernel/time.c @@ -44,7 +44,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy); static struct irqaction timer_irq = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "timer", }; From 036c6508f183e9c730aee25e33d27b2b9b9a5bbc Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:30:10 -0700 Subject: [PATCH 155/164] alpha: use Kbuild logic to include Signed-off-by: Geert Uytterhoeven Acked-by: Richard Henderson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/Kbuild | 1 + arch/alpha/include/asm/sections.h | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 arch/alpha/include/asm/sections.h diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index a52cbf178c3a..25b49725df07 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -8,4 +8,5 @@ generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += scatterlist.h +generic-y += sections.h generic-y += trace_clock.h diff --git a/arch/alpha/include/asm/sections.h 
b/arch/alpha/include/asm/sections.h deleted file mode 100644 index 43b40edd6e44..000000000000 --- a/arch/alpha/include/asm/sections.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ALPHA_SECTIONS_H -#define _ALPHA_SECTIONS_H - -/* nothing to see, move along */ -#include - -#endif From 2e1d06e1c05af9dbe8a3bfddeefbf041ca637fff Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Thu, 9 Oct 2014 15:30:13 -0700 Subject: [PATCH 156/164] include/linux/kernel.h: rewrite min3, max3 and clamp using min and max It appears that gcc is better at optimising a double call to min and max rather than open coded min3 and max3. This can be observed here: $ cat min-max.c #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #define min3(x, y, z) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ typeof(z) _min3 = (z); \ (void) (&_min1 == &_min2); \ (void) (&_min1 == &_min3); \ _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ (_min2 < _min3 ? _min2 : _min3); }) int fmin3(int x, int y, int z) { return min3(x, y, z); } int fmin2(int x, int y, int z) { return min(min(x, y), z); } $ gcc -O2 -o min-max.s -S min-max.c; cat min-max.s .file "min-max.c" .text .p2align 4,,15 .globl fmin3 .type fmin3, @function fmin3: .LFB0: .cfi_startproc cmpl %esi, %edi jl .L5 cmpl %esi, %edx movl %esi, %eax cmovle %edx, %eax ret .p2align 4,,10 .p2align 3 .L5: cmpl %edi, %edx movl %edi, %eax cmovle %edx, %eax ret .cfi_endproc .LFE0: .size fmin3, .-fmin3 .p2align 4,,15 .globl fmin2 .type fmin2, @function fmin2: .LFB1: .cfi_startproc cmpl %edi, %esi movl %edx, %eax cmovle %esi, %edi cmpl %edx, %edi cmovle %edi, %eax ret .cfi_endproc .LFE1: .size fmin2, .-fmin2 .ident "GCC: (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3" .section .note.GNU-stack,"",@progbits fmin3 function, which uses open-coded min3 macro, is compiled into total of ten instructions including a conditional branch, whereas fmin2 function, which uses two calls to min2 macro, is compiled into six instructions with no branches. Similarly, open-coded clamp produces the same code as clamp using min and max macros, but the latter is much shorter: $ cat clamp.c #define clamp(val, min, max) ({ \ typeof(val) __val = (val); \ typeof(min) __min = (min); \ typeof(max) __max = (max); \ (void) (&__val == &__min); \ (void) (&__val == &__max); \ __val = __val < __min ? __min: __val; \ __val > __max ? __max: __val; }) #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #define max(x, y) ({ \ typeof(x) _max1 = (x); \ typeof(y) _max2 = (y); \ (void) (&_max1 == &_max2); \ _max1 > _max2 ? 
_max1 : _max2; }) int fclamp(int v, int min, int max) { return clamp(v, min, max); } int fclampmm(int v, int min, int max) { return min(max(v, min), max); } $ gcc -O2 -o clamp.s -S clamp.c; cat clamp.s .file "clamp.c" .text .p2align 4,,15 .globl fclamp .type fclamp, @function fclamp: .LFB0: .cfi_startproc cmpl %edi, %esi movl %edx, %eax cmovge %esi, %edi cmpl %edx, %edi cmovle %edi, %eax ret .cfi_endproc .LFE0: .size fclamp, .-fclamp .p2align 4,,15 .globl fclampmm .type fclampmm, @function fclampmm: .LFB1: .cfi_startproc cmpl %edi, %esi cmovge %esi, %edi cmpl %edi, %edx movl %edi, %eax cmovle %edx, %eax ret .cfi_endproc .LFE1: .size fclampmm, .-fclampmm .ident "GCC: (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3" .section .note.GNU-stack,"",@progbits Linux mpn-glaptop 3.13.0-29-generic #53~precise1-Ubuntu SMP Wed Jun 4 22:06:25 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 Copyright (C) 2011 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -rwx------ 1 mpn eng 51224656 Jun 17 14:15 vmlinux.before -rwx------ 1 mpn eng 51224608 Jun 17 13:57 vmlinux.after 48 bytes reduction. The do_fault_around was a few instruction shorter and as far as I can tell saved 12 bytes on the stack, i.e.: $ grep -e rsp -e pop -e push do_fault_around.* do_fault_around.before.s:push %rbp do_fault_around.before.s:mov %rsp,%rbp do_fault_around.before.s:push %r13 do_fault_around.before.s:push %r12 do_fault_around.before.s:push %rbx do_fault_around.before.s:sub $0x38,%rsp do_fault_around.before.s:add $0x38,%rsp do_fault_around.before.s:pop %rbx do_fault_around.before.s:pop %r12 do_fault_around.before.s:pop %r13 do_fault_around.before.s:pop %rbp do_fault_around.after.s:push %rbp do_fault_around.after.s:mov %rsp,%rbp do_fault_around.after.s:push %r12 do_fault_around.after.s:push %rbx do_fault_around.after.s:sub $0x30,%rsp do_fault_around.after.s:add $0x30,%rsp do_fault_around.after.s:pop %rbx do_fault_around.after.s:pop %r12 do_fault_around.after.s:pop %rbp or here side-by-side: Before After push %rbp push %rbp mov %rsp,%rbp mov %rsp,%rbp push %r13 push %r12 push %r12 push %rbx push %rbx sub $0x38,%rsp sub $0x30,%rsp add $0x38,%rsp add $0x30,%rsp pop %rbx pop %rbx pop %r12 pop %r12 pop %r13 pop %rbp pop %rbp There are also fewer branches: $ grep ^j do_fault_around.* do_fault_around.before.s:jae ffffffff812079b7 do_fault_around.before.s:jmp ffffffff812079c5 do_fault_around.before.s:jmp ffffffff81207a14 do_fault_around.before.s:ja ffffffff812079f9 do_fault_around.before.s:jb ffffffff81207a10 do_fault_around.before.s:jmp ffffffff81207a63 do_fault_around.before.s:jne ffffffff812079df do_fault_around.after.s:jmp ffffffff812079fd do_fault_around.after.s:ja ffffffff812079e2 do_fault_around.after.s:jb ffffffff812079f9 do_fault_around.after.s:jmp ffffffff81207a4c do_fault_around.after.s:jne ffffffff812079c8 And here's with allyesconfig on a different machine: $ uname -a; gcc --version; ls -l vmlinux.* Linux erwin 3.14.7-mn #54 SMP Sun Jun 15 11:25:08 CEST 2014 x86_64 AMD Phenom(tm) II X3 710 Processor AuthenticAMD GNU/Linux gcc (GCC) 4.8.3 Copyright (C) 2013 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -rwx------ 1 mpn eng 437027411 Jun 20 16:04 vmlinux.before -rwx------ 1 mpn eng 437026881 Jun 20 15:30 vmlinux.after 530 bytes reduction. 
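For completeness, the reworked one-line forms added by this patch (and reused by the clamp_t/clamp_val cleanup that follows) can be sanity-checked in userspace. This is an illustrative sketch relying on the same GCC typeof/statement-expression extensions the kernel already uses:

/* build with: gcc -Wall -o minmax_check minmax_check.c */
#include <assert.h>
#include <stdio.h>

#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

/* The new definitions: double min/max calls instead of open coding. */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

int main(void)
{
	assert(min3(3, 1, 2) == 1);
	assert(max3(3, 1, 2) == 3);
	assert(clamp(-5, 1, 4) == 1);	/* below the range: clipped to lo */
	assert(clamp(2, 1, 4) == 2);	/* inside the range: unchanged */
	assert(clamp(5, 1, 4) == 4);	/* above the range: clipped to hi */
	printf("min3/max3/clamp behave as expected\n");
	return 0;
}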
Signed-off-by: Michal Nazarewicz Signed-off-by: Hagen Paul Pfeifer Acked-by: Steven Rostedt Cc: Hagen Paul Pfeifer Cc: David Rientjes Cc: "Rustad, Mark D" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 95624bed87ef..aa2a0cb57f50 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -715,23 +715,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) -#define min3(x, y, z) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - typeof(z) _min3 = (z); \ - (void) (&_min1 == &_min2); \ - (void) (&_min1 == &_min3); \ - _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ - (_min2 < _min3 ? _min2 : _min3); }) - -#define max3(x, y, z) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - typeof(z) _max3 = (z); \ - (void) (&_max1 == &_max2); \ - (void) (&_max1 == &_max3); \ - _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ - (_max2 > _max3 ? _max2 : _max3); }) +#define min3(x, y, z) min((typeof(x))min(x, y), z) +#define max3(x, y, z) max((typeof(x))max(x, y), z) /** * min_not_zero - return the minimum that is _not_ zero, unless both are zero @@ -746,20 +731,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } /** * clamp - return a value clamped to a given range with strict typechecking * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: lowest allowable value + * @hi: highest allowable value * * This macro does strict typechecking of min/max to make sure they are of the * same type as val. See the unnecessary pointer comparisons. */ -#define clamp(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(min) __min = (min); \ - typeof(max) __max = (max); \ - (void) (&__val == &__min); \ - (void) (&__val == &__max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) /* * ..and if you can't take the strict From c185b07fc9f24d52a864376ed22a6d84384b0c53 Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Thu, 9 Oct 2014 15:30:15 -0700 Subject: [PATCH 157/164] include/linux/kernel.h: deduplicate code implementing clamp* macros Instead of open-coding clamp_t macro min_t and max_t the way clamp macro does and instead of open-coding clamp_val simply use clamp_t. Furthermore, normalise argument naming in the macros to be lo and hi. Signed-off-by: Michal Nazarewicz Cc: Mark Rustad Cc: "Kirsher, Jeffrey T" Cc: Hagen Paul Pfeifer Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index aa2a0cb57f50..e9e420b6d931 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -734,7 +734,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * @lo: lowest allowable value * @hi: highest allowable value * - * This macro does strict typechecking of min/max to make sure they are of the + * This macro does strict typechecking of lo/hi to make sure they are of the * same type as val. See the unnecessary pointer comparisons. 
*/ #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) @@ -759,36 +759,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * clamp_t - return a value clamped to a given range using a given type * @type: the type of variable to use * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of type * 'type' to make all the comparisons. */ -#define clamp_t(type, val, min, max) ({ \ - type __val = (val); \ - type __min = (min); \ - type __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) /** * clamp_val - return a value clamped to a given range using val's type * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of whatever * type the input argument 'val' is. This is useful when val is an unsigned * type and min and max are literals that will otherwise be assigned a signed * integer type. */ -#define clamp_val(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(val) __min = (min); \ - typeof(val) __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) /* From 61a04e5b306ab9d6a30f78e86f1f140d7c888304 Mon Sep 17 00:00:00 2001 From: Michele Curti Date: Thu, 9 Oct 2014 15:30:17 -0700 Subject: [PATCH 158/164] include/linux/blkdev.h: use NULL instead of zero Quite useless but it shuts up some warnings. 
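The warning being silenced is most likely sparse's "Using plain integer as NULL pointer" complaint. A minimal userspace illustration of the before/after follows (the opaque struct and function names are stand-ins for the example, not the real blkdev.h declarations):

/* build with: gcc -Wall -o null_return null_return.c
 * check with: sparse null_return.c
 */
#include <stddef.h>
#include <stdio.h>

struct blk_integrity;			/* opaque stand-in type */

/* Before: a plain integer used as the null pointer constant. */
static struct blk_integrity *bdev_get_integrity_old(void)
{
	return 0;	/* sparse: Using plain integer as NULL pointer */
}

/* After: same behaviour, but explicitly a pointer value. */
static struct blk_integrity *bdev_get_integrity_new(void)
{
	return NULL;
}

int main(void)
{
	printf("old: %p, new: %p\n",
	       (void *)bdev_get_integrity_old(),
	       (void *)bdev_get_integrity_new());
	return 0;
}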
Signed-off-by: Michele Curti Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 518b46555b80..87be398166d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1564,7 +1564,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q, } static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) { - return 0; + return NULL; } static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { From 27fb10edcacbb70ac4e97fe1506006d732421210 Mon Sep 17 00:00:00 2001 From: Ionut Alexa Date: Thu, 9 Oct 2014 15:30:19 -0700 Subject: [PATCH 159/164] kernel/async.c: switch to pr_foo() Signed-off-by: Ionut Alexa Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/async.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/async.c b/kernel/async.c index 61f023ce0228..4c3773c0bf63 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -115,7 +115,7 @@ static void async_run_entry_fn(struct work_struct *work) /* 1) run (and print duration) */ if (initcall_debug && system_state == SYSTEM_BOOTING) { - printk(KERN_DEBUG "calling %lli_%pF @ %i\n", + pr_debug("calling %lli_%pF @ %i\n", (long long)entry->cookie, entry->func, task_pid_nr(current)); calltime = ktime_get(); @@ -124,7 +124,7 @@ static void async_run_entry_fn(struct work_struct *work) if (initcall_debug && system_state == SYSTEM_BOOTING) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); - printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n", + pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n", (long long)entry->cookie, entry->func, (long long)ktime_to_ns(delta) >> 10); @@ -285,7 +285,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain ktime_t uninitialized_var(starttime), delta, endtime; if (initcall_debug && system_state == SYSTEM_BOOTING) { - printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current)); + pr_debug("async_waiting @ %i\n", task_pid_nr(current)); starttime = ktime_get(); } @@ -295,7 +295,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain endtime = ktime_get(); delta = ktime_sub(endtime, starttime); - printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n", + pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current), (long long)ktime_to_ns(delta) >> 10); } From 067b722faf98adbe1e94581f39c06a7c82b58676 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Thu, 9 Oct 2014 15:30:21 -0700 Subject: [PATCH 160/164] acct: eliminate compile warning If ACCT_VERSION is not defined to 3, below warning appears: CC kernel/acct.o kernel/acct.c: In function `do_acct_process': kernel/acct.c:475:24: warning: unused variable `ns' [-Wunused-variable] [akpm@linux-foundation.org: retain the local for code size improvements Signed-off-by: Ying Xue Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/acct.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/acct.c b/kernel/acct.c index b4c667d22e79..33738ef972f3 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -472,7 +472,6 @@ static void do_acct_process(struct bsd_acct_struct *acct) acct_t ac; unsigned long flim; const struct cred *orig_cred; - struct pid_namespace *ns = acct->ns; struct file *file = acct->file; /* @@ -500,10 +499,15 @@ static void do_acct_process(struct 
bsd_acct_struct *acct) ac.ac_gid16 = ac.ac_gid; #endif #if ACCT_VERSION == 3 - ac.ac_pid = task_tgid_nr_ns(current, ns); - rcu_read_lock(); - ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns); - rcu_read_unlock(); + { + struct pid_namespace *ns = acct->ns; + + ac.ac_pid = task_tgid_nr_ns(current, ns); + rcu_read_lock(); + ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), + ns); + rcu_read_unlock(); + } #endif /* * Get freeze protection. If the fs is frozen, just skip the write From ec94fc3d59b54561da03a0e433d93217b08c1481 Mon Sep 17 00:00:00 2001 From: "vishnu.ps" Date: Thu, 9 Oct 2014 15:30:23 -0700 Subject: [PATCH 161/164] kernel/sys.c: whitespace fixes Fix minor errors and warning messages in kernel/sys.c. These errors were reported by checkpatch while working with some modifications in sys.c file. Fixing this first will help me to improve my further patches. ERROR: trailing whitespace - 9 ERROR: do not use assignment in if condition - 4 ERROR: spaces required around that '?' (ctx:VxO) - 10 ERROR: switch and case should be at the same indent - 3 total 26 errors & 3 warnings fixed. Signed-off-by: vishnu.ps Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 263 ++++++++++++++++++++++++++------------------------- 1 file changed, 136 insertions(+), 127 deletions(-) diff --git a/kernel/sys.c b/kernel/sys.c index df692fbf1e79..037fd76bdc76 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -62,28 +62,28 @@ #include #ifndef SET_UNALIGN_CTL -# define SET_UNALIGN_CTL(a,b) (-EINVAL) +# define SET_UNALIGN_CTL(a, b) (-EINVAL) #endif #ifndef GET_UNALIGN_CTL -# define GET_UNALIGN_CTL(a,b) (-EINVAL) +# define GET_UNALIGN_CTL(a, b) (-EINVAL) #endif #ifndef SET_FPEMU_CTL -# define SET_FPEMU_CTL(a,b) (-EINVAL) +# define SET_FPEMU_CTL(a, b) (-EINVAL) #endif #ifndef GET_FPEMU_CTL -# define GET_FPEMU_CTL(a,b) (-EINVAL) +# define GET_FPEMU_CTL(a, b) (-EINVAL) #endif #ifndef SET_FPEXC_CTL -# define SET_FPEXC_CTL(a,b) (-EINVAL) +# define SET_FPEXC_CTL(a, b) (-EINVAL) #endif #ifndef GET_FPEXC_CTL -# define GET_FPEXC_CTL(a,b) (-EINVAL) +# define GET_FPEXC_CTL(a, b) (-EINVAL) #endif #ifndef GET_ENDIAN -# define GET_ENDIAN(a,b) (-EINVAL) +# define GET_ENDIAN(a, b) (-EINVAL) #endif #ifndef SET_ENDIAN -# define SET_ENDIAN(a,b) (-EINVAL) +# define SET_ENDIAN(a, b) (-EINVAL) #endif #ifndef GET_TSC_CTL # define GET_TSC_CTL(a) (-EINVAL) @@ -182,39 +182,40 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) rcu_read_lock(); read_lock(&tasklist_lock); switch (which) { - case PRIO_PROCESS: - if (who) - p = find_task_by_vpid(who); - else - p = current; - if (p) - error = set_one_prio(p, niceval, error); - break; - case PRIO_PGRP: - if (who) - pgrp = find_vpid(who); - else - pgrp = task_pgrp(current); - do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { - error = set_one_prio(p, niceval, error); - } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); - break; - case PRIO_USER: - uid = make_kuid(cred->user_ns, who); - user = cred->user; - if (!who) - uid = cred->uid; - else if (!uid_eq(uid, cred->uid) && - !(user = find_user(uid))) + case PRIO_PROCESS: + if (who) + p = find_task_by_vpid(who); + else + p = current; + if (p) + error = set_one_prio(p, niceval, error); + break; + case PRIO_PGRP: + if (who) + pgrp = find_vpid(who); + else + pgrp = task_pgrp(current); + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + error = set_one_prio(p, niceval, error); + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + break; + case PRIO_USER: + uid = make_kuid(cred->user_ns, 
who); + user = cred->user; + if (!who) + uid = cred->uid; + else if (!uid_eq(uid, cred->uid)) { + user = find_user(uid); + if (!user) goto out_unlock; /* No processes for this user */ - - do_each_thread(g, p) { - if (uid_eq(task_uid(p), uid)) - error = set_one_prio(p, niceval, error); - } while_each_thread(g, p); - if (!uid_eq(uid, cred->uid)) - free_uid(user); /* For find_user() */ - break; + } + do_each_thread(g, p) { + if (uid_eq(task_uid(p), uid)) + error = set_one_prio(p, niceval, error); + } while_each_thread(g, p); + if (!uid_eq(uid, cred->uid)) + free_uid(user); /* For find_user() */ + break; } out_unlock: read_unlock(&tasklist_lock); @@ -244,47 +245,48 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) rcu_read_lock(); read_lock(&tasklist_lock); switch (which) { - case PRIO_PROCESS: - if (who) - p = find_task_by_vpid(who); - else - p = current; - if (p) { + case PRIO_PROCESS: + if (who) + p = find_task_by_vpid(who); + else + p = current; + if (p) { + niceval = nice_to_rlimit(task_nice(p)); + if (niceval > retval) + retval = niceval; + } + break; + case PRIO_PGRP: + if (who) + pgrp = find_vpid(who); + else + pgrp = task_pgrp(current); + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + niceval = nice_to_rlimit(task_nice(p)); + if (niceval > retval) + retval = niceval; + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + break; + case PRIO_USER: + uid = make_kuid(cred->user_ns, who); + user = cred->user; + if (!who) + uid = cred->uid; + else if (!uid_eq(uid, cred->uid)) { + user = find_user(uid); + if (!user) + goto out_unlock; /* No processes for this user */ + } + do_each_thread(g, p) { + if (uid_eq(task_uid(p), uid)) { niceval = nice_to_rlimit(task_nice(p)); if (niceval > retval) retval = niceval; } - break; - case PRIO_PGRP: - if (who) - pgrp = find_vpid(who); - else - pgrp = task_pgrp(current); - do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { - niceval = nice_to_rlimit(task_nice(p)); - if (niceval > retval) - retval = niceval; - } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); - break; - case PRIO_USER: - uid = make_kuid(cred->user_ns, who); - user = cred->user; - if (!who) - uid = cred->uid; - else if (!uid_eq(uid, cred->uid) && - !(user = find_user(uid))) - goto out_unlock; /* No processes for this user */ - - do_each_thread(g, p) { - if (uid_eq(task_uid(p), uid)) { - niceval = nice_to_rlimit(task_nice(p)); - if (niceval > retval) - retval = niceval; - } - } while_each_thread(g, p); - if (!uid_eq(uid, cred->uid)) - free_uid(user); /* for find_user() */ - break; + } while_each_thread(g, p); + if (!uid_eq(uid, cred->uid)) + free_uid(user); /* for find_user() */ + break; } out_unlock: read_unlock(&tasklist_lock); @@ -306,7 +308,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) * * The general idea is that a program which uses just setregid() will be * 100% compatible with BSD. A program which uses just setgid() will be - * 100% compatible with POSIX with saved IDs. + * 100% compatible with POSIX with saved IDs. * * SMP: There are not races, the GIDs are checked only by filesystem * operations (as far as semantic preservation is concerned). @@ -364,7 +366,7 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) } /* - * setgid() is implemented like SysV w/ SAVED_IDS + * setgid() is implemented like SysV w/ SAVED_IDS * * SMP: Same implicit races as above. */ @@ -442,7 +444,7 @@ static int set_user(struct cred *new) * * The general idea is that a program which uses just setreuid() will be * 100% compatible with BSD. 
A program which uses just setuid() will be - * 100% compatible with POSIX with saved IDs. + * 100% compatible with POSIX with saved IDs. */ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) { @@ -503,17 +505,17 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) abort_creds(new); return retval; } - + /* - * setuid() is implemented like SysV with SAVED_IDS - * + * setuid() is implemented like SysV with SAVED_IDS + * * Note that SAVED_ID's is deficient in that a setuid root program - * like sendmail, for example, cannot set its uid to be a normal + * like sendmail, for example, cannot set its uid to be a normal * user and then switch back, because if you're root, setuid() sets * the saved uid too. If you don't like this, blame the bright people * in the POSIX committee and/or USG. Note that the BSD-style setreuid() * will allow a root program to temporarily drop privileges and be able to - * regain them by swapping the real and effective uid. + * regain them by swapping the real and effective uid. */ SYSCALL_DEFINE1(setuid, uid_t, uid) { @@ -637,10 +639,12 @@ SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t _ euid = from_kuid_munged(cred->user_ns, cred->euid); suid = from_kuid_munged(cred->user_ns, cred->suid); - if (!(retval = put_user(ruid, ruidp)) && - !(retval = put_user(euid, euidp))) - retval = put_user(suid, suidp); - + retval = put_user(ruid, ruidp); + if (!retval) { + retval = put_user(euid, euidp); + if (!retval) + return put_user(suid, suidp); + } return retval; } @@ -709,9 +713,12 @@ SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t _ egid = from_kgid_munged(cred->user_ns, cred->egid); sgid = from_kgid_munged(cred->user_ns, cred->sgid); - if (!(retval = put_user(rgid, rgidp)) && - !(retval = put_user(egid, egidp))) - retval = put_user(sgid, sgidp); + retval = put_user(rgid, rgidp); + if (!retval) { + retval = put_user(egid, egidp); + if (!retval) + retval = put_user(sgid, sgidp); + } return retval; } @@ -1284,7 +1291,6 @@ SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) /* * Back compatibility for getrlimit. Needed for some apps. */ - SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, struct rlimit __user *, rlim) { @@ -1299,7 +1305,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, x.rlim_cur = 0x7FFFFFFF; if (x.rlim_max > 0x7FFFFFFF) x.rlim_max = 0x7FFFFFFF; - return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0; + return copy_to_user(rlim, &x, sizeof(x)) ? 
-EFAULT : 0; } #endif @@ -1527,7 +1533,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) cputime_t tgutime, tgstime, utime, stime; unsigned long maxrss = 0; - memset((char *) r, 0, sizeof *r); + memset((char *)r, 0, sizeof (*r)); utime = stime = 0; if (who == RUSAGE_THREAD) { @@ -1541,41 +1547,41 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) return; switch (who) { - case RUSAGE_BOTH: - case RUSAGE_CHILDREN: - utime = p->signal->cutime; - stime = p->signal->cstime; - r->ru_nvcsw = p->signal->cnvcsw; - r->ru_nivcsw = p->signal->cnivcsw; - r->ru_minflt = p->signal->cmin_flt; - r->ru_majflt = p->signal->cmaj_flt; - r->ru_inblock = p->signal->cinblock; - r->ru_oublock = p->signal->coublock; - maxrss = p->signal->cmaxrss; + case RUSAGE_BOTH: + case RUSAGE_CHILDREN: + utime = p->signal->cutime; + stime = p->signal->cstime; + r->ru_nvcsw = p->signal->cnvcsw; + r->ru_nivcsw = p->signal->cnivcsw; + r->ru_minflt = p->signal->cmin_flt; + r->ru_majflt = p->signal->cmaj_flt; + r->ru_inblock = p->signal->cinblock; + r->ru_oublock = p->signal->coublock; + maxrss = p->signal->cmaxrss; - if (who == RUSAGE_CHILDREN) - break; - - case RUSAGE_SELF: - thread_group_cputime_adjusted(p, &tgutime, &tgstime); - utime += tgutime; - stime += tgstime; - r->ru_nvcsw += p->signal->nvcsw; - r->ru_nivcsw += p->signal->nivcsw; - r->ru_minflt += p->signal->min_flt; - r->ru_majflt += p->signal->maj_flt; - r->ru_inblock += p->signal->inblock; - r->ru_oublock += p->signal->oublock; - if (maxrss < p->signal->maxrss) - maxrss = p->signal->maxrss; - t = p; - do { - accumulate_thread_rusage(t, r); - } while_each_thread(p, t); + if (who == RUSAGE_CHILDREN) break; - default: - BUG(); + case RUSAGE_SELF: + thread_group_cputime_adjusted(p, &tgutime, &tgstime); + utime += tgutime; + stime += tgstime; + r->ru_nvcsw += p->signal->nvcsw; + r->ru_nivcsw += p->signal->nivcsw; + r->ru_minflt += p->signal->min_flt; + r->ru_majflt += p->signal->maj_flt; + r->ru_inblock += p->signal->inblock; + r->ru_oublock += p->signal->oublock; + if (maxrss < p->signal->maxrss) + maxrss = p->signal->maxrss; + t = p; + do { + accumulate_thread_rusage(t, r); + } while_each_thread(p, t); + break; + + default: + BUG(); } unlock_task_sighand(p, &flags); @@ -1585,6 +1591,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) if (who != RUSAGE_CHILDREN) { struct mm_struct *mm = get_task_mm(p); + if (mm) { setmax_mm_hiwater_rss(&maxrss, mm); mmput(mm); @@ -1596,6 +1603,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) int getrusage(struct task_struct *p, int who, struct rusage __user *ru) { struct rusage r; + k_getrusage(p, who, &r); return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } @@ -2209,6 +2217,7 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, { int err = 0; int cpu = raw_smp_processor_id(); + if (cpup) err |= put_user(cpu, cpup); if (nodep) From 0baae41ea8365a7b5a34c6474a77d7eb1126f6b2 Mon Sep 17 00:00:00 2001 From: Scotty Bauer Date: Thu, 9 Oct 2014 15:30:26 -0700 Subject: [PATCH 162/164] kernel/sys.c: compat sysinfo syscall: fix undefined behavior Fix undefined behavior and compiler warning by replacing right shift 32 with upper_32_bits macro Signed-off-by: Scotty Bauer Cc: Clemens Ladisch Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sys.c b/kernel/sys.c index 037fd76bdc76..dfce4debd138 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2330,7 +2330,7 @@ COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info) /* Check to see if any memory value is too large for 32-bit and scale * down if needed */ - if ((s.totalram >> 32) || (s.totalswap >> 32)) { + if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) { int bitcount = 0; while (s.mem_unit < PAGE_SIZE) { From 578b25dfce2990d8bab5631f33a4283bd5b01556 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:30:28 -0700 Subject: [PATCH 163/164] include/linux/screen_info.h: remove unused ORIG_* macros The ORIG_* macros definitions to access struct screen_info members and all of their users were removed 7 years ago by commit 3ea335100014785f ("Remove magic macros for screen_info structure members"), but (only) the definitions reappeared a few days later in commit ee8e7cfe9d330d6f ("Make asm-x86/bootparam.h includable from userspace."). Remove them for good. Amen. Signed-off-by: Geert Uytterhoeven Cc: "H. Peter Anvin" Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/screen_info.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 005bf3e38db5..f0f8bad54be9 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -5,12 +5,4 @@ extern struct screen_info screen_info; -#define ORIG_X (screen_info.orig_x) -#define ORIG_Y (screen_info.orig_y) -#define ORIG_VIDEO_MODE (screen_info.orig_video_mode) -#define ORIG_VIDEO_COLS (screen_info.orig_video_cols) -#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx) -#define ORIG_VIDEO_LINES (screen_info.orig_video_lines) -#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA) -#define ORIG_VIDEO_POINTS (screen_info.orig_video_points) #endif /* _SCREEN_INFO_H */ From 7f8998c7aef3ac9c5f3f2943e083dfa6302e90d0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:30:30 -0700 Subject: [PATCH 164/164] nosave: consolidate __nosave_{begin,end} in The different architectures used their own (and different) declarations: extern __visible const void __nosave_begin, __nosave_end; extern const void __nosave_begin, __nosave_end; extern long __nosave_begin, __nosave_end; Consolidate them using the first variant in . Signed-off-by: Geert Uytterhoeven Cc: Russell King Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: "David S. Miller" Cc: Guan Xuetao Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/hibernate.c | 3 +-- arch/mips/include/asm/suspend.h | 7 ------- arch/mips/power/cpu.c | 2 +- arch/powerpc/kernel/suspend.c | 4 +--- arch/s390/kernel/suspend.c | 6 +----- arch/sh/include/asm/sections.h | 1 - arch/sparc/power/hibernate.c | 4 +--- arch/unicore32/include/mach/pm.h | 3 --- arch/unicore32/kernel/hibernate.c | 1 + arch/x86/power/hibernate_32.c | 4 +--- arch/x86/power/hibernate_64.c | 4 +--- include/asm-generic/sections.h | 4 ++++ 12 files changed, 12 insertions(+), 31 deletions(-) delete mode 100644 arch/mips/include/asm/suspend.h diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c index bb8b79648643..c4cc50e58c13 100644 --- a/arch/arm/kernel/hibernate.c +++ b/arch/arm/kernel/hibernate.c @@ -21,8 +21,7 @@ #include #include #include - -extern const void __nosave_begin, __nosave_end; +#include int pfn_is_nosave(unsigned long pfn) { diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h deleted file mode 100644 index 3adac3b53d19..000000000000 --- a/arch/mips/include/asm/suspend.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SUSPEND_H -#define __ASM_SUSPEND_H - -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - -#endif /* __ASM_SUSPEND_H */ diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c index 521e5963df05..2129e67723ff 100644 --- a/arch/mips/power/cpu.c +++ b/arch/mips/power/cpu.c @@ -7,7 +7,7 @@ * Author: Hu Hongbing * Wu Zhangjin */ -#include +#include #include #include diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c index 0167d53da30c..a531154cc0f3 100644 --- a/arch/powerpc/kernel/suspend.c +++ b/arch/powerpc/kernel/suspend.c @@ -9,9 +9,7 @@ #include #include - -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; +#include /* * pfn_is_nosave - check if given pfn is in the 'nosave' section diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index a7a7537ce1e7..1c4c5accd220 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -13,13 +13,9 @@ #include #include #include +#include #include "entry.h" -/* - * References to section boundaries - */ -extern const void __nosave_begin, __nosave_end; - /* * The restore of the saved pages in an hibernation image will set * the change and referenced bits in the storage key for each page. 
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 1b6199740e98..7a99e6af6372 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -3,7 +3,6 @@ #include -extern long __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; extern char __start_eh_frame[], __stop_eh_frame[]; diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c index 42b0b8ce699a..17bd2e167e07 100644 --- a/arch/sparc/power/hibernate.c +++ b/arch/sparc/power/hibernate.c @@ -9,11 +9,9 @@ #include #include #include +#include #include -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - struct saved_context saved_context; /* diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h index 4dcd34ae194c..77b522694e74 100644 --- a/arch/unicore32/include/mach/pm.h +++ b/arch/unicore32/include/mach/pm.h @@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state); /* Defined in hibernate_asm.S */ extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist); -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - extern struct pbe *restore_pblist; #endif diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c index d75ef8b6cb56..9969ec374abb 100644 --- a/arch/unicore32/kernel/hibernate.c +++ b/arch/unicore32/kernel/hibernate.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "mach/pm.h" diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 7d28c885d238..291226b952a9 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -13,13 +13,11 @@ #include #include #include +#include /* Defined in hibernate_asm_32.S */ extern int restore_image(void); -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - /* Pointer to the temporary resume page tables */ pgd_t *resume_pg_dir; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 35e2bb6c0f37..009947d419a6 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -17,11 +17,9 @@ #include #include #include +#include #include -/* References to section boundaries */ -extern __visible const void __nosave_begin, __nosave_end; - /* Defined in hibernate_asm_64.S */ extern asmlinkage __visible int restore_image(void); diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index f1a24b5c3b90..b58fd667f87b 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -3,6 +3,8 @@ /* References to section boundaries */ +#include + /* * Usage guidelines: * _text, _data: architecture specific, don't use them in arch-independent code @@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[]; /* Start and end of .ctors section - used for constructor calls. */ extern char __ctors_start[], __ctors_end[]; +extern __visible const void __nosave_begin, __nosave_end; + /* function descriptor handling (if any). Override * in asm/sections.h */ #ifndef dereference_function_descriptor