Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, fs: check for fatal signals in do_generic_file_read()
  fs: break out of iomap_file_buffered_write on fatal signals
  base/memory, hotplug: fix a kernel oops in show_valid_zones()
  mm/memory_hotplug.c: check start_pfn in test_pages_in_a_zone()
  jump label: pass kbuild_cflags when checking for asm goto support
  shmem: fix sleeping from atomic context
  kasan: respect /proc/sys/kernel/traceoff_on_warning
  zswap: disable changing params if init fails
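The first two fixes share one pattern: a long read/write loop now checks for a fatal signal on every iteration and bails out with -EINTR, so a task that has already been killed (for example by the OOM killer) stops copying promptly. A minimal sketch of that pattern, with demo_copy() and demo_copy_chunk() as hypothetical stand-ins rather than kernel functions:

#include <linux/errno.h>
#include <linux/sched.h>        /* fatal_signal_pending(), current */

extern int demo_copy_chunk(void);       /* hypothetical unit of work */

/* Bail out of a long-running loop as soon as a fatal signal is pending;
 * report partial progress if some work was already done. */
long demo_copy(unsigned long nr_chunks)
{
        long done = 0;

        while (nr_chunks--) {
                if (fatal_signal_pending(current))
                        return done ? done : -EINTR;
                if (demo_copy_chunk())
                        break;
                done++;
        }
        return done;
}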
commit 7a92cc6bcb

--- a/Makefile
+++ b/Makefile
@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
         KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
         KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
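The jump-label fix matters because scripts/gcc-goto.sh decides 'asm goto' support by test-compiling a small probe; if that probe is not built with the same KBUILD_CFLAGS as the kernel, the answer can differ from what the real build would see. A minimal illustrative probe (not the kernel's actual script) looks roughly like this: a compiler that supports 'asm goto' accepts a jump from inline assembly to a C label, one that does not rejects the file.

/* Hypothetical stand-alone probe, assuming a GCC-compatible compiler;
 * compile it with the same flags as the kernel, e.g. "gcc $KBUILD_CFLAGS -c probe.c". */
int probe_asm_goto(void)
{
        /* empty asm body, but "supported" is declared as a possible jump target */
        asm goto("" : : : : supported);
        return 0;
supported:
        return 1;
}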
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
         struct memory_block *mem = to_memory_block(dev);
         unsigned long start_pfn, end_pfn;
+        unsigned long valid_start, valid_end, valid_pages;
         unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-        struct page *first_page;
         struct zone *zone;
         int zone_shift = 0;
 
         start_pfn = section_nr_to_pfn(mem->start_section_nr);
         end_pfn = start_pfn + nr_pages;
-        first_page = pfn_to_page(start_pfn);
 
         /* The block contains more than one zone can not be offlined. */
-        if (!test_pages_in_a_zone(start_pfn, end_pfn))
+        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                 return sprintf(buf, "none\n");
 
-        zone = page_zone(first_page);
+        zone = page_zone(pfn_to_page(valid_start));
+        valid_pages = valid_end - valid_start;
 
         /* MMOP_ONLINE_KEEP */
         sprintf(buf, "%s", zone->name);
 
         /* MMOP_ONLINE_KERNEL */
-        zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+        zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
         if (zone_shift) {
                 strcat(buf, " ");
                 strcat(buf, (zone + zone_shift)->name);
         }
 
         /* MMOP_ONLINE_MOVABLE */
-        zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+        zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
         if (zone_shift) {
                 strcat(buf, " ");
                 strcat(buf, (zone + zone_shift)->name);
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                 struct blk_dax_ctl dax = { 0 };
                 ssize_t map_len;
 
+                if (fatal_signal_pending(current)) {
+                        ret = -EINTR;
+                        break;
+                }
+
                 dax.sector = dax_iomap_sector(iomap, pos);
                 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                 map_len = dax_map_atomic(iomap->bdev, &dax);
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
         BUG_ON(pos + len > iomap->offset + iomap->length);
 
+        if (fatal_signal_pending(current))
+                return -EINTR;
+
         page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
         if (!page)
                 return -ENOMEM;
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+        unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
                 cond_resched();
 find_page:
+                if (fatal_signal_pending(current)) {
+                        error = -EINTR;
+                        goto out;
+                }
+
                 page = find_get_page(mapping, index);
                 if (!page) {
                         page_cache_sync_readahead(mapping,
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
         if (likely(!kasan_report_enabled()))
                 return;
 
+        disable_trace_on_warning();
+
         info.access_addr = (void *)addr;
         info.access_size = size;
         info.is_write = is_write;
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                         unsigned long *valid_start, unsigned long *valid_end)
 {
         unsigned long pfn, sec_end_pfn;
+        unsigned long start, end;
         struct zone *zone = NULL;
         struct page *page;
         int i;
-        for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+        for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
              pfn < end_pfn;
-             pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+             pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                 /* Make sure the memory section is present first */
                 if (!present_section_nr(pfn_to_section_nr(pfn)))
                         continue;
@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
                         page = pfn_to_page(pfn + i);
                         if (zone && page_zone(page) != zone)
                                 return 0;
+                        if (!zone)
+                                start = pfn + i;
                         zone = page_zone(page);
+                        end = pfn + MAX_ORDER_NR_PAGES;
                 }
         }
-        return 1;
+
+        if (zone) {
+                *valid_start = start;
+                *valid_end = end;
+                return 1;
+        } else {
+                return 0;
+        }
 }
 
 /*
@@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
         long offlined_pages;
         int ret, drain, retry_max, node;
         unsigned long flags;
+        unsigned long valid_start, valid_end;
         struct zone *zone;
         struct memory_notify arg;
 
@@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
                 return -EINVAL;
         /* This makes hotplug much easier...and readable.
            we assume this for now. .*/
-        if (!test_pages_in_a_zone(start_pfn, end_pfn))
+        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                 return -EINVAL;
 
-        zone = page_zone(pfn_to_page(start_pfn));
+        zone = page_zone(pfn_to_page(valid_start));
         node = zone_to_nid(zone);
         nr_pages = end_pfn - start_pfn;
 
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                 struct shrink_control *sc, unsigned long nr_to_split)
 {
         LIST_HEAD(list), *pos, *next;
+        LIST_HEAD(to_remove);
         struct inode *inode;
         struct shmem_inode_info *info;
         struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                 /* Check if there's anything to gain */
                 if (round_up(inode->i_size, PAGE_SIZE) ==
                                 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-                        list_del_init(&info->shrinklist);
+                        list_move(&info->shrinklist, &to_remove);
                         removed++;
-                        iput(inode);
                         goto next;
                 }
 
@@ -454,6 +454,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
         }
         spin_unlock(&sbinfo->shrinklist_lock);
 
+        list_for_each_safe(pos, next, &to_remove) {
+                info = list_entry(pos, struct shmem_inode_info, shrinklist);
+                inode = &info->vfs_inode;
+                list_del_init(&info->shrinklist);
+                iput(inode);
+        }
+
         list_for_each_safe(pos, next, &list) {
                 int ret;
 
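The shmem fix is an instance of a common locking pattern: iput() may sleep (it can end up evicting the inode), so it must not run under the shrinklist spinlock; entries are therefore only moved to a private list while the lock is held, and the sleeping cleanup happens after the unlock. A self-contained sketch of that shape, using hypothetical demo_* names, with kfree() standing in for the step that in shmem is iput():

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
        struct list_head node;
        bool done;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

void demo_reap(void)
{
        struct demo_item *item, *tmp;
        LIST_HEAD(to_release);

        /* Pass 1, under the lock: only unlink/move entries, never sleep. */
        spin_lock(&demo_lock);
        list_for_each_entry_safe(item, tmp, &demo_list, node) {
                if (item->done)
                        list_move(&item->node, &to_release);
        }
        spin_unlock(&demo_lock);

        /* Pass 2, lock dropped: cleanup that might sleep is now safe. */
        list_for_each_entry_safe(item, tmp, &to_release, node) {
                list_del_init(&item->node);
                kfree(item);
        }
}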
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
 
 /* Enable/disable zswap (disabled by default) */
 static bool zswap_enabled;
-module_param_named(enabled, zswap_enabled, bool, 0644);
+static int zswap_enabled_param_set(const char *,
+                                   const struct kernel_param *);
+static struct kernel_param_ops zswap_enabled_param_ops = {
+        .set = zswap_enabled_param_set,
+        .get = param_get_bool,
+};
+module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
 
 /* Crypto compressor to use */
 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 /* used by param callback function */
 static bool zswap_init_started;
 
+/* fatal error during init */
+static bool zswap_init_failed;
+
 /*********************************
 * helpers and fwd declarations
 **********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
         char *s = strstrip((char *)val);
         int ret;
 
+        if (zswap_init_failed) {
+                pr_err("can't set param, initialization failed\n");
+                return -ENODEV;
+        }
+
         /* no change required */
         if (!strcmp(s, *(char **)kp->arg))
                 return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
         return __zswap_param_set(val, kp, NULL, zswap_compressor);
 }
 
+static int zswap_enabled_param_set(const char *val,
+                                   const struct kernel_param *kp)
+{
+        if (zswap_init_failed) {
+                pr_err("can't enable, initialization failed\n");
+                return -ENODEV;
+        }
+
+        return param_set_bool(val, kp);
+}
+
 /*********************************
 * writeback code
 **********************************/
@@ -1201,6 +1226,9 @@ static int __init init_zswap(void)
 dstmem_fail:
         zswap_entry_cache_destroy();
 cache_fail:
+        /* if built-in, we aren't unloaded on failure; don't allow use */
+        zswap_init_failed = true;
+        zswap_enabled = false;
         return -ENOMEM;
 }
 /* must be late so crypto has time to come up */
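For reference, module_param_cb() is the standard way to intercept writes to a module parameter, and the zswap change uses it for exactly that. A hedged, stand-alone sketch of the same guarded-setter shape (the demo_* names are illustrative, not part of the patch): once the module decides the parameter must no longer change, the custom .set hook refuses the write, while reads still go through param_get_bool().

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_enabled;
static bool demo_locked;        /* set when further changes must be refused */

static int demo_enabled_set(const char *val, const struct kernel_param *kp)
{
        if (demo_locked) {
                pr_err("demo: parameter is locked, refusing change\n");
                return -ENODEV;
        }
        return param_set_bool(val, kp); /* normal bool parsing otherwise */
}

static const struct kernel_param_ops demo_enabled_ops = {
        .set = demo_enabled_set,
        .get = param_get_bool,
};
module_param_cb(enabled, &demo_enabled_ops, &demo_enabled, 0644);

MODULE_LICENSE("GPL");

With that in place, writing to /sys/module/<module>/parameters/enabled fails with -ENODEV once demo_locked is set, which is the behaviour zswap wants after a failed init.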