Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/slub.c: run free_partial() outside of the kmem_cache_node->list_lock
  rmap: fix compound check logic in page_remove_file_rmap
  mm, rmap: fix false positive VM_BUG() in page_add_file_rmap()
  mm/page_alloc.c: recalculate some of node threshold when on/offline memory
  mm/page_alloc.c: fix wrong initialization when sysctl_min_unmapped_ratio changes
  thp: move shmem_huge_enabled() outside of SYSFS ifdef
  revert "ARM: keystone: dts: add psci command definition"
  rapidio: dereferencing an error pointer
commit 85e97be32c
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -70,14 +70,6 @@ psci {
 		cpu_on = <0x84000003>;
 	};
 
-	psci {
-		compatible = "arm,psci";
-		method = "smc";
-		cpu_suspend = <0x84000001>;
-		cpu_off = <0x84000002>;
-		cpu_on = <0x84000003>;
-	};
-
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch)
 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 					   long timeout)
 {
-	struct rio_channel *ch = NULL;
-	struct rio_channel *new_ch = NULL;
+	struct rio_channel *ch;
+	struct rio_channel *new_ch;
 	struct conn_req *req;
 	struct cm_peer *peer;
 	int found = 0;
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
 	spin_unlock_bh(&ch->lock);
 	riocm_put_channel(ch);
+	ch = NULL;
 	kfree(req);
 
 	down_read(&rdev_sem);
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 	if (!found) {
 		/* If peer device object not found, simply ignore the request */
 		err = -ENODEV;
-		goto err_nodev;
+		goto err_put_new_ch;
 	}
 
 	new_ch->rdev = peer->rdev;
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 	*new_ch_id = new_ch->id;
 	return new_ch;
 
+err_put_new_ch:
+	spin_lock_bh(&idr_lock);
+	idr_remove(&ch_idr, new_ch->id);
+	spin_unlock_bh(&idr_lock);
+	riocm_put_channel(new_ch);
+
 err_put:
-	riocm_put_channel(ch);
+	if (ch)
+		riocm_put_channel(ch);
 err_nodev:
-	if (new_ch) {
-		spin_lock_bh(&idr_lock);
-		idr_remove(&ch_idr, new_ch->id);
-		spin_unlock_bh(&idr_lock);
-		riocm_put_channel(new_ch);
-	}
 	*new_ch_id = 0;
 	return ERR_PTR(err);
 }
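Why the rework above matters, in miniature: the channel allocation path returns an ERR_PTR() value on failure, and an error cookie is non-NULL, so the old `if (new_ch)` cleanup path went on to dereference it. Below is a minimal userspace sketch of that trap, not kernel code: ERR_PTR/IS_ERR are redefined locally to mirror include/linux/err.h, and `struct channel`/`ch_alloc` are invented for the demo.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct channel { int id; };

static struct channel valid_channel = { .id = 42 };

/* Fails the way the rio_cm allocation path does: error cookie, never NULL. */
static struct channel *ch_alloc(int fail)
{
	return fail ? (struct channel *)ERR_PTR(-ENOMEM) : &valid_channel;
}

int main(void)
{
	struct channel *new_ch = ch_alloc(1);

	if (new_ch)		/* old-style test: always true for ERR_PTR() */
		printf("non-NULL, so the old cleanup would touch new_ch->id\n");

	if (IS_ERR(new_ch))	/* what actually distinguishes the failure */
		printf("error cookie detected, cleanup must be skipped\n");

	return 0;
}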
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4757,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else /* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
 		zone->node = nid;
-		pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-						/ 100;
-		pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
 		zone->name = zone_names[j];
 		zone->zone_pgdat = pgdat;
@@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_wmarks();
 	refresh_zone_stat_thresholds();
 	setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+	setup_min_unmapped_ratio();
+	setup_min_slab_ratio();
+#endif
+
 	return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6842,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+static void setup_min_unmapped_ratio(void)
 {
-	struct pglist_data *pgdat;
+	pg_data_t *pgdat;
 	struct zone *zone;
-	int rc;
-
-	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (rc)
-		return rc;
 
 	for_each_online_pgdat(pgdat)
-		pgdat->min_slab_pages = 0;
+		pgdat->min_unmapped_pages = 0;
 
 	for_each_zone(zone)
 		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
+}
+
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	setup_min_unmapped_ratio();
+
 	return 0;
 }
 
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+static void setup_min_slab_ratio(void)
 {
-	struct pglist_data *pgdat;
+	pg_data_t *pgdat;
 	struct zone *zone;
-	int rc;
-
-	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (rc)
-		return rc;
 
 	for_each_online_pgdat(pgdat)
 		pgdat->min_slab_pages = 0;
 
 	for_each_zone(zone)
 		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
 				sysctl_min_slab_ratio) / 100;
+}
+
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	setup_min_slab_ratio();
+
+	return 0;
+}
 #endif
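The page_alloc diff combines a refactor with a one-word bug fix: the threshold recalculation moves out of the sysctl handlers into setup_min_unmapped_ratio()/setup_min_slab_ratio() so the init and memory on/offline paths can reuse it, and the min_unmapped handler now resets min_unmapped_pages rather than min_slab_pages. A rough userspace model of that shape follows; the single fake node, the handler signature, and the constants are all stand-ins, and proc_dointvec_minmax() is elided.

#include <stdio.h>

/* Stand-in state: one "node" with a pool of managed pages. */
static long managed_pages = 100000;
static int sysctl_min_unmapped_ratio = 1;	/* percent */
static long min_unmapped_pages;

/*
 * The patch's shape: recalculation lives in one helper so that the
 * sysctl write path, boot-time init, and memory hotplug can all call
 * it.  The helper resets the counter before re-deriving it.
 */
static void setup_min_unmapped_ratio(void)
{
	min_unmapped_pages = 0;
	min_unmapped_pages += managed_pages * sysctl_min_unmapped_ratio / 100;
}

/* sysctl write path analogue: store the new ratio, then delegate. */
static int min_unmapped_ratio_handler(int new_ratio)
{
	sysctl_min_unmapped_ratio = new_ratio;
	setup_min_unmapped_ratio();
	return 0;
}

int main(void)
{
	setup_min_unmapped_ratio();	/* init_per_zone_wmark_min() analogue */
	printf("boot:    %ld\n", min_unmapped_pages);

	managed_pages += 50000;		/* memory-hotplug analogue */
	setup_min_unmapped_ratio();
	printf("hotplug: %ld\n", min_unmapped_pages);

	min_unmapped_ratio_handler(2);	/* sysctl write analogue */
	printf("sysctl:  %ld\n", min_unmapped_pages);
	return 0;
}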
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
 		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
 	} else {
-		if (PageTransCompound(page)) {
-			VM_BUG_ON_PAGE(!PageLocked(page), page);
+		if (PageTransCompound(page) && page_mapping(page)) {
+			VM_WARN_ON_ONCE(!PageLocked(page));
+
 			SetPageDoubleMap(compound_head(page));
 			if (PageMlocked(page))
 				clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 {
 	int i, nr = 1;
 
-	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 	lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
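The one-word change in page_remove_file_rmap() swaps a config-dependent predicate for a structural one: when CONFIG_TRANSPARENT_HUGEPAGE is disabled, PageTransHuge()-style helpers compile to constant false, so the old assertion could fire on a perfectly valid compound head, while PageHead() depends only on the page itself. The toy model below shows that failure mode; the struct page and flag plumbing are invented for the demo, and the config macro is a stand-in. Compile once plain and once with -DCONFIG_TRANSPARENT_HUGEPAGE to compare.

#include <stdbool.h>
#include <stdio.h>

struct page { bool head; };

static bool PageHead(const struct page *p) { return p->head; }

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* With the feature on, the helper reduces to a head-page check. */
static bool PageTransHuge(const struct page *p) { return PageHead(p); }
#else
/* With the feature compiled out, it is constant false. */
static bool PageTransHuge(const struct page *p) { (void)p; return false; }
#endif

int main(void)
{
	struct page compound_head_page = { .head = true };
	bool compound = true;

	/* Old check: trips when the config is off, despite a valid head. */
	printf("old assertion would %s\n",
	       compound && !PageTransHuge(&compound_head_page) ? "fire" : "pass");

	/* New check: keyed to page structure alone. */
	printf("new assertion would %s\n",
	       compound && !PageHead(&compound_head_page) ? "fire" : "pass");
	return 0;
}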
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
 
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
 			return false;
 	}
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #else /* !CONFIG_SHMEM */
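The shmem hunks are pure preprocessor surgery: shmem_huge_enabled() sat inside an `#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)` region, so a build that selected the huge-pagecache feature without SYSFS lost the definition. The patch closes the combined block early and reopens a plain `#ifdef` for the function. A stand-alone sketch of the same move, with FEATURE and SYSFS as demo macros rather than the real config symbols; try compiling with -DFEATURE alone, then with both flags:

#include <stdio.h>

#if defined(FEATURE) && defined(SYSFS)
/* Only the user-visible knob needs both options. */
static void sysfs_knob(void) { puts("sysfs knob registered"); }
#endif /* FEATURE && SYSFS */

#ifdef FEATURE			/* moved out of the && block, as in the patch */
static int feature_enabled(void) { return 1; }
#endif /* FEATURE */

int main(void)
{
#if defined(FEATURE) && defined(SYSFS)
	sysfs_knob();
#endif
#ifdef FEATURE
	/* Links even without SYSFS, which the pre-patch layout broke. */
	printf("feature_enabled() = %d\n", feature_enabled());
#endif
	return 0;
}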
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
+	LIST_HEAD(discard);
 	struct page *page, *h;
 
 	BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
-			discard_slab(s, page);
+			list_add(&page->lru, &discard);
 		} else {
 			list_slab_objects(s, page,
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
+
+	list_for_each_entry_safe(page, h, &discard, lru)
+		discard_slab(s, page);
 }
 
 /*
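The slub fix is a classic lock-scope pattern: rather than running the teardown path while holding kmem_cache_node->list_lock, empty slabs are merely unlinked onto a local discard list under the lock and destroyed after the unlock. A userspace sketch of the same pattern, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel's spinlock and struct list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slab {
	int inuse;
	struct slab *next;
};

static struct slab *partial;			/* protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_partial(void)
{
	struct slab *discard = NULL;		/* private, needs no lock */
	struct slab **pp, *s;

	pthread_mutex_lock(&list_lock);
	pp = &partial;
	while ((s = *pp) != NULL) {
		if (!s->inuse) {
			*pp = s->next;		/* remove_partial() analogue */
			s->next = discard;	/* list_add(..., &discard) analogue */
			discard = s;
		} else {
			pp = &s->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	while ((s = discard) != NULL) {		/* discard_slab() after unlock */
		discard = s->next;
		free(s);
	}
}

int main(void)
{
	/* Build a mixed partial list: alternating empty and in-use slabs. */
	for (int i = 0; i < 4; i++) {
		struct slab *s = malloc(sizeof(*s));
		s->inuse = i & 1;
		s->next = partial;
		partial = s;
	}
	free_partial();
	printf("empty slabs freed outside the lock\n");
	return 0;
}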