Merge branch 'akpm' (fixes from Andrew Morton)
Merge fixes from Andrew Morton: "5 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: page_alloc: fix zone allocation fairness on UP
  perf: fix perf bug in fork()
  MAINTAINERS: change git URL for mpc5xxx tree
  mm: memcontrol: do not iterate uninitialized memcgs
  ocfs2/dlm: should put mle when goto kill in dlm_assert_master_handler
commit f929d3995d
MAINTAINERS
@@ -5484,7 +5484,7 @@ F:	drivers/macintosh/
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M:	Anatolij Gustschin <agust@denx.de>
 L:	linuxppc-dev@lists.ozlabs.org
-T:	git git://git.denx.de/linux-2.6-agust.git
+T:	git git://git.denx.de/linux-denx-agust.git
 S:	Maintained
 F:	arch/powerpc/platforms/512x/
 F:	arch/powerpc/platforms/52xx/
fs/ocfs2/dlm/dlmmaster.c
@@ -2039,6 +2039,10 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
 	     "and killing the other node now! This node is OK and can continue.\n");
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
+	spin_lock(&dlm->master_lock);
+	if (mle)
+		__dlm_put_mle(mle);
+	spin_unlock(&dlm->master_lock);
 	spin_unlock(&dlm->spinlock);
 	*ret_data = (void *)res;
 	dlm_put(dlm);
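A minimal userspace sketch of the pattern this hunk applies: an error path must drop the reference it holds on a master-list entry, and the drop must happen under the lock that protects the refcount, just as the normal path does. All names below (struct mle, master_lock, handler) are illustrative stand-ins, not the ocfs2 API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mle {				/* stand-in for dlm_master_list_entry */
	int refs;
};

static pthread_mutex_t master_lock = PTHREAD_MUTEX_INITIALIZER;

static void __put_mle(struct mle *mle)	/* caller holds master_lock */
{
	if (--mle->refs == 0) {
		printf("mle freed\n");
		free(mle);
	}
}

static int handler(struct mle *mle, int bad_message)
{
	if (bad_message) {
		/* the bug: returning here without the put leaked the
		 * reference; the fix drops it under master_lock */
		pthread_mutex_lock(&master_lock);
		if (mle)
			__put_mle(mle);
		pthread_mutex_unlock(&master_lock);
		return -1;
	}
	/* normal processing also ends in a put under the lock */
	pthread_mutex_lock(&master_lock);
	__put_mle(mle);
	pthread_mutex_unlock(&master_lock);
	return 0;
}

int main(void)
{
	struct mle *mle = malloc(sizeof(*mle));
	mle->refs = 1;
	return handler(mle, 1) == -1 ? 0 : 1;
}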
kernel/events/core.c
@@ -7948,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child)
 
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
-		if (ret)
+		if (ret) {
+			perf_event_free_task(child);
 			return ret;
+		}
 	}
 
 	return 0;
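A minimal sketch of the pattern behind this hunk: when initialization fails partway through a loop, the contexts already set up must be unwound before returning the error, otherwise they leak. The names here (init_context, free_task_contexts, NR_CTX) are illustrative stand-ins for the perf internals.

#include <stdio.h>
#include <stdlib.h>

#define NR_CTX 4

static void *ctx[NR_CTX];

static int init_context(int n)
{
	ctx[n] = malloc(16);
	return (n == 2) ? -1 : 0;	/* simulate failure on the third context */
}

static void free_task_contexts(void)
{
	for (int n = 0; n < NR_CTX; n++) {
		free(ctx[n]);		/* free(NULL) is a safe no-op */
		ctx[n] = NULL;
	}
}

int init_task_contexts(void)
{
	for (int n = 0; n < NR_CTX; n++) {
		int ret = init_context(n);
		if (ret) {
			free_task_contexts();	/* the added cleanup call */
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return init_task_contexts() ? 0 : 1;
}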
kernel/fork.c
@@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_policy;
 	retval = audit_alloc(p);
 	if (retval)
-		goto bad_fork_cleanup_policy;
+		goto bad_fork_cleanup_perf;
 	/* copy all the process information */
 	shm_init_task(p);
 	retval = copy_semundo(clone_flags, p);
@@ -1566,8 +1566,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	exit_sem(p);
 bad_fork_cleanup_audit:
 	audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
 	perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
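A minimal sketch of the goto-unwind ladder these two hunks preserve: each cleanup label undoes exactly one earlier step and falls through to the older ones, so a failure after a new step must enter the ladder at a new label placed before the older cleanups. The steps and labels below are illustrative, not the fork() internals.

#include <stdio.h>

static int step_a(void) { puts("a: acquired"); return 0; }
static int step_b(void) { puts("b: acquired"); return 0; }
static int step_c(void) { puts("c: failed");  return -1; }
static void undo_a(void) { puts("a: released"); }
static void undo_b(void) { puts("b: released"); }

int copy_resources(void)
{
	int retval;

	retval = step_a();
	if (retval)
		goto out;
	retval = step_b();
	if (retval)
		goto cleanup_a;
	retval = step_c();
	if (retval)
		goto cleanup_b;	/* jumping to cleanup_a here would leak b */
	return 0;

cleanup_b:
	undo_b();
cleanup_a:	/* falls through: b is undone first, then a */
	undo_a();
out:
	return retval;
}

int main(void)
{
	return copy_resources() ? 0 : 1;
}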
mm/memcontrol.c
@@ -292,6 +292,9 @@ struct mem_cgroup {
 	/* vmpressure notifications */
 	struct vmpressure vmpressure;
 
+	/* css_online() has been completed */
+	int initialized;
+
 	/*
 	 * the counter to account for mem+swap usage.
 	 */
@@ -1099,10 +1102,21 @@ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
 	 * skipping css reference should be safe.
 	 */
 	if (next_css) {
-		if ((next_css == &root->css) ||
-		    ((next_css->flags & CSS_ONLINE) &&
-		     css_tryget_online(next_css)))
-			return mem_cgroup_from_css(next_css);
+		struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
+
+		if (next_css == &root->css)
+			return memcg;
+
+		if (css_tryget_online(next_css)) {
+			/*
+			 * Make sure the memcg is initialized:
+			 * mem_cgroup_css_online() orders the
+			 * initialization against setting the flag.
+			 */
+			if (smp_load_acquire(&memcg->initialized))
+				return memcg;
+			css_put(next_css);
+		}
 
 		prev_css = next_css;
 		goto skip_node;
@@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
+	int ret;
 
 	if (css->id > MEM_CGROUP_ID_MAX)
 		return -ENOSPC;
@@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 	}
 	mutex_unlock(&memcg_create_mutex);
 
-	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
+	ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
+	if (ret)
+		return ret;
+
+	/*
+	 * Make sure the memcg is initialized: mem_cgroup_iter()
+	 * orders reading memcg->initialized against its callers
+	 * reading the memcg members.
+	 */
+	smp_store_release(&memcg->initialized, 1);
+
+	return 0;
 }
 
 /*
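A userspace sketch of the publish/consume pairing the memcontrol hunks add: the writer fully initializes the object, then sets the flag with release semantics; readers check the flag with acquire semantics before touching the members. C11 stdatomic stands in for the kernel's smp_store_release()/smp_load_acquire(), and struct memcg with its fields is illustrative.

#include <stdatomic.h>
#include <stdio.h>

struct memcg {
	int data;			/* stands in for the iterator state etc. */
	atomic_int initialized;
};

static void css_online(struct memcg *m)
{
	m->data = 42;			/* initialize everything first... */
	atomic_store_explicit(&m->initialized, 1,	/* ...then publish */
			      memory_order_release);
}

static int iter_next(struct memcg *m)
{
	/* skip memcgs whose initialization has not been published yet */
	if (!atomic_load_explicit(&m->initialized, memory_order_acquire))
		return -1;
	return m->data;			/* safe to read now */
}

int main(void)
{
	struct memcg m = { 0 };
	printf("before online: %d\n", iter_next(&m));	/* -1: skipped */
	css_online(&m);
	printf("after online:  %d\n", iter_next(&m));	/* 42 */
	return 0;
}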
mm/page_alloc.c
@@ -1612,7 +1612,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-	if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
+	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
 	    !zone_is_fair_depleted(zone))
 		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
 
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
-				      high_wmark_pages(zone) -
-				      low_wmark_pages(zone) -
-				      zone_page_state(zone, NR_ALLOC_BATCH));
+			high_wmark_pages(zone) - low_wmark_pages(zone) -
+			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
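A sketch of the likely reason these hunks read the signed counter directly; this rationale is inferred from the "fairness on UP" summary, not stated in the diff. The batch counter can underflow, and viewing a negative value through an unsigned return type turns it into a huge positive number, so an equality or <= 0 test on the unsigned result never detects depletion. zone_page_state() below is a simplified stand-in.

#include <stdio.h>

static long vm_stat = 5;		/* NR_ALLOC_BATCH stand-in */

static unsigned long zone_page_state(void)
{
	return (unsigned long)vm_stat;	/* UP: negatives are not clamped */
}

int main(void)
{
	vm_stat -= 8;			/* batch underflows to -3 */

	printf("unsigned view: %lu, ==0? %d\n",
	       zone_page_state(), zone_page_state() == 0);	/* huge, 0 */
	printf("signed view:   %ld, <=0? %d\n",
	       vm_stat, vm_stat <= 0);				/* -3, 1 */
	return 0;
}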