cgroup: drop unnecessary RCU dancing from __put_css_set()

__put_css_set() does RCU read access on @cgrp across dropping
@cgrp->count so that it can continue accessing @cgrp even if the count
reached zero and destruction of the cgroup commenced.  Given that both
sides - __css_put() and cgroup_destroy_locked() - are cold paths, this
is unnecessary.  Just making cgroup_destroy_locked() grab css_set_lock
while checking @cgrp->count is enough.

Remove the RCU read locking from __put_css_set() and make
cgroup_destroy_locked() read-lock css_set_lock when checking
@cgrp->count.  This will also allow removing @cgrp->count.
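
To make the locking change concrete, here is a minimal userspace sketch of
the same pattern, assuming a toy object with a pthread mutex standing in for
css_set_lock and the kernel atomics; the names (toy_cgroup, toy_put,
toy_destroy, toy_set_lock) are illustrative only and not kernel code.  The
put side drops the count under the lock and the destroy side checks the
count under the same lock, so no RCU read section is needed to keep the
object alive across the decrement.

/* Minimal sketch only -- userspace analogue, not kernel code.
 * Build: gcc -pthread toy_refcount.c -o toy_refcount
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_cgroup {
	int count;		/* stands in for cgrp->count */
	bool releasable;	/* stands in for CGRP_RELEASABLE-style state */
};

/* plays the role of css_set_lock */
static pthread_mutex_t toy_set_lock = PTHREAD_MUTEX_INITIALIZER;

/* Put side: the count is dropped while toy_set_lock is held, so the object
 * cannot be torn down underneath us and no RCU read section is needed. */
static void toy_put(struct toy_cgroup *cgrp)
{
	pthread_mutex_lock(&toy_set_lock);
	if (--cgrp->count == 0 && cgrp->releasable)
		printf("last reference dropped, candidate for release\n");
	pthread_mutex_unlock(&toy_set_lock);
}

/* Destroy side: check the count under the same lock, mirroring the
 * read_lock(&css_set_lock) taken in cgroup_destroy_locked(). */
static int toy_destroy(struct toy_cgroup *cgrp)
{
	bool empty;

	pthread_mutex_lock(&toy_set_lock);
	empty = (cgrp->count == 0);
	pthread_mutex_unlock(&toy_set_lock);

	if (!empty)
		return -1;	/* still referenced: refuse, like -EBUSY */
	free(cgrp);
	return 0;
}

int main(void)
{
	struct toy_cgroup *cgrp = calloc(1, sizeof(*cgrp));

	if (!cgrp)
		return 1;
	cgrp->count = 1;
	cgrp->releasable = true;

	printf("destroy while referenced: %d\n", toy_destroy(cgrp));	/* -1 */
	toy_put(cgrp);
	printf("destroy after last put:   %d\n", toy_destroy(cgrp));	/* 0 */
	return 0;
}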

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
---
 1 file changed, 10 insertions(+), 10 deletions(-)

@@ -407,19 +407,13 @@ static void __put_css_set(struct css_set *cset, int taskexit)
 		list_del(&link->cset_link);
 		list_del(&link->cgrp_link);
 
-		/*
-		 * We may not be holding cgroup_mutex, and if cgrp->count is
-		 * dropped to 0 the cgroup can be destroyed at any time, hence
-		 * rcu_read_lock is used to keep it alive.
-		 */
-		rcu_read_lock();
+		/* @cgrp can't go away while we're holding css_set_lock */
 		if (atomic_dec_and_test(&cgrp->count) &&
 		    notify_on_release(cgrp)) {
 			if (taskexit)
 				set_bit(CGRP_RELEASABLE, &cgrp->flags);
 			check_for_release(cgrp);
 		}
-		rcu_read_unlock();
 
 		kfree(link);
 	}
@@ -4370,11 +4364,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct cgroup *parent = cgrp->parent;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	bool empty;
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
 	lockdep_assert_held(&cgroup_mutex);
 
-	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
+	/*
+	 * css_set_lock prevents @cgrp from being removed while
+	 * __put_css_set() is in progress.
+	 */
+	read_lock(&css_set_lock);
+	empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children);
+	read_unlock(&css_set_lock);
+	if (!empty)
 		return -EBUSY;
 
 	/*
@@ -5051,8 +5053,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 
 static void check_for_release(struct cgroup *cgrp)
 {
-	/* All of these checks rely on RCU to keep the cgroup
-	 * structure alive */
 	if (cgroup_is_releasable(cgrp) &&
 	    !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
 		/*