cgroup: introduce cgroup->subtree_control

cgroup is gaining support for subsystem dependency, which will require
a way to enable a subsystem even when it's not directly configured
through "cgroup.subtree_control".

Previously, cgroup->child_subsys_mask directly reflected
"cgroup.subtree_control" and the enabled subsystems in the child
cgroups.  This patch adds cgroup->subtree_control which
"cgroup.subtree_control" operates on.  cgroup->child_subsys_mask is
now calculated from cgroup->subtree_control by
cgroup_refresh_child_subsys_mask(), which sets it identical to
cgroup->subtree_control for now.

This will allow using cgroup->child_subsys_mask for all the enabled
subsystems including the implicit ones and ->subtree_control for
tracking the explicitly requested ones.  This patch keeps the two
masks identical and doesn't introduce any behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
commit 667c249171 (parent c29adf24e0)
Author: Tejun Heo <tj@kernel.org>
Date:   2014-07-08 18:02:56 -04:00
2 files changed, 36 insertions(+), 18 deletions(-)
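The split described in the message can be pictured with a small standalone sketch. This is illustrative only, not kernel code: the demo_* names, the subsystem bits, and the main() driver are invented for the example; only the two-field layout and the refresh helper mirror the patch below.

/*
 * Illustrative model of the two masks introduced by this patch.
 * Not kernel code: the demo_* names and subsystem bits are invented.
 */
#include <stdio.h>

enum { DEMO_CPU = 1 << 0, DEMO_MEMORY = 1 << 1, DEMO_IO = 1 << 2 };

struct demo_cgroup {
        unsigned int subtree_control;   /* what "cgroup.subtree_control" holds */
        unsigned int child_subsys_mask; /* effective mask, derived from the above */
};

/* mirrors cgroup_refresh_child_subsys_mask(): a plain copy for now */
static void demo_refresh(struct demo_cgroup *cgrp)
{
        cgrp->child_subsys_mask = cgrp->subtree_control;
}

int main(void)
{
        struct demo_cgroup cgrp = { 0, 0 };

        cgrp.subtree_control |= DEMO_MEMORY;    /* e.g. "+memory" written by userspace */
        demo_refresh(&cgrp);
        printf("subtree_control=%#x child_subsys_mask=%#x\n",
               cgrp.subtree_control, cgrp.child_subsys_mask);
        return 0;
}

Keeping the derivation in one helper means later patches can fold implicit dependencies into ->child_subsys_mask without touching any of the places that update ->subtree_control.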

--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h

@@ -203,7 +203,13 @@ struct cgroup {
         struct kernfs_node *kn;         /* cgroup kernfs entry */
         struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */

-        /* the bitmask of subsystems enabled on the child cgroups */
+        /*
+         * The bitmask of subsystems enabled on the child cgroups.
+         * ->subtree_control is the one configured through
+         * "cgroup.subtree_control" while ->child_subsys_mask is the
+         * effective one which may have more subsystems enabled.
+         */
+        unsigned int subtree_control;
         unsigned int child_subsys_mask;

         /* Private pointers for each registered subsystem */

--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c

@@ -1036,6 +1036,11 @@ static void cgroup_put(struct cgroup *cgrp)
         css_put(&cgrp->self);
 }

+static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+{
+        cgrp->child_subsys_mask = cgrp->subtree_control;
+}
+
 /**
  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
  * @kn: the kernfs_node being serviced
@@ -1208,12 +1213,15 @@ static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
                 up_write(&css_set_rwsem);

                 src_root->subsys_mask &= ~(1 << ssid);
-                src_root->cgrp.child_subsys_mask &= ~(1 << ssid);
+                src_root->cgrp.subtree_control &= ~(1 << ssid);
+                cgroup_refresh_child_subsys_mask(&src_root->cgrp);

                 /* default hierarchy doesn't enable controllers by default */
                 dst_root->subsys_mask |= 1 << ssid;
-                if (dst_root != &cgrp_dfl_root)
-                        dst_root->cgrp.child_subsys_mask |= 1 << ssid;
+                if (dst_root != &cgrp_dfl_root) {
+                        dst_root->cgrp.subtree_control |= 1 << ssid;
+                        cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
+                }

                 if (ss->bind)
                         ss->bind(css);
@@ -2454,7 +2462,7 @@ static int cgroup_controllers_show(struct seq_file *seq, void *v)
 {
         struct cgroup *cgrp = seq_css(seq)->cgroup;

-        cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->child_subsys_mask);
+        cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
         return 0;
 }
@@ -2463,7 +2471,7 @@ static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
 {
         struct cgroup *cgrp = seq_css(seq)->cgroup;

-        cgroup_print_ss_mask(seq, cgrp->child_subsys_mask);
+        cgroup_print_ss_mask(seq, cgrp->subtree_control);
         return 0;
 }
@@ -2608,7 +2616,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
         for_each_subsys(ss, ssid) {
                 if (enable & (1 << ssid)) {
-                        if (cgrp->child_subsys_mask & (1 << ssid)) {
+                        if (cgrp->subtree_control & (1 << ssid)) {
                                 enable &= ~(1 << ssid);
                                 continue;
                         }
@@ -2616,7 +2624,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                         /* unavailable or not enabled on the parent? */
                         if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
                             (cgroup_parent(cgrp) &&
-                             !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ssid)))) {
+                             !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
                                 ret = -ENOENT;
                                 goto out_unlock;
                         }
@@ -2644,14 +2652,14 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                                 return restart_syscall();
                         }
                 } else if (disable & (1 << ssid)) {
-                        if (!(cgrp->child_subsys_mask & (1 << ssid))) {
+                        if (!(cgrp->subtree_control & (1 << ssid))) {
                                 disable &= ~(1 << ssid);
                                 continue;
                         }

                         /* a child has it enabled? */
                         cgroup_for_each_live_child(child, cgrp) {
-                                if (child->child_subsys_mask & (1 << ssid)) {
+                                if (child->subtree_control & (1 << ssid)) {
                                         ret = -EBUSY;
                                         goto out_unlock;
                                 }
@@ -2665,7 +2673,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
         }

         /*
-         * Except for the root, child_subsys_mask must be zero for a cgroup
+         * Except for the root, subtree_control must be zero for a cgroup
          * with tasks so that child cgroups don't compete against tasks.
          */
         if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
@@ -2673,8 +2681,9 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                 goto out_unlock;
         }

-        cgrp->child_subsys_mask |= enable;
-        cgrp->child_subsys_mask &= ~disable;
+        cgrp->subtree_control |= enable;
+        cgrp->subtree_control &= ~disable;
+        cgroup_refresh_child_subsys_mask(cgrp);

         /* create new csses */
         for_each_subsys(ss, ssid) {
@@ -2713,8 +2722,9 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
         return ret ?: nbytes;

 err_undo_css:
-        cgrp->child_subsys_mask &= ~enable;
-        cgrp->child_subsys_mask |= disable;
+        cgrp->subtree_control &= ~enable;
+        cgrp->subtree_control |= disable;
+        cgroup_refresh_child_subsys_mask(cgrp);

         for_each_subsys(ss, ssid) {
                 if (!(enable & (1 << ssid)))
@@ -4428,10 +4438,12 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,

         /*
          * On the default hierarchy, a child doesn't automatically inherit
-         * child_subsys_mask from the parent. Each is configured manually.
+         * subtree_control from the parent. Each is configured manually.
          */
-        if (!cgroup_on_dfl(cgrp))
-                cgrp->child_subsys_mask = parent->child_subsys_mask;
+        if (!cgroup_on_dfl(cgrp)) {
+                cgrp->subtree_control = parent->subtree_control;
+                cgroup_refresh_child_subsys_mask(cgrp);
+        }

         kernfs_activate(kn);
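
For reference, a minimal userspace sketch of driving the "cgroup.subtree_control" file that this patch reworks. The mount path is an assumption (adjust it to wherever the default hierarchy is mounted on your system); a write of "+memory" feeds the enable mask handled above, "-memory" the disable mask.

/*
 * Minimal sketch: enable the memory controller for the children of a
 * cgroup by writing to its "cgroup.subtree_control" file.  The path
 * below is an assumption, not something defined by this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/fs/cgroup/cgroup.subtree_control";
        const char *req = "+memory";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /*
         * Enabling fails with -ENOENT if the controller is unavailable or
         * not enabled on the parent; disabling ("-memory") fails with
         * -EBUSY while a child cgroup still has it enabled (see above).
         */
        if (write(fd, req, strlen(req)) < 0)
                perror("write");
        close(fd);
        return 0;
}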