sched: dynamically update the root-domain span/online maps

The baseline code statically builds the span maps when the domain is formed.
Previous attempts at dynamically updating the maps caused a suspend-to-ram
regression, which should now be fixed.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
CC: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author: Gregory Haskins, 2008-01-25 21:08:26 +01:00 (committed by Ingo Molnar)
parent f85d6c7168
commit dc938520d2
1 changed file with 19 additions and 12 deletions

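For orientation before the diff: a minimal toy model (plain userspace C; all names here are illustrative stand-ins, not kernel code) of the bookkeeping this patch switches to. Root domains now start with empty span/online masks, each CPU's bits are set and cleared as its runqueue attaches to and detaches from a domain, and a domain is freed when its refcount drops to zero.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins: bit n of a uint64_t models CPU n's bit in a cpumask_t. */
struct toy_root_domain {
	int refcount;
	uint64_t span;		/* CPUs that are members of this domain */
	uint64_t online;	/* subset of span that is currently online */
};

static uint64_t toy_online_map = 0xF;	/* models cpu_online_map: CPUs 0-3 */

/* Like the patched init_rootdomain(): both masks start empty. */
static void toy_init_rootdomain(struct toy_root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));
}

/* Mirrors the new rq_attach_root() bookkeeping for one CPU. */
static void toy_attach_root(int cpu, struct toy_root_domain **cur,
			    struct toy_root_domain *rd)
{
	struct toy_root_domain *old_rd = *cur;

	if (old_rd) {
		old_rd->span   &= ~(1ULL << cpu);	/* cpu_clear(..., old_rd->span)   */
		old_rd->online &= ~(1ULL << cpu);	/* cpu_clear(..., old_rd->online) */
		if (--old_rd->refcount == 0)
			free(old_rd);
	}

	rd->refcount++;
	rd->span |= 1ULL << cpu;		/* cpu_set(..., rd->span) */
	if (toy_online_map & (1ULL << cpu))	/* cpu_isset(..., cpu_online_map) */
		rd->online |= 1ULL << cpu;
	*cur = rd;
}

int main(void)
{
	struct toy_root_domain def_rd;

	toy_init_rootdomain(&def_rd);	/* like init_defrootdomain()     */
	def_rd.refcount = 1;		/* default domain is never freed */

	for (int cpu = 0; cpu < 4; cpu++) {
		struct toy_root_domain *cur = NULL;	/* each CPU's rq->rd */
		toy_attach_root(cpu, &cur, &def_rd);
	}

	printf("span=%#llx online=%#llx refs=%d\n",
	       (unsigned long long)def_rd.span,
	       (unsigned long long)def_rd.online, def_rd.refcount);
	return 0;
}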

@@ -359,8 +359,6 @@ struct rt_rq {
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
  *
- * By default the system creates a single root-domain with all cpus as
- * members (mimicking the global state we have today).
  */
 struct root_domain {
 	atomic_t refcount;
@@ -375,6 +373,10 @@ struct root_domain {
 	atomic_t rto_count;
 };
 
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
 static struct root_domain def_root_domain;
 
 #endif
@@ -5859,6 +5861,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 			class->leave_domain(rq);
 	}
 
+	cpu_clear(rq->cpu, old_rd->span);
+	cpu_clear(rq->cpu, old_rd->online);
+
 	if (atomic_dec_and_test(&old_rd->refcount))
 		kfree(old_rd);
 }
@@ -5866,6 +5871,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
+	cpu_set(rq->cpu, rd->span);
+	if (cpu_isset(rq->cpu, cpu_online_map))
+		cpu_set(rq->cpu, rd->online);
+
 	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
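Why the attach path rechecks cpu_online_map each time (a hedged sketch; the suspend-to-ram interaction shown here is inferred from the changelog, not spelled out in the patch): a CPU that went offline and detached can later reattach, and its online bit must then be recomputed from the current online map rather than from a mask frozen at domain-creation time.

#include <stdint.h>
#include <stdio.h>

/* Toy masks, bit n == CPU n; illustrative names only. */
static uint64_t online_map = 0xF;	/* CPUs 0-3 online */
static uint64_t rd_span, rd_online;	/* one root domain */

static void attach(int cpu)
{
	rd_span |= 1ULL << cpu;
	if (online_map & (1ULL << cpu))	/* the cpu_isset() recheck */
		rd_online |= 1ULL << cpu;
}

static void detach(int cpu)
{
	rd_span   &= ~(1ULL << cpu);
	rd_online &= ~(1ULL << cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		attach(cpu);

	/* Suspend: CPU 3 is hot-unplugged and leaves the domain... */
	online_map &= ~(1ULL << 3);
	detach(3);

	/* ...resume: CPU 3 returns and reattaches; nothing goes stale. */
	online_map |= 1ULL << 3;
	attach(3);

	printf("span=%#llx online=%#llx\n",
	       (unsigned long long)rd_span, (unsigned long long)rd_online);
	return 0;
}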
@@ -5874,23 +5883,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-static void init_rootdomain(struct root_domain *rd, const cpumask_t *map)
+static void init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	rd->span = *map;
-	cpus_and(rd->online, rd->span, cpu_online_map);
+	cpus_clear(rd->span);
+	cpus_clear(rd->online);
 }
 
 static void init_defrootdomain(void)
 {
-	cpumask_t cpus = CPU_MASK_ALL;
-
-	init_rootdomain(&def_root_domain, &cpus);
+	init_rootdomain(&def_root_domain);
 	atomic_set(&def_root_domain.refcount, 1);
 }
 
-static struct root_domain *alloc_rootdomain(const cpumask_t *map)
+static struct root_domain *alloc_rootdomain(void)
 {
 	struct root_domain *rd;
 
@@ -5898,7 +5905,7 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 	if (!rd)
 		return NULL;
 
-	init_rootdomain(rd, map);
+	init_rootdomain(rd);
 
 	return rd;
 }
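The signature change above also changes the contract for callers: they no longer decide the span up front. A minimal sketch of the new shape (toy types again; in the kernel, membership arrives via rq_attach_root() as each runqueue joins):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct toy_root_domain {
	int refcount;
	uint64_t span, online;
};

/* After the patch: no cpumask argument, everything starts zeroed. */
static struct toy_root_domain *toy_alloc_rootdomain(void)
{
	struct toy_root_domain *rd = malloc(sizeof(*rd));

	if (!rd)
		return NULL;
	memset(rd, 0, sizeof(*rd));	/* span == online == empty */
	return rd;
}

int main(void)
{
	struct toy_root_domain *rd = toy_alloc_rootdomain();

	if (!rd)
		return 1;
	rd->span |= 1ULL << 0;	/* membership added per CPU on attach */
	free(rd);
	return 0;
}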
@@ -6319,7 +6326,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
 
-	rd = alloc_rootdomain(cpu_map);
+	rd = alloc_rootdomain();
 	if (!rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
 		return -ENOMEM;
@@ -6894,7 +6901,6 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq_attach_root(rq, &def_root_domain);
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
@@ -6903,6 +6909,7 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
 		rq->rt.overloaded = 0;
+		rq_attach_root(rq, &def_root_domain);
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
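One subtlety in the last two hunks: rq_attach_root() moves from before to after the rq->rt initialization. Presumably this is because attaching runs the sched-class join_domain() hooks, which may read the runqueue's rt state; a toy illustration of that ordering constraint (hypothetical names, not the kernel hooks):

#include <assert.h>

#define TOY_MAX_RT_PRIO 100

struct toy_rq {
	int rt_highest_prio;	/* stands in for rq->rt.highest_prio */
	int attached;
};

/* A join_domain()-style hook: it must only see initialized rt state. */
static void toy_join_domain(struct toy_rq *rq)
{
	assert(rq->rt_highest_prio == TOY_MAX_RT_PRIO);
	rq->attached = 1;
}

int main(void)
{
	struct toy_rq rq = { 0 };

	/* As in the patched sched_init(): rt fields first... */
	rq.rt_highest_prio = TOY_MAX_RT_PRIO;
	/* ...then attach, so the hook observes valid state. */
	toy_join_domain(&rq);
	return rq.attached ? 0 : 1;
}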