sched: Separate out build of CPU sched groups from __build_sched_domains

... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105928.GJ29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Andreas Herrmann 2009-08-18 12:59:28 +02:00 committed by Ingo Molnar
parent a2af04cdbb
commit 86548096f2
1 changed file with 9 additions and 9 deletions

View File

@@ -8586,6 +8586,13 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
d->send_covered, d->tmpmask);
break;
#endif
case SD_LV_CPU: /* set up physical groups */
cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
if (!cpumask_empty(d->nodemask))
init_sched_build_groups(d->nodemask, cpu_map,
&cpu_to_phys_group,
d->send_covered, d->tmpmask);
break;
default:
break;
}
@@ -8631,15 +8638,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
}
/* Set up physical groups */
for (i = 0; i < nr_node_ids; i++) {
cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
if (cpumask_empty(d.nodemask))
continue;
init_sched_build_groups(d.nodemask, cpu_map,
&cpu_to_phys_group,
d.send_covered, d.tmpmask);
}
for (i = 0; i < nr_node_ids; i++)
build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
#ifdef CONFIG_NUMA
/* Set up node groups */