Revert "cpuset: Make cpuset hotplug synchronous"

This reverts commit 1599a185f0.

This and the previous commit led to another circular locking scenario
and the scenario which is fixed by this commit no longer exists after
e8b3f8db7a ("workqueue/hotplug: simplify workqueue_offline_cpu()")
which removes work item flushing from hotplug path.

Revert it for now.

Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Tejun Heo 2017-12-04 14:41:11 -08:00
parent 52cf373c37
commit 11db855c3d
4 changed files with 32 additions and 22 deletions

View File

@@ -52,7 +52,9 @@ static inline void cpuset_dec(void)
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -165,11 +167,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {

View File

@@ -2277,8 +2277,15 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
- * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
+ * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
  * This function is called after either CPU or memory configuration has
  * changed and updates cpuset accordingly.  The top_cpuset is always
@@ -2293,7 +2300,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
  * Note that CPU offlining during suspend is ignored.  We don't modify
  * cpusets across suspend/resume cycles at all.
  */
-static void cpuset_hotplug(bool use_cpu_hp_lock)
+static void cpuset_hotplug_workfn(struct work_struct *work)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
@@ -2351,31 +2358,25 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated) {
-		if (use_cpu_hp_lock)
-			rebuild_sched_domains();
-		else {
-			/* Acquiring cpu_hotplug_lock is not required.
-			 * When cpuset_hotplug() is called in hotplug path,
-			 * cpu_hotplug_lock is held by the hotplug context
-			 * which is waiting for cpuhp_thread_fun to indicate
-			 * completion of callback.
-			 */
-			mutex_lock(&cpuset_mutex);
-			rebuild_sched_domains_cpuslocked();
-			mutex_unlock(&cpuset_mutex);
-		}
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
+		rebuild_sched_domains();
 	}
 }
 
-static void cpuset_hotplug_workfn(struct work_struct *work)
-{
-	cpuset_hotplug(true);
-}
-
 void cpuset_update_active_cpus(void)
 {
-	cpuset_hotplug(false);
+	/*
+	 * We're inside cpu hotplug critical region which usually nests
+	 * inside cgroup synchronization.  Bounce actual hotplug processing
+	 * to a work item to avoid reverse locking order.
+	 */
+	schedule_work(&cpuset_hotplug_work);
+}
+
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
 }
 
 /*
/* /*

View File

@@ -204,6 +204,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */

View File

@@ -5624,6 +5624,7 @@ static void cpuset_cpu_active(void)
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }